// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY	2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES	(DESC_QI_AEAD_GIVENC_LEN + \
				 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN	(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};
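/*
 * Illustrative note (a restatement of how the code below uses ctx->key, not
 * new behaviour): for authenc-style algorithms both keys live back to back
 * in one DMA mapping, so the per-key addresses are simple offsets:
 *
 *	ctx->key:  [ auth split key, padded to adata.keylen_pad | enc key ]
 *	           ^ key_dma                    key_dma + adata.keylen_pad ^
 *
 * which is why aead_set_sh_desc() derives
 *	cdata.key_virt = ctx->key + adata.keylen_pad;
 *	cdata.key_dma  = ctx->key_dma + adata.keylen_pad;
 */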
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}
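/*
 * Note on the desc_inline_query() calls above (describing the driver's own
 * usage, laid out for reference): data_len[0] is the padded auth key length
 * and data_len[1] the cipher key length. On success, bit i of inl_mask is
 * set iff data item i still fits inline in the shared descriptor, hence
 *
 *	ctx->adata.key_inline = !!(inl_mask & 1);	// auth key inline?
 *	ctx->cdata.key_inline = !!(inl_mask & 2);	// cipher key inline?
 *
 * Keys that do not fit are referenced through their DMA addresses instead.
 */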
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->adata.keylen_pad + keys.enckeylen,
				   ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		return err;

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

	memzero_explicit(&keys, sizeof(keys));
	return err;
}
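/*
 * For reference (a sketch of the generic kernel convention from
 * <crypto/authenc.h>, not something this file defines): the key blob parsed
 * by crypto_authenc_extractkeys() above is the standard authenc(...) format,
 * an rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian
 * encryption key length, followed by the raw keys:
 *
 *	[ rtattr | crypto_authenc_key_param { __be32 enckeylen } |
 *	  auth key | enc key (enckeylen bytes) ]
 *
 * crypto_authenc_extractkeys() fills keys.authkey/keys.authkeylen and
 * keys.enckey/keys.enckeylen from that layout.
 */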
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
				   ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}
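/*
 * Illustrative arithmetic for the rem_bytes checks above (derived directly
 * from the code): the CAAM descriptor buffer holds CAAM_DESC_BYTES_MAX bytes
 * and must accommodate the job descriptor I/O section (DESC_JOB_IO_LEN), the
 * shared descriptor body (e.g. DESC_QI_GCM_ENC_LEN) and, if inlined, the key
 * itself. So the key is inlined only when
 *
 *	CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - keylen >= DESC_QI_GCM_ENC_LEN
 *
 * and otherwise the shared descriptor references the key via ctx->key_dma.
 */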
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}
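/*
 * Layout reminder for the keylen - 4 adjustments above (per RFC 4106,
 * restated here for clarity rather than adding behaviour): the user key
 * material is
 *
 *	[ AES key (16/24/32 bytes) | 4-byte salt ]
 *
 * The salt stays in ctx->key right behind the AES key and is folded into
 * the nonce by the shared descriptor, so only keylen - 4 bytes count as
 * AES key.
 */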
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(jrdev, "key size mismatch\n");
		goto badkey;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
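/*
 * Note on the xts_skcipher_setkey() length check above (standard XTS
 * convention, not new driver behaviour): an XTS key is two equal-size AES
 * keys concatenated -- one for the data units, one for the tweak -- so only
 * 2 * AES_MIN_KEY_SIZE (32) or 2 * AES_MAX_KEY_SIZE (64) total bytes are
 * accepted.
 */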
/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core initialized drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else /* (type == DECRYPT) */
				desc = ctx->sh_desc_dec;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (!IS_ERR_OR_NULL(drv_ctx))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}
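/*
 * get_drv_ctx() above is a double-checked lazy init: the unlocked read
 * avoids taking ctx->lock on every request once the per-operation driver
 * context exists, and the second read under the lock ensures two CPUs
 * racing on first use do not both call caam_drv_ctx_init(). Note that an
 * ERR_PTR result is also cached in ctx->drv_ctx[type], which is why callers
 * test it with IS_ERR_OR_NULL() rather than a plain NULL check.
 */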
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status))
		ecode = caam_jr_strstatus(qidev, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}
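/*
 * Completion flow note (describing the code above, not new behaviour):
 * aead_done() runs from the QI response path with the status word produced
 * by CAAM. A non-zero status is translated into an errno via
 * caam_jr_strstatus(), all DMA mappings are released before the crypto API
 * callback runs, and the extended descriptor returns to the qi_cache pool.
 */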
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(qidev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(qidev, "unable to map destination\n");
				dma_unmap_sg(qidev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *	pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *	overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *	pad input S/G, if needed
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
				 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
	else
		qm_sg_ents = pad_sg_nents(qm_sg_ents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents <= 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}
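/*
 * Shape of the AEAD frame built above (a summary of the code, laid out for
 * reference):
 *
 *   input  (fd_sgt[1]): [ assoclen (4B, CAAM endianness) | IV | src S/G ]
 *                       in_len = 4 + ivsize + assoclen + cryptlen
 *   output (fd_sgt[0]): the src entries are reused when dst == src,
 *                       otherwise the dst S/G starting at qm_sg_index;
 *                       out_len = assoclen + cryptlen +/- authsize
 *                       (+ on encrypt, - on decrypt)
 */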
static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 false);
}

static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct skcipher_edesc *edesc;
	struct skcipher_request *req = drv_req->app_ctx;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		ecode = caam_jr_strstatus(qidev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct skcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = skcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->cryptlen, 0);

	if (req->src == req->dst)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);
	else
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);

	return edesc;
}
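/*
 * Layout built by skcipher_edesc_alloc() above (a restatement of the inline
 * comment, for quick reference):
 *
 *   input  (fd_sgt[1]): [ IV | src S/G ]	length = ivsize + cryptlen
 *   output (fd_sgt[0]): [ dst S/G | IV ]	length = cryptlen + ivsize
 *
 * Both IV entries point at the same buffer placed after the S/G table in
 * the edesc; mapping it DMA_BIDIRECTIONAL lets skcipher_done() copy the
 * updated IV (last ciphertext block / counter) back into req->iv.
 */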
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	int ret;

	if (!req->cryptlen)
		return 0;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, false);
}
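/*
 * Hedged usage sketch (not part of this driver): once registered, the
 * algorithms below are reached through the generic kernel crypto API. A
 * minimal synchronous caller for a "cbc(aes)" implementation could look
 * like this (error handling trimmed; key/buf/iv setup assumed):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, 16);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, buflen);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * The -EINPROGRESS returned by skcipher_crypt() above is what
 * crypto_wait_req() parks on until skcipher_done() completes the request.
 */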
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
"cbc-des3_ede-caam-qi", 2085 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2086 }, 2087 .setkey = des3_aead_setkey, 2088 .setauthsize = aead_setauthsize, 2089 .encrypt = aead_encrypt, 2090 .decrypt = aead_decrypt, 2091 .ivsize = DES3_EDE_BLOCK_SIZE, 2092 .maxauthsize = SHA384_DIGEST_SIZE, 2093 }, 2094 .caam = { 2095 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2096 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2097 OP_ALG_AAI_HMAC_PRECOMP, 2098 .geniv = true, 2099 } 2100 }, 2101 { 2102 .aead = { 2103 .base = { 2104 .cra_name = "authenc(hmac(sha512)," 2105 "cbc(des3_ede))", 2106 .cra_driver_name = "authenc-hmac-sha512-" 2107 "cbc-des3_ede-caam-qi", 2108 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2109 }, 2110 .setkey = des3_aead_setkey, 2111 .setauthsize = aead_setauthsize, 2112 .encrypt = aead_encrypt, 2113 .decrypt = aead_decrypt, 2114 .ivsize = DES3_EDE_BLOCK_SIZE, 2115 .maxauthsize = SHA512_DIGEST_SIZE, 2116 }, 2117 .caam = { 2118 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2119 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2120 OP_ALG_AAI_HMAC_PRECOMP, 2121 }, 2122 }, 2123 { 2124 .aead = { 2125 .base = { 2126 .cra_name = "echainiv(authenc(hmac(sha512)," 2127 "cbc(des3_ede)))", 2128 .cra_driver_name = "echainiv-authenc-" 2129 "hmac-sha512-" 2130 "cbc-des3_ede-caam-qi", 2131 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2132 }, 2133 .setkey = des3_aead_setkey, 2134 .setauthsize = aead_setauthsize, 2135 .encrypt = aead_encrypt, 2136 .decrypt = aead_decrypt, 2137 .ivsize = DES3_EDE_BLOCK_SIZE, 2138 .maxauthsize = SHA512_DIGEST_SIZE, 2139 }, 2140 .caam = { 2141 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2142 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2143 OP_ALG_AAI_HMAC_PRECOMP, 2144 .geniv = true, 2145 } 2146 }, 2147 { 2148 .aead = { 2149 .base = { 2150 .cra_name = "authenc(hmac(md5),cbc(des))", 2151 .cra_driver_name = "authenc-hmac-md5-" 2152 "cbc-des-caam-qi", 2153 .cra_blocksize = DES_BLOCK_SIZE, 2154 }, 2155 .setkey = aead_setkey, 2156 .setauthsize = aead_setauthsize, 2157 .encrypt = aead_encrypt, 2158 .decrypt = aead_decrypt, 2159 .ivsize = DES_BLOCK_SIZE, 2160 .maxauthsize = MD5_DIGEST_SIZE, 2161 }, 2162 .caam = { 2163 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2164 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2165 OP_ALG_AAI_HMAC_PRECOMP, 2166 }, 2167 }, 2168 { 2169 .aead = { 2170 .base = { 2171 .cra_name = "echainiv(authenc(hmac(md5)," 2172 "cbc(des)))", 2173 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2174 "cbc-des-caam-qi", 2175 .cra_blocksize = DES_BLOCK_SIZE, 2176 }, 2177 .setkey = aead_setkey, 2178 .setauthsize = aead_setauthsize, 2179 .encrypt = aead_encrypt, 2180 .decrypt = aead_decrypt, 2181 .ivsize = DES_BLOCK_SIZE, 2182 .maxauthsize = MD5_DIGEST_SIZE, 2183 }, 2184 .caam = { 2185 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2186 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2187 OP_ALG_AAI_HMAC_PRECOMP, 2188 .geniv = true, 2189 } 2190 }, 2191 { 2192 .aead = { 2193 .base = { 2194 .cra_name = "authenc(hmac(sha1),cbc(des))", 2195 .cra_driver_name = "authenc-hmac-sha1-" 2196 "cbc-des-caam-qi", 2197 .cra_blocksize = DES_BLOCK_SIZE, 2198 }, 2199 .setkey = aead_setkey, 2200 .setauthsize = aead_setauthsize, 2201 .encrypt = aead_encrypt, 2202 .decrypt = aead_decrypt, 2203 .ivsize = DES_BLOCK_SIZE, 2204 .maxauthsize = SHA1_DIGEST_SIZE, 2205 }, 2206 .caam = { 2207 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2208 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2209 OP_ALG_AAI_HMAC_PRECOMP, 2210 }, 2211 }, 2212 { 2213 .aead = { 2214 .base = { 2215 .cra_name = 
"echainiv(authenc(hmac(sha1)," 2216 "cbc(des)))", 2217 .cra_driver_name = "echainiv-authenc-" 2218 "hmac-sha1-cbc-des-caam-qi", 2219 .cra_blocksize = DES_BLOCK_SIZE, 2220 }, 2221 .setkey = aead_setkey, 2222 .setauthsize = aead_setauthsize, 2223 .encrypt = aead_encrypt, 2224 .decrypt = aead_decrypt, 2225 .ivsize = DES_BLOCK_SIZE, 2226 .maxauthsize = SHA1_DIGEST_SIZE, 2227 }, 2228 .caam = { 2229 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2230 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2231 OP_ALG_AAI_HMAC_PRECOMP, 2232 .geniv = true, 2233 } 2234 }, 2235 { 2236 .aead = { 2237 .base = { 2238 .cra_name = "authenc(hmac(sha224),cbc(des))", 2239 .cra_driver_name = "authenc-hmac-sha224-" 2240 "cbc-des-caam-qi", 2241 .cra_blocksize = DES_BLOCK_SIZE, 2242 }, 2243 .setkey = aead_setkey, 2244 .setauthsize = aead_setauthsize, 2245 .encrypt = aead_encrypt, 2246 .decrypt = aead_decrypt, 2247 .ivsize = DES_BLOCK_SIZE, 2248 .maxauthsize = SHA224_DIGEST_SIZE, 2249 }, 2250 .caam = { 2251 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2252 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2253 OP_ALG_AAI_HMAC_PRECOMP, 2254 }, 2255 }, 2256 { 2257 .aead = { 2258 .base = { 2259 .cra_name = "echainiv(authenc(hmac(sha224)," 2260 "cbc(des)))", 2261 .cra_driver_name = "echainiv-authenc-" 2262 "hmac-sha224-cbc-des-" 2263 "caam-qi", 2264 .cra_blocksize = DES_BLOCK_SIZE, 2265 }, 2266 .setkey = aead_setkey, 2267 .setauthsize = aead_setauthsize, 2268 .encrypt = aead_encrypt, 2269 .decrypt = aead_decrypt, 2270 .ivsize = DES_BLOCK_SIZE, 2271 .maxauthsize = SHA224_DIGEST_SIZE, 2272 }, 2273 .caam = { 2274 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2275 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2276 OP_ALG_AAI_HMAC_PRECOMP, 2277 .geniv = true, 2278 } 2279 }, 2280 { 2281 .aead = { 2282 .base = { 2283 .cra_name = "authenc(hmac(sha256),cbc(des))", 2284 .cra_driver_name = "authenc-hmac-sha256-" 2285 "cbc-des-caam-qi", 2286 .cra_blocksize = DES_BLOCK_SIZE, 2287 }, 2288 .setkey = aead_setkey, 2289 .setauthsize = aead_setauthsize, 2290 .encrypt = aead_encrypt, 2291 .decrypt = aead_decrypt, 2292 .ivsize = DES_BLOCK_SIZE, 2293 .maxauthsize = SHA256_DIGEST_SIZE, 2294 }, 2295 .caam = { 2296 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2297 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2298 OP_ALG_AAI_HMAC_PRECOMP, 2299 }, 2300 }, 2301 { 2302 .aead = { 2303 .base = { 2304 .cra_name = "echainiv(authenc(hmac(sha256)," 2305 "cbc(des)))", 2306 .cra_driver_name = "echainiv-authenc-" 2307 "hmac-sha256-cbc-des-" 2308 "caam-qi", 2309 .cra_blocksize = DES_BLOCK_SIZE, 2310 }, 2311 .setkey = aead_setkey, 2312 .setauthsize = aead_setauthsize, 2313 .encrypt = aead_encrypt, 2314 .decrypt = aead_decrypt, 2315 .ivsize = DES_BLOCK_SIZE, 2316 .maxauthsize = SHA256_DIGEST_SIZE, 2317 }, 2318 .caam = { 2319 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2320 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2321 OP_ALG_AAI_HMAC_PRECOMP, 2322 .geniv = true, 2323 }, 2324 }, 2325 { 2326 .aead = { 2327 .base = { 2328 .cra_name = "authenc(hmac(sha384),cbc(des))", 2329 .cra_driver_name = "authenc-hmac-sha384-" 2330 "cbc-des-caam-qi", 2331 .cra_blocksize = DES_BLOCK_SIZE, 2332 }, 2333 .setkey = aead_setkey, 2334 .setauthsize = aead_setauthsize, 2335 .encrypt = aead_encrypt, 2336 .decrypt = aead_decrypt, 2337 .ivsize = DES_BLOCK_SIZE, 2338 .maxauthsize = SHA384_DIGEST_SIZE, 2339 }, 2340 .caam = { 2341 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2342 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2343 OP_ALG_AAI_HMAC_PRECOMP, 2344 }, 
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
};

static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;
	struct device *dev;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	dev = ctx->jrdev->parent;
	priv = dev_get_drvdata(dev);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(dev, ctx->key_dma)) {
		dev_err(dev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->qidev = dev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;

	return 0;
}
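/*
 * Note on caam_init_common(): on Era >= 6 parts the split key is produced
 * via the DKP protocol as part of shared descriptor execution, so the
 * derived key can be written back through ctx->key_dma; this is why the key
 * buffer is mapped DMA_BIDIRECTIONAL in that case, whereas pre-DKP parts
 * only ever read the key material and DMA_TO_DEVICE suffices.
 */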
static int caam_cra_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
				false);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);

	dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
			 ctx->dir);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

void caam_qi_algapi_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}
}

static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
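/*
 * Capability probing below reads the CHA version/instantiation registers
 * (perfmon for Era < 10, the per-CHA vreg block otherwise) and prunes the
 * template tables accordingly. For example, on a device whose MDHA reports
 * the LP256 version ID, md_limit drops to SHA256_DIGEST_SIZE, so all
 * hmac(sha384) and hmac(sha512) templates above are skipped.
 */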
int caam_qi_algapi_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int i = 0, err = 0;
	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	/* Make sure this runs only on (DPAA 1.x) QI */
	if (!priv->qi_present || caam_dpaa2)
		return 0;

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	if (priv->era < 10) {
		u32 cha_vid, cha_inst;

		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
			   CHA_ID_LS_DES_SHIFT;
		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 aesa, mdha;

		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
		mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;

		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
		aes_inst = aesa & CHA_VER_NUM_MASK;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/* If MD is present, limit digest size when MDHA is the LP256 version */
	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/* Skip GCM: not supported by AES low-power (LP) accelerators */
		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(ctrldev, "algorithms registered in /proc/crypto\n");

	return err;
}
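/*
 * Illustrative sketch (not part of this driver): a registered template is
 * consumed through the generic kernel crypto API like any other AEAD.
 * Names such as done_cb, src_sg, dst_sg, assoclen, cryptlen and iv below
 * are example values only.
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	// key blob in the authenc format parsed by aead_setkey()
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, done_cb, NULL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	err = crypto_aead_encrypt(req); // may return -EINPROGRESS (async)
 *
 *	aead_request_free(req);
 *	crypto_free_aead(tfm);
 */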