1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Freescale FSL CAAM support for crypto API over QI backend. 4 * Based on caamalg.c 5 * 6 * Copyright 2013-2016 Freescale Semiconductor, Inc. 7 * Copyright 2016-2019 NXP 8 */ 9 10 #include "compat.h" 11 #include "ctrl.h" 12 #include "regs.h" 13 #include "intern.h" 14 #include "desc_constr.h" 15 #include "error.h" 16 #include "sg_sw_qm.h" 17 #include "key_gen.h" 18 #include "qi.h" 19 #include "jr.h" 20 #include "caamalg_desc.h" 21 22 /* 23 * crypto alg 24 */ 25 #define CAAM_CRA_PRIORITY 2000 26 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ 27 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ 28 SHA512_DIGEST_SIZE * 2) 29 30 #define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \ 31 CAAM_MAX_KEY_SIZE) 32 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 33 34 struct caam_alg_entry { 35 int class1_alg_type; 36 int class2_alg_type; 37 bool rfc3686; 38 bool geniv; 39 bool nodkp; 40 }; 41 42 struct caam_aead_alg { 43 struct aead_alg aead; 44 struct caam_alg_entry caam; 45 bool registered; 46 }; 47 48 struct caam_skcipher_alg { 49 struct skcipher_alg skcipher; 50 struct caam_alg_entry caam; 51 bool registered; 52 }; 53 54 /* 55 * per-session context 56 */ 57 struct caam_ctx { 58 struct device *jrdev; 59 u32 sh_desc_enc[DESC_MAX_USED_LEN]; 60 u32 sh_desc_dec[DESC_MAX_USED_LEN]; 61 u8 key[CAAM_MAX_KEY_SIZE]; 62 dma_addr_t key_dma; 63 enum dma_data_direction dir; 64 struct alginfo adata; 65 struct alginfo cdata; 66 unsigned int authsize; 67 struct device *qidev; 68 spinlock_t lock; /* Protects multiple init of driver context */ 69 struct caam_drv_ctx *drv_ctx[NUM_OP]; 70 }; 71 72 static int aead_set_sh_desc(struct crypto_aead *aead) 73 { 74 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), 75 typeof(*alg), aead); 76 struct caam_ctx *ctx = crypto_aead_ctx(aead); 77 unsigned int ivsize = crypto_aead_ivsize(aead); 78 u32 ctx1_iv_off = 0; 79 u32 *nonce = NULL; 80 unsigned int data_len[2]; 81 u32 inl_mask; 82 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 83 OP_ALG_AAI_CTR_MOD128); 84 const bool is_rfc3686 = alg->caam.rfc3686; 85 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 86 87 if (!ctx->cdata.keylen || !ctx->authsize) 88 return 0; 89 90 /* 91 * AES-CTR needs to load IV in CONTEXT1 reg 92 * at an offset of 128bits (16bytes) 93 * CONTEXT1[255:128] = IV 94 */ 95 if (ctr_mode) 96 ctx1_iv_off = 16; 97 98 /* 99 * RFC3686 specific: 100 * CONTEXT1[255:128] = {NONCE, IV, COUNTER} 101 */ 102 if (is_rfc3686) { 103 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 104 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + 105 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); 106 } 107 108 data_len[0] = ctx->adata.keylen_pad; 109 data_len[1] = ctx->cdata.keylen; 110 111 if (alg->caam.geniv) 112 goto skip_enc; 113 114 /* aead_encrypt shared descriptor */ 115 if (desc_inline_query(DESC_QI_AEAD_ENC_LEN + 116 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 117 DESC_JOB_IO_LEN, data_len, &inl_mask, 118 ARRAY_SIZE(data_len)) < 0) 119 return -EINVAL; 120 121 if (inl_mask & 1) 122 ctx->adata.key_virt = ctx->key; 123 else 124 ctx->adata.key_dma = ctx->key_dma; 125 126 if (inl_mask & 2) 127 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 128 else 129 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 130 131 ctx->adata.key_inline = !!(inl_mask & 1); 132 ctx->cdata.key_inline = !!(inl_mask & 2); 133 134 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, 135 ivsize, ctx->authsize, is_rfc3686, nonce, 136 ctx1_iv_off, true, ctrlpriv->era); 137 138 skip_enc: 139 /* aead_decrypt shared descriptor */ 140 if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + 141 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 142 DESC_JOB_IO_LEN, data_len, &inl_mask, 143 ARRAY_SIZE(data_len)) < 0) 144 return -EINVAL; 145 146 if (inl_mask & 1) 147 ctx->adata.key_virt = ctx->key; 148 else 149 ctx->adata.key_dma = ctx->key_dma; 150 151 if (inl_mask & 2) 152 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 153 else 154 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 155 156 ctx->adata.key_inline = !!(inl_mask & 1); 157 ctx->cdata.key_inline = !!(inl_mask & 2); 158 159 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, 160 ivsize, ctx->authsize, alg->caam.geniv, 161 is_rfc3686, nonce, ctx1_iv_off, true, 162 ctrlpriv->era); 163 164 if (!alg->caam.geniv) 165 goto skip_givenc; 166 167 /* aead_givencrypt shared descriptor */ 168 if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN + 169 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 170 DESC_JOB_IO_LEN, data_len, &inl_mask, 171 ARRAY_SIZE(data_len)) < 0) 172 return -EINVAL; 173 174 if (inl_mask & 1) 175 ctx->adata.key_virt = ctx->key; 176 else 177 ctx->adata.key_dma = ctx->key_dma; 178 179 if (inl_mask & 2) 180 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 181 else 182 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 183 184 ctx->adata.key_inline = !!(inl_mask & 1); 185 ctx->cdata.key_inline = !!(inl_mask & 2); 186 187 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, 188 ivsize, ctx->authsize, is_rfc3686, nonce, 189 ctx1_iv_off, true, ctrlpriv->era); 190 191 skip_givenc: 192 return 0; 193 } 194 195 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 196 { 197 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 198 199 ctx->authsize = authsize; 200 aead_set_sh_desc(authenc); 201 202 return 0; 203 } 204 205 static int aead_setkey(struct crypto_aead *aead, const u8 *key, 206 unsigned int keylen) 207 { 208 struct caam_ctx *ctx = crypto_aead_ctx(aead); 209 struct device *jrdev = ctx->jrdev; 210 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 211 struct crypto_authenc_keys keys; 212 int ret = 0; 213 214 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 215 goto badkey; 216 217 #ifdef DEBUG 218 dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", 219 keys.authkeylen + keys.enckeylen, keys.enckeylen, 220 keys.authkeylen); 221 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 222 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 223 #endif 224 225 /* 226 * If DKP is supported, use it in the shared descriptor to generate 227 * the split key. 228 */ 229 if (ctrlpriv->era >= 6) { 230 ctx->adata.keylen = keys.authkeylen; 231 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 232 OP_ALG_ALGSEL_MASK); 233 234 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) 235 goto badkey; 236 237 memcpy(ctx->key, keys.authkey, keys.authkeylen); 238 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, 239 keys.enckeylen); 240 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 241 ctx->adata.keylen_pad + 242 keys.enckeylen, ctx->dir); 243 goto skip_split_key; 244 } 245 246 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, 247 keys.authkeylen, CAAM_MAX_KEY_SIZE - 248 keys.enckeylen); 249 if (ret) 250 goto badkey; 251 252 /* postpend encryption key to auth split key */ 253 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 254 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 255 ctx->adata.keylen_pad + keys.enckeylen, 256 ctx->dir); 257 #ifdef DEBUG 258 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", 259 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 260 ctx->adata.keylen_pad + keys.enckeylen, 1); 261 #endif 262 263 skip_split_key: 264 ctx->cdata.keylen = keys.enckeylen; 265 266 ret = aead_set_sh_desc(aead); 267 if (ret) 268 goto badkey; 269 270 /* Now update the driver contexts with the new shared descriptor */ 271 if (ctx->drv_ctx[ENCRYPT]) { 272 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 273 ctx->sh_desc_enc); 274 if (ret) { 275 dev_err(jrdev, "driver enc context update failed\n"); 276 goto badkey; 277 } 278 } 279 280 if (ctx->drv_ctx[DECRYPT]) { 281 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 282 ctx->sh_desc_dec); 283 if (ret) { 284 dev_err(jrdev, "driver dec context update failed\n"); 285 goto badkey; 286 } 287 } 288 289 memzero_explicit(&keys, sizeof(keys)); 290 return ret; 291 badkey: 292 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 293 memzero_explicit(&keys, sizeof(keys)); 294 return -EINVAL; 295 } 296 297 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, 298 unsigned int keylen) 299 { 300 struct crypto_authenc_keys keys; 301 u32 flags; 302 int err; 303 304 err = crypto_authenc_extractkeys(&keys, key, keylen); 305 if (unlikely(err)) 306 goto badkey; 307 308 err = -EINVAL; 309 if (keys.enckeylen != DES3_EDE_KEY_SIZE) 310 goto badkey; 311 312 flags = crypto_aead_get_flags(aead); 313 err = __des3_verify_key(&flags, keys.enckey); 314 if (unlikely(err)) { 315 crypto_aead_set_flags(aead, flags); 316 goto out; 317 } 318 319 err = aead_setkey(aead, key, keylen); 320 321 out: 322 memzero_explicit(&keys, sizeof(keys)); 323 return err; 324 325 badkey: 326 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 327 goto out; 328 } 329 330 static int gcm_set_sh_desc(struct crypto_aead *aead) 331 { 332 struct caam_ctx *ctx = crypto_aead_ctx(aead); 333 unsigned int ivsize = crypto_aead_ivsize(aead); 334 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - 335 ctx->cdata.keylen; 336 337 if (!ctx->cdata.keylen || !ctx->authsize) 338 return 0; 339 340 /* 341 * Job Descriptor and Shared Descriptor 342 * must fit into the 64-word Descriptor h/w Buffer 343 */ 344 if (rem_bytes >= DESC_QI_GCM_ENC_LEN) { 345 ctx->cdata.key_inline = true; 346 ctx->cdata.key_virt = ctx->key; 347 } else { 348 ctx->cdata.key_inline = false; 349 ctx->cdata.key_dma = ctx->key_dma; 350 } 351 352 cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 353 ctx->authsize, true); 354 355 /* 356 * Job Descriptor and Shared Descriptor 357 * must fit into the 64-word Descriptor h/w Buffer 358 */ 359 if (rem_bytes >= DESC_QI_GCM_DEC_LEN) { 360 ctx->cdata.key_inline = true; 361 ctx->cdata.key_virt = ctx->key; 362 } else { 363 ctx->cdata.key_inline = false; 364 ctx->cdata.key_dma = ctx->key_dma; 365 } 366 367 cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 368 ctx->authsize, true); 369 370 return 0; 371 } 372 373 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 374 { 375 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 376 377 ctx->authsize = authsize; 378 gcm_set_sh_desc(authenc); 379 380 return 0; 381 } 382 383 static int gcm_setkey(struct crypto_aead *aead, 384 const u8 *key, unsigned int keylen) 385 { 386 struct caam_ctx *ctx = crypto_aead_ctx(aead); 387 struct device *jrdev = ctx->jrdev; 388 int ret; 389 390 #ifdef DEBUG 391 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 392 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 393 #endif 394 395 memcpy(ctx->key, key, keylen); 396 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen, 397 ctx->dir); 398 ctx->cdata.keylen = keylen; 399 400 ret = gcm_set_sh_desc(aead); 401 if (ret) 402 return ret; 403 404 /* Now update the driver contexts with the new shared descriptor */ 405 if (ctx->drv_ctx[ENCRYPT]) { 406 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 407 ctx->sh_desc_enc); 408 if (ret) { 409 dev_err(jrdev, "driver enc context update failed\n"); 410 return ret; 411 } 412 } 413 414 if (ctx->drv_ctx[DECRYPT]) { 415 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 416 ctx->sh_desc_dec); 417 if (ret) { 418 dev_err(jrdev, "driver dec context update failed\n"); 419 return ret; 420 } 421 } 422 423 return 0; 424 } 425 426 static int rfc4106_set_sh_desc(struct crypto_aead *aead) 427 { 428 struct caam_ctx *ctx = crypto_aead_ctx(aead); 429 unsigned int ivsize = crypto_aead_ivsize(aead); 430 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - 431 ctx->cdata.keylen; 432 433 if (!ctx->cdata.keylen || !ctx->authsize) 434 return 0; 435 436 ctx->cdata.key_virt = ctx->key; 437 438 /* 439 * Job Descriptor and Shared Descriptor 440 * must fit into the 64-word Descriptor h/w Buffer 441 */ 442 if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) { 443 ctx->cdata.key_inline = true; 444 } else { 445 ctx->cdata.key_inline = false; 446 ctx->cdata.key_dma = ctx->key_dma; 447 } 448 449 cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 450 ctx->authsize, true); 451 452 /* 453 * Job Descriptor and Shared Descriptor 454 * must fit into the 64-word Descriptor h/w Buffer 455 */ 456 if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) { 457 ctx->cdata.key_inline = true; 458 } else { 459 ctx->cdata.key_inline = false; 460 ctx->cdata.key_dma = ctx->key_dma; 461 } 462 463 cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 464 ctx->authsize, true); 465 466 return 0; 467 } 468 469 static int rfc4106_setauthsize(struct crypto_aead *authenc, 470 unsigned int authsize) 471 { 472 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 473 474 ctx->authsize = authsize; 475 rfc4106_set_sh_desc(authenc); 476 477 return 0; 478 } 479 480 static int rfc4106_setkey(struct crypto_aead *aead, 481 const u8 *key, unsigned int keylen) 482 { 483 struct caam_ctx *ctx = crypto_aead_ctx(aead); 484 struct device *jrdev = ctx->jrdev; 485 int ret; 486 487 if (keylen < 4) 488 return -EINVAL; 489 490 #ifdef DEBUG 491 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 492 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 493 #endif 494 495 memcpy(ctx->key, key, keylen); 496 /* 497 * The last four bytes of the key material are used as the salt value 498 * in the nonce. Update the AES key length. 499 */ 500 ctx->cdata.keylen = keylen - 4; 501 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 502 ctx->cdata.keylen, ctx->dir); 503 504 ret = rfc4106_set_sh_desc(aead); 505 if (ret) 506 return ret; 507 508 /* Now update the driver contexts with the new shared descriptor */ 509 if (ctx->drv_ctx[ENCRYPT]) { 510 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 511 ctx->sh_desc_enc); 512 if (ret) { 513 dev_err(jrdev, "driver enc context update failed\n"); 514 return ret; 515 } 516 } 517 518 if (ctx->drv_ctx[DECRYPT]) { 519 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 520 ctx->sh_desc_dec); 521 if (ret) { 522 dev_err(jrdev, "driver dec context update failed\n"); 523 return ret; 524 } 525 } 526 527 return 0; 528 } 529 530 static int rfc4543_set_sh_desc(struct crypto_aead *aead) 531 { 532 struct caam_ctx *ctx = crypto_aead_ctx(aead); 533 unsigned int ivsize = crypto_aead_ivsize(aead); 534 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - 535 ctx->cdata.keylen; 536 537 if (!ctx->cdata.keylen || !ctx->authsize) 538 return 0; 539 540 ctx->cdata.key_virt = ctx->key; 541 542 /* 543 * Job Descriptor and Shared Descriptor 544 * must fit into the 64-word Descriptor h/w Buffer 545 */ 546 if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) { 547 ctx->cdata.key_inline = true; 548 } else { 549 ctx->cdata.key_inline = false; 550 ctx->cdata.key_dma = ctx->key_dma; 551 } 552 553 cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 554 ctx->authsize, true); 555 556 /* 557 * Job Descriptor and Shared Descriptor 558 * must fit into the 64-word Descriptor h/w Buffer 559 */ 560 if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) { 561 ctx->cdata.key_inline = true; 562 } else { 563 ctx->cdata.key_inline = false; 564 ctx->cdata.key_dma = ctx->key_dma; 565 } 566 567 cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 568 ctx->authsize, true); 569 570 return 0; 571 } 572 573 static int rfc4543_setauthsize(struct crypto_aead *authenc, 574 unsigned int authsize) 575 { 576 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 577 578 ctx->authsize = authsize; 579 rfc4543_set_sh_desc(authenc); 580 581 return 0; 582 } 583 584 static int rfc4543_setkey(struct crypto_aead *aead, 585 const u8 *key, unsigned int keylen) 586 { 587 struct caam_ctx *ctx = crypto_aead_ctx(aead); 588 struct device *jrdev = ctx->jrdev; 589 int ret; 590 591 if (keylen < 4) 592 return -EINVAL; 593 594 #ifdef DEBUG 595 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 596 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 597 #endif 598 599 memcpy(ctx->key, key, keylen); 600 /* 601 * The last four bytes of the key material are used as the salt value 602 * in the nonce. Update the AES key length. 603 */ 604 ctx->cdata.keylen = keylen - 4; 605 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 606 ctx->cdata.keylen, ctx->dir); 607 608 ret = rfc4543_set_sh_desc(aead); 609 if (ret) 610 return ret; 611 612 /* Now update the driver contexts with the new shared descriptor */ 613 if (ctx->drv_ctx[ENCRYPT]) { 614 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 615 ctx->sh_desc_enc); 616 if (ret) { 617 dev_err(jrdev, "driver enc context update failed\n"); 618 return ret; 619 } 620 } 621 622 if (ctx->drv_ctx[DECRYPT]) { 623 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 624 ctx->sh_desc_dec); 625 if (ret) { 626 dev_err(jrdev, "driver dec context update failed\n"); 627 return ret; 628 } 629 } 630 631 return 0; 632 } 633 634 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 635 unsigned int keylen) 636 { 637 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 638 struct caam_skcipher_alg *alg = 639 container_of(crypto_skcipher_alg(skcipher), typeof(*alg), 640 skcipher); 641 struct device *jrdev = ctx->jrdev; 642 unsigned int ivsize = crypto_skcipher_ivsize(skcipher); 643 u32 ctx1_iv_off = 0; 644 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 645 OP_ALG_AAI_CTR_MOD128); 646 const bool is_rfc3686 = alg->caam.rfc3686; 647 int ret = 0; 648 649 #ifdef DEBUG 650 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 651 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 652 #endif 653 /* 654 * AES-CTR needs to load IV in CONTEXT1 reg 655 * at an offset of 128bits (16bytes) 656 * CONTEXT1[255:128] = IV 657 */ 658 if (ctr_mode) 659 ctx1_iv_off = 16; 660 661 /* 662 * RFC3686 specific: 663 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} 664 * | *key = {KEY, NONCE} 665 */ 666 if (is_rfc3686) { 667 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 668 keylen -= CTR_RFC3686_NONCE_SIZE; 669 } 670 671 ctx->cdata.keylen = keylen; 672 ctx->cdata.key_virt = key; 673 ctx->cdata.key_inline = true; 674 675 /* skcipher encrypt, decrypt shared descriptors */ 676 cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 677 is_rfc3686, ctx1_iv_off); 678 cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 679 is_rfc3686, ctx1_iv_off); 680 681 /* Now update the driver contexts with the new shared descriptor */ 682 if (ctx->drv_ctx[ENCRYPT]) { 683 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 684 ctx->sh_desc_enc); 685 if (ret) { 686 dev_err(jrdev, "driver enc context update failed\n"); 687 goto badkey; 688 } 689 } 690 691 if (ctx->drv_ctx[DECRYPT]) { 692 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 693 ctx->sh_desc_dec); 694 if (ret) { 695 dev_err(jrdev, "driver dec context update failed\n"); 696 goto badkey; 697 } 698 } 699 700 return ret; 701 badkey: 702 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 703 return -EINVAL; 704 } 705 706 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, 707 const u8 *key, unsigned int keylen) 708 { 709 return unlikely(des3_verify_key(skcipher, key)) ?: 710 skcipher_setkey(skcipher, key, keylen); 711 } 712 713 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 714 unsigned int keylen) 715 { 716 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 717 struct device *jrdev = ctx->jrdev; 718 int ret = 0; 719 720 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { 721 dev_err(jrdev, "key size mismatch\n"); 722 goto badkey; 723 } 724 725 ctx->cdata.keylen = keylen; 726 ctx->cdata.key_virt = key; 727 ctx->cdata.key_inline = true; 728 729 /* xts skcipher encrypt, decrypt shared descriptors */ 730 cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata); 731 cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata); 732 733 /* Now update the driver contexts with the new shared descriptor */ 734 if (ctx->drv_ctx[ENCRYPT]) { 735 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 736 ctx->sh_desc_enc); 737 if (ret) { 738 dev_err(jrdev, "driver enc context update failed\n"); 739 goto badkey; 740 } 741 } 742 743 if (ctx->drv_ctx[DECRYPT]) { 744 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 745 ctx->sh_desc_dec); 746 if (ret) { 747 dev_err(jrdev, "driver dec context update failed\n"); 748 goto badkey; 749 } 750 } 751 752 return ret; 753 badkey: 754 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 755 return -EINVAL; 756 } 757 758 /* 759 * aead_edesc - s/w-extended aead descriptor 760 * @src_nents: number of segments in input scatterlist 761 * @dst_nents: number of segments in output scatterlist 762 * @iv_dma: dma address of iv for checking continuity and link table 763 * @qm_sg_bytes: length of dma mapped h/w link table 764 * @qm_sg_dma: bus physical mapped address of h/w link table 765 * @assoclen: associated data length, in CAAM endianness 766 * @assoclen_dma: bus physical mapped address of req->assoclen 767 * @drv_req: driver-specific request structure 768 * @sgt: the h/w link table, followed by IV 769 */ 770 struct aead_edesc { 771 int src_nents; 772 int dst_nents; 773 dma_addr_t iv_dma; 774 int qm_sg_bytes; 775 dma_addr_t qm_sg_dma; 776 unsigned int assoclen; 777 dma_addr_t assoclen_dma; 778 struct caam_drv_req drv_req; 779 struct qm_sg_entry sgt[0]; 780 }; 781 782 /* 783 * skcipher_edesc - s/w-extended skcipher descriptor 784 * @src_nents: number of segments in input scatterlist 785 * @dst_nents: number of segments in output scatterlist 786 * @iv_dma: dma address of iv for checking continuity and link table 787 * @qm_sg_bytes: length of dma mapped h/w link table 788 * @qm_sg_dma: bus physical mapped address of h/w link table 789 * @drv_req: driver-specific request structure 790 * @sgt: the h/w link table, followed by IV 791 */ 792 struct skcipher_edesc { 793 int src_nents; 794 int dst_nents; 795 dma_addr_t iv_dma; 796 int qm_sg_bytes; 797 dma_addr_t qm_sg_dma; 798 struct caam_drv_req drv_req; 799 struct qm_sg_entry sgt[0]; 800 }; 801 802 static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, 803 enum optype type) 804 { 805 /* 806 * This function is called on the fast path with values of 'type' 807 * known at compile time. Invalid arguments are not expected and 808 * thus no checks are made. 809 */ 810 struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type]; 811 u32 *desc; 812 813 if (unlikely(!drv_ctx)) { 814 spin_lock(&ctx->lock); 815 816 /* Read again to check if some other core init drv_ctx */ 817 drv_ctx = ctx->drv_ctx[type]; 818 if (!drv_ctx) { 819 int cpu; 820 821 if (type == ENCRYPT) 822 desc = ctx->sh_desc_enc; 823 else /* (type == DECRYPT) */ 824 desc = ctx->sh_desc_dec; 825 826 cpu = smp_processor_id(); 827 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); 828 if (!IS_ERR_OR_NULL(drv_ctx)) 829 drv_ctx->op_type = type; 830 831 ctx->drv_ctx[type] = drv_ctx; 832 } 833 834 spin_unlock(&ctx->lock); 835 } 836 837 return drv_ctx; 838 } 839 840 static void caam_unmap(struct device *dev, struct scatterlist *src, 841 struct scatterlist *dst, int src_nents, 842 int dst_nents, dma_addr_t iv_dma, int ivsize, 843 dma_addr_t qm_sg_dma, int qm_sg_bytes) 844 { 845 if (dst != src) { 846 if (src_nents) 847 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 848 if (dst_nents) 849 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); 850 } else { 851 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); 852 } 853 854 if (iv_dma) 855 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 856 if (qm_sg_bytes) 857 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); 858 } 859 860 static void aead_unmap(struct device *dev, 861 struct aead_edesc *edesc, 862 struct aead_request *req) 863 { 864 struct crypto_aead *aead = crypto_aead_reqtfm(req); 865 int ivsize = crypto_aead_ivsize(aead); 866 867 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 868 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); 869 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 870 } 871 872 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, 873 struct skcipher_request *req) 874 { 875 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 876 int ivsize = crypto_skcipher_ivsize(skcipher); 877 878 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 879 edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes); 880 } 881 882 static void aead_done(struct caam_drv_req *drv_req, u32 status) 883 { 884 struct device *qidev; 885 struct aead_edesc *edesc; 886 struct aead_request *aead_req = drv_req->app_ctx; 887 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); 888 struct caam_ctx *caam_ctx = crypto_aead_ctx(aead); 889 int ecode = 0; 890 891 qidev = caam_ctx->qidev; 892 893 if (unlikely(status)) { 894 u32 ssrc = status & JRSTA_SSRC_MASK; 895 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; 896 897 caam_jr_strstatus(qidev, status); 898 /* 899 * verify hw auth check passed else return -EBADMSG 900 */ 901 if (ssrc == JRSTA_SSRC_CCB_ERROR && 902 err_id == JRSTA_CCBERR_ERRID_ICVCHK) 903 ecode = -EBADMSG; 904 else 905 ecode = -EIO; 906 } 907 908 edesc = container_of(drv_req, typeof(*edesc), drv_req); 909 aead_unmap(qidev, edesc, aead_req); 910 911 aead_request_complete(aead_req, ecode); 912 qi_cache_free(edesc); 913 } 914 915 /* 916 * allocate and map the aead extended descriptor 917 */ 918 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 919 bool encrypt) 920 { 921 struct crypto_aead *aead = crypto_aead_reqtfm(req); 922 struct caam_ctx *ctx = crypto_aead_ctx(aead); 923 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), 924 typeof(*alg), aead); 925 struct device *qidev = ctx->qidev; 926 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 927 GFP_KERNEL : GFP_ATOMIC; 928 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 929 struct aead_edesc *edesc; 930 dma_addr_t qm_sg_dma, iv_dma = 0; 931 int ivsize = 0; 932 unsigned int authsize = ctx->authsize; 933 int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes; 934 int in_len, out_len; 935 struct qm_sg_entry *sg_table, *fd_sgt; 936 struct caam_drv_ctx *drv_ctx; 937 938 drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); 939 if (IS_ERR_OR_NULL(drv_ctx)) 940 return (struct aead_edesc *)drv_ctx; 941 942 /* allocate space for base edesc and hw desc commands, link tables */ 943 edesc = qi_cache_alloc(GFP_DMA | flags); 944 if (unlikely(!edesc)) { 945 dev_err(qidev, "could not allocate extended descriptor\n"); 946 return ERR_PTR(-ENOMEM); 947 } 948 949 if (likely(req->src == req->dst)) { 950 src_nents = sg_nents_for_len(req->src, req->assoclen + 951 req->cryptlen + 952 (encrypt ? authsize : 0)); 953 if (unlikely(src_nents < 0)) { 954 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 955 req->assoclen + req->cryptlen + 956 (encrypt ? authsize : 0)); 957 qi_cache_free(edesc); 958 return ERR_PTR(src_nents); 959 } 960 961 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, 962 DMA_BIDIRECTIONAL); 963 if (unlikely(!mapped_src_nents)) { 964 dev_err(qidev, "unable to map source\n"); 965 qi_cache_free(edesc); 966 return ERR_PTR(-ENOMEM); 967 } 968 } else { 969 src_nents = sg_nents_for_len(req->src, req->assoclen + 970 req->cryptlen); 971 if (unlikely(src_nents < 0)) { 972 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 973 req->assoclen + req->cryptlen); 974 qi_cache_free(edesc); 975 return ERR_PTR(src_nents); 976 } 977 978 dst_nents = sg_nents_for_len(req->dst, req->assoclen + 979 req->cryptlen + 980 (encrypt ? authsize : 981 (-authsize))); 982 if (unlikely(dst_nents < 0)) { 983 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", 984 req->assoclen + req->cryptlen + 985 (encrypt ? authsize : (-authsize))); 986 qi_cache_free(edesc); 987 return ERR_PTR(dst_nents); 988 } 989 990 if (src_nents) { 991 mapped_src_nents = dma_map_sg(qidev, req->src, 992 src_nents, DMA_TO_DEVICE); 993 if (unlikely(!mapped_src_nents)) { 994 dev_err(qidev, "unable to map source\n"); 995 qi_cache_free(edesc); 996 return ERR_PTR(-ENOMEM); 997 } 998 } else { 999 mapped_src_nents = 0; 1000 } 1001 1002 if (dst_nents) { 1003 mapped_dst_nents = dma_map_sg(qidev, req->dst, 1004 dst_nents, 1005 DMA_FROM_DEVICE); 1006 if (unlikely(!mapped_dst_nents)) { 1007 dev_err(qidev, "unable to map destination\n"); 1008 dma_unmap_sg(qidev, req->src, src_nents, 1009 DMA_TO_DEVICE); 1010 qi_cache_free(edesc); 1011 return ERR_PTR(-ENOMEM); 1012 } 1013 } else { 1014 mapped_dst_nents = 0; 1015 } 1016 } 1017 1018 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) 1019 ivsize = crypto_aead_ivsize(aead); 1020 1021 /* 1022 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. 1023 * Input is not contiguous. 1024 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1025 * the end of the table by allocating more S/G entries. Logic: 1026 * if (src != dst && output S/G) 1027 * pad output S/G, if needed 1028 * else if (src == dst && S/G) 1029 * overlapping S/Gs; pad one of them 1030 * else if (input S/G) ... 1031 * pad input S/G, if needed 1032 */ 1033 qm_sg_ents = 1 + !!ivsize + mapped_src_nents; 1034 if (mapped_dst_nents > 1) 1035 qm_sg_ents += pad_sg_nents(mapped_dst_nents); 1036 else if ((req->src == req->dst) && (mapped_src_nents > 1)) 1037 qm_sg_ents = max(pad_sg_nents(qm_sg_ents), 1038 1 + !!ivsize + pad_sg_nents(mapped_src_nents)); 1039 else 1040 qm_sg_ents = pad_sg_nents(qm_sg_ents); 1041 1042 sg_table = &edesc->sgt[0]; 1043 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); 1044 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > 1045 CAAM_QI_MEMCACHE_SIZE)) { 1046 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1047 qm_sg_ents, ivsize); 1048 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1049 0, 0, 0); 1050 qi_cache_free(edesc); 1051 return ERR_PTR(-ENOMEM); 1052 } 1053 1054 if (ivsize) { 1055 u8 *iv = (u8 *)(sg_table + qm_sg_ents); 1056 1057 /* Make sure IV is located in a DMAable area */ 1058 memcpy(iv, req->iv, ivsize); 1059 1060 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); 1061 if (dma_mapping_error(qidev, iv_dma)) { 1062 dev_err(qidev, "unable to map IV\n"); 1063 caam_unmap(qidev, req->src, req->dst, src_nents, 1064 dst_nents, 0, 0, 0, 0); 1065 qi_cache_free(edesc); 1066 return ERR_PTR(-ENOMEM); 1067 } 1068 } 1069 1070 edesc->src_nents = src_nents; 1071 edesc->dst_nents = dst_nents; 1072 edesc->iv_dma = iv_dma; 1073 edesc->drv_req.app_ctx = req; 1074 edesc->drv_req.cbk = aead_done; 1075 edesc->drv_req.drv_ctx = drv_ctx; 1076 1077 edesc->assoclen = cpu_to_caam32(req->assoclen); 1078 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, 1079 DMA_TO_DEVICE); 1080 if (dma_mapping_error(qidev, edesc->assoclen_dma)) { 1081 dev_err(qidev, "unable to map assoclen\n"); 1082 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1083 iv_dma, ivsize, 0, 0); 1084 qi_cache_free(edesc); 1085 return ERR_PTR(-ENOMEM); 1086 } 1087 1088 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); 1089 qm_sg_index++; 1090 if (ivsize) { 1091 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); 1092 qm_sg_index++; 1093 } 1094 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); 1095 qm_sg_index += mapped_src_nents; 1096 1097 if (mapped_dst_nents > 1) 1098 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + 1099 qm_sg_index, 0); 1100 1101 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); 1102 if (dma_mapping_error(qidev, qm_sg_dma)) { 1103 dev_err(qidev, "unable to map S/G table\n"); 1104 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 1105 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1106 iv_dma, ivsize, 0, 0); 1107 qi_cache_free(edesc); 1108 return ERR_PTR(-ENOMEM); 1109 } 1110 1111 edesc->qm_sg_dma = qm_sg_dma; 1112 edesc->qm_sg_bytes = qm_sg_bytes; 1113 1114 out_len = req->assoclen + req->cryptlen + 1115 (encrypt ? ctx->authsize : (-ctx->authsize)); 1116 in_len = 4 + ivsize + req->assoclen + req->cryptlen; 1117 1118 fd_sgt = &edesc->drv_req.fd_sgt[0]; 1119 dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0); 1120 1121 if (req->dst == req->src) { 1122 if (mapped_src_nents == 1) 1123 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), 1124 out_len, 0); 1125 else 1126 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + 1127 (1 + !!ivsize) * sizeof(*sg_table), 1128 out_len, 0); 1129 } else if (mapped_dst_nents <= 1) { 1130 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, 1131 0); 1132 } else { 1133 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) * 1134 qm_sg_index, out_len, 0); 1135 } 1136 1137 return edesc; 1138 } 1139 1140 static inline int aead_crypt(struct aead_request *req, bool encrypt) 1141 { 1142 struct aead_edesc *edesc; 1143 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1144 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1145 int ret; 1146 1147 if (unlikely(caam_congested)) 1148 return -EAGAIN; 1149 1150 /* allocate extended descriptor */ 1151 edesc = aead_edesc_alloc(req, encrypt); 1152 if (IS_ERR_OR_NULL(edesc)) 1153 return PTR_ERR(edesc); 1154 1155 /* Create and submit job descriptor */ 1156 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); 1157 if (!ret) { 1158 ret = -EINPROGRESS; 1159 } else { 1160 aead_unmap(ctx->qidev, edesc, req); 1161 qi_cache_free(edesc); 1162 } 1163 1164 return ret; 1165 } 1166 1167 static int aead_encrypt(struct aead_request *req) 1168 { 1169 return aead_crypt(req, true); 1170 } 1171 1172 static int aead_decrypt(struct aead_request *req) 1173 { 1174 return aead_crypt(req, false); 1175 } 1176 1177 static int ipsec_gcm_encrypt(struct aead_request *req) 1178 { 1179 if (req->assoclen < 8) 1180 return -EINVAL; 1181 1182 return aead_crypt(req, true); 1183 } 1184 1185 static int ipsec_gcm_decrypt(struct aead_request *req) 1186 { 1187 if (req->assoclen < 8) 1188 return -EINVAL; 1189 1190 return aead_crypt(req, false); 1191 } 1192 1193 static void skcipher_done(struct caam_drv_req *drv_req, u32 status) 1194 { 1195 struct skcipher_edesc *edesc; 1196 struct skcipher_request *req = drv_req->app_ctx; 1197 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1198 struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); 1199 struct device *qidev = caam_ctx->qidev; 1200 int ivsize = crypto_skcipher_ivsize(skcipher); 1201 1202 #ifdef DEBUG 1203 dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); 1204 #endif 1205 1206 edesc = container_of(drv_req, typeof(*edesc), drv_req); 1207 1208 if (status) 1209 caam_jr_strstatus(qidev, status); 1210 1211 #ifdef DEBUG 1212 print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ", 1213 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1214 edesc->src_nents > 1 ? 100 : ivsize, 1); 1215 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", 1216 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1217 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1218 #endif 1219 1220 skcipher_unmap(qidev, edesc, req); 1221 1222 /* 1223 * The crypto API expects us to set the IV (req->iv) to the last 1224 * ciphertext block. This is used e.g. by the CTS mode. 1225 */ 1226 if (edesc->drv_req.drv_ctx->op_type == ENCRYPT) 1227 scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - 1228 ivsize, ivsize, 0); 1229 1230 qi_cache_free(edesc); 1231 skcipher_request_complete(req, status); 1232 } 1233 1234 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, 1235 bool encrypt) 1236 { 1237 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1238 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1239 struct device *qidev = ctx->qidev; 1240 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1241 GFP_KERNEL : GFP_ATOMIC; 1242 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 1243 struct skcipher_edesc *edesc; 1244 dma_addr_t iv_dma; 1245 u8 *iv; 1246 int ivsize = crypto_skcipher_ivsize(skcipher); 1247 int dst_sg_idx, qm_sg_ents, qm_sg_bytes; 1248 struct qm_sg_entry *sg_table, *fd_sgt; 1249 struct caam_drv_ctx *drv_ctx; 1250 1251 drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); 1252 if (IS_ERR_OR_NULL(drv_ctx)) 1253 return (struct skcipher_edesc *)drv_ctx; 1254 1255 src_nents = sg_nents_for_len(req->src, req->cryptlen); 1256 if (unlikely(src_nents < 0)) { 1257 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 1258 req->cryptlen); 1259 return ERR_PTR(src_nents); 1260 } 1261 1262 if (unlikely(req->src != req->dst)) { 1263 dst_nents = sg_nents_for_len(req->dst, req->cryptlen); 1264 if (unlikely(dst_nents < 0)) { 1265 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", 1266 req->cryptlen); 1267 return ERR_PTR(dst_nents); 1268 } 1269 1270 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, 1271 DMA_TO_DEVICE); 1272 if (unlikely(!mapped_src_nents)) { 1273 dev_err(qidev, "unable to map source\n"); 1274 return ERR_PTR(-ENOMEM); 1275 } 1276 1277 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, 1278 DMA_FROM_DEVICE); 1279 if (unlikely(!mapped_dst_nents)) { 1280 dev_err(qidev, "unable to map destination\n"); 1281 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); 1282 return ERR_PTR(-ENOMEM); 1283 } 1284 } else { 1285 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, 1286 DMA_BIDIRECTIONAL); 1287 if (unlikely(!mapped_src_nents)) { 1288 dev_err(qidev, "unable to map source\n"); 1289 return ERR_PTR(-ENOMEM); 1290 } 1291 } 1292 1293 qm_sg_ents = 1 + mapped_src_nents; 1294 dst_sg_idx = qm_sg_ents; 1295 1296 /* 1297 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1298 * the end of the table by allocating more S/G entries. Logic: 1299 * if (src != dst && output S/G) 1300 * pad output S/G, if needed 1301 * else if (src == dst && S/G) 1302 * overlapping S/Gs; pad one of them 1303 * else if (input S/G) ... 1304 * pad input S/G, if needed 1305 */ 1306 if (mapped_dst_nents > 1) 1307 qm_sg_ents += pad_sg_nents(mapped_dst_nents); 1308 else if ((req->src == req->dst) && (mapped_src_nents > 1)) 1309 qm_sg_ents = max(pad_sg_nents(qm_sg_ents), 1310 1 + pad_sg_nents(mapped_src_nents)); 1311 else 1312 qm_sg_ents = pad_sg_nents(qm_sg_ents); 1313 1314 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); 1315 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + 1316 ivsize > CAAM_QI_MEMCACHE_SIZE)) { 1317 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1318 qm_sg_ents, ivsize); 1319 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1320 0, 0, 0); 1321 return ERR_PTR(-ENOMEM); 1322 } 1323 1324 /* allocate space for base edesc, link tables and IV */ 1325 edesc = qi_cache_alloc(GFP_DMA | flags); 1326 if (unlikely(!edesc)) { 1327 dev_err(qidev, "could not allocate extended descriptor\n"); 1328 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1329 0, 0, 0); 1330 return ERR_PTR(-ENOMEM); 1331 } 1332 1333 /* Make sure IV is located in a DMAable area */ 1334 sg_table = &edesc->sgt[0]; 1335 iv = (u8 *)(sg_table + qm_sg_ents); 1336 memcpy(iv, req->iv, ivsize); 1337 1338 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); 1339 if (dma_mapping_error(qidev, iv_dma)) { 1340 dev_err(qidev, "unable to map IV\n"); 1341 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1342 0, 0, 0); 1343 qi_cache_free(edesc); 1344 return ERR_PTR(-ENOMEM); 1345 } 1346 1347 edesc->src_nents = src_nents; 1348 edesc->dst_nents = dst_nents; 1349 edesc->iv_dma = iv_dma; 1350 edesc->qm_sg_bytes = qm_sg_bytes; 1351 edesc->drv_req.app_ctx = req; 1352 edesc->drv_req.cbk = skcipher_done; 1353 edesc->drv_req.drv_ctx = drv_ctx; 1354 1355 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); 1356 sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0); 1357 1358 if (mapped_dst_nents > 1) 1359 sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + 1360 dst_sg_idx, 0); 1361 1362 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, 1363 DMA_TO_DEVICE); 1364 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { 1365 dev_err(qidev, "unable to map S/G table\n"); 1366 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1367 iv_dma, ivsize, 0, 0); 1368 qi_cache_free(edesc); 1369 return ERR_PTR(-ENOMEM); 1370 } 1371 1372 fd_sgt = &edesc->drv_req.fd_sgt[0]; 1373 1374 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, 1375 ivsize + req->cryptlen, 0); 1376 1377 if (req->src == req->dst) { 1378 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + 1379 sizeof(*sg_table), req->cryptlen, 0); 1380 } else if (mapped_dst_nents > 1) { 1381 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * 1382 sizeof(*sg_table), req->cryptlen, 0); 1383 } else { 1384 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), 1385 req->cryptlen, 0); 1386 } 1387 1388 return edesc; 1389 } 1390 1391 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) 1392 { 1393 struct skcipher_edesc *edesc; 1394 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1395 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1396 int ivsize = crypto_skcipher_ivsize(skcipher); 1397 int ret; 1398 1399 if (unlikely(caam_congested)) 1400 return -EAGAIN; 1401 1402 /* allocate extended descriptor */ 1403 edesc = skcipher_edesc_alloc(req, encrypt); 1404 if (IS_ERR(edesc)) 1405 return PTR_ERR(edesc); 1406 1407 /* 1408 * The crypto API expects us to set the IV (req->iv) to the last 1409 * ciphertext block. 1410 */ 1411 if (!encrypt) 1412 scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - 1413 ivsize, ivsize, 0); 1414 1415 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); 1416 if (!ret) { 1417 ret = -EINPROGRESS; 1418 } else { 1419 skcipher_unmap(ctx->qidev, edesc, req); 1420 qi_cache_free(edesc); 1421 } 1422 1423 return ret; 1424 } 1425 1426 static int skcipher_encrypt(struct skcipher_request *req) 1427 { 1428 return skcipher_crypt(req, true); 1429 } 1430 1431 static int skcipher_decrypt(struct skcipher_request *req) 1432 { 1433 return skcipher_crypt(req, false); 1434 } 1435 1436 static struct caam_skcipher_alg driver_algs[] = { 1437 { 1438 .skcipher = { 1439 .base = { 1440 .cra_name = "cbc(aes)", 1441 .cra_driver_name = "cbc-aes-caam-qi", 1442 .cra_blocksize = AES_BLOCK_SIZE, 1443 }, 1444 .setkey = skcipher_setkey, 1445 .encrypt = skcipher_encrypt, 1446 .decrypt = skcipher_decrypt, 1447 .min_keysize = AES_MIN_KEY_SIZE, 1448 .max_keysize = AES_MAX_KEY_SIZE, 1449 .ivsize = AES_BLOCK_SIZE, 1450 }, 1451 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1452 }, 1453 { 1454 .skcipher = { 1455 .base = { 1456 .cra_name = "cbc(des3_ede)", 1457 .cra_driver_name = "cbc-3des-caam-qi", 1458 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1459 }, 1460 .setkey = des3_skcipher_setkey, 1461 .encrypt = skcipher_encrypt, 1462 .decrypt = skcipher_decrypt, 1463 .min_keysize = DES3_EDE_KEY_SIZE, 1464 .max_keysize = DES3_EDE_KEY_SIZE, 1465 .ivsize = DES3_EDE_BLOCK_SIZE, 1466 }, 1467 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1468 }, 1469 { 1470 .skcipher = { 1471 .base = { 1472 .cra_name = "cbc(des)", 1473 .cra_driver_name = "cbc-des-caam-qi", 1474 .cra_blocksize = DES_BLOCK_SIZE, 1475 }, 1476 .setkey = skcipher_setkey, 1477 .encrypt = skcipher_encrypt, 1478 .decrypt = skcipher_decrypt, 1479 .min_keysize = DES_KEY_SIZE, 1480 .max_keysize = DES_KEY_SIZE, 1481 .ivsize = DES_BLOCK_SIZE, 1482 }, 1483 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 1484 }, 1485 { 1486 .skcipher = { 1487 .base = { 1488 .cra_name = "ctr(aes)", 1489 .cra_driver_name = "ctr-aes-caam-qi", 1490 .cra_blocksize = 1, 1491 }, 1492 .setkey = skcipher_setkey, 1493 .encrypt = skcipher_encrypt, 1494 .decrypt = skcipher_decrypt, 1495 .min_keysize = AES_MIN_KEY_SIZE, 1496 .max_keysize = AES_MAX_KEY_SIZE, 1497 .ivsize = AES_BLOCK_SIZE, 1498 .chunksize = AES_BLOCK_SIZE, 1499 }, 1500 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | 1501 OP_ALG_AAI_CTR_MOD128, 1502 }, 1503 { 1504 .skcipher = { 1505 .base = { 1506 .cra_name = "rfc3686(ctr(aes))", 1507 .cra_driver_name = "rfc3686-ctr-aes-caam-qi", 1508 .cra_blocksize = 1, 1509 }, 1510 .setkey = skcipher_setkey, 1511 .encrypt = skcipher_encrypt, 1512 .decrypt = skcipher_decrypt, 1513 .min_keysize = AES_MIN_KEY_SIZE + 1514 CTR_RFC3686_NONCE_SIZE, 1515 .max_keysize = AES_MAX_KEY_SIZE + 1516 CTR_RFC3686_NONCE_SIZE, 1517 .ivsize = CTR_RFC3686_IV_SIZE, 1518 .chunksize = AES_BLOCK_SIZE, 1519 }, 1520 .caam = { 1521 .class1_alg_type = OP_ALG_ALGSEL_AES | 1522 OP_ALG_AAI_CTR_MOD128, 1523 .rfc3686 = true, 1524 }, 1525 }, 1526 { 1527 .skcipher = { 1528 .base = { 1529 .cra_name = "xts(aes)", 1530 .cra_driver_name = "xts-aes-caam-qi", 1531 .cra_blocksize = AES_BLOCK_SIZE, 1532 }, 1533 .setkey = xts_skcipher_setkey, 1534 .encrypt = skcipher_encrypt, 1535 .decrypt = skcipher_decrypt, 1536 .min_keysize = 2 * AES_MIN_KEY_SIZE, 1537 .max_keysize = 2 * AES_MAX_KEY_SIZE, 1538 .ivsize = AES_BLOCK_SIZE, 1539 }, 1540 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, 1541 }, 1542 }; 1543 1544 static struct caam_aead_alg driver_aeads[] = { 1545 { 1546 .aead = { 1547 .base = { 1548 .cra_name = "rfc4106(gcm(aes))", 1549 .cra_driver_name = "rfc4106-gcm-aes-caam-qi", 1550 .cra_blocksize = 1, 1551 }, 1552 .setkey = rfc4106_setkey, 1553 .setauthsize = rfc4106_setauthsize, 1554 .encrypt = ipsec_gcm_encrypt, 1555 .decrypt = ipsec_gcm_decrypt, 1556 .ivsize = 8, 1557 .maxauthsize = AES_BLOCK_SIZE, 1558 }, 1559 .caam = { 1560 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1561 .nodkp = true, 1562 }, 1563 }, 1564 { 1565 .aead = { 1566 .base = { 1567 .cra_name = "rfc4543(gcm(aes))", 1568 .cra_driver_name = "rfc4543-gcm-aes-caam-qi", 1569 .cra_blocksize = 1, 1570 }, 1571 .setkey = rfc4543_setkey, 1572 .setauthsize = rfc4543_setauthsize, 1573 .encrypt = ipsec_gcm_encrypt, 1574 .decrypt = ipsec_gcm_decrypt, 1575 .ivsize = 8, 1576 .maxauthsize = AES_BLOCK_SIZE, 1577 }, 1578 .caam = { 1579 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1580 .nodkp = true, 1581 }, 1582 }, 1583 /* Galois Counter Mode */ 1584 { 1585 .aead = { 1586 .base = { 1587 .cra_name = "gcm(aes)", 1588 .cra_driver_name = "gcm-aes-caam-qi", 1589 .cra_blocksize = 1, 1590 }, 1591 .setkey = gcm_setkey, 1592 .setauthsize = gcm_setauthsize, 1593 .encrypt = aead_encrypt, 1594 .decrypt = aead_decrypt, 1595 .ivsize = 12, 1596 .maxauthsize = AES_BLOCK_SIZE, 1597 }, 1598 .caam = { 1599 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1600 .nodkp = true, 1601 } 1602 }, 1603 /* single-pass ipsec_esp descriptor */ 1604 { 1605 .aead = { 1606 .base = { 1607 .cra_name = "authenc(hmac(md5),cbc(aes))", 1608 .cra_driver_name = "authenc-hmac-md5-" 1609 "cbc-aes-caam-qi", 1610 .cra_blocksize = AES_BLOCK_SIZE, 1611 }, 1612 .setkey = aead_setkey, 1613 .setauthsize = aead_setauthsize, 1614 .encrypt = aead_encrypt, 1615 .decrypt = aead_decrypt, 1616 .ivsize = AES_BLOCK_SIZE, 1617 .maxauthsize = MD5_DIGEST_SIZE, 1618 }, 1619 .caam = { 1620 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1621 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1622 OP_ALG_AAI_HMAC_PRECOMP, 1623 } 1624 }, 1625 { 1626 .aead = { 1627 .base = { 1628 .cra_name = "echainiv(authenc(hmac(md5)," 1629 "cbc(aes)))", 1630 .cra_driver_name = "echainiv-authenc-hmac-md5-" 1631 "cbc-aes-caam-qi", 1632 .cra_blocksize = AES_BLOCK_SIZE, 1633 }, 1634 .setkey = aead_setkey, 1635 .setauthsize = aead_setauthsize, 1636 .encrypt = aead_encrypt, 1637 .decrypt = aead_decrypt, 1638 .ivsize = AES_BLOCK_SIZE, 1639 .maxauthsize = MD5_DIGEST_SIZE, 1640 }, 1641 .caam = { 1642 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1643 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1644 OP_ALG_AAI_HMAC_PRECOMP, 1645 .geniv = true, 1646 } 1647 }, 1648 { 1649 .aead = { 1650 .base = { 1651 .cra_name = "authenc(hmac(sha1),cbc(aes))", 1652 .cra_driver_name = "authenc-hmac-sha1-" 1653 "cbc-aes-caam-qi", 1654 .cra_blocksize = AES_BLOCK_SIZE, 1655 }, 1656 .setkey = aead_setkey, 1657 .setauthsize = aead_setauthsize, 1658 .encrypt = aead_encrypt, 1659 .decrypt = aead_decrypt, 1660 .ivsize = AES_BLOCK_SIZE, 1661 .maxauthsize = SHA1_DIGEST_SIZE, 1662 }, 1663 .caam = { 1664 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1665 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1666 OP_ALG_AAI_HMAC_PRECOMP, 1667 } 1668 }, 1669 { 1670 .aead = { 1671 .base = { 1672 .cra_name = "echainiv(authenc(hmac(sha1)," 1673 "cbc(aes)))", 1674 .cra_driver_name = "echainiv-authenc-" 1675 "hmac-sha1-cbc-aes-caam-qi", 1676 .cra_blocksize = AES_BLOCK_SIZE, 1677 }, 1678 .setkey = aead_setkey, 1679 .setauthsize = aead_setauthsize, 1680 .encrypt = aead_encrypt, 1681 .decrypt = aead_decrypt, 1682 .ivsize = AES_BLOCK_SIZE, 1683 .maxauthsize = SHA1_DIGEST_SIZE, 1684 }, 1685 .caam = { 1686 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1687 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1688 OP_ALG_AAI_HMAC_PRECOMP, 1689 .geniv = true, 1690 }, 1691 }, 1692 { 1693 .aead = { 1694 .base = { 1695 .cra_name = "authenc(hmac(sha224),cbc(aes))", 1696 .cra_driver_name = "authenc-hmac-sha224-" 1697 "cbc-aes-caam-qi", 1698 .cra_blocksize = AES_BLOCK_SIZE, 1699 }, 1700 .setkey = aead_setkey, 1701 .setauthsize = aead_setauthsize, 1702 .encrypt = aead_encrypt, 1703 .decrypt = aead_decrypt, 1704 .ivsize = AES_BLOCK_SIZE, 1705 .maxauthsize = SHA224_DIGEST_SIZE, 1706 }, 1707 .caam = { 1708 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1709 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1710 OP_ALG_AAI_HMAC_PRECOMP, 1711 } 1712 }, 1713 { 1714 .aead = { 1715 .base = { 1716 .cra_name = "echainiv(authenc(hmac(sha224)," 1717 "cbc(aes)))", 1718 .cra_driver_name = "echainiv-authenc-" 1719 "hmac-sha224-cbc-aes-caam-qi", 1720 .cra_blocksize = AES_BLOCK_SIZE, 1721 }, 1722 .setkey = aead_setkey, 1723 .setauthsize = aead_setauthsize, 1724 .encrypt = aead_encrypt, 1725 .decrypt = aead_decrypt, 1726 .ivsize = AES_BLOCK_SIZE, 1727 .maxauthsize = SHA224_DIGEST_SIZE, 1728 }, 1729 .caam = { 1730 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1731 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1732 OP_ALG_AAI_HMAC_PRECOMP, 1733 .geniv = true, 1734 } 1735 }, 1736 { 1737 .aead = { 1738 .base = { 1739 .cra_name = "authenc(hmac(sha256),cbc(aes))", 1740 .cra_driver_name = "authenc-hmac-sha256-" 1741 "cbc-aes-caam-qi", 1742 .cra_blocksize = AES_BLOCK_SIZE, 1743 }, 1744 .setkey = aead_setkey, 1745 .setauthsize = aead_setauthsize, 1746 .encrypt = aead_encrypt, 1747 .decrypt = aead_decrypt, 1748 .ivsize = AES_BLOCK_SIZE, 1749 .maxauthsize = SHA256_DIGEST_SIZE, 1750 }, 1751 .caam = { 1752 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1753 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 1754 OP_ALG_AAI_HMAC_PRECOMP, 1755 } 1756 }, 1757 { 1758 .aead = { 1759 .base = { 1760 .cra_name = "echainiv(authenc(hmac(sha256)," 1761 "cbc(aes)))", 1762 .cra_driver_name = "echainiv-authenc-" 1763 "hmac-sha256-cbc-aes-" 1764 "caam-qi", 1765 .cra_blocksize = AES_BLOCK_SIZE, 1766 }, 1767 .setkey = aead_setkey, 1768 .setauthsize = aead_setauthsize, 1769 .encrypt = aead_encrypt, 1770 .decrypt = aead_decrypt, 1771 .ivsize = AES_BLOCK_SIZE, 1772 .maxauthsize = SHA256_DIGEST_SIZE, 1773 }, 1774 .caam = { 1775 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1776 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 1777 OP_ALG_AAI_HMAC_PRECOMP, 1778 .geniv = true, 1779 } 1780 }, 1781 { 1782 .aead = { 1783 .base = { 1784 .cra_name = "authenc(hmac(sha384),cbc(aes))", 1785 .cra_driver_name = "authenc-hmac-sha384-" 1786 "cbc-aes-caam-qi", 1787 .cra_blocksize = AES_BLOCK_SIZE, 1788 }, 1789 .setkey = aead_setkey, 1790 .setauthsize = aead_setauthsize, 1791 .encrypt = aead_encrypt, 1792 .decrypt = aead_decrypt, 1793 .ivsize = AES_BLOCK_SIZE, 1794 .maxauthsize = SHA384_DIGEST_SIZE, 1795 }, 1796 .caam = { 1797 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1798 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 1799 OP_ALG_AAI_HMAC_PRECOMP, 1800 } 1801 }, 1802 { 1803 .aead = { 1804 .base = { 1805 .cra_name = "echainiv(authenc(hmac(sha384)," 1806 "cbc(aes)))", 1807 .cra_driver_name = "echainiv-authenc-" 1808 "hmac-sha384-cbc-aes-" 1809 "caam-qi", 1810 .cra_blocksize = AES_BLOCK_SIZE, 1811 }, 1812 .setkey = aead_setkey, 1813 .setauthsize = aead_setauthsize, 1814 .encrypt = aead_encrypt, 1815 .decrypt = aead_decrypt, 1816 .ivsize = AES_BLOCK_SIZE, 1817 .maxauthsize = SHA384_DIGEST_SIZE, 1818 }, 1819 .caam = { 1820 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1821 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 1822 OP_ALG_AAI_HMAC_PRECOMP, 1823 .geniv = true, 1824 } 1825 }, 1826 { 1827 .aead = { 1828 .base = { 1829 .cra_name = "authenc(hmac(sha512),cbc(aes))", 1830 .cra_driver_name = "authenc-hmac-sha512-" 1831 "cbc-aes-caam-qi", 1832 .cra_blocksize = AES_BLOCK_SIZE, 1833 }, 1834 .setkey = aead_setkey, 1835 .setauthsize = aead_setauthsize, 1836 .encrypt = aead_encrypt, 1837 .decrypt = aead_decrypt, 1838 .ivsize = AES_BLOCK_SIZE, 1839 .maxauthsize = SHA512_DIGEST_SIZE, 1840 }, 1841 .caam = { 1842 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1843 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 1844 OP_ALG_AAI_HMAC_PRECOMP, 1845 } 1846 }, 1847 { 1848 .aead = { 1849 .base = { 1850 .cra_name = "echainiv(authenc(hmac(sha512)," 1851 "cbc(aes)))", 1852 .cra_driver_name = "echainiv-authenc-" 1853 "hmac-sha512-cbc-aes-" 1854 "caam-qi", 1855 .cra_blocksize = AES_BLOCK_SIZE, 1856 }, 1857 .setkey = aead_setkey, 1858 .setauthsize = aead_setauthsize, 1859 .encrypt = aead_encrypt, 1860 .decrypt = aead_decrypt, 1861 .ivsize = AES_BLOCK_SIZE, 1862 .maxauthsize = SHA512_DIGEST_SIZE, 1863 }, 1864 .caam = { 1865 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1866 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 1867 OP_ALG_AAI_HMAC_PRECOMP, 1868 .geniv = true, 1869 } 1870 }, 1871 { 1872 .aead = { 1873 .base = { 1874 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 1875 .cra_driver_name = "authenc-hmac-md5-" 1876 "cbc-des3_ede-caam-qi", 1877 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1878 }, 1879 .setkey = des3_aead_setkey, 1880 .setauthsize = aead_setauthsize, 1881 .encrypt = aead_encrypt, 1882 .decrypt = aead_decrypt, 1883 .ivsize = DES3_EDE_BLOCK_SIZE, 1884 .maxauthsize = MD5_DIGEST_SIZE, 1885 }, 1886 .caam = { 1887 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1888 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1889 OP_ALG_AAI_HMAC_PRECOMP, 1890 } 1891 }, 1892 { 1893 .aead = { 1894 .base = { 1895 .cra_name = "echainiv(authenc(hmac(md5)," 1896 "cbc(des3_ede)))", 1897 .cra_driver_name = "echainiv-authenc-hmac-md5-" 1898 "cbc-des3_ede-caam-qi", 1899 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1900 }, 1901 .setkey = des3_aead_setkey, 1902 .setauthsize = aead_setauthsize, 1903 .encrypt = aead_encrypt, 1904 .decrypt = aead_decrypt, 1905 .ivsize = DES3_EDE_BLOCK_SIZE, 1906 .maxauthsize = MD5_DIGEST_SIZE, 1907 }, 1908 .caam = { 1909 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1910 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1911 OP_ALG_AAI_HMAC_PRECOMP, 1912 .geniv = true, 1913 } 1914 }, 1915 { 1916 .aead = { 1917 .base = { 1918 .cra_name = "authenc(hmac(sha1)," 1919 "cbc(des3_ede))", 1920 .cra_driver_name = "authenc-hmac-sha1-" 1921 "cbc-des3_ede-caam-qi", 1922 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1923 }, 1924 .setkey = des3_aead_setkey, 1925 .setauthsize = aead_setauthsize, 1926 .encrypt = aead_encrypt, 1927 .decrypt = aead_decrypt, 1928 .ivsize = DES3_EDE_BLOCK_SIZE, 1929 .maxauthsize = SHA1_DIGEST_SIZE, 1930 }, 1931 .caam = { 1932 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1933 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1934 OP_ALG_AAI_HMAC_PRECOMP, 1935 }, 1936 }, 1937 { 1938 .aead = { 1939 .base = { 1940 .cra_name = "echainiv(authenc(hmac(sha1)," 1941 "cbc(des3_ede)))", 1942 .cra_driver_name = "echainiv-authenc-" 1943 "hmac-sha1-" 1944 "cbc-des3_ede-caam-qi", 1945 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1946 }, 1947 .setkey = des3_aead_setkey, 1948 .setauthsize = aead_setauthsize, 1949 .encrypt = aead_encrypt, 1950 .decrypt = aead_decrypt, 1951 .ivsize = DES3_EDE_BLOCK_SIZE, 1952 .maxauthsize = SHA1_DIGEST_SIZE, 1953 }, 1954 .caam = { 1955 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1956 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1957 OP_ALG_AAI_HMAC_PRECOMP, 1958 .geniv = true, 1959 } 1960 }, 1961 { 1962 .aead = { 1963 .base = { 1964 .cra_name = "authenc(hmac(sha224)," 1965 "cbc(des3_ede))", 1966 .cra_driver_name = "authenc-hmac-sha224-" 1967 "cbc-des3_ede-caam-qi", 1968 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1969 }, 1970 .setkey = des3_aead_setkey, 1971 .setauthsize = aead_setauthsize, 1972 .encrypt = aead_encrypt, 1973 .decrypt = aead_decrypt, 1974 .ivsize = DES3_EDE_BLOCK_SIZE, 1975 .maxauthsize = SHA224_DIGEST_SIZE, 1976 }, 1977 .caam = { 1978 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1979 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1980 OP_ALG_AAI_HMAC_PRECOMP, 1981 }, 1982 }, 1983 { 1984 .aead = { 1985 .base = { 1986 .cra_name = "echainiv(authenc(hmac(sha224)," 1987 "cbc(des3_ede)))", 1988 .cra_driver_name = "echainiv-authenc-" 1989 "hmac-sha224-" 1990 "cbc-des3_ede-caam-qi", 1991 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1992 }, 1993 .setkey = des3_aead_setkey, 1994 .setauthsize = aead_setauthsize, 1995 .encrypt = aead_encrypt, 1996 .decrypt = aead_decrypt, 1997 .ivsize = DES3_EDE_BLOCK_SIZE, 1998 .maxauthsize = SHA224_DIGEST_SIZE, 1999 }, 2000 .caam = { 2001 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2002 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2003 OP_ALG_AAI_HMAC_PRECOMP, 2004 .geniv = true, 2005 } 2006 }, 2007 { 2008 .aead = { 2009 .base = { 2010 .cra_name = "authenc(hmac(sha256)," 2011 "cbc(des3_ede))", 2012 .cra_driver_name = "authenc-hmac-sha256-" 2013 "cbc-des3_ede-caam-qi", 2014 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2015 }, 2016 .setkey = des3_aead_setkey, 2017 .setauthsize = aead_setauthsize, 2018 .encrypt = aead_encrypt, 2019 .decrypt = aead_decrypt, 2020 .ivsize = DES3_EDE_BLOCK_SIZE, 2021 .maxauthsize = SHA256_DIGEST_SIZE, 2022 }, 2023 .caam = { 2024 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2025 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2026 OP_ALG_AAI_HMAC_PRECOMP, 2027 }, 2028 }, 2029 { 2030 .aead = { 2031 .base = { 2032 .cra_name = "echainiv(authenc(hmac(sha256)," 2033 "cbc(des3_ede)))", 2034 .cra_driver_name = "echainiv-authenc-" 2035 "hmac-sha256-" 2036 "cbc-des3_ede-caam-qi", 2037 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2038 }, 2039 .setkey = des3_aead_setkey, 2040 .setauthsize = aead_setauthsize, 2041 .encrypt = aead_encrypt, 2042 .decrypt = aead_decrypt, 2043 .ivsize = DES3_EDE_BLOCK_SIZE, 2044 .maxauthsize = SHA256_DIGEST_SIZE, 2045 }, 2046 .caam = { 2047 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2048 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2049 OP_ALG_AAI_HMAC_PRECOMP, 2050 .geniv = true, 2051 } 2052 }, 2053 { 2054 .aead = { 2055 .base = { 2056 .cra_name = "authenc(hmac(sha384)," 2057 "cbc(des3_ede))", 2058 .cra_driver_name = "authenc-hmac-sha384-" 2059 "cbc-des3_ede-caam-qi", 2060 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2061 }, 2062 .setkey = des3_aead_setkey, 2063 .setauthsize = aead_setauthsize, 2064 .encrypt = aead_encrypt, 2065 .decrypt = aead_decrypt, 2066 .ivsize = DES3_EDE_BLOCK_SIZE, 2067 .maxauthsize = SHA384_DIGEST_SIZE, 2068 }, 2069 .caam = { 2070 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2071 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2072 OP_ALG_AAI_HMAC_PRECOMP, 2073 }, 2074 }, 2075 { 2076 .aead = { 2077 .base = { 2078 .cra_name = "echainiv(authenc(hmac(sha384)," 2079 "cbc(des3_ede)))", 2080 .cra_driver_name = "echainiv-authenc-" 2081 "hmac-sha384-" 2082 "cbc-des3_ede-caam-qi", 2083 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2084 }, 2085 .setkey = des3_aead_setkey, 2086 .setauthsize = aead_setauthsize, 2087 .encrypt = aead_encrypt, 2088 .decrypt = aead_decrypt, 2089 .ivsize = DES3_EDE_BLOCK_SIZE, 2090 .maxauthsize = SHA384_DIGEST_SIZE, 2091 }, 2092 .caam = { 2093 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2094 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2095 OP_ALG_AAI_HMAC_PRECOMP, 2096 .geniv = true, 2097 } 2098 }, 2099 { 2100 .aead = { 2101 .base = { 2102 .cra_name = "authenc(hmac(sha512)," 2103 "cbc(des3_ede))", 2104 .cra_driver_name = "authenc-hmac-sha512-" 2105 "cbc-des3_ede-caam-qi", 2106 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2107 }, 2108 .setkey = des3_aead_setkey, 2109 .setauthsize = aead_setauthsize, 2110 .encrypt = aead_encrypt, 2111 .decrypt = aead_decrypt, 2112 .ivsize = DES3_EDE_BLOCK_SIZE, 2113 .maxauthsize = SHA512_DIGEST_SIZE, 2114 }, 2115 .caam = { 2116 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2117 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2118 OP_ALG_AAI_HMAC_PRECOMP, 2119 }, 2120 }, 2121 { 2122 .aead = { 2123 .base = { 2124 .cra_name = "echainiv(authenc(hmac(sha512)," 2125 "cbc(des3_ede)))", 2126 .cra_driver_name = "echainiv-authenc-" 2127 "hmac-sha512-" 2128 "cbc-des3_ede-caam-qi", 2129 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2130 }, 2131 .setkey = des3_aead_setkey, 2132 .setauthsize = aead_setauthsize, 2133 .encrypt = aead_encrypt, 2134 .decrypt = aead_decrypt, 2135 .ivsize = DES3_EDE_BLOCK_SIZE, 2136 .maxauthsize = SHA512_DIGEST_SIZE, 2137 }, 2138 .caam = { 2139 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2140 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2141 OP_ALG_AAI_HMAC_PRECOMP, 2142 .geniv = true, 2143 } 2144 }, 2145 { 2146 .aead = { 2147 .base = { 2148 .cra_name = "authenc(hmac(md5),cbc(des))", 2149 .cra_driver_name = "authenc-hmac-md5-" 2150 "cbc-des-caam-qi", 2151 .cra_blocksize = DES_BLOCK_SIZE, 2152 }, 2153 .setkey = aead_setkey, 2154 .setauthsize = aead_setauthsize, 2155 .encrypt = aead_encrypt, 2156 .decrypt = aead_decrypt, 2157 .ivsize = DES_BLOCK_SIZE, 2158 .maxauthsize = MD5_DIGEST_SIZE, 2159 }, 2160 .caam = { 2161 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2162 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2163 OP_ALG_AAI_HMAC_PRECOMP, 2164 }, 2165 }, 2166 { 2167 .aead = { 2168 .base = { 2169 .cra_name = "echainiv(authenc(hmac(md5)," 2170 "cbc(des)))", 2171 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2172 "cbc-des-caam-qi", 2173 .cra_blocksize = DES_BLOCK_SIZE, 2174 }, 2175 .setkey = aead_setkey, 2176 .setauthsize = aead_setauthsize, 2177 .encrypt = aead_encrypt, 2178 .decrypt = aead_decrypt, 2179 .ivsize = DES_BLOCK_SIZE, 2180 .maxauthsize = MD5_DIGEST_SIZE, 2181 }, 2182 .caam = { 2183 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2184 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2185 OP_ALG_AAI_HMAC_PRECOMP, 2186 .geniv = true, 2187 } 2188 }, 2189 { 2190 .aead = { 2191 .base = { 2192 .cra_name = "authenc(hmac(sha1),cbc(des))", 2193 .cra_driver_name = "authenc-hmac-sha1-" 2194 "cbc-des-caam-qi", 2195 .cra_blocksize = DES_BLOCK_SIZE, 2196 }, 2197 .setkey = aead_setkey, 2198 .setauthsize = aead_setauthsize, 2199 .encrypt = aead_encrypt, 2200 .decrypt = aead_decrypt, 2201 .ivsize = DES_BLOCK_SIZE, 2202 .maxauthsize = SHA1_DIGEST_SIZE, 2203 }, 2204 .caam = { 2205 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2206 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2207 OP_ALG_AAI_HMAC_PRECOMP, 2208 }, 2209 }, 2210 { 2211 .aead = { 2212 .base = { 2213 .cra_name = "echainiv(authenc(hmac(sha1)," 2214 "cbc(des)))", 2215 .cra_driver_name = "echainiv-authenc-" 2216 "hmac-sha1-cbc-des-caam-qi", 2217 .cra_blocksize = DES_BLOCK_SIZE, 2218 }, 2219 .setkey = aead_setkey, 2220 .setauthsize = aead_setauthsize, 2221 .encrypt = aead_encrypt, 2222 .decrypt = aead_decrypt, 2223 .ivsize = DES_BLOCK_SIZE, 2224 .maxauthsize = SHA1_DIGEST_SIZE, 2225 }, 2226 .caam = { 2227 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2228 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2229 OP_ALG_AAI_HMAC_PRECOMP, 2230 .geniv = true, 2231 } 2232 }, 2233 { 2234 .aead = { 2235 .base = { 2236 .cra_name = "authenc(hmac(sha224),cbc(des))", 2237 .cra_driver_name = "authenc-hmac-sha224-" 2238 "cbc-des-caam-qi", 2239 .cra_blocksize = DES_BLOCK_SIZE, 2240 }, 2241 .setkey = aead_setkey, 2242 .setauthsize = aead_setauthsize, 2243 .encrypt = aead_encrypt, 2244 .decrypt = aead_decrypt, 2245 .ivsize = DES_BLOCK_SIZE, 2246 .maxauthsize = SHA224_DIGEST_SIZE, 2247 }, 2248 .caam = { 2249 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2250 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2251 OP_ALG_AAI_HMAC_PRECOMP, 2252 }, 2253 }, 2254 { 2255 .aead = { 2256 .base = { 2257 .cra_name = "echainiv(authenc(hmac(sha224)," 2258 "cbc(des)))", 2259 .cra_driver_name = "echainiv-authenc-" 2260 "hmac-sha224-cbc-des-" 2261 "caam-qi", 2262 .cra_blocksize = DES_BLOCK_SIZE, 2263 }, 2264 .setkey = aead_setkey, 2265 .setauthsize = aead_setauthsize, 2266 .encrypt = aead_encrypt, 2267 .decrypt = aead_decrypt, 2268 .ivsize = DES_BLOCK_SIZE, 2269 .maxauthsize = SHA224_DIGEST_SIZE, 2270 }, 2271 .caam = { 2272 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2273 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2274 OP_ALG_AAI_HMAC_PRECOMP, 2275 .geniv = true, 2276 } 2277 }, 2278 { 2279 .aead = { 2280 .base = { 2281 .cra_name = "authenc(hmac(sha256),cbc(des))", 2282 .cra_driver_name = "authenc-hmac-sha256-" 2283 "cbc-des-caam-qi", 2284 .cra_blocksize = DES_BLOCK_SIZE, 2285 }, 2286 .setkey = aead_setkey, 2287 .setauthsize = aead_setauthsize, 2288 .encrypt = aead_encrypt, 2289 .decrypt = aead_decrypt, 2290 .ivsize = DES_BLOCK_SIZE, 2291 .maxauthsize = SHA256_DIGEST_SIZE, 2292 }, 2293 .caam = { 2294 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2295 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2296 OP_ALG_AAI_HMAC_PRECOMP, 2297 }, 2298 }, 2299 { 2300 .aead = { 2301 .base = { 2302 .cra_name = "echainiv(authenc(hmac(sha256)," 2303 "cbc(des)))", 2304 .cra_driver_name = "echainiv-authenc-" 2305 "hmac-sha256-cbc-des-" 2306 "caam-qi", 2307 .cra_blocksize = DES_BLOCK_SIZE, 2308 }, 2309 .setkey = aead_setkey, 2310 .setauthsize = aead_setauthsize, 2311 .encrypt = aead_encrypt, 2312 .decrypt = aead_decrypt, 2313 .ivsize = DES_BLOCK_SIZE, 2314 .maxauthsize = SHA256_DIGEST_SIZE, 2315 }, 2316 .caam = { 2317 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2318 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2319 OP_ALG_AAI_HMAC_PRECOMP, 2320 .geniv = true, 2321 }, 2322 }, 2323 { 2324 .aead = { 2325 .base = { 2326 .cra_name = "authenc(hmac(sha384),cbc(des))", 2327 .cra_driver_name = "authenc-hmac-sha384-" 2328 "cbc-des-caam-qi", 2329 .cra_blocksize = DES_BLOCK_SIZE, 2330 }, 2331 .setkey = aead_setkey, 2332 .setauthsize = aead_setauthsize, 2333 .encrypt = aead_encrypt, 2334 .decrypt = aead_decrypt, 2335 .ivsize = DES_BLOCK_SIZE, 2336 .maxauthsize = SHA384_DIGEST_SIZE, 2337 }, 2338 .caam = { 2339 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2340 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2341 OP_ALG_AAI_HMAC_PRECOMP, 2342 }, 2343 }, 2344 { 2345 .aead = { 2346 .base = { 2347 .cra_name = "echainiv(authenc(hmac(sha384)," 2348 "cbc(des)))", 2349 .cra_driver_name = "echainiv-authenc-" 2350 "hmac-sha384-cbc-des-" 2351 "caam-qi", 2352 .cra_blocksize = DES_BLOCK_SIZE, 2353 }, 2354 .setkey = aead_setkey, 2355 .setauthsize = aead_setauthsize, 2356 .encrypt = aead_encrypt, 2357 .decrypt = aead_decrypt, 2358 .ivsize = DES_BLOCK_SIZE, 2359 .maxauthsize = SHA384_DIGEST_SIZE, 2360 }, 2361 .caam = { 2362 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2363 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2364 OP_ALG_AAI_HMAC_PRECOMP, 2365 .geniv = true, 2366 } 2367 }, 2368 { 2369 .aead = { 2370 .base = { 2371 .cra_name = "authenc(hmac(sha512),cbc(des))", 2372 .cra_driver_name = "authenc-hmac-sha512-" 2373 "cbc-des-caam-qi", 2374 .cra_blocksize = DES_BLOCK_SIZE, 2375 }, 2376 .setkey = aead_setkey, 2377 .setauthsize = aead_setauthsize, 2378 .encrypt = aead_encrypt, 2379 .decrypt = aead_decrypt, 2380 .ivsize = DES_BLOCK_SIZE, 2381 .maxauthsize = SHA512_DIGEST_SIZE, 2382 }, 2383 .caam = { 2384 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2385 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2386 OP_ALG_AAI_HMAC_PRECOMP, 2387 } 2388 }, 2389 { 2390 .aead = { 2391 .base = { 2392 .cra_name = "echainiv(authenc(hmac(sha512)," 2393 "cbc(des)))", 2394 .cra_driver_name = "echainiv-authenc-" 2395 "hmac-sha512-cbc-des-" 2396 "caam-qi", 2397 .cra_blocksize = DES_BLOCK_SIZE, 2398 }, 2399 .setkey = aead_setkey, 2400 .setauthsize = aead_setauthsize, 2401 .encrypt = aead_encrypt, 2402 .decrypt = aead_decrypt, 2403 .ivsize = DES_BLOCK_SIZE, 2404 .maxauthsize = SHA512_DIGEST_SIZE, 2405 }, 2406 .caam = { 2407 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2408 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2409 OP_ALG_AAI_HMAC_PRECOMP, 2410 .geniv = true, 2411 } 2412 }, 2413 }; 2414 2415 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 2416 bool uses_dkp) 2417 { 2418 struct caam_drv_private *priv; 2419 struct device *dev; 2420 2421 /* 2422 * distribute tfms across job rings to ensure in-order 2423 * crypto request processing per tfm 2424 */ 2425 ctx->jrdev = caam_jr_alloc(); 2426 if (IS_ERR(ctx->jrdev)) { 2427 pr_err("Job Ring Device allocation for transform failed\n"); 2428 return PTR_ERR(ctx->jrdev); 2429 } 2430 2431 dev = ctx->jrdev->parent; 2432 priv = dev_get_drvdata(dev); 2433 if (priv->era >= 6 && uses_dkp) 2434 ctx->dir = DMA_BIDIRECTIONAL; 2435 else 2436 ctx->dir = DMA_TO_DEVICE; 2437 2438 ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), 2439 ctx->dir); 2440 if (dma_mapping_error(dev, ctx->key_dma)) { 2441 dev_err(dev, "unable to map key\n"); 2442 caam_jr_free(ctx->jrdev); 2443 return -ENOMEM; 2444 } 2445 2446 /* copy descriptor header template value */ 2447 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; 2448 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; 2449 2450 ctx->qidev = dev; 2451 2452 spin_lock_init(&ctx->lock); 2453 ctx->drv_ctx[ENCRYPT] = NULL; 2454 ctx->drv_ctx[DECRYPT] = NULL; 2455 2456 return 0; 2457 } 2458 2459 static int caam_cra_init(struct crypto_skcipher *tfm) 2460 { 2461 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 2462 struct caam_skcipher_alg *caam_alg = 2463 container_of(alg, typeof(*caam_alg), skcipher); 2464 2465 return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam, 2466 false); 2467 } 2468 2469 static int caam_aead_init(struct crypto_aead *tfm) 2470 { 2471 struct aead_alg *alg = crypto_aead_alg(tfm); 2472 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), 2473 aead); 2474 struct caam_ctx *ctx = crypto_aead_ctx(tfm); 2475 2476 return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); 2477 } 2478 2479 static void caam_exit_common(struct caam_ctx *ctx) 2480 { 2481 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); 2482 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); 2483 2484 dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), 2485 ctx->dir); 2486 2487 caam_jr_free(ctx->jrdev); 2488 } 2489 2490 static void caam_cra_exit(struct crypto_skcipher *tfm) 2491 { 2492 caam_exit_common(crypto_skcipher_ctx(tfm)); 2493 } 2494 2495 static void caam_aead_exit(struct crypto_aead *tfm) 2496 { 2497 caam_exit_common(crypto_aead_ctx(tfm)); 2498 } 2499 2500 void caam_qi_algapi_exit(void) 2501 { 2502 int i; 2503 2504 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 2505 struct caam_aead_alg *t_alg = driver_aeads + i; 2506 2507 if (t_alg->registered) 2508 crypto_unregister_aead(&t_alg->aead); 2509 } 2510 2511 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2512 struct caam_skcipher_alg *t_alg = driver_algs + i; 2513 2514 if (t_alg->registered) 2515 crypto_unregister_skcipher(&t_alg->skcipher); 2516 } 2517 } 2518 2519 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) 2520 { 2521 struct skcipher_alg *alg = &t_alg->skcipher; 2522 2523 alg->base.cra_module = THIS_MODULE; 2524 alg->base.cra_priority = CAAM_CRA_PRIORITY; 2525 alg->base.cra_ctxsize = sizeof(struct caam_ctx); 2526 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; 2527 2528 alg->init = caam_cra_init; 2529 alg->exit = caam_cra_exit; 2530 } 2531 2532 static void caam_aead_alg_init(struct caam_aead_alg *t_alg) 2533 { 2534 struct aead_alg *alg = &t_alg->aead; 2535 2536 alg->base.cra_module = THIS_MODULE; 2537 alg->base.cra_priority = CAAM_CRA_PRIORITY; 2538 alg->base.cra_ctxsize = sizeof(struct caam_ctx); 2539 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; 2540 2541 alg->init = caam_aead_init; 2542 alg->exit = caam_aead_exit; 2543 } 2544 2545 int caam_qi_algapi_init(struct device *ctrldev) 2546 { 2547 struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 2548 int i = 0, err = 0; 2549 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; 2550 unsigned int md_limit = SHA512_DIGEST_SIZE; 2551 bool registered = false; 2552 2553 if (caam_dpaa2) { 2554 dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); 2555 return -ENODEV; 2556 } 2557 2558 /* 2559 * Register crypto algorithms the device supports. 2560 * First, detect presence and attributes of DES, AES, and MD blocks. 2561 */ 2562 if (priv->era < 10) { 2563 u32 cha_vid, cha_inst; 2564 2565 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 2566 aes_vid = cha_vid & CHA_ID_LS_AES_MASK; 2567 md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2568 2569 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 2570 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> 2571 CHA_ID_LS_DES_SHIFT; 2572 aes_inst = cha_inst & CHA_ID_LS_AES_MASK; 2573 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2574 } else { 2575 u32 aesa, mdha; 2576 2577 aesa = rd_reg32(&priv->ctrl->vreg.aesa); 2578 mdha = rd_reg32(&priv->ctrl->vreg.mdha); 2579 2580 aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 2581 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 2582 2583 des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; 2584 aes_inst = aesa & CHA_VER_NUM_MASK; 2585 md_inst = mdha & CHA_VER_NUM_MASK; 2586 } 2587 2588 /* If MD is present, limit digest size based on LP256 */ 2589 if (md_inst && md_vid == CHA_VER_VID_MD_LP256) 2590 md_limit = SHA256_DIGEST_SIZE; 2591 2592 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2593 struct caam_skcipher_alg *t_alg = driver_algs + i; 2594 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; 2595 2596 /* Skip DES algorithms if not supported by device */ 2597 if (!des_inst && 2598 ((alg_sel == OP_ALG_ALGSEL_3DES) || 2599 (alg_sel == OP_ALG_ALGSEL_DES))) 2600 continue; 2601 2602 /* Skip AES algorithms if not supported by device */ 2603 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 2604 continue; 2605 2606 caam_skcipher_alg_init(t_alg); 2607 2608 err = crypto_register_skcipher(&t_alg->skcipher); 2609 if (err) { 2610 dev_warn(ctrldev, "%s alg registration failed\n", 2611 t_alg->skcipher.base.cra_driver_name); 2612 continue; 2613 } 2614 2615 t_alg->registered = true; 2616 registered = true; 2617 } 2618 2619 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 2620 struct caam_aead_alg *t_alg = driver_aeads + i; 2621 u32 c1_alg_sel = t_alg->caam.class1_alg_type & 2622 OP_ALG_ALGSEL_MASK; 2623 u32 c2_alg_sel = t_alg->caam.class2_alg_type & 2624 OP_ALG_ALGSEL_MASK; 2625 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; 2626 2627 /* Skip DES algorithms if not supported by device */ 2628 if (!des_inst && 2629 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || 2630 (c1_alg_sel == OP_ALG_ALGSEL_DES))) 2631 continue; 2632 2633 /* Skip AES algorithms if not supported by device */ 2634 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) 2635 continue; 2636 2637 /* 2638 * Check support for AES algorithms not available 2639 * on LP devices. 2640 */ 2641 if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) 2642 continue; 2643 2644 /* 2645 * Skip algorithms requiring message digests 2646 * if MD or MD size is not supported by device. 2647 */ 2648 if (c2_alg_sel && 2649 (!md_inst || (t_alg->aead.maxauthsize > md_limit))) 2650 continue; 2651 2652 caam_aead_alg_init(t_alg); 2653 2654 err = crypto_register_aead(&t_alg->aead); 2655 if (err) { 2656 pr_warn("%s alg registration failed\n", 2657 t_alg->aead.base.cra_driver_name); 2658 continue; 2659 } 2660 2661 t_alg->registered = true; 2662 registered = true; 2663 } 2664 2665 if (registered) 2666 dev_info(ctrldev, "algorithms registered in /proc/crypto\n"); 2667 2668 return err; 2669 } 2670