1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Freescale FSL CAAM support for crypto API over QI backend. 4 * Based on caamalg.c 5 * 6 * Copyright 2013-2016 Freescale Semiconductor, Inc. 7 * Copyright 2016-2019 NXP 8 */ 9 10 #include "compat.h" 11 #include "ctrl.h" 12 #include "regs.h" 13 #include "intern.h" 14 #include "desc_constr.h" 15 #include "error.h" 16 #include "sg_sw_qm.h" 17 #include "key_gen.h" 18 #include "qi.h" 19 #include "jr.h" 20 #include "caamalg_desc.h" 21 22 /* 23 * crypto alg 24 */ 25 #define CAAM_CRA_PRIORITY 2000 26 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ 27 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ 28 SHA512_DIGEST_SIZE * 2) 29 30 #define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \ 31 CAAM_MAX_KEY_SIZE) 32 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 33 34 struct caam_alg_entry { 35 int class1_alg_type; 36 int class2_alg_type; 37 bool rfc3686; 38 bool geniv; 39 bool nodkp; 40 }; 41 42 struct caam_aead_alg { 43 struct aead_alg aead; 44 struct caam_alg_entry caam; 45 bool registered; 46 }; 47 48 struct caam_skcipher_alg { 49 struct skcipher_alg skcipher; 50 struct caam_alg_entry caam; 51 bool registered; 52 }; 53 54 /* 55 * per-session context 56 */ 57 struct caam_ctx { 58 struct device *jrdev; 59 u32 sh_desc_enc[DESC_MAX_USED_LEN]; 60 u32 sh_desc_dec[DESC_MAX_USED_LEN]; 61 u8 key[CAAM_MAX_KEY_SIZE]; 62 dma_addr_t key_dma; 63 enum dma_data_direction dir; 64 struct alginfo adata; 65 struct alginfo cdata; 66 unsigned int authsize; 67 struct device *qidev; 68 spinlock_t lock; /* Protects multiple init of driver context */ 69 struct caam_drv_ctx *drv_ctx[NUM_OP]; 70 }; 71 72 static int aead_set_sh_desc(struct crypto_aead *aead) 73 { 74 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), 75 typeof(*alg), aead); 76 struct caam_ctx *ctx = crypto_aead_ctx(aead); 77 unsigned int ivsize = crypto_aead_ivsize(aead); 78 u32 ctx1_iv_off = 0; 79 u32 *nonce = NULL; 80 unsigned int data_len[2]; 81 u32 inl_mask; 82 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 83 OP_ALG_AAI_CTR_MOD128); 84 const bool is_rfc3686 = alg->caam.rfc3686; 85 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 86 87 if (!ctx->cdata.keylen || !ctx->authsize) 88 return 0; 89 90 /* 91 * AES-CTR needs to load IV in CONTEXT1 reg 92 * at an offset of 128bits (16bytes) 93 * CONTEXT1[255:128] = IV 94 */ 95 if (ctr_mode) 96 ctx1_iv_off = 16; 97 98 /* 99 * RFC3686 specific: 100 * CONTEXT1[255:128] = {NONCE, IV, COUNTER} 101 */ 102 if (is_rfc3686) { 103 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 104 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + 105 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); 106 } 107 108 data_len[0] = ctx->adata.keylen_pad; 109 data_len[1] = ctx->cdata.keylen; 110 111 if (alg->caam.geniv) 112 goto skip_enc; 113 114 /* aead_encrypt shared descriptor */ 115 if (desc_inline_query(DESC_QI_AEAD_ENC_LEN + 116 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 117 DESC_JOB_IO_LEN, data_len, &inl_mask, 118 ARRAY_SIZE(data_len)) < 0) 119 return -EINVAL; 120 121 if (inl_mask & 1) 122 ctx->adata.key_virt = ctx->key; 123 else 124 ctx->adata.key_dma = ctx->key_dma; 125 126 if (inl_mask & 2) 127 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 128 else 129 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 130 131 ctx->adata.key_inline = !!(inl_mask & 1); 132 ctx->cdata.key_inline = !!(inl_mask & 2); 133 134 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, 135 ivsize, ctx->authsize, is_rfc3686, nonce, 136 ctx1_iv_off, true, ctrlpriv->era); 137 138 skip_enc: 139 /* aead_decrypt shared descriptor */ 140 if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + 141 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 142 DESC_JOB_IO_LEN, data_len, &inl_mask, 143 ARRAY_SIZE(data_len)) < 0) 144 return -EINVAL; 145 146 if (inl_mask & 1) 147 ctx->adata.key_virt = ctx->key; 148 else 149 ctx->adata.key_dma = ctx->key_dma; 150 151 if (inl_mask & 2) 152 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 153 else 154 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 155 156 ctx->adata.key_inline = !!(inl_mask & 1); 157 ctx->cdata.key_inline = !!(inl_mask & 2); 158 159 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, 160 ivsize, ctx->authsize, alg->caam.geniv, 161 is_rfc3686, nonce, ctx1_iv_off, true, 162 ctrlpriv->era); 163 164 if (!alg->caam.geniv) 165 goto skip_givenc; 166 167 /* aead_givencrypt shared descriptor */ 168 if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN + 169 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 170 DESC_JOB_IO_LEN, data_len, &inl_mask, 171 ARRAY_SIZE(data_len)) < 0) 172 return -EINVAL; 173 174 if (inl_mask & 1) 175 ctx->adata.key_virt = ctx->key; 176 else 177 ctx->adata.key_dma = ctx->key_dma; 178 179 if (inl_mask & 2) 180 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 181 else 182 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 183 184 ctx->adata.key_inline = !!(inl_mask & 1); 185 ctx->cdata.key_inline = !!(inl_mask & 2); 186 187 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, 188 ivsize, ctx->authsize, is_rfc3686, nonce, 189 ctx1_iv_off, true, ctrlpriv->era); 190 191 skip_givenc: 192 return 0; 193 } 194 195 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 196 { 197 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 198 199 ctx->authsize = authsize; 200 aead_set_sh_desc(authenc); 201 202 return 0; 203 } 204 205 static int aead_setkey(struct crypto_aead *aead, const u8 *key, 206 unsigned int keylen) 207 { 208 struct caam_ctx *ctx = crypto_aead_ctx(aead); 209 struct device *jrdev = ctx->jrdev; 210 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 211 struct crypto_authenc_keys keys; 212 int ret = 0; 213 214 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 215 goto badkey; 216 217 dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", 218 keys.authkeylen + keys.enckeylen, keys.enckeylen, 219 keys.authkeylen); 220 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 221 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 222 223 /* 224 * If DKP is supported, use it in the shared descriptor to generate 225 * the split key. 226 */ 227 if (ctrlpriv->era >= 6) { 228 ctx->adata.keylen = keys.authkeylen; 229 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 230 OP_ALG_ALGSEL_MASK); 231 232 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) 233 goto badkey; 234 235 memcpy(ctx->key, keys.authkey, keys.authkeylen); 236 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, 237 keys.enckeylen); 238 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 239 ctx->adata.keylen_pad + 240 keys.enckeylen, ctx->dir); 241 goto skip_split_key; 242 } 243 244 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, 245 keys.authkeylen, CAAM_MAX_KEY_SIZE - 246 keys.enckeylen); 247 if (ret) 248 goto badkey; 249 250 /* postpend encryption key to auth split key */ 251 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 252 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 253 ctx->adata.keylen_pad + keys.enckeylen, 254 ctx->dir); 255 #ifdef DEBUG 256 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", 257 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 258 ctx->adata.keylen_pad + keys.enckeylen, 1); 259 #endif 260 261 skip_split_key: 262 ctx->cdata.keylen = keys.enckeylen; 263 264 ret = aead_set_sh_desc(aead); 265 if (ret) 266 goto badkey; 267 268 /* Now update the driver contexts with the new shared descriptor */ 269 if (ctx->drv_ctx[ENCRYPT]) { 270 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 271 ctx->sh_desc_enc); 272 if (ret) { 273 dev_err(jrdev, "driver enc context update failed\n"); 274 goto badkey; 275 } 276 } 277 278 if (ctx->drv_ctx[DECRYPT]) { 279 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 280 ctx->sh_desc_dec); 281 if (ret) { 282 dev_err(jrdev, "driver dec context update failed\n"); 283 goto badkey; 284 } 285 } 286 287 memzero_explicit(&keys, sizeof(keys)); 288 return ret; 289 badkey: 290 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 291 memzero_explicit(&keys, sizeof(keys)); 292 return -EINVAL; 293 } 294 295 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, 296 unsigned int keylen) 297 { 298 struct crypto_authenc_keys keys; 299 u32 flags; 300 int err; 301 302 err = crypto_authenc_extractkeys(&keys, key, keylen); 303 if (unlikely(err)) 304 goto badkey; 305 306 err = -EINVAL; 307 if (keys.enckeylen != DES3_EDE_KEY_SIZE) 308 goto badkey; 309 310 flags = crypto_aead_get_flags(aead); 311 err = __des3_verify_key(&flags, keys.enckey); 312 if (unlikely(err)) { 313 crypto_aead_set_flags(aead, flags); 314 goto out; 315 } 316 317 err = aead_setkey(aead, key, keylen); 318 319 out: 320 memzero_explicit(&keys, sizeof(keys)); 321 return err; 322 323 badkey: 324 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 325 goto out; 326 } 327 328 static int gcm_set_sh_desc(struct crypto_aead *aead) 329 { 330 struct caam_ctx *ctx = crypto_aead_ctx(aead); 331 unsigned int ivsize = crypto_aead_ivsize(aead); 332 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - 333 ctx->cdata.keylen; 334 335 if (!ctx->cdata.keylen || !ctx->authsize) 336 return 0; 337 338 /* 339 * Job Descriptor and Shared Descriptor 340 * must fit into the 64-word Descriptor h/w Buffer 341 */ 342 if (rem_bytes >= DESC_QI_GCM_ENC_LEN) { 343 ctx->cdata.key_inline = true; 344 ctx->cdata.key_virt = ctx->key; 345 } else { 346 ctx->cdata.key_inline = false; 347 ctx->cdata.key_dma = ctx->key_dma; 348 } 349 350 cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 351 ctx->authsize, true); 352 353 /* 354 * Job Descriptor and Shared Descriptor 355 * must fit into the 64-word Descriptor h/w Buffer 356 */ 357 if (rem_bytes >= DESC_QI_GCM_DEC_LEN) { 358 ctx->cdata.key_inline = true; 359 ctx->cdata.key_virt = ctx->key; 360 } else { 361 ctx->cdata.key_inline = false; 362 ctx->cdata.key_dma = ctx->key_dma; 363 } 364 365 cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 366 ctx->authsize, true); 367 368 return 0; 369 } 370 371 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 372 { 373 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 374 375 ctx->authsize = authsize; 376 gcm_set_sh_desc(authenc); 377 378 return 0; 379 } 380 381 static int gcm_setkey(struct crypto_aead *aead, 382 const u8 *key, unsigned int keylen) 383 { 384 struct caam_ctx *ctx = crypto_aead_ctx(aead); 385 struct device *jrdev = ctx->jrdev; 386 int ret; 387 388 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 389 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 390 391 memcpy(ctx->key, key, keylen); 392 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen, 393 ctx->dir); 394 ctx->cdata.keylen = keylen; 395 396 ret = gcm_set_sh_desc(aead); 397 if (ret) 398 return ret; 399 400 /* Now update the driver contexts with the new shared descriptor */ 401 if (ctx->drv_ctx[ENCRYPT]) { 402 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 403 ctx->sh_desc_enc); 404 if (ret) { 405 dev_err(jrdev, "driver enc context update failed\n"); 406 return ret; 407 } 408 } 409 410 if (ctx->drv_ctx[DECRYPT]) { 411 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 412 ctx->sh_desc_dec); 413 if (ret) { 414 dev_err(jrdev, "driver dec context update failed\n"); 415 return ret; 416 } 417 } 418 419 return 0; 420 } 421 422 static int rfc4106_set_sh_desc(struct crypto_aead *aead) 423 { 424 struct caam_ctx *ctx = crypto_aead_ctx(aead); 425 unsigned int ivsize = crypto_aead_ivsize(aead); 426 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - 427 ctx->cdata.keylen; 428 429 if (!ctx->cdata.keylen || !ctx->authsize) 430 return 0; 431 432 ctx->cdata.key_virt = ctx->key; 433 434 /* 435 * Job Descriptor and Shared Descriptor 436 * must fit into the 64-word Descriptor h/w Buffer 437 */ 438 if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) { 439 ctx->cdata.key_inline = true; 440 } else { 441 ctx->cdata.key_inline = false; 442 ctx->cdata.key_dma = ctx->key_dma; 443 } 444 445 cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 446 ctx->authsize, true); 447 448 /* 449 * Job Descriptor and Shared Descriptor 450 * must fit into the 64-word Descriptor h/w Buffer 451 */ 452 if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) { 453 ctx->cdata.key_inline = true; 454 } else { 455 ctx->cdata.key_inline = false; 456 ctx->cdata.key_dma = ctx->key_dma; 457 } 458 459 cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 460 ctx->authsize, true); 461 462 return 0; 463 } 464 465 static int rfc4106_setauthsize(struct crypto_aead *authenc, 466 unsigned int authsize) 467 { 468 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 469 470 ctx->authsize = authsize; 471 rfc4106_set_sh_desc(authenc); 472 473 return 0; 474 } 475 476 static int rfc4106_setkey(struct crypto_aead *aead, 477 const u8 *key, unsigned int keylen) 478 { 479 struct caam_ctx *ctx = crypto_aead_ctx(aead); 480 struct device *jrdev = ctx->jrdev; 481 int ret; 482 483 if (keylen < 4) 484 return -EINVAL; 485 486 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 487 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 488 489 memcpy(ctx->key, key, keylen); 490 /* 491 * The last four bytes of the key material are used as the salt value 492 * in the nonce. Update the AES key length. 493 */ 494 ctx->cdata.keylen = keylen - 4; 495 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 496 ctx->cdata.keylen, ctx->dir); 497 498 ret = rfc4106_set_sh_desc(aead); 499 if (ret) 500 return ret; 501 502 /* Now update the driver contexts with the new shared descriptor */ 503 if (ctx->drv_ctx[ENCRYPT]) { 504 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 505 ctx->sh_desc_enc); 506 if (ret) { 507 dev_err(jrdev, "driver enc context update failed\n"); 508 return ret; 509 } 510 } 511 512 if (ctx->drv_ctx[DECRYPT]) { 513 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 514 ctx->sh_desc_dec); 515 if (ret) { 516 dev_err(jrdev, "driver dec context update failed\n"); 517 return ret; 518 } 519 } 520 521 return 0; 522 } 523 524 static int rfc4543_set_sh_desc(struct crypto_aead *aead) 525 { 526 struct caam_ctx *ctx = crypto_aead_ctx(aead); 527 unsigned int ivsize = crypto_aead_ivsize(aead); 528 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - 529 ctx->cdata.keylen; 530 531 if (!ctx->cdata.keylen || !ctx->authsize) 532 return 0; 533 534 ctx->cdata.key_virt = ctx->key; 535 536 /* 537 * Job Descriptor and Shared Descriptor 538 * must fit into the 64-word Descriptor h/w Buffer 539 */ 540 if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) { 541 ctx->cdata.key_inline = true; 542 } else { 543 ctx->cdata.key_inline = false; 544 ctx->cdata.key_dma = ctx->key_dma; 545 } 546 547 cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 548 ctx->authsize, true); 549 550 /* 551 * Job Descriptor and Shared Descriptor 552 * must fit into the 64-word Descriptor h/w Buffer 553 */ 554 if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) { 555 ctx->cdata.key_inline = true; 556 } else { 557 ctx->cdata.key_inline = false; 558 ctx->cdata.key_dma = ctx->key_dma; 559 } 560 561 cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 562 ctx->authsize, true); 563 564 return 0; 565 } 566 567 static int rfc4543_setauthsize(struct crypto_aead *authenc, 568 unsigned int authsize) 569 { 570 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 571 572 ctx->authsize = authsize; 573 rfc4543_set_sh_desc(authenc); 574 575 return 0; 576 } 577 578 static int rfc4543_setkey(struct crypto_aead *aead, 579 const u8 *key, unsigned int keylen) 580 { 581 struct caam_ctx *ctx = crypto_aead_ctx(aead); 582 struct device *jrdev = ctx->jrdev; 583 int ret; 584 585 if (keylen < 4) 586 return -EINVAL; 587 588 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 589 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 590 591 memcpy(ctx->key, key, keylen); 592 /* 593 * The last four bytes of the key material are used as the salt value 594 * in the nonce. Update the AES key length. 595 */ 596 ctx->cdata.keylen = keylen - 4; 597 dma_sync_single_for_device(jrdev->parent, ctx->key_dma, 598 ctx->cdata.keylen, ctx->dir); 599 600 ret = rfc4543_set_sh_desc(aead); 601 if (ret) 602 return ret; 603 604 /* Now update the driver contexts with the new shared descriptor */ 605 if (ctx->drv_ctx[ENCRYPT]) { 606 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 607 ctx->sh_desc_enc); 608 if (ret) { 609 dev_err(jrdev, "driver enc context update failed\n"); 610 return ret; 611 } 612 } 613 614 if (ctx->drv_ctx[DECRYPT]) { 615 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 616 ctx->sh_desc_dec); 617 if (ret) { 618 dev_err(jrdev, "driver dec context update failed\n"); 619 return ret; 620 } 621 } 622 623 return 0; 624 } 625 626 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 627 unsigned int keylen) 628 { 629 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 630 struct caam_skcipher_alg *alg = 631 container_of(crypto_skcipher_alg(skcipher), typeof(*alg), 632 skcipher); 633 struct device *jrdev = ctx->jrdev; 634 unsigned int ivsize = crypto_skcipher_ivsize(skcipher); 635 u32 ctx1_iv_off = 0; 636 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 637 OP_ALG_AAI_CTR_MOD128); 638 const bool is_rfc3686 = alg->caam.rfc3686; 639 int ret = 0; 640 641 print_hex_dump_debug("key in @" __stringify(__LINE__)": ", 642 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 643 644 /* 645 * AES-CTR needs to load IV in CONTEXT1 reg 646 * at an offset of 128bits (16bytes) 647 * CONTEXT1[255:128] = IV 648 */ 649 if (ctr_mode) 650 ctx1_iv_off = 16; 651 652 /* 653 * RFC3686 specific: 654 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} 655 * | *key = {KEY, NONCE} 656 */ 657 if (is_rfc3686) { 658 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 659 keylen -= CTR_RFC3686_NONCE_SIZE; 660 } 661 662 ctx->cdata.keylen = keylen; 663 ctx->cdata.key_virt = key; 664 ctx->cdata.key_inline = true; 665 666 /* skcipher encrypt, decrypt shared descriptors */ 667 cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, 668 is_rfc3686, ctx1_iv_off); 669 cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, 670 is_rfc3686, ctx1_iv_off); 671 672 /* Now update the driver contexts with the new shared descriptor */ 673 if (ctx->drv_ctx[ENCRYPT]) { 674 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 675 ctx->sh_desc_enc); 676 if (ret) { 677 dev_err(jrdev, "driver enc context update failed\n"); 678 goto badkey; 679 } 680 } 681 682 if (ctx->drv_ctx[DECRYPT]) { 683 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 684 ctx->sh_desc_dec); 685 if (ret) { 686 dev_err(jrdev, "driver dec context update failed\n"); 687 goto badkey; 688 } 689 } 690 691 return ret; 692 badkey: 693 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 694 return -EINVAL; 695 } 696 697 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, 698 const u8 *key, unsigned int keylen) 699 { 700 return unlikely(des3_verify_key(skcipher, key)) ?: 701 skcipher_setkey(skcipher, key, keylen); 702 } 703 704 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, 705 unsigned int keylen) 706 { 707 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 708 struct device *jrdev = ctx->jrdev; 709 int ret = 0; 710 711 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { 712 dev_err(jrdev, "key size mismatch\n"); 713 goto badkey; 714 } 715 716 ctx->cdata.keylen = keylen; 717 ctx->cdata.key_virt = key; 718 ctx->cdata.key_inline = true; 719 720 /* xts skcipher encrypt, decrypt shared descriptors */ 721 cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata); 722 cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata); 723 724 /* Now update the driver contexts with the new shared descriptor */ 725 if (ctx->drv_ctx[ENCRYPT]) { 726 ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], 727 ctx->sh_desc_enc); 728 if (ret) { 729 dev_err(jrdev, "driver enc context update failed\n"); 730 goto badkey; 731 } 732 } 733 734 if (ctx->drv_ctx[DECRYPT]) { 735 ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], 736 ctx->sh_desc_dec); 737 if (ret) { 738 dev_err(jrdev, "driver dec context update failed\n"); 739 goto badkey; 740 } 741 } 742 743 return ret; 744 badkey: 745 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 746 return -EINVAL; 747 } 748 749 /* 750 * aead_edesc - s/w-extended aead descriptor 751 * @src_nents: number of segments in input scatterlist 752 * @dst_nents: number of segments in output scatterlist 753 * @iv_dma: dma address of iv for checking continuity and link table 754 * @qm_sg_bytes: length of dma mapped h/w link table 755 * @qm_sg_dma: bus physical mapped address of h/w link table 756 * @assoclen: associated data length, in CAAM endianness 757 * @assoclen_dma: bus physical mapped address of req->assoclen 758 * @drv_req: driver-specific request structure 759 * @sgt: the h/w link table, followed by IV 760 */ 761 struct aead_edesc { 762 int src_nents; 763 int dst_nents; 764 dma_addr_t iv_dma; 765 int qm_sg_bytes; 766 dma_addr_t qm_sg_dma; 767 unsigned int assoclen; 768 dma_addr_t assoclen_dma; 769 struct caam_drv_req drv_req; 770 struct qm_sg_entry sgt[0]; 771 }; 772 773 /* 774 * skcipher_edesc - s/w-extended skcipher descriptor 775 * @src_nents: number of segments in input scatterlist 776 * @dst_nents: number of segments in output scatterlist 777 * @iv_dma: dma address of iv for checking continuity and link table 778 * @qm_sg_bytes: length of dma mapped h/w link table 779 * @qm_sg_dma: bus physical mapped address of h/w link table 780 * @drv_req: driver-specific request structure 781 * @sgt: the h/w link table, followed by IV 782 */ 783 struct skcipher_edesc { 784 int src_nents; 785 int dst_nents; 786 dma_addr_t iv_dma; 787 int qm_sg_bytes; 788 dma_addr_t qm_sg_dma; 789 struct caam_drv_req drv_req; 790 struct qm_sg_entry sgt[0]; 791 }; 792 793 static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, 794 enum optype type) 795 { 796 /* 797 * This function is called on the fast path with values of 'type' 798 * known at compile time. Invalid arguments are not expected and 799 * thus no checks are made. 800 */ 801 struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type]; 802 u32 *desc; 803 804 if (unlikely(!drv_ctx)) { 805 spin_lock(&ctx->lock); 806 807 /* Read again to check if some other core init drv_ctx */ 808 drv_ctx = ctx->drv_ctx[type]; 809 if (!drv_ctx) { 810 int cpu; 811 812 if (type == ENCRYPT) 813 desc = ctx->sh_desc_enc; 814 else /* (type == DECRYPT) */ 815 desc = ctx->sh_desc_dec; 816 817 cpu = smp_processor_id(); 818 drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); 819 if (!IS_ERR_OR_NULL(drv_ctx)) 820 drv_ctx->op_type = type; 821 822 ctx->drv_ctx[type] = drv_ctx; 823 } 824 825 spin_unlock(&ctx->lock); 826 } 827 828 return drv_ctx; 829 } 830 831 static void caam_unmap(struct device *dev, struct scatterlist *src, 832 struct scatterlist *dst, int src_nents, 833 int dst_nents, dma_addr_t iv_dma, int ivsize, 834 enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, 835 int qm_sg_bytes) 836 { 837 if (dst != src) { 838 if (src_nents) 839 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 840 if (dst_nents) 841 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); 842 } else { 843 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); 844 } 845 846 if (iv_dma) 847 dma_unmap_single(dev, iv_dma, ivsize, iv_dir); 848 if (qm_sg_bytes) 849 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); 850 } 851 852 static void aead_unmap(struct device *dev, 853 struct aead_edesc *edesc, 854 struct aead_request *req) 855 { 856 struct crypto_aead *aead = crypto_aead_reqtfm(req); 857 int ivsize = crypto_aead_ivsize(aead); 858 859 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 860 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, 861 edesc->qm_sg_bytes); 862 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 863 } 864 865 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, 866 struct skcipher_request *req) 867 { 868 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 869 int ivsize = crypto_skcipher_ivsize(skcipher); 870 871 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 872 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, 873 edesc->qm_sg_bytes); 874 } 875 876 static void aead_done(struct caam_drv_req *drv_req, u32 status) 877 { 878 struct device *qidev; 879 struct aead_edesc *edesc; 880 struct aead_request *aead_req = drv_req->app_ctx; 881 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); 882 struct caam_ctx *caam_ctx = crypto_aead_ctx(aead); 883 int ecode = 0; 884 885 qidev = caam_ctx->qidev; 886 887 if (unlikely(status)) { 888 u32 ssrc = status & JRSTA_SSRC_MASK; 889 u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; 890 891 caam_jr_strstatus(qidev, status); 892 /* 893 * verify hw auth check passed else return -EBADMSG 894 */ 895 if (ssrc == JRSTA_SSRC_CCB_ERROR && 896 err_id == JRSTA_CCBERR_ERRID_ICVCHK) 897 ecode = -EBADMSG; 898 else 899 ecode = -EIO; 900 } 901 902 edesc = container_of(drv_req, typeof(*edesc), drv_req); 903 aead_unmap(qidev, edesc, aead_req); 904 905 aead_request_complete(aead_req, ecode); 906 qi_cache_free(edesc); 907 } 908 909 /* 910 * allocate and map the aead extended descriptor 911 */ 912 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 913 bool encrypt) 914 { 915 struct crypto_aead *aead = crypto_aead_reqtfm(req); 916 struct caam_ctx *ctx = crypto_aead_ctx(aead); 917 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), 918 typeof(*alg), aead); 919 struct device *qidev = ctx->qidev; 920 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 921 GFP_KERNEL : GFP_ATOMIC; 922 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 923 int src_len, dst_len = 0; 924 struct aead_edesc *edesc; 925 dma_addr_t qm_sg_dma, iv_dma = 0; 926 int ivsize = 0; 927 unsigned int authsize = ctx->authsize; 928 int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes; 929 int in_len, out_len; 930 struct qm_sg_entry *sg_table, *fd_sgt; 931 struct caam_drv_ctx *drv_ctx; 932 933 drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); 934 if (IS_ERR_OR_NULL(drv_ctx)) 935 return (struct aead_edesc *)drv_ctx; 936 937 /* allocate space for base edesc and hw desc commands, link tables */ 938 edesc = qi_cache_alloc(GFP_DMA | flags); 939 if (unlikely(!edesc)) { 940 dev_err(qidev, "could not allocate extended descriptor\n"); 941 return ERR_PTR(-ENOMEM); 942 } 943 944 if (likely(req->src == req->dst)) { 945 src_len = req->assoclen + req->cryptlen + 946 (encrypt ? authsize : 0); 947 948 src_nents = sg_nents_for_len(req->src, src_len); 949 if (unlikely(src_nents < 0)) { 950 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 951 src_len); 952 qi_cache_free(edesc); 953 return ERR_PTR(src_nents); 954 } 955 956 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, 957 DMA_BIDIRECTIONAL); 958 if (unlikely(!mapped_src_nents)) { 959 dev_err(qidev, "unable to map source\n"); 960 qi_cache_free(edesc); 961 return ERR_PTR(-ENOMEM); 962 } 963 } else { 964 src_len = req->assoclen + req->cryptlen; 965 dst_len = src_len + (encrypt ? authsize : (-authsize)); 966 967 src_nents = sg_nents_for_len(req->src, src_len); 968 if (unlikely(src_nents < 0)) { 969 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 970 src_len); 971 qi_cache_free(edesc); 972 return ERR_PTR(src_nents); 973 } 974 975 dst_nents = sg_nents_for_len(req->dst, dst_len); 976 if (unlikely(dst_nents < 0)) { 977 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", 978 dst_len); 979 qi_cache_free(edesc); 980 return ERR_PTR(dst_nents); 981 } 982 983 if (src_nents) { 984 mapped_src_nents = dma_map_sg(qidev, req->src, 985 src_nents, DMA_TO_DEVICE); 986 if (unlikely(!mapped_src_nents)) { 987 dev_err(qidev, "unable to map source\n"); 988 qi_cache_free(edesc); 989 return ERR_PTR(-ENOMEM); 990 } 991 } else { 992 mapped_src_nents = 0; 993 } 994 995 if (dst_nents) { 996 mapped_dst_nents = dma_map_sg(qidev, req->dst, 997 dst_nents, 998 DMA_FROM_DEVICE); 999 if (unlikely(!mapped_dst_nents)) { 1000 dev_err(qidev, "unable to map destination\n"); 1001 dma_unmap_sg(qidev, req->src, src_nents, 1002 DMA_TO_DEVICE); 1003 qi_cache_free(edesc); 1004 return ERR_PTR(-ENOMEM); 1005 } 1006 } else { 1007 mapped_dst_nents = 0; 1008 } 1009 } 1010 1011 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) 1012 ivsize = crypto_aead_ivsize(aead); 1013 1014 /* 1015 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. 1016 * Input is not contiguous. 1017 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1018 * the end of the table by allocating more S/G entries. Logic: 1019 * if (src != dst && output S/G) 1020 * pad output S/G, if needed 1021 * else if (src == dst && S/G) 1022 * overlapping S/Gs; pad one of them 1023 * else if (input S/G) ... 1024 * pad input S/G, if needed 1025 */ 1026 qm_sg_ents = 1 + !!ivsize + mapped_src_nents; 1027 if (mapped_dst_nents > 1) 1028 qm_sg_ents += pad_sg_nents(mapped_dst_nents); 1029 else if ((req->src == req->dst) && (mapped_src_nents > 1)) 1030 qm_sg_ents = max(pad_sg_nents(qm_sg_ents), 1031 1 + !!ivsize + pad_sg_nents(mapped_src_nents)); 1032 else 1033 qm_sg_ents = pad_sg_nents(qm_sg_ents); 1034 1035 sg_table = &edesc->sgt[0]; 1036 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); 1037 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > 1038 CAAM_QI_MEMCACHE_SIZE)) { 1039 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1040 qm_sg_ents, ivsize); 1041 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1042 0, DMA_NONE, 0, 0); 1043 qi_cache_free(edesc); 1044 return ERR_PTR(-ENOMEM); 1045 } 1046 1047 if (ivsize) { 1048 u8 *iv = (u8 *)(sg_table + qm_sg_ents); 1049 1050 /* Make sure IV is located in a DMAable area */ 1051 memcpy(iv, req->iv, ivsize); 1052 1053 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); 1054 if (dma_mapping_error(qidev, iv_dma)) { 1055 dev_err(qidev, "unable to map IV\n"); 1056 caam_unmap(qidev, req->src, req->dst, src_nents, 1057 dst_nents, 0, 0, DMA_NONE, 0, 0); 1058 qi_cache_free(edesc); 1059 return ERR_PTR(-ENOMEM); 1060 } 1061 } 1062 1063 edesc->src_nents = src_nents; 1064 edesc->dst_nents = dst_nents; 1065 edesc->iv_dma = iv_dma; 1066 edesc->drv_req.app_ctx = req; 1067 edesc->drv_req.cbk = aead_done; 1068 edesc->drv_req.drv_ctx = drv_ctx; 1069 1070 edesc->assoclen = cpu_to_caam32(req->assoclen); 1071 edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, 1072 DMA_TO_DEVICE); 1073 if (dma_mapping_error(qidev, edesc->assoclen_dma)) { 1074 dev_err(qidev, "unable to map assoclen\n"); 1075 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1076 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); 1077 qi_cache_free(edesc); 1078 return ERR_PTR(-ENOMEM); 1079 } 1080 1081 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); 1082 qm_sg_index++; 1083 if (ivsize) { 1084 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); 1085 qm_sg_index++; 1086 } 1087 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); 1088 qm_sg_index += mapped_src_nents; 1089 1090 if (mapped_dst_nents > 1) 1091 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); 1092 1093 qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); 1094 if (dma_mapping_error(qidev, qm_sg_dma)) { 1095 dev_err(qidev, "unable to map S/G table\n"); 1096 dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); 1097 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1098 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); 1099 qi_cache_free(edesc); 1100 return ERR_PTR(-ENOMEM); 1101 } 1102 1103 edesc->qm_sg_dma = qm_sg_dma; 1104 edesc->qm_sg_bytes = qm_sg_bytes; 1105 1106 out_len = req->assoclen + req->cryptlen + 1107 (encrypt ? ctx->authsize : (-ctx->authsize)); 1108 in_len = 4 + ivsize + req->assoclen + req->cryptlen; 1109 1110 fd_sgt = &edesc->drv_req.fd_sgt[0]; 1111 dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0); 1112 1113 if (req->dst == req->src) { 1114 if (mapped_src_nents == 1) 1115 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), 1116 out_len, 0); 1117 else 1118 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + 1119 (1 + !!ivsize) * sizeof(*sg_table), 1120 out_len, 0); 1121 } else if (mapped_dst_nents <= 1) { 1122 dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, 1123 0); 1124 } else { 1125 dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) * 1126 qm_sg_index, out_len, 0); 1127 } 1128 1129 return edesc; 1130 } 1131 1132 static inline int aead_crypt(struct aead_request *req, bool encrypt) 1133 { 1134 struct aead_edesc *edesc; 1135 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1136 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1137 int ret; 1138 1139 if (unlikely(caam_congested)) 1140 return -EAGAIN; 1141 1142 /* allocate extended descriptor */ 1143 edesc = aead_edesc_alloc(req, encrypt); 1144 if (IS_ERR_OR_NULL(edesc)) 1145 return PTR_ERR(edesc); 1146 1147 /* Create and submit job descriptor */ 1148 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); 1149 if (!ret) { 1150 ret = -EINPROGRESS; 1151 } else { 1152 aead_unmap(ctx->qidev, edesc, req); 1153 qi_cache_free(edesc); 1154 } 1155 1156 return ret; 1157 } 1158 1159 static int aead_encrypt(struct aead_request *req) 1160 { 1161 return aead_crypt(req, true); 1162 } 1163 1164 static int aead_decrypt(struct aead_request *req) 1165 { 1166 return aead_crypt(req, false); 1167 } 1168 1169 static int ipsec_gcm_encrypt(struct aead_request *req) 1170 { 1171 if (req->assoclen < 8) 1172 return -EINVAL; 1173 1174 return aead_crypt(req, true); 1175 } 1176 1177 static int ipsec_gcm_decrypt(struct aead_request *req) 1178 { 1179 if (req->assoclen < 8) 1180 return -EINVAL; 1181 1182 return aead_crypt(req, false); 1183 } 1184 1185 static void skcipher_done(struct caam_drv_req *drv_req, u32 status) 1186 { 1187 struct skcipher_edesc *edesc; 1188 struct skcipher_request *req = drv_req->app_ctx; 1189 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1190 struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher); 1191 struct device *qidev = caam_ctx->qidev; 1192 int ivsize = crypto_skcipher_ivsize(skcipher); 1193 1194 dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); 1195 1196 edesc = container_of(drv_req, typeof(*edesc), drv_req); 1197 1198 if (status) 1199 caam_jr_strstatus(qidev, status); 1200 1201 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", 1202 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, 1203 edesc->src_nents > 1 ? 100 : ivsize, 1); 1204 caam_dump_sg("dst @" __stringify(__LINE__)": ", 1205 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1206 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); 1207 1208 skcipher_unmap(qidev, edesc, req); 1209 1210 /* 1211 * The crypto API expects us to set the IV (req->iv) to the last 1212 * ciphertext block (CBC mode) or last counter (CTR mode). 1213 * This is used e.g. by the CTS mode. 1214 */ 1215 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); 1216 1217 qi_cache_free(edesc); 1218 skcipher_request_complete(req, status); 1219 } 1220 1221 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, 1222 bool encrypt) 1223 { 1224 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1225 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1226 struct device *qidev = ctx->qidev; 1227 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1228 GFP_KERNEL : GFP_ATOMIC; 1229 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 1230 struct skcipher_edesc *edesc; 1231 dma_addr_t iv_dma; 1232 u8 *iv; 1233 int ivsize = crypto_skcipher_ivsize(skcipher); 1234 int dst_sg_idx, qm_sg_ents, qm_sg_bytes; 1235 struct qm_sg_entry *sg_table, *fd_sgt; 1236 struct caam_drv_ctx *drv_ctx; 1237 1238 drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); 1239 if (IS_ERR_OR_NULL(drv_ctx)) 1240 return (struct skcipher_edesc *)drv_ctx; 1241 1242 src_nents = sg_nents_for_len(req->src, req->cryptlen); 1243 if (unlikely(src_nents < 0)) { 1244 dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", 1245 req->cryptlen); 1246 return ERR_PTR(src_nents); 1247 } 1248 1249 if (unlikely(req->src != req->dst)) { 1250 dst_nents = sg_nents_for_len(req->dst, req->cryptlen); 1251 if (unlikely(dst_nents < 0)) { 1252 dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", 1253 req->cryptlen); 1254 return ERR_PTR(dst_nents); 1255 } 1256 1257 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, 1258 DMA_TO_DEVICE); 1259 if (unlikely(!mapped_src_nents)) { 1260 dev_err(qidev, "unable to map source\n"); 1261 return ERR_PTR(-ENOMEM); 1262 } 1263 1264 mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, 1265 DMA_FROM_DEVICE); 1266 if (unlikely(!mapped_dst_nents)) { 1267 dev_err(qidev, "unable to map destination\n"); 1268 dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); 1269 return ERR_PTR(-ENOMEM); 1270 } 1271 } else { 1272 mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, 1273 DMA_BIDIRECTIONAL); 1274 if (unlikely(!mapped_src_nents)) { 1275 dev_err(qidev, "unable to map source\n"); 1276 return ERR_PTR(-ENOMEM); 1277 } 1278 } 1279 1280 qm_sg_ents = 1 + mapped_src_nents; 1281 dst_sg_idx = qm_sg_ents; 1282 1283 /* 1284 * Input, output HW S/G tables: [IV, src][dst, IV] 1285 * IV entries point to the same buffer 1286 * If src == dst, S/G entries are reused (S/G tables overlap) 1287 * 1288 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond 1289 * the end of the table by allocating more S/G entries. 1290 */ 1291 if (req->src != req->dst) 1292 qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); 1293 else 1294 qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); 1295 1296 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); 1297 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + 1298 ivsize > CAAM_QI_MEMCACHE_SIZE)) { 1299 dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", 1300 qm_sg_ents, ivsize); 1301 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1302 0, DMA_NONE, 0, 0); 1303 return ERR_PTR(-ENOMEM); 1304 } 1305 1306 /* allocate space for base edesc, link tables and IV */ 1307 edesc = qi_cache_alloc(GFP_DMA | flags); 1308 if (unlikely(!edesc)) { 1309 dev_err(qidev, "could not allocate extended descriptor\n"); 1310 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1311 0, DMA_NONE, 0, 0); 1312 return ERR_PTR(-ENOMEM); 1313 } 1314 1315 /* Make sure IV is located in a DMAable area */ 1316 sg_table = &edesc->sgt[0]; 1317 iv = (u8 *)(sg_table + qm_sg_ents); 1318 memcpy(iv, req->iv, ivsize); 1319 1320 iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL); 1321 if (dma_mapping_error(qidev, iv_dma)) { 1322 dev_err(qidev, "unable to map IV\n"); 1323 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 1324 0, DMA_NONE, 0, 0); 1325 qi_cache_free(edesc); 1326 return ERR_PTR(-ENOMEM); 1327 } 1328 1329 edesc->src_nents = src_nents; 1330 edesc->dst_nents = dst_nents; 1331 edesc->iv_dma = iv_dma; 1332 edesc->qm_sg_bytes = qm_sg_bytes; 1333 edesc->drv_req.app_ctx = req; 1334 edesc->drv_req.cbk = skcipher_done; 1335 edesc->drv_req.drv_ctx = drv_ctx; 1336 1337 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); 1338 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); 1339 1340 if (req->src != req->dst) 1341 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); 1342 1343 dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, 1344 ivsize, 0); 1345 1346 edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, 1347 DMA_TO_DEVICE); 1348 if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { 1349 dev_err(qidev, "unable to map S/G table\n"); 1350 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1351 iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); 1352 qi_cache_free(edesc); 1353 return ERR_PTR(-ENOMEM); 1354 } 1355 1356 fd_sgt = &edesc->drv_req.fd_sgt[0]; 1357 1358 dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, 1359 ivsize + req->cryptlen, 0); 1360 1361 if (req->src == req->dst) 1362 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + 1363 sizeof(*sg_table), req->cryptlen + ivsize, 1364 0); 1365 else 1366 dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * 1367 sizeof(*sg_table), req->cryptlen + ivsize, 1368 0); 1369 1370 return edesc; 1371 } 1372 1373 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) 1374 { 1375 struct skcipher_edesc *edesc; 1376 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1377 struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher); 1378 int ret; 1379 1380 if (unlikely(caam_congested)) 1381 return -EAGAIN; 1382 1383 /* allocate extended descriptor */ 1384 edesc = skcipher_edesc_alloc(req, encrypt); 1385 if (IS_ERR(edesc)) 1386 return PTR_ERR(edesc); 1387 1388 ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); 1389 if (!ret) { 1390 ret = -EINPROGRESS; 1391 } else { 1392 skcipher_unmap(ctx->qidev, edesc, req); 1393 qi_cache_free(edesc); 1394 } 1395 1396 return ret; 1397 } 1398 1399 static int skcipher_encrypt(struct skcipher_request *req) 1400 { 1401 return skcipher_crypt(req, true); 1402 } 1403 1404 static int skcipher_decrypt(struct skcipher_request *req) 1405 { 1406 return skcipher_crypt(req, false); 1407 } 1408 1409 static struct caam_skcipher_alg driver_algs[] = { 1410 { 1411 .skcipher = { 1412 .base = { 1413 .cra_name = "cbc(aes)", 1414 .cra_driver_name = "cbc-aes-caam-qi", 1415 .cra_blocksize = AES_BLOCK_SIZE, 1416 }, 1417 .setkey = skcipher_setkey, 1418 .encrypt = skcipher_encrypt, 1419 .decrypt = skcipher_decrypt, 1420 .min_keysize = AES_MIN_KEY_SIZE, 1421 .max_keysize = AES_MAX_KEY_SIZE, 1422 .ivsize = AES_BLOCK_SIZE, 1423 }, 1424 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1425 }, 1426 { 1427 .skcipher = { 1428 .base = { 1429 .cra_name = "cbc(des3_ede)", 1430 .cra_driver_name = "cbc-3des-caam-qi", 1431 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1432 }, 1433 .setkey = des3_skcipher_setkey, 1434 .encrypt = skcipher_encrypt, 1435 .decrypt = skcipher_decrypt, 1436 .min_keysize = DES3_EDE_KEY_SIZE, 1437 .max_keysize = DES3_EDE_KEY_SIZE, 1438 .ivsize = DES3_EDE_BLOCK_SIZE, 1439 }, 1440 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1441 }, 1442 { 1443 .skcipher = { 1444 .base = { 1445 .cra_name = "cbc(des)", 1446 .cra_driver_name = "cbc-des-caam-qi", 1447 .cra_blocksize = DES_BLOCK_SIZE, 1448 }, 1449 .setkey = skcipher_setkey, 1450 .encrypt = skcipher_encrypt, 1451 .decrypt = skcipher_decrypt, 1452 .min_keysize = DES_KEY_SIZE, 1453 .max_keysize = DES_KEY_SIZE, 1454 .ivsize = DES_BLOCK_SIZE, 1455 }, 1456 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 1457 }, 1458 { 1459 .skcipher = { 1460 .base = { 1461 .cra_name = "ctr(aes)", 1462 .cra_driver_name = "ctr-aes-caam-qi", 1463 .cra_blocksize = 1, 1464 }, 1465 .setkey = skcipher_setkey, 1466 .encrypt = skcipher_encrypt, 1467 .decrypt = skcipher_decrypt, 1468 .min_keysize = AES_MIN_KEY_SIZE, 1469 .max_keysize = AES_MAX_KEY_SIZE, 1470 .ivsize = AES_BLOCK_SIZE, 1471 .chunksize = AES_BLOCK_SIZE, 1472 }, 1473 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | 1474 OP_ALG_AAI_CTR_MOD128, 1475 }, 1476 { 1477 .skcipher = { 1478 .base = { 1479 .cra_name = "rfc3686(ctr(aes))", 1480 .cra_driver_name = "rfc3686-ctr-aes-caam-qi", 1481 .cra_blocksize = 1, 1482 }, 1483 .setkey = skcipher_setkey, 1484 .encrypt = skcipher_encrypt, 1485 .decrypt = skcipher_decrypt, 1486 .min_keysize = AES_MIN_KEY_SIZE + 1487 CTR_RFC3686_NONCE_SIZE, 1488 .max_keysize = AES_MAX_KEY_SIZE + 1489 CTR_RFC3686_NONCE_SIZE, 1490 .ivsize = CTR_RFC3686_IV_SIZE, 1491 .chunksize = AES_BLOCK_SIZE, 1492 }, 1493 .caam = { 1494 .class1_alg_type = OP_ALG_ALGSEL_AES | 1495 OP_ALG_AAI_CTR_MOD128, 1496 .rfc3686 = true, 1497 }, 1498 }, 1499 { 1500 .skcipher = { 1501 .base = { 1502 .cra_name = "xts(aes)", 1503 .cra_driver_name = "xts-aes-caam-qi", 1504 .cra_blocksize = AES_BLOCK_SIZE, 1505 }, 1506 .setkey = xts_skcipher_setkey, 1507 .encrypt = skcipher_encrypt, 1508 .decrypt = skcipher_decrypt, 1509 .min_keysize = 2 * AES_MIN_KEY_SIZE, 1510 .max_keysize = 2 * AES_MAX_KEY_SIZE, 1511 .ivsize = AES_BLOCK_SIZE, 1512 }, 1513 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, 1514 }, 1515 }; 1516 1517 static struct caam_aead_alg driver_aeads[] = { 1518 { 1519 .aead = { 1520 .base = { 1521 .cra_name = "rfc4106(gcm(aes))", 1522 .cra_driver_name = "rfc4106-gcm-aes-caam-qi", 1523 .cra_blocksize = 1, 1524 }, 1525 .setkey = rfc4106_setkey, 1526 .setauthsize = rfc4106_setauthsize, 1527 .encrypt = ipsec_gcm_encrypt, 1528 .decrypt = ipsec_gcm_decrypt, 1529 .ivsize = 8, 1530 .maxauthsize = AES_BLOCK_SIZE, 1531 }, 1532 .caam = { 1533 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1534 .nodkp = true, 1535 }, 1536 }, 1537 { 1538 .aead = { 1539 .base = { 1540 .cra_name = "rfc4543(gcm(aes))", 1541 .cra_driver_name = "rfc4543-gcm-aes-caam-qi", 1542 .cra_blocksize = 1, 1543 }, 1544 .setkey = rfc4543_setkey, 1545 .setauthsize = rfc4543_setauthsize, 1546 .encrypt = ipsec_gcm_encrypt, 1547 .decrypt = ipsec_gcm_decrypt, 1548 .ivsize = 8, 1549 .maxauthsize = AES_BLOCK_SIZE, 1550 }, 1551 .caam = { 1552 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1553 .nodkp = true, 1554 }, 1555 }, 1556 /* Galois Counter Mode */ 1557 { 1558 .aead = { 1559 .base = { 1560 .cra_name = "gcm(aes)", 1561 .cra_driver_name = "gcm-aes-caam-qi", 1562 .cra_blocksize = 1, 1563 }, 1564 .setkey = gcm_setkey, 1565 .setauthsize = gcm_setauthsize, 1566 .encrypt = aead_encrypt, 1567 .decrypt = aead_decrypt, 1568 .ivsize = 12, 1569 .maxauthsize = AES_BLOCK_SIZE, 1570 }, 1571 .caam = { 1572 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1573 .nodkp = true, 1574 } 1575 }, 1576 /* single-pass ipsec_esp descriptor */ 1577 { 1578 .aead = { 1579 .base = { 1580 .cra_name = "authenc(hmac(md5),cbc(aes))", 1581 .cra_driver_name = "authenc-hmac-md5-" 1582 "cbc-aes-caam-qi", 1583 .cra_blocksize = AES_BLOCK_SIZE, 1584 }, 1585 .setkey = aead_setkey, 1586 .setauthsize = aead_setauthsize, 1587 .encrypt = aead_encrypt, 1588 .decrypt = aead_decrypt, 1589 .ivsize = AES_BLOCK_SIZE, 1590 .maxauthsize = MD5_DIGEST_SIZE, 1591 }, 1592 .caam = { 1593 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1594 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1595 OP_ALG_AAI_HMAC_PRECOMP, 1596 } 1597 }, 1598 { 1599 .aead = { 1600 .base = { 1601 .cra_name = "echainiv(authenc(hmac(md5)," 1602 "cbc(aes)))", 1603 .cra_driver_name = "echainiv-authenc-hmac-md5-" 1604 "cbc-aes-caam-qi", 1605 .cra_blocksize = AES_BLOCK_SIZE, 1606 }, 1607 .setkey = aead_setkey, 1608 .setauthsize = aead_setauthsize, 1609 .encrypt = aead_encrypt, 1610 .decrypt = aead_decrypt, 1611 .ivsize = AES_BLOCK_SIZE, 1612 .maxauthsize = MD5_DIGEST_SIZE, 1613 }, 1614 .caam = { 1615 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1616 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1617 OP_ALG_AAI_HMAC_PRECOMP, 1618 .geniv = true, 1619 } 1620 }, 1621 { 1622 .aead = { 1623 .base = { 1624 .cra_name = "authenc(hmac(sha1),cbc(aes))", 1625 .cra_driver_name = "authenc-hmac-sha1-" 1626 "cbc-aes-caam-qi", 1627 .cra_blocksize = AES_BLOCK_SIZE, 1628 }, 1629 .setkey = aead_setkey, 1630 .setauthsize = aead_setauthsize, 1631 .encrypt = aead_encrypt, 1632 .decrypt = aead_decrypt, 1633 .ivsize = AES_BLOCK_SIZE, 1634 .maxauthsize = SHA1_DIGEST_SIZE, 1635 }, 1636 .caam = { 1637 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1638 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1639 OP_ALG_AAI_HMAC_PRECOMP, 1640 } 1641 }, 1642 { 1643 .aead = { 1644 .base = { 1645 .cra_name = "echainiv(authenc(hmac(sha1)," 1646 "cbc(aes)))", 1647 .cra_driver_name = "echainiv-authenc-" 1648 "hmac-sha1-cbc-aes-caam-qi", 1649 .cra_blocksize = AES_BLOCK_SIZE, 1650 }, 1651 .setkey = aead_setkey, 1652 .setauthsize = aead_setauthsize, 1653 .encrypt = aead_encrypt, 1654 .decrypt = aead_decrypt, 1655 .ivsize = AES_BLOCK_SIZE, 1656 .maxauthsize = SHA1_DIGEST_SIZE, 1657 }, 1658 .caam = { 1659 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1660 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1661 OP_ALG_AAI_HMAC_PRECOMP, 1662 .geniv = true, 1663 }, 1664 }, 1665 { 1666 .aead = { 1667 .base = { 1668 .cra_name = "authenc(hmac(sha224),cbc(aes))", 1669 .cra_driver_name = "authenc-hmac-sha224-" 1670 "cbc-aes-caam-qi", 1671 .cra_blocksize = AES_BLOCK_SIZE, 1672 }, 1673 .setkey = aead_setkey, 1674 .setauthsize = aead_setauthsize, 1675 .encrypt = aead_encrypt, 1676 .decrypt = aead_decrypt, 1677 .ivsize = AES_BLOCK_SIZE, 1678 .maxauthsize = SHA224_DIGEST_SIZE, 1679 }, 1680 .caam = { 1681 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1682 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1683 OP_ALG_AAI_HMAC_PRECOMP, 1684 } 1685 }, 1686 { 1687 .aead = { 1688 .base = { 1689 .cra_name = "echainiv(authenc(hmac(sha224)," 1690 "cbc(aes)))", 1691 .cra_driver_name = "echainiv-authenc-" 1692 "hmac-sha224-cbc-aes-caam-qi", 1693 .cra_blocksize = AES_BLOCK_SIZE, 1694 }, 1695 .setkey = aead_setkey, 1696 .setauthsize = aead_setauthsize, 1697 .encrypt = aead_encrypt, 1698 .decrypt = aead_decrypt, 1699 .ivsize = AES_BLOCK_SIZE, 1700 .maxauthsize = SHA224_DIGEST_SIZE, 1701 }, 1702 .caam = { 1703 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1704 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1705 OP_ALG_AAI_HMAC_PRECOMP, 1706 .geniv = true, 1707 } 1708 }, 1709 { 1710 .aead = { 1711 .base = { 1712 .cra_name = "authenc(hmac(sha256),cbc(aes))", 1713 .cra_driver_name = "authenc-hmac-sha256-" 1714 "cbc-aes-caam-qi", 1715 .cra_blocksize = AES_BLOCK_SIZE, 1716 }, 1717 .setkey = aead_setkey, 1718 .setauthsize = aead_setauthsize, 1719 .encrypt = aead_encrypt, 1720 .decrypt = aead_decrypt, 1721 .ivsize = AES_BLOCK_SIZE, 1722 .maxauthsize = SHA256_DIGEST_SIZE, 1723 }, 1724 .caam = { 1725 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1726 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 1727 OP_ALG_AAI_HMAC_PRECOMP, 1728 } 1729 }, 1730 { 1731 .aead = { 1732 .base = { 1733 .cra_name = "echainiv(authenc(hmac(sha256)," 1734 "cbc(aes)))", 1735 .cra_driver_name = "echainiv-authenc-" 1736 "hmac-sha256-cbc-aes-" 1737 "caam-qi", 1738 .cra_blocksize = AES_BLOCK_SIZE, 1739 }, 1740 .setkey = aead_setkey, 1741 .setauthsize = aead_setauthsize, 1742 .encrypt = aead_encrypt, 1743 .decrypt = aead_decrypt, 1744 .ivsize = AES_BLOCK_SIZE, 1745 .maxauthsize = SHA256_DIGEST_SIZE, 1746 }, 1747 .caam = { 1748 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1749 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 1750 OP_ALG_AAI_HMAC_PRECOMP, 1751 .geniv = true, 1752 } 1753 }, 1754 { 1755 .aead = { 1756 .base = { 1757 .cra_name = "authenc(hmac(sha384),cbc(aes))", 1758 .cra_driver_name = "authenc-hmac-sha384-" 1759 "cbc-aes-caam-qi", 1760 .cra_blocksize = AES_BLOCK_SIZE, 1761 }, 1762 .setkey = aead_setkey, 1763 .setauthsize = aead_setauthsize, 1764 .encrypt = aead_encrypt, 1765 .decrypt = aead_decrypt, 1766 .ivsize = AES_BLOCK_SIZE, 1767 .maxauthsize = SHA384_DIGEST_SIZE, 1768 }, 1769 .caam = { 1770 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1771 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 1772 OP_ALG_AAI_HMAC_PRECOMP, 1773 } 1774 }, 1775 { 1776 .aead = { 1777 .base = { 1778 .cra_name = "echainiv(authenc(hmac(sha384)," 1779 "cbc(aes)))", 1780 .cra_driver_name = "echainiv-authenc-" 1781 "hmac-sha384-cbc-aes-" 1782 "caam-qi", 1783 .cra_blocksize = AES_BLOCK_SIZE, 1784 }, 1785 .setkey = aead_setkey, 1786 .setauthsize = aead_setauthsize, 1787 .encrypt = aead_encrypt, 1788 .decrypt = aead_decrypt, 1789 .ivsize = AES_BLOCK_SIZE, 1790 .maxauthsize = SHA384_DIGEST_SIZE, 1791 }, 1792 .caam = { 1793 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1794 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 1795 OP_ALG_AAI_HMAC_PRECOMP, 1796 .geniv = true, 1797 } 1798 }, 1799 { 1800 .aead = { 1801 .base = { 1802 .cra_name = "authenc(hmac(sha512),cbc(aes))", 1803 .cra_driver_name = "authenc-hmac-sha512-" 1804 "cbc-aes-caam-qi", 1805 .cra_blocksize = AES_BLOCK_SIZE, 1806 }, 1807 .setkey = aead_setkey, 1808 .setauthsize = aead_setauthsize, 1809 .encrypt = aead_encrypt, 1810 .decrypt = aead_decrypt, 1811 .ivsize = AES_BLOCK_SIZE, 1812 .maxauthsize = SHA512_DIGEST_SIZE, 1813 }, 1814 .caam = { 1815 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1816 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 1817 OP_ALG_AAI_HMAC_PRECOMP, 1818 } 1819 }, 1820 { 1821 .aead = { 1822 .base = { 1823 .cra_name = "echainiv(authenc(hmac(sha512)," 1824 "cbc(aes)))", 1825 .cra_driver_name = "echainiv-authenc-" 1826 "hmac-sha512-cbc-aes-" 1827 "caam-qi", 1828 .cra_blocksize = AES_BLOCK_SIZE, 1829 }, 1830 .setkey = aead_setkey, 1831 .setauthsize = aead_setauthsize, 1832 .encrypt = aead_encrypt, 1833 .decrypt = aead_decrypt, 1834 .ivsize = AES_BLOCK_SIZE, 1835 .maxauthsize = SHA512_DIGEST_SIZE, 1836 }, 1837 .caam = { 1838 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1839 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 1840 OP_ALG_AAI_HMAC_PRECOMP, 1841 .geniv = true, 1842 } 1843 }, 1844 { 1845 .aead = { 1846 .base = { 1847 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 1848 .cra_driver_name = "authenc-hmac-md5-" 1849 "cbc-des3_ede-caam-qi", 1850 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1851 }, 1852 .setkey = des3_aead_setkey, 1853 .setauthsize = aead_setauthsize, 1854 .encrypt = aead_encrypt, 1855 .decrypt = aead_decrypt, 1856 .ivsize = DES3_EDE_BLOCK_SIZE, 1857 .maxauthsize = MD5_DIGEST_SIZE, 1858 }, 1859 .caam = { 1860 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1861 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1862 OP_ALG_AAI_HMAC_PRECOMP, 1863 } 1864 }, 1865 { 1866 .aead = { 1867 .base = { 1868 .cra_name = "echainiv(authenc(hmac(md5)," 1869 "cbc(des3_ede)))", 1870 .cra_driver_name = "echainiv-authenc-hmac-md5-" 1871 "cbc-des3_ede-caam-qi", 1872 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1873 }, 1874 .setkey = des3_aead_setkey, 1875 .setauthsize = aead_setauthsize, 1876 .encrypt = aead_encrypt, 1877 .decrypt = aead_decrypt, 1878 .ivsize = DES3_EDE_BLOCK_SIZE, 1879 .maxauthsize = MD5_DIGEST_SIZE, 1880 }, 1881 .caam = { 1882 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1883 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 1884 OP_ALG_AAI_HMAC_PRECOMP, 1885 .geniv = true, 1886 } 1887 }, 1888 { 1889 .aead = { 1890 .base = { 1891 .cra_name = "authenc(hmac(sha1)," 1892 "cbc(des3_ede))", 1893 .cra_driver_name = "authenc-hmac-sha1-" 1894 "cbc-des3_ede-caam-qi", 1895 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1896 }, 1897 .setkey = des3_aead_setkey, 1898 .setauthsize = aead_setauthsize, 1899 .encrypt = aead_encrypt, 1900 .decrypt = aead_decrypt, 1901 .ivsize = DES3_EDE_BLOCK_SIZE, 1902 .maxauthsize = SHA1_DIGEST_SIZE, 1903 }, 1904 .caam = { 1905 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1906 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1907 OP_ALG_AAI_HMAC_PRECOMP, 1908 }, 1909 }, 1910 { 1911 .aead = { 1912 .base = { 1913 .cra_name = "echainiv(authenc(hmac(sha1)," 1914 "cbc(des3_ede)))", 1915 .cra_driver_name = "echainiv-authenc-" 1916 "hmac-sha1-" 1917 "cbc-des3_ede-caam-qi", 1918 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1919 }, 1920 .setkey = des3_aead_setkey, 1921 .setauthsize = aead_setauthsize, 1922 .encrypt = aead_encrypt, 1923 .decrypt = aead_decrypt, 1924 .ivsize = DES3_EDE_BLOCK_SIZE, 1925 .maxauthsize = SHA1_DIGEST_SIZE, 1926 }, 1927 .caam = { 1928 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1929 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1930 OP_ALG_AAI_HMAC_PRECOMP, 1931 .geniv = true, 1932 } 1933 }, 1934 { 1935 .aead = { 1936 .base = { 1937 .cra_name = "authenc(hmac(sha224)," 1938 "cbc(des3_ede))", 1939 .cra_driver_name = "authenc-hmac-sha224-" 1940 "cbc-des3_ede-caam-qi", 1941 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1942 }, 1943 .setkey = des3_aead_setkey, 1944 .setauthsize = aead_setauthsize, 1945 .encrypt = aead_encrypt, 1946 .decrypt = aead_decrypt, 1947 .ivsize = DES3_EDE_BLOCK_SIZE, 1948 .maxauthsize = SHA224_DIGEST_SIZE, 1949 }, 1950 .caam = { 1951 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1952 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1953 OP_ALG_AAI_HMAC_PRECOMP, 1954 }, 1955 }, 1956 { 1957 .aead = { 1958 .base = { 1959 .cra_name = "echainiv(authenc(hmac(sha224)," 1960 "cbc(des3_ede)))", 1961 .cra_driver_name = "echainiv-authenc-" 1962 "hmac-sha224-" 1963 "cbc-des3_ede-caam-qi", 1964 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1965 }, 1966 .setkey = des3_aead_setkey, 1967 .setauthsize = aead_setauthsize, 1968 .encrypt = aead_encrypt, 1969 .decrypt = aead_decrypt, 1970 .ivsize = DES3_EDE_BLOCK_SIZE, 1971 .maxauthsize = SHA224_DIGEST_SIZE, 1972 }, 1973 .caam = { 1974 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1975 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1976 OP_ALG_AAI_HMAC_PRECOMP, 1977 .geniv = true, 1978 } 1979 }, 1980 { 1981 .aead = { 1982 .base = { 1983 .cra_name = "authenc(hmac(sha256)," 1984 "cbc(des3_ede))", 1985 .cra_driver_name = "authenc-hmac-sha256-" 1986 "cbc-des3_ede-caam-qi", 1987 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1988 }, 1989 .setkey = des3_aead_setkey, 1990 .setauthsize = aead_setauthsize, 1991 .encrypt = aead_encrypt, 1992 .decrypt = aead_decrypt, 1993 .ivsize = DES3_EDE_BLOCK_SIZE, 1994 .maxauthsize = SHA256_DIGEST_SIZE, 1995 }, 1996 .caam = { 1997 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1998 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 1999 OP_ALG_AAI_HMAC_PRECOMP, 2000 }, 2001 }, 2002 { 2003 .aead = { 2004 .base = { 2005 .cra_name = "echainiv(authenc(hmac(sha256)," 2006 "cbc(des3_ede)))", 2007 .cra_driver_name = "echainiv-authenc-" 2008 "hmac-sha256-" 2009 "cbc-des3_ede-caam-qi", 2010 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2011 }, 2012 .setkey = des3_aead_setkey, 2013 .setauthsize = aead_setauthsize, 2014 .encrypt = aead_encrypt, 2015 .decrypt = aead_decrypt, 2016 .ivsize = DES3_EDE_BLOCK_SIZE, 2017 .maxauthsize = SHA256_DIGEST_SIZE, 2018 }, 2019 .caam = { 2020 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2021 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2022 OP_ALG_AAI_HMAC_PRECOMP, 2023 .geniv = true, 2024 } 2025 }, 2026 { 2027 .aead = { 2028 .base = { 2029 .cra_name = "authenc(hmac(sha384)," 2030 "cbc(des3_ede))", 2031 .cra_driver_name = "authenc-hmac-sha384-" 2032 "cbc-des3_ede-caam-qi", 2033 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2034 }, 2035 .setkey = des3_aead_setkey, 2036 .setauthsize = aead_setauthsize, 2037 .encrypt = aead_encrypt, 2038 .decrypt = aead_decrypt, 2039 .ivsize = DES3_EDE_BLOCK_SIZE, 2040 .maxauthsize = SHA384_DIGEST_SIZE, 2041 }, 2042 .caam = { 2043 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2044 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2045 OP_ALG_AAI_HMAC_PRECOMP, 2046 }, 2047 }, 2048 { 2049 .aead = { 2050 .base = { 2051 .cra_name = "echainiv(authenc(hmac(sha384)," 2052 "cbc(des3_ede)))", 2053 .cra_driver_name = "echainiv-authenc-" 2054 "hmac-sha384-" 2055 "cbc-des3_ede-caam-qi", 2056 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2057 }, 2058 .setkey = des3_aead_setkey, 2059 .setauthsize = aead_setauthsize, 2060 .encrypt = aead_encrypt, 2061 .decrypt = aead_decrypt, 2062 .ivsize = DES3_EDE_BLOCK_SIZE, 2063 .maxauthsize = SHA384_DIGEST_SIZE, 2064 }, 2065 .caam = { 2066 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2067 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2068 OP_ALG_AAI_HMAC_PRECOMP, 2069 .geniv = true, 2070 } 2071 }, 2072 { 2073 .aead = { 2074 .base = { 2075 .cra_name = "authenc(hmac(sha512)," 2076 "cbc(des3_ede))", 2077 .cra_driver_name = "authenc-hmac-sha512-" 2078 "cbc-des3_ede-caam-qi", 2079 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2080 }, 2081 .setkey = des3_aead_setkey, 2082 .setauthsize = aead_setauthsize, 2083 .encrypt = aead_encrypt, 2084 .decrypt = aead_decrypt, 2085 .ivsize = DES3_EDE_BLOCK_SIZE, 2086 .maxauthsize = SHA512_DIGEST_SIZE, 2087 }, 2088 .caam = { 2089 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2090 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2091 OP_ALG_AAI_HMAC_PRECOMP, 2092 }, 2093 }, 2094 { 2095 .aead = { 2096 .base = { 2097 .cra_name = "echainiv(authenc(hmac(sha512)," 2098 "cbc(des3_ede)))", 2099 .cra_driver_name = "echainiv-authenc-" 2100 "hmac-sha512-" 2101 "cbc-des3_ede-caam-qi", 2102 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2103 }, 2104 .setkey = des3_aead_setkey, 2105 .setauthsize = aead_setauthsize, 2106 .encrypt = aead_encrypt, 2107 .decrypt = aead_decrypt, 2108 .ivsize = DES3_EDE_BLOCK_SIZE, 2109 .maxauthsize = SHA512_DIGEST_SIZE, 2110 }, 2111 .caam = { 2112 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2113 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2114 OP_ALG_AAI_HMAC_PRECOMP, 2115 .geniv = true, 2116 } 2117 }, 2118 { 2119 .aead = { 2120 .base = { 2121 .cra_name = "authenc(hmac(md5),cbc(des))", 2122 .cra_driver_name = "authenc-hmac-md5-" 2123 "cbc-des-caam-qi", 2124 .cra_blocksize = DES_BLOCK_SIZE, 2125 }, 2126 .setkey = aead_setkey, 2127 .setauthsize = aead_setauthsize, 2128 .encrypt = aead_encrypt, 2129 .decrypt = aead_decrypt, 2130 .ivsize = DES_BLOCK_SIZE, 2131 .maxauthsize = MD5_DIGEST_SIZE, 2132 }, 2133 .caam = { 2134 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2135 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2136 OP_ALG_AAI_HMAC_PRECOMP, 2137 }, 2138 }, 2139 { 2140 .aead = { 2141 .base = { 2142 .cra_name = "echainiv(authenc(hmac(md5)," 2143 "cbc(des)))", 2144 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2145 "cbc-des-caam-qi", 2146 .cra_blocksize = DES_BLOCK_SIZE, 2147 }, 2148 .setkey = aead_setkey, 2149 .setauthsize = aead_setauthsize, 2150 .encrypt = aead_encrypt, 2151 .decrypt = aead_decrypt, 2152 .ivsize = DES_BLOCK_SIZE, 2153 .maxauthsize = MD5_DIGEST_SIZE, 2154 }, 2155 .caam = { 2156 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2157 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2158 OP_ALG_AAI_HMAC_PRECOMP, 2159 .geniv = true, 2160 } 2161 }, 2162 { 2163 .aead = { 2164 .base = { 2165 .cra_name = "authenc(hmac(sha1),cbc(des))", 2166 .cra_driver_name = "authenc-hmac-sha1-" 2167 "cbc-des-caam-qi", 2168 .cra_blocksize = DES_BLOCK_SIZE, 2169 }, 2170 .setkey = aead_setkey, 2171 .setauthsize = aead_setauthsize, 2172 .encrypt = aead_encrypt, 2173 .decrypt = aead_decrypt, 2174 .ivsize = DES_BLOCK_SIZE, 2175 .maxauthsize = SHA1_DIGEST_SIZE, 2176 }, 2177 .caam = { 2178 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2179 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2180 OP_ALG_AAI_HMAC_PRECOMP, 2181 }, 2182 }, 2183 { 2184 .aead = { 2185 .base = { 2186 .cra_name = "echainiv(authenc(hmac(sha1)," 2187 "cbc(des)))", 2188 .cra_driver_name = "echainiv-authenc-" 2189 "hmac-sha1-cbc-des-caam-qi", 2190 .cra_blocksize = DES_BLOCK_SIZE, 2191 }, 2192 .setkey = aead_setkey, 2193 .setauthsize = aead_setauthsize, 2194 .encrypt = aead_encrypt, 2195 .decrypt = aead_decrypt, 2196 .ivsize = DES_BLOCK_SIZE, 2197 .maxauthsize = SHA1_DIGEST_SIZE, 2198 }, 2199 .caam = { 2200 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2201 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2202 OP_ALG_AAI_HMAC_PRECOMP, 2203 .geniv = true, 2204 } 2205 }, 2206 { 2207 .aead = { 2208 .base = { 2209 .cra_name = "authenc(hmac(sha224),cbc(des))", 2210 .cra_driver_name = "authenc-hmac-sha224-" 2211 "cbc-des-caam-qi", 2212 .cra_blocksize = DES_BLOCK_SIZE, 2213 }, 2214 .setkey = aead_setkey, 2215 .setauthsize = aead_setauthsize, 2216 .encrypt = aead_encrypt, 2217 .decrypt = aead_decrypt, 2218 .ivsize = DES_BLOCK_SIZE, 2219 .maxauthsize = SHA224_DIGEST_SIZE, 2220 }, 2221 .caam = { 2222 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2223 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2224 OP_ALG_AAI_HMAC_PRECOMP, 2225 }, 2226 }, 2227 { 2228 .aead = { 2229 .base = { 2230 .cra_name = "echainiv(authenc(hmac(sha224)," 2231 "cbc(des)))", 2232 .cra_driver_name = "echainiv-authenc-" 2233 "hmac-sha224-cbc-des-" 2234 "caam-qi", 2235 .cra_blocksize = DES_BLOCK_SIZE, 2236 }, 2237 .setkey = aead_setkey, 2238 .setauthsize = aead_setauthsize, 2239 .encrypt = aead_encrypt, 2240 .decrypt = aead_decrypt, 2241 .ivsize = DES_BLOCK_SIZE, 2242 .maxauthsize = SHA224_DIGEST_SIZE, 2243 }, 2244 .caam = { 2245 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2246 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2247 OP_ALG_AAI_HMAC_PRECOMP, 2248 .geniv = true, 2249 } 2250 }, 2251 { 2252 .aead = { 2253 .base = { 2254 .cra_name = "authenc(hmac(sha256),cbc(des))", 2255 .cra_driver_name = "authenc-hmac-sha256-" 2256 "cbc-des-caam-qi", 2257 .cra_blocksize = DES_BLOCK_SIZE, 2258 }, 2259 .setkey = aead_setkey, 2260 .setauthsize = aead_setauthsize, 2261 .encrypt = aead_encrypt, 2262 .decrypt = aead_decrypt, 2263 .ivsize = DES_BLOCK_SIZE, 2264 .maxauthsize = SHA256_DIGEST_SIZE, 2265 }, 2266 .caam = { 2267 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2268 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2269 OP_ALG_AAI_HMAC_PRECOMP, 2270 }, 2271 }, 2272 { 2273 .aead = { 2274 .base = { 2275 .cra_name = "echainiv(authenc(hmac(sha256)," 2276 "cbc(des)))", 2277 .cra_driver_name = "echainiv-authenc-" 2278 "hmac-sha256-cbc-des-" 2279 "caam-qi", 2280 .cra_blocksize = DES_BLOCK_SIZE, 2281 }, 2282 .setkey = aead_setkey, 2283 .setauthsize = aead_setauthsize, 2284 .encrypt = aead_encrypt, 2285 .decrypt = aead_decrypt, 2286 .ivsize = DES_BLOCK_SIZE, 2287 .maxauthsize = SHA256_DIGEST_SIZE, 2288 }, 2289 .caam = { 2290 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2291 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2292 OP_ALG_AAI_HMAC_PRECOMP, 2293 .geniv = true, 2294 }, 2295 }, 2296 { 2297 .aead = { 2298 .base = { 2299 .cra_name = "authenc(hmac(sha384),cbc(des))", 2300 .cra_driver_name = "authenc-hmac-sha384-" 2301 "cbc-des-caam-qi", 2302 .cra_blocksize = DES_BLOCK_SIZE, 2303 }, 2304 .setkey = aead_setkey, 2305 .setauthsize = aead_setauthsize, 2306 .encrypt = aead_encrypt, 2307 .decrypt = aead_decrypt, 2308 .ivsize = DES_BLOCK_SIZE, 2309 .maxauthsize = SHA384_DIGEST_SIZE, 2310 }, 2311 .caam = { 2312 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2313 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2314 OP_ALG_AAI_HMAC_PRECOMP, 2315 }, 2316 }, 2317 { 2318 .aead = { 2319 .base = { 2320 .cra_name = "echainiv(authenc(hmac(sha384)," 2321 "cbc(des)))", 2322 .cra_driver_name = "echainiv-authenc-" 2323 "hmac-sha384-cbc-des-" 2324 "caam-qi", 2325 .cra_blocksize = DES_BLOCK_SIZE, 2326 }, 2327 .setkey = aead_setkey, 2328 .setauthsize = aead_setauthsize, 2329 .encrypt = aead_encrypt, 2330 .decrypt = aead_decrypt, 2331 .ivsize = DES_BLOCK_SIZE, 2332 .maxauthsize = SHA384_DIGEST_SIZE, 2333 }, 2334 .caam = { 2335 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2336 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2337 OP_ALG_AAI_HMAC_PRECOMP, 2338 .geniv = true, 2339 } 2340 }, 2341 { 2342 .aead = { 2343 .base = { 2344 .cra_name = "authenc(hmac(sha512),cbc(des))", 2345 .cra_driver_name = "authenc-hmac-sha512-" 2346 "cbc-des-caam-qi", 2347 .cra_blocksize = DES_BLOCK_SIZE, 2348 }, 2349 .setkey = aead_setkey, 2350 .setauthsize = aead_setauthsize, 2351 .encrypt = aead_encrypt, 2352 .decrypt = aead_decrypt, 2353 .ivsize = DES_BLOCK_SIZE, 2354 .maxauthsize = SHA512_DIGEST_SIZE, 2355 }, 2356 .caam = { 2357 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2358 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2359 OP_ALG_AAI_HMAC_PRECOMP, 2360 } 2361 }, 2362 { 2363 .aead = { 2364 .base = { 2365 .cra_name = "echainiv(authenc(hmac(sha512)," 2366 "cbc(des)))", 2367 .cra_driver_name = "echainiv-authenc-" 2368 "hmac-sha512-cbc-des-" 2369 "caam-qi", 2370 .cra_blocksize = DES_BLOCK_SIZE, 2371 }, 2372 .setkey = aead_setkey, 2373 .setauthsize = aead_setauthsize, 2374 .encrypt = aead_encrypt, 2375 .decrypt = aead_decrypt, 2376 .ivsize = DES_BLOCK_SIZE, 2377 .maxauthsize = SHA512_DIGEST_SIZE, 2378 }, 2379 .caam = { 2380 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2381 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2382 OP_ALG_AAI_HMAC_PRECOMP, 2383 .geniv = true, 2384 } 2385 }, 2386 }; 2387 2388 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 2389 bool uses_dkp) 2390 { 2391 struct caam_drv_private *priv; 2392 struct device *dev; 2393 2394 /* 2395 * distribute tfms across job rings to ensure in-order 2396 * crypto request processing per tfm 2397 */ 2398 ctx->jrdev = caam_jr_alloc(); 2399 if (IS_ERR(ctx->jrdev)) { 2400 pr_err("Job Ring Device allocation for transform failed\n"); 2401 return PTR_ERR(ctx->jrdev); 2402 } 2403 2404 dev = ctx->jrdev->parent; 2405 priv = dev_get_drvdata(dev); 2406 if (priv->era >= 6 && uses_dkp) 2407 ctx->dir = DMA_BIDIRECTIONAL; 2408 else 2409 ctx->dir = DMA_TO_DEVICE; 2410 2411 ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), 2412 ctx->dir); 2413 if (dma_mapping_error(dev, ctx->key_dma)) { 2414 dev_err(dev, "unable to map key\n"); 2415 caam_jr_free(ctx->jrdev); 2416 return -ENOMEM; 2417 } 2418 2419 /* copy descriptor header template value */ 2420 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; 2421 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; 2422 2423 ctx->qidev = dev; 2424 2425 spin_lock_init(&ctx->lock); 2426 ctx->drv_ctx[ENCRYPT] = NULL; 2427 ctx->drv_ctx[DECRYPT] = NULL; 2428 2429 return 0; 2430 } 2431 2432 static int caam_cra_init(struct crypto_skcipher *tfm) 2433 { 2434 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 2435 struct caam_skcipher_alg *caam_alg = 2436 container_of(alg, typeof(*caam_alg), skcipher); 2437 2438 return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam, 2439 false); 2440 } 2441 2442 static int caam_aead_init(struct crypto_aead *tfm) 2443 { 2444 struct aead_alg *alg = crypto_aead_alg(tfm); 2445 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), 2446 aead); 2447 struct caam_ctx *ctx = crypto_aead_ctx(tfm); 2448 2449 return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); 2450 } 2451 2452 static void caam_exit_common(struct caam_ctx *ctx) 2453 { 2454 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); 2455 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); 2456 2457 dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), 2458 ctx->dir); 2459 2460 caam_jr_free(ctx->jrdev); 2461 } 2462 2463 static void caam_cra_exit(struct crypto_skcipher *tfm) 2464 { 2465 caam_exit_common(crypto_skcipher_ctx(tfm)); 2466 } 2467 2468 static void caam_aead_exit(struct crypto_aead *tfm) 2469 { 2470 caam_exit_common(crypto_aead_ctx(tfm)); 2471 } 2472 2473 void caam_qi_algapi_exit(void) 2474 { 2475 int i; 2476 2477 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 2478 struct caam_aead_alg *t_alg = driver_aeads + i; 2479 2480 if (t_alg->registered) 2481 crypto_unregister_aead(&t_alg->aead); 2482 } 2483 2484 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2485 struct caam_skcipher_alg *t_alg = driver_algs + i; 2486 2487 if (t_alg->registered) 2488 crypto_unregister_skcipher(&t_alg->skcipher); 2489 } 2490 } 2491 2492 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) 2493 { 2494 struct skcipher_alg *alg = &t_alg->skcipher; 2495 2496 alg->base.cra_module = THIS_MODULE; 2497 alg->base.cra_priority = CAAM_CRA_PRIORITY; 2498 alg->base.cra_ctxsize = sizeof(struct caam_ctx); 2499 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; 2500 2501 alg->init = caam_cra_init; 2502 alg->exit = caam_cra_exit; 2503 } 2504 2505 static void caam_aead_alg_init(struct caam_aead_alg *t_alg) 2506 { 2507 struct aead_alg *alg = &t_alg->aead; 2508 2509 alg->base.cra_module = THIS_MODULE; 2510 alg->base.cra_priority = CAAM_CRA_PRIORITY; 2511 alg->base.cra_ctxsize = sizeof(struct caam_ctx); 2512 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; 2513 2514 alg->init = caam_aead_init; 2515 alg->exit = caam_aead_exit; 2516 } 2517 2518 int caam_qi_algapi_init(struct device *ctrldev) 2519 { 2520 struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 2521 int i = 0, err = 0; 2522 u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; 2523 unsigned int md_limit = SHA512_DIGEST_SIZE; 2524 bool registered = false; 2525 2526 if (caam_dpaa2) { 2527 dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); 2528 return -ENODEV; 2529 } 2530 2531 /* 2532 * Register crypto algorithms the device supports. 2533 * First, detect presence and attributes of DES, AES, and MD blocks. 2534 */ 2535 if (priv->era < 10) { 2536 u32 cha_vid, cha_inst; 2537 2538 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 2539 aes_vid = cha_vid & CHA_ID_LS_AES_MASK; 2540 md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2541 2542 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 2543 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> 2544 CHA_ID_LS_DES_SHIFT; 2545 aes_inst = cha_inst & CHA_ID_LS_AES_MASK; 2546 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2547 } else { 2548 u32 aesa, mdha; 2549 2550 aesa = rd_reg32(&priv->ctrl->vreg.aesa); 2551 mdha = rd_reg32(&priv->ctrl->vreg.mdha); 2552 2553 aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 2554 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 2555 2556 des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; 2557 aes_inst = aesa & CHA_VER_NUM_MASK; 2558 md_inst = mdha & CHA_VER_NUM_MASK; 2559 } 2560 2561 /* If MD is present, limit digest size based on LP256 */ 2562 if (md_inst && md_vid == CHA_VER_VID_MD_LP256) 2563 md_limit = SHA256_DIGEST_SIZE; 2564 2565 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2566 struct caam_skcipher_alg *t_alg = driver_algs + i; 2567 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; 2568 2569 /* Skip DES algorithms if not supported by device */ 2570 if (!des_inst && 2571 ((alg_sel == OP_ALG_ALGSEL_3DES) || 2572 (alg_sel == OP_ALG_ALGSEL_DES))) 2573 continue; 2574 2575 /* Skip AES algorithms if not supported by device */ 2576 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 2577 continue; 2578 2579 caam_skcipher_alg_init(t_alg); 2580 2581 err = crypto_register_skcipher(&t_alg->skcipher); 2582 if (err) { 2583 dev_warn(ctrldev, "%s alg registration failed\n", 2584 t_alg->skcipher.base.cra_driver_name); 2585 continue; 2586 } 2587 2588 t_alg->registered = true; 2589 registered = true; 2590 } 2591 2592 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 2593 struct caam_aead_alg *t_alg = driver_aeads + i; 2594 u32 c1_alg_sel = t_alg->caam.class1_alg_type & 2595 OP_ALG_ALGSEL_MASK; 2596 u32 c2_alg_sel = t_alg->caam.class2_alg_type & 2597 OP_ALG_ALGSEL_MASK; 2598 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; 2599 2600 /* Skip DES algorithms if not supported by device */ 2601 if (!des_inst && 2602 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || 2603 (c1_alg_sel == OP_ALG_ALGSEL_DES))) 2604 continue; 2605 2606 /* Skip AES algorithms if not supported by device */ 2607 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) 2608 continue; 2609 2610 /* 2611 * Check support for AES algorithms not available 2612 * on LP devices. 2613 */ 2614 if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) 2615 continue; 2616 2617 /* 2618 * Skip algorithms requiring message digests 2619 * if MD or MD size is not supported by device. 2620 */ 2621 if (c2_alg_sel && 2622 (!md_inst || (t_alg->aead.maxauthsize > md_limit))) 2623 continue; 2624 2625 caam_aead_alg_init(t_alg); 2626 2627 err = crypto_register_aead(&t_alg->aead); 2628 if (err) { 2629 pr_warn("%s alg registration failed\n", 2630 t_alg->aead.base.cra_driver_name); 2631 continue; 2632 } 2633 2634 t_alg->registered = true; 2635 registered = true; 2636 } 2637 2638 if (registered) 2639 dev_info(ctrldev, "algorithms registered in /proc/crypto\n"); 2640 2641 return err; 2642 } 2643