// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2018 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
};

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

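	/*
	 * For each shared descriptor built below, desc_inline_query()
	 * decides whether the (split) auth key and the cipher key fit
	 * inline in the descriptor or must be referenced via a DMA
	 * pointer: bit 0 of inl_mask covers adata, bit 1 covers cdata.
	 */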
	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

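	/*
	 * CAAM authenticates with a "split key" - the precomputed HMAC
	 * ipad/opad state derived from the raw authentication key.
	 */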
	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	u32 flags;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(aead);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err)) {
		crypto_aead_set_flags(aead, flags);
		goto out;
	}

	err = aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;
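	/*
	 * rem_bytes: what is left of the 64-word (256-byte) descriptor
	 * buffer once the job descriptor I/O section and the key are
	 * accounted for; it drives the inline-vs-referenced key choice
	 * below.
	 */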

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

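/*
 * RFC 4106 keys carry a 4-byte salt appended to the AES key; the salt
 * becomes the fixed part of the GCM nonce, so it is kept in ctx->key
 * but excluded from cdata.keylen.
 */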
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

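/*
 * Plain skciphers always inline the key: the skcipher shared
 * descriptors are small, so key plus descriptor always fit within the
 * 64-word buffer.
 */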
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return unlikely(des3_verify_key(skcipher, key)) ?:
	       skcipher_setkey(skcipher, key, keylen);
}

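/*
 * XTS takes two AES keys of equal size (cipher key plus tweak key),
 * hence the 2 * AES_{MIN,MAX}_KEY_SIZE check below.
 */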
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(jrdev, "key size mismatch\n");
		goto badkey;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	return ret;
badkey:
	crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[0];
};

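/*
 * Driver (QI) contexts are created lazily, once per operation type, on
 * first use - see the double-checked locking below. setkey pushes
 * updated shared descriptors into contexts that already exist via
 * caam_drv_ctx_update(), so a re-key does not tear the context down.
 */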
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else /* (type == DECRYPT) */
				desc = ctx->sh_desc_dec;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (!IS_ERR_OR_NULL(drv_ctx))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}

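/*
 * Completion callback, run from the QI response path: decode the CAAM
 * status word, unmap the extended descriptor and complete the request.
 * An ICV (authentication) check failure is reported as -EBADMSG, any
 * other error as -EIO.
 */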
static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		caam_jr_strstatus(qidev, status);
		/*
		 * verify hw auth check passed else return -EBADMSG
		 */
		if (ssrc == JRSTA_SSRC_CCB_ERROR &&
		    err_id == JRSTA_CCBERR_ERRID_ICVCHK)
			ecode = -EBADMSG;
		else
			ecode = -EIO;
	}

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct aead_edesc *)drv_ctx;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize :
							(-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(qidev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(qidev, "unable to map destination\n");
				dma_unmap_sg(qidev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

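	/*
	 * Resulting qm_sg table layout (a sketch):
	 *   [0]            assoclen (4 bytes, CAAM endianness)
	 *   [1]            IV, when one is supplied with the request
	 *   [...]          source segments
	 *   [qm_sg_index]  destination segments, when dst is scattered
	 */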
	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents == 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}

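/*
 * When the QI backend reports congestion, fail fast with -EAGAIN so
 * callers can back off instead of piling more work onto full rings.
 */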
static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR_OR_NULL(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return aead_crypt(req, false);
}

static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct skcipher_edesc *edesc;
	struct skcipher_request *req = drv_req->app_ctx;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);

#ifdef DEBUG
	dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		caam_jr_strstatus(qidev, status);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
#endif

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
					 ivsize, ivsize, 0);

	qi_cache_free(edesc);
	skcipher_request_complete(req, status);
}

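/*
 * Note: the matching IV copy for decryption happens in skcipher_crypt()
 * below, before the job is enqueued, since an in-place operation may
 * overwrite the last ciphertext block in req->src.
 */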
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR_OR_NULL(drv_ctx))
		return (struct skcipher_edesc *)drv_ctx;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
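	/*
	 * qm_sg table layout: [0] IV, [1..] source segments, then
	 * destination segments at dst_sg_idx when the output is
	 * scattered.
	 */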
	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = skcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
				 dst_sg_idx, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->cryptlen, 0);

	if (req->src == req->dst) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->cryptlen, 0);
	} else if (mapped_dst_nents > 1) {
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->cryptlen, 0);
	} else {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
				 req->cryptlen, 0);
	}

	return edesc;
}

static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block.
	 */
	if (!encrypt)
		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
					 ivsize, ivsize, 0);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, false);
}

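/*
 * skcipher templates exposed by this backend; the embedded
 * caam_alg_entry carries the CAAM class 1 algorithm selector used when
 * building the shared descriptors.
 */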
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};

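/*
 * AEAD templates: standalone GCM and its RFC 4106 / RFC 4543 IPsec
 * wrappers come first (nodkp - no split key involved), followed by
 * single-pass authenc() combinations, each also offered as an
 * echainiv() variant (geniv set, using the givencrypt shared
 * descriptor built in aead_set_sh_desc()).
 */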
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		}
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
"cbc(des3_ede)))", 1908 .cra_driver_name = "echainiv-authenc-" 1909 "hmac-sha1-" 1910 "cbc-des3_ede-caam-qi", 1911 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1912 }, 1913 .setkey = des3_aead_setkey, 1914 .setauthsize = aead_setauthsize, 1915 .encrypt = aead_encrypt, 1916 .decrypt = aead_decrypt, 1917 .ivsize = DES3_EDE_BLOCK_SIZE, 1918 .maxauthsize = SHA1_DIGEST_SIZE, 1919 }, 1920 .caam = { 1921 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1922 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 1923 OP_ALG_AAI_HMAC_PRECOMP, 1924 .geniv = true, 1925 } 1926 }, 1927 { 1928 .aead = { 1929 .base = { 1930 .cra_name = "authenc(hmac(sha224)," 1931 "cbc(des3_ede))", 1932 .cra_driver_name = "authenc-hmac-sha224-" 1933 "cbc-des3_ede-caam-qi", 1934 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1935 }, 1936 .setkey = des3_aead_setkey, 1937 .setauthsize = aead_setauthsize, 1938 .encrypt = aead_encrypt, 1939 .decrypt = aead_decrypt, 1940 .ivsize = DES3_EDE_BLOCK_SIZE, 1941 .maxauthsize = SHA224_DIGEST_SIZE, 1942 }, 1943 .caam = { 1944 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1945 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1946 OP_ALG_AAI_HMAC_PRECOMP, 1947 }, 1948 }, 1949 { 1950 .aead = { 1951 .base = { 1952 .cra_name = "echainiv(authenc(hmac(sha224)," 1953 "cbc(des3_ede)))", 1954 .cra_driver_name = "echainiv-authenc-" 1955 "hmac-sha224-" 1956 "cbc-des3_ede-caam-qi", 1957 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1958 }, 1959 .setkey = des3_aead_setkey, 1960 .setauthsize = aead_setauthsize, 1961 .encrypt = aead_encrypt, 1962 .decrypt = aead_decrypt, 1963 .ivsize = DES3_EDE_BLOCK_SIZE, 1964 .maxauthsize = SHA224_DIGEST_SIZE, 1965 }, 1966 .caam = { 1967 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1968 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 1969 OP_ALG_AAI_HMAC_PRECOMP, 1970 .geniv = true, 1971 } 1972 }, 1973 { 1974 .aead = { 1975 .base = { 1976 .cra_name = "authenc(hmac(sha256)," 1977 "cbc(des3_ede))", 1978 .cra_driver_name = "authenc-hmac-sha256-" 1979 "cbc-des3_ede-caam-qi", 1980 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 1981 }, 1982 .setkey = des3_aead_setkey, 1983 .setauthsize = aead_setauthsize, 1984 .encrypt = aead_encrypt, 1985 .decrypt = aead_decrypt, 1986 .ivsize = DES3_EDE_BLOCK_SIZE, 1987 .maxauthsize = SHA256_DIGEST_SIZE, 1988 }, 1989 .caam = { 1990 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1991 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 1992 OP_ALG_AAI_HMAC_PRECOMP, 1993 }, 1994 }, 1995 { 1996 .aead = { 1997 .base = { 1998 .cra_name = "echainiv(authenc(hmac(sha256)," 1999 "cbc(des3_ede)))", 2000 .cra_driver_name = "echainiv-authenc-" 2001 "hmac-sha256-" 2002 "cbc-des3_ede-caam-qi", 2003 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2004 }, 2005 .setkey = des3_aead_setkey, 2006 .setauthsize = aead_setauthsize, 2007 .encrypt = aead_encrypt, 2008 .decrypt = aead_decrypt, 2009 .ivsize = DES3_EDE_BLOCK_SIZE, 2010 .maxauthsize = SHA256_DIGEST_SIZE, 2011 }, 2012 .caam = { 2013 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2014 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2015 OP_ALG_AAI_HMAC_PRECOMP, 2016 .geniv = true, 2017 } 2018 }, 2019 { 2020 .aead = { 2021 .base = { 2022 .cra_name = "authenc(hmac(sha384)," 2023 "cbc(des3_ede))", 2024 .cra_driver_name = "authenc-hmac-sha384-" 2025 "cbc-des3_ede-caam-qi", 2026 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2027 }, 2028 .setkey = des3_aead_setkey, 2029 .setauthsize = aead_setauthsize, 2030 .encrypt = aead_encrypt, 2031 .decrypt = aead_decrypt, 2032 .ivsize = DES3_EDE_BLOCK_SIZE, 2033 .maxauthsize = SHA384_DIGEST_SIZE, 2034 }, 2035 .caam 
{
	.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha384),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-"
					   "cbc-des3_ede-caam-qi",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		},
		.setkey = des3_aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "echainiv(authenc(hmac(sha384),"
				    "cbc(des3_ede)))",
			.cra_driver_name = "echainiv-authenc-"
					   "hmac-sha384-"
					   "cbc-des3_ede-caam-qi",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		},
		.setkey = des3_aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.geniv = true,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha512),"
				    "cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-"
					   "cbc-des3_ede-caam-qi",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		},
		.setkey = des3_aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "echainiv(authenc(hmac(sha512),"
				    "cbc(des3_ede)))",
			.cra_driver_name = "echainiv-authenc-"
					   "hmac-sha512-"
					   "cbc-des3_ede-caam-qi",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		},
		.setkey = des3_aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.geniv = true,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "authenc(hmac(md5),cbc(des))",
			.cra_driver_name = "authenc-hmac-md5-"
					   "cbc-des-caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 |
				   OP_ALG_AAI_HMAC_PRECOMP,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "echainiv(authenc(hmac(md5),"
				    "cbc(des)))",
			.cra_driver_name = "echainiv-authenc-hmac-md5-"
					   "cbc-des-caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.geniv = true,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha1),cbc(des))",
			.cra_driver_name = "authenc-hmac-sha1-"
					   "cbc-des-caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
				   OP_ALG_AAI_HMAC_PRECOMP,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "echainiv(authenc(hmac(sha1),"
				    "cbc(des)))",
			.cra_driver_name = "echainiv-authenc-"
					   "hmac-sha1-cbc-des-caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.geniv = true,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha224),cbc(des))",
			.cra_driver_name = "authenc-hmac-sha224-"
					   "cbc-des-caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "echainiv(authenc(hmac(sha224),"
				    "cbc(des)))",
			.cra_driver_name = "echainiv-authenc-"
					   "hmac-sha224-cbc-des-"
					   "caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.geniv = true,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha256),cbc(des))",
			.cra_driver_name = "authenc-hmac-sha256-"
					   "cbc-des-caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "echainiv(authenc(hmac(sha256),"
				    "cbc(des)))",
			.cra_driver_name = "echainiv-authenc-"
					   "hmac-sha256-cbc-des-"
					   "caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.geniv = true,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha384),cbc(des))",
			.cra_driver_name = "authenc-hmac-sha384-"
					   "cbc-des-caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "echainiv(authenc(hmac(sha384),"
				    "cbc(des)))",
			.cra_driver_name = "echainiv-authenc-"
					   "hmac-sha384-cbc-des-"
					   "caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.geniv = true,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "authenc(hmac(sha512),cbc(des))",
			.cra_driver_name = "authenc-hmac-sha512-"
					   "cbc-des-caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
	},
},
{
	.aead = {
		.base = {
			.cra_name = "echainiv(authenc(hmac(sha512),"
				    "cbc(des)))",
			.cra_driver_name = "echainiv-authenc-"
					   "hmac-sha512-cbc-des-"
					   "caam-qi",
			.cra_blocksize = DES_BLOCK_SIZE,
		},
		.setkey = aead_setkey,
		.setauthsize = aead_setauthsize,
		.encrypt = aead_encrypt,
		.decrypt = aead_decrypt,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE,
	},
	.caam = {
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.geniv = true,
	},
},
};
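
/*
 * Usage sketch (illustrative only, not part of the driver): once the
 * templates above are registered by caam_qi_algapi_init() below, they are
 * reachable through the generic crypto API by cra_name, and the "-caam-qi"
 * cra_driver_name wins lookups at CAAM_CRA_PRIORITY. A kernel caller would
 * obtain and key a tfm roughly like this (error handling trimmed; "key" and
 * "keylen" are placeholders):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(des3_ede))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);	 (routed to des3_aead_setkey)
 *	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 */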

static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;

	return 0;
}

static int caam_cra_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
				false);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}
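
/*
 * Lifecycle note (sketch, derived from the hooks above): the crypto core
 * runs ->init when a tfm is allocated and ->exit when it is freed, so each
 * transform holds its job ring and key DMA mapping for exactly that window:
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);	caam_cra_init() runs
 *	... submit requests ...
 *	crypto_free_skcipher(tfm);			caam_cra_exit() runs
 *
 * The ENCRYPT/DECRYPT driver contexts released in caam_exit_common() start
 * out NULL in caam_init_common() and are only created on first use, under
 * ctx->lock.
 */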

static void __exit caam_qi_algapi_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}
}

static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present) {
		err = -ENODEV;
		goto out_put_dev;
	}

	if (caam_dpaa2) {
		dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
		err = -ENODEV;
		goto out_put_dev;
	}

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	if (priv->era < 10) {
		u32 cha_vid, cha_inst;

		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
			   CHA_ID_LS_DES_SHIFT;
		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 aesa, mdha;

		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
		mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;

		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
		aes_inst = aesa & CHA_VER_NUM_MASK;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;
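
	/*
	 * Example of the gating below: on a part whose MDHA is the low-power
	 * LP256 variant, md_limit is SHA256_DIGEST_SIZE, so every aead
	 * template whose .maxauthsize is SHA384_DIGEST_SIZE or
	 * SHA512_DIGEST_SIZE is silently skipped rather than registered.
	 */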

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

out_put_dev:
	put_device(ctrldev);
	return err;
}

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");