1 /* 2 * caam - Freescale FSL CAAM support for crypto API 3 * 4 * Copyright 2008-2011 Freescale Semiconductor, Inc. 5 * Copyright 2016 NXP 6 * 7 * Based on talitos crypto API driver. 8 * 9 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008): 10 * 11 * --------------- --------------- 12 * | JobDesc #1 |-------------------->| ShareDesc | 13 * | *(packet 1) | | (PDB) | 14 * --------------- |------------->| (hashKey) | 15 * . | | (cipherKey) | 16 * . | |-------->| (operation) | 17 * --------------- | | --------------- 18 * | JobDesc #2 |------| | 19 * | *(packet 2) | | 20 * --------------- | 21 * . | 22 * . | 23 * --------------- | 24 * | JobDesc #3 |------------ 25 * | *(packet 3) | 26 * --------------- 27 * 28 * The SharedDesc never changes for a connection unless rekeyed, but 29 * each packet will likely be in a different place. So all we need 30 * to know to process the packet is where the input is, where the 31 * output goes, and what context we want to process with. Context is 32 * in the SharedDesc, packet references in the JobDesc. 33 * 34 * So, a job desc looks like: 35 * 36 * --------------------- 37 * | Header | 38 * | ShareDesc Pointer | 39 * | SEQ_OUT_PTR | 40 * | (output buffer) | 41 * | (output length) | 42 * | SEQ_IN_PTR | 43 * | (input buffer) | 44 * | (input length) | 45 * --------------------- 46 */ 47 48 #include "compat.h" 49 50 #include "regs.h" 51 #include "intern.h" 52 #include "desc_constr.h" 53 #include "jr.h" 54 #include "error.h" 55 #include "sg_sw_sec4.h" 56 #include "key_gen.h" 57 #include "caamalg_desc.h" 58 59 /* 60 * crypto alg 61 */ 62 #define CAAM_CRA_PRIORITY 3000 63 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ 64 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ 65 CTR_RFC3686_NONCE_SIZE + \ 66 SHA512_DIGEST_SIZE * 2) 67 68 #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) 69 #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ 70 CAAM_CMD_SZ * 4) 71 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ 72 CAAM_CMD_SZ * 5) 73 74 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) 75 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 76 77 #ifdef DEBUG 78 /* for print_hex_dumps with line references */ 79 #define debug(format, arg...) printk(format, arg) 80 #else 81 #define debug(format, arg...) 82 #endif 83 84 static struct list_head alg_list; 85 86 struct caam_alg_entry { 87 int class1_alg_type; 88 int class2_alg_type; 89 bool rfc3686; 90 bool geniv; 91 }; 92 93 struct caam_aead_alg { 94 struct aead_alg aead; 95 struct caam_alg_entry caam; 96 bool registered; 97 }; 98 99 /* 100 * per-session context 101 */ 102 struct caam_ctx { 103 u32 sh_desc_enc[DESC_MAX_USED_LEN]; 104 u32 sh_desc_dec[DESC_MAX_USED_LEN]; 105 u32 sh_desc_givenc[DESC_MAX_USED_LEN]; 106 u8 key[CAAM_MAX_KEY_SIZE]; 107 dma_addr_t sh_desc_enc_dma; 108 dma_addr_t sh_desc_dec_dma; 109 dma_addr_t sh_desc_givenc_dma; 110 dma_addr_t key_dma; 111 enum dma_data_direction dir; 112 struct device *jrdev; 113 struct alginfo adata; 114 struct alginfo cdata; 115 unsigned int authsize; 116 }; 117 118 static int aead_null_set_sh_desc(struct crypto_aead *aead) 119 { 120 struct caam_ctx *ctx = crypto_aead_ctx(aead); 121 struct device *jrdev = ctx->jrdev; 122 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 123 u32 *desc; 124 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - 125 ctx->adata.keylen_pad; 126 127 /* 128 * Job Descriptor and Shared Descriptors 129 * must all fit into the 64-word Descriptor h/w Buffer 130 */ 131 if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) { 132 ctx->adata.key_inline = true; 133 ctx->adata.key_virt = ctx->key; 134 } else { 135 ctx->adata.key_inline = false; 136 ctx->adata.key_dma = ctx->key_dma; 137 } 138 139 /* aead_encrypt shared descriptor */ 140 desc = ctx->sh_desc_enc; 141 cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize, 142 ctrlpriv->era); 143 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 144 desc_bytes(desc), ctx->dir); 145 146 /* 147 * Job Descriptor and Shared Descriptors 148 * must all fit into the 64-word Descriptor h/w Buffer 149 */ 150 if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) { 151 ctx->adata.key_inline = true; 152 ctx->adata.key_virt = ctx->key; 153 } else { 154 ctx->adata.key_inline = false; 155 ctx->adata.key_dma = ctx->key_dma; 156 } 157 158 /* aead_decrypt shared descriptor */ 159 desc = ctx->sh_desc_dec; 160 cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize, 161 ctrlpriv->era); 162 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 163 desc_bytes(desc), ctx->dir); 164 165 return 0; 166 } 167 168 static int aead_set_sh_desc(struct crypto_aead *aead) 169 { 170 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), 171 struct caam_aead_alg, aead); 172 unsigned int ivsize = crypto_aead_ivsize(aead); 173 struct caam_ctx *ctx = crypto_aead_ctx(aead); 174 struct device *jrdev = ctx->jrdev; 175 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 176 u32 ctx1_iv_off = 0; 177 u32 *desc, *nonce = NULL; 178 u32 inl_mask; 179 unsigned int data_len[2]; 180 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 181 OP_ALG_AAI_CTR_MOD128); 182 const bool is_rfc3686 = alg->caam.rfc3686; 183 184 if (!ctx->authsize) 185 return 0; 186 187 /* NULL encryption / decryption */ 188 if (!ctx->cdata.keylen) 189 return aead_null_set_sh_desc(aead); 190 191 /* 192 * AES-CTR needs to load IV in CONTEXT1 reg 193 * at an offset of 128bits (16bytes) 194 * CONTEXT1[255:128] = IV 195 */ 196 if (ctr_mode) 197 ctx1_iv_off = 16; 198 199 /* 200 * RFC3686 specific: 201 * CONTEXT1[255:128] = {NONCE, IV, COUNTER} 202 */ 203 if (is_rfc3686) { 204 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 205 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + 206 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); 207 } 208 209 data_len[0] = ctx->adata.keylen_pad; 210 data_len[1] = ctx->cdata.keylen; 211 212 if (alg->caam.geniv) 213 goto skip_enc; 214 215 /* 216 * Job Descriptor and Shared Descriptors 217 * must all fit into the 64-word Descriptor h/w Buffer 218 */ 219 if (desc_inline_query(DESC_AEAD_ENC_LEN + 220 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 221 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, 222 ARRAY_SIZE(data_len)) < 0) 223 return -EINVAL; 224 225 if (inl_mask & 1) 226 ctx->adata.key_virt = ctx->key; 227 else 228 ctx->adata.key_dma = ctx->key_dma; 229 230 if (inl_mask & 2) 231 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 232 else 233 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 234 235 ctx->adata.key_inline = !!(inl_mask & 1); 236 ctx->cdata.key_inline = !!(inl_mask & 2); 237 238 /* aead_encrypt shared descriptor */ 239 desc = ctx->sh_desc_enc; 240 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, 241 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, 242 false, ctrlpriv->era); 243 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 244 desc_bytes(desc), ctx->dir); 245 246 skip_enc: 247 /* 248 * Job Descriptor and Shared Descriptors 249 * must all fit into the 64-word Descriptor h/w Buffer 250 */ 251 if (desc_inline_query(DESC_AEAD_DEC_LEN + 252 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 253 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, 254 ARRAY_SIZE(data_len)) < 0) 255 return -EINVAL; 256 257 if (inl_mask & 1) 258 ctx->adata.key_virt = ctx->key; 259 else 260 ctx->adata.key_dma = ctx->key_dma; 261 262 if (inl_mask & 2) 263 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 264 else 265 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 266 267 ctx->adata.key_inline = !!(inl_mask & 1); 268 ctx->cdata.key_inline = !!(inl_mask & 2); 269 270 /* aead_decrypt shared descriptor */ 271 desc = ctx->sh_desc_dec; 272 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, 273 ctx->authsize, alg->caam.geniv, is_rfc3686, 274 nonce, ctx1_iv_off, false, ctrlpriv->era); 275 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 276 desc_bytes(desc), ctx->dir); 277 278 if (!alg->caam.geniv) 279 goto skip_givenc; 280 281 /* 282 * Job Descriptor and Shared Descriptors 283 * must all fit into the 64-word Descriptor h/w Buffer 284 */ 285 if (desc_inline_query(DESC_AEAD_GIVENC_LEN + 286 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 287 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, 288 ARRAY_SIZE(data_len)) < 0) 289 return -EINVAL; 290 291 if (inl_mask & 1) 292 ctx->adata.key_virt = ctx->key; 293 else 294 ctx->adata.key_dma = ctx->key_dma; 295 296 if (inl_mask & 2) 297 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 298 else 299 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 300 301 ctx->adata.key_inline = !!(inl_mask & 1); 302 ctx->cdata.key_inline = !!(inl_mask & 2); 303 304 /* aead_givencrypt shared descriptor */ 305 desc = ctx->sh_desc_enc; 306 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, 307 ctx->authsize, is_rfc3686, nonce, 308 ctx1_iv_off, false, ctrlpriv->era); 309 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 310 desc_bytes(desc), ctx->dir); 311 312 skip_givenc: 313 return 0; 314 } 315 316 static int aead_setauthsize(struct crypto_aead *authenc, 317 unsigned int authsize) 318 { 319 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 320 321 ctx->authsize = authsize; 322 aead_set_sh_desc(authenc); 323 324 return 0; 325 } 326 327 static int gcm_set_sh_desc(struct crypto_aead *aead) 328 { 329 struct caam_ctx *ctx = crypto_aead_ctx(aead); 330 struct device *jrdev = ctx->jrdev; 331 unsigned int ivsize = crypto_aead_ivsize(aead); 332 u32 *desc; 333 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 334 ctx->cdata.keylen; 335 336 if (!ctx->cdata.keylen || !ctx->authsize) 337 return 0; 338 339 /* 340 * AES GCM encrypt shared descriptor 341 * Job Descriptor and Shared Descriptor 342 * must fit into the 64-word Descriptor h/w Buffer 343 */ 344 if (rem_bytes >= DESC_GCM_ENC_LEN) { 345 ctx->cdata.key_inline = true; 346 ctx->cdata.key_virt = ctx->key; 347 } else { 348 ctx->cdata.key_inline = false; 349 ctx->cdata.key_dma = ctx->key_dma; 350 } 351 352 desc = ctx->sh_desc_enc; 353 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false); 354 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 355 desc_bytes(desc), ctx->dir); 356 357 /* 358 * Job Descriptor and Shared Descriptors 359 * must all fit into the 64-word Descriptor h/w Buffer 360 */ 361 if (rem_bytes >= DESC_GCM_DEC_LEN) { 362 ctx->cdata.key_inline = true; 363 ctx->cdata.key_virt = ctx->key; 364 } else { 365 ctx->cdata.key_inline = false; 366 ctx->cdata.key_dma = ctx->key_dma; 367 } 368 369 desc = ctx->sh_desc_dec; 370 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false); 371 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 372 desc_bytes(desc), ctx->dir); 373 374 return 0; 375 } 376 377 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 378 { 379 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 380 381 ctx->authsize = authsize; 382 gcm_set_sh_desc(authenc); 383 384 return 0; 385 } 386 387 static int rfc4106_set_sh_desc(struct crypto_aead *aead) 388 { 389 struct caam_ctx *ctx = crypto_aead_ctx(aead); 390 struct device *jrdev = ctx->jrdev; 391 unsigned int ivsize = crypto_aead_ivsize(aead); 392 u32 *desc; 393 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 394 ctx->cdata.keylen; 395 396 if (!ctx->cdata.keylen || !ctx->authsize) 397 return 0; 398 399 /* 400 * RFC4106 encrypt shared descriptor 401 * Job Descriptor and Shared Descriptor 402 * must fit into the 64-word Descriptor h/w Buffer 403 */ 404 if (rem_bytes >= DESC_RFC4106_ENC_LEN) { 405 ctx->cdata.key_inline = true; 406 ctx->cdata.key_virt = ctx->key; 407 } else { 408 ctx->cdata.key_inline = false; 409 ctx->cdata.key_dma = ctx->key_dma; 410 } 411 412 desc = ctx->sh_desc_enc; 413 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, 414 false); 415 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 416 desc_bytes(desc), ctx->dir); 417 418 /* 419 * Job Descriptor and Shared Descriptors 420 * must all fit into the 64-word Descriptor h/w Buffer 421 */ 422 if (rem_bytes >= DESC_RFC4106_DEC_LEN) { 423 ctx->cdata.key_inline = true; 424 ctx->cdata.key_virt = ctx->key; 425 } else { 426 ctx->cdata.key_inline = false; 427 ctx->cdata.key_dma = ctx->key_dma; 428 } 429 430 desc = ctx->sh_desc_dec; 431 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, 432 false); 433 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 434 desc_bytes(desc), ctx->dir); 435 436 return 0; 437 } 438 439 static int rfc4106_setauthsize(struct crypto_aead *authenc, 440 unsigned int authsize) 441 { 442 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 443 444 ctx->authsize = authsize; 445 rfc4106_set_sh_desc(authenc); 446 447 return 0; 448 } 449 450 static int rfc4543_set_sh_desc(struct crypto_aead *aead) 451 { 452 struct caam_ctx *ctx = crypto_aead_ctx(aead); 453 struct device *jrdev = ctx->jrdev; 454 unsigned int ivsize = crypto_aead_ivsize(aead); 455 u32 *desc; 456 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 457 ctx->cdata.keylen; 458 459 if (!ctx->cdata.keylen || !ctx->authsize) 460 return 0; 461 462 /* 463 * RFC4543 encrypt shared descriptor 464 * Job Descriptor and Shared Descriptor 465 * must fit into the 64-word Descriptor h/w Buffer 466 */ 467 if (rem_bytes >= DESC_RFC4543_ENC_LEN) { 468 ctx->cdata.key_inline = true; 469 ctx->cdata.key_virt = ctx->key; 470 } else { 471 ctx->cdata.key_inline = false; 472 ctx->cdata.key_dma = ctx->key_dma; 473 } 474 475 desc = ctx->sh_desc_enc; 476 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, 477 false); 478 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 479 desc_bytes(desc), ctx->dir); 480 481 /* 482 * Job Descriptor and Shared Descriptors 483 * must all fit into the 64-word Descriptor h/w Buffer 484 */ 485 if (rem_bytes >= DESC_RFC4543_DEC_LEN) { 486 ctx->cdata.key_inline = true; 487 ctx->cdata.key_virt = ctx->key; 488 } else { 489 ctx->cdata.key_inline = false; 490 ctx->cdata.key_dma = ctx->key_dma; 491 } 492 493 desc = ctx->sh_desc_dec; 494 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, 495 false); 496 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 497 desc_bytes(desc), ctx->dir); 498 499 return 0; 500 } 501 502 static int rfc4543_setauthsize(struct crypto_aead *authenc, 503 unsigned int authsize) 504 { 505 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 506 507 ctx->authsize = authsize; 508 rfc4543_set_sh_desc(authenc); 509 510 return 0; 511 } 512 513 static int aead_setkey(struct crypto_aead *aead, 514 const u8 *key, unsigned int keylen) 515 { 516 struct caam_ctx *ctx = crypto_aead_ctx(aead); 517 struct device *jrdev = ctx->jrdev; 518 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 519 struct crypto_authenc_keys keys; 520 int ret = 0; 521 522 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 523 goto badkey; 524 525 #ifdef DEBUG 526 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", 527 keys.authkeylen + keys.enckeylen, keys.enckeylen, 528 keys.authkeylen); 529 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 530 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 531 #endif 532 533 /* 534 * If DKP is supported, use it in the shared descriptor to generate 535 * the split key. 536 */ 537 if (ctrlpriv->era >= 6) { 538 ctx->adata.keylen = keys.authkeylen; 539 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 540 OP_ALG_ALGSEL_MASK); 541 542 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) 543 goto badkey; 544 545 memcpy(ctx->key, keys.authkey, keys.authkeylen); 546 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, 547 keys.enckeylen); 548 dma_sync_single_for_device(jrdev, ctx->key_dma, 549 ctx->adata.keylen_pad + 550 keys.enckeylen, ctx->dir); 551 goto skip_split_key; 552 } 553 554 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, 555 keys.authkeylen, CAAM_MAX_KEY_SIZE - 556 keys.enckeylen); 557 if (ret) { 558 goto badkey; 559 } 560 561 /* postpend encryption key to auth split key */ 562 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 563 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + 564 keys.enckeylen, ctx->dir); 565 #ifdef DEBUG 566 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 567 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 568 ctx->adata.keylen_pad + keys.enckeylen, 1); 569 #endif 570 571 skip_split_key: 572 ctx->cdata.keylen = keys.enckeylen; 573 memzero_explicit(&keys, sizeof(keys)); 574 return aead_set_sh_desc(aead); 575 badkey: 576 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 577 memzero_explicit(&keys, sizeof(keys)); 578 return -EINVAL; 579 } 580 581 static int gcm_setkey(struct crypto_aead *aead, 582 const u8 *key, unsigned int keylen) 583 { 584 struct caam_ctx *ctx = crypto_aead_ctx(aead); 585 struct device *jrdev = ctx->jrdev; 586 587 #ifdef DEBUG 588 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 589 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 590 #endif 591 592 memcpy(ctx->key, key, keylen); 593 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); 594 ctx->cdata.keylen = keylen; 595 596 return gcm_set_sh_desc(aead); 597 } 598 599 static int rfc4106_setkey(struct crypto_aead *aead, 600 const u8 *key, unsigned int keylen) 601 { 602 struct caam_ctx *ctx = crypto_aead_ctx(aead); 603 struct device *jrdev = ctx->jrdev; 604 605 if (keylen < 4) 606 return -EINVAL; 607 608 #ifdef DEBUG 609 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 610 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 611 #endif 612 613 memcpy(ctx->key, key, keylen); 614 615 /* 616 * The last four bytes of the key material are used as the salt value 617 * in the nonce. Update the AES key length. 618 */ 619 ctx->cdata.keylen = keylen - 4; 620 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, 621 ctx->dir); 622 return rfc4106_set_sh_desc(aead); 623 } 624 625 static int rfc4543_setkey(struct crypto_aead *aead, 626 const u8 *key, unsigned int keylen) 627 { 628 struct caam_ctx *ctx = crypto_aead_ctx(aead); 629 struct device *jrdev = ctx->jrdev; 630 631 if (keylen < 4) 632 return -EINVAL; 633 634 #ifdef DEBUG 635 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 636 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 637 #endif 638 639 memcpy(ctx->key, key, keylen); 640 641 /* 642 * The last four bytes of the key material are used as the salt value 643 * in the nonce. Update the AES key length. 644 */ 645 ctx->cdata.keylen = keylen - 4; 646 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, 647 ctx->dir); 648 return rfc4543_set_sh_desc(aead); 649 } 650 651 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 652 const u8 *key, unsigned int keylen) 653 { 654 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 655 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); 656 const char *alg_name = crypto_tfm_alg_name(tfm); 657 struct device *jrdev = ctx->jrdev; 658 unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 659 u32 *desc; 660 u32 ctx1_iv_off = 0; 661 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 662 OP_ALG_AAI_CTR_MOD128); 663 const bool is_rfc3686 = (ctr_mode && 664 (strstr(alg_name, "rfc3686") != NULL)); 665 666 #ifdef DEBUG 667 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 668 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 669 #endif 670 /* 671 * AES-CTR needs to load IV in CONTEXT1 reg 672 * at an offset of 128bits (16bytes) 673 * CONTEXT1[255:128] = IV 674 */ 675 if (ctr_mode) 676 ctx1_iv_off = 16; 677 678 /* 679 * RFC3686 specific: 680 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} 681 * | *key = {KEY, NONCE} 682 */ 683 if (is_rfc3686) { 684 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 685 keylen -= CTR_RFC3686_NONCE_SIZE; 686 } 687 688 ctx->cdata.keylen = keylen; 689 ctx->cdata.key_virt = key; 690 ctx->cdata.key_inline = true; 691 692 /* ablkcipher_encrypt shared descriptor */ 693 desc = ctx->sh_desc_enc; 694 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, 695 ctx1_iv_off); 696 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 697 desc_bytes(desc), ctx->dir); 698 699 /* ablkcipher_decrypt shared descriptor */ 700 desc = ctx->sh_desc_dec; 701 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, 702 ctx1_iv_off); 703 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 704 desc_bytes(desc), ctx->dir); 705 706 /* ablkcipher_givencrypt shared descriptor */ 707 desc = ctx->sh_desc_givenc; 708 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686, 709 ctx1_iv_off); 710 dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma, 711 desc_bytes(desc), ctx->dir); 712 713 return 0; 714 } 715 716 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 717 const u8 *key, unsigned int keylen) 718 { 719 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 720 struct device *jrdev = ctx->jrdev; 721 u32 *desc; 722 723 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { 724 crypto_ablkcipher_set_flags(ablkcipher, 725 CRYPTO_TFM_RES_BAD_KEY_LEN); 726 dev_err(jrdev, "key size mismatch\n"); 727 return -EINVAL; 728 } 729 730 ctx->cdata.keylen = keylen; 731 ctx->cdata.key_virt = key; 732 ctx->cdata.key_inline = true; 733 734 /* xts_ablkcipher_encrypt shared descriptor */ 735 desc = ctx->sh_desc_enc; 736 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); 737 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 738 desc_bytes(desc), ctx->dir); 739 740 /* xts_ablkcipher_decrypt shared descriptor */ 741 desc = ctx->sh_desc_dec; 742 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); 743 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 744 desc_bytes(desc), ctx->dir); 745 746 return 0; 747 } 748 749 /* 750 * aead_edesc - s/w-extended aead descriptor 751 * @src_nents: number of segments in input s/w scatterlist 752 * @dst_nents: number of segments in output s/w scatterlist 753 * @sec4_sg_bytes: length of dma mapped sec4_sg space 754 * @sec4_sg_dma: bus physical mapped address of h/w link table 755 * @sec4_sg: pointer to h/w link table 756 * @hw_desc: the h/w job descriptor followed by any referenced link tables 757 */ 758 struct aead_edesc { 759 int src_nents; 760 int dst_nents; 761 int sec4_sg_bytes; 762 dma_addr_t sec4_sg_dma; 763 struct sec4_sg_entry *sec4_sg; 764 u32 hw_desc[]; 765 }; 766 767 /* 768 * ablkcipher_edesc - s/w-extended ablkcipher descriptor 769 * @src_nents: number of segments in input s/w scatterlist 770 * @dst_nents: number of segments in output s/w scatterlist 771 * @iv_dma: dma address of iv for checking continuity and link table 772 * @iv_dir: DMA mapping direction for IV 773 * @sec4_sg_bytes: length of dma mapped sec4_sg space 774 * @sec4_sg_dma: bus physical mapped address of h/w link table 775 * @sec4_sg: pointer to h/w link table 776 * @hw_desc: the h/w job descriptor followed by any referenced link tables 777 * and IV 778 */ 779 struct ablkcipher_edesc { 780 int src_nents; 781 int dst_nents; 782 dma_addr_t iv_dma; 783 enum dma_data_direction iv_dir; 784 int sec4_sg_bytes; 785 dma_addr_t sec4_sg_dma; 786 struct sec4_sg_entry *sec4_sg; 787 u32 hw_desc[0]; 788 }; 789 790 static void caam_unmap(struct device *dev, struct scatterlist *src, 791 struct scatterlist *dst, int src_nents, 792 int dst_nents, 793 dma_addr_t iv_dma, int ivsize, 794 enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma, 795 int sec4_sg_bytes) 796 { 797 if (dst != src) { 798 if (src_nents) 799 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 800 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); 801 } else { 802 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); 803 } 804 805 if (iv_dma) 806 dma_unmap_single(dev, iv_dma, ivsize, iv_dir); 807 if (sec4_sg_bytes) 808 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, 809 DMA_TO_DEVICE); 810 } 811 812 static void aead_unmap(struct device *dev, 813 struct aead_edesc *edesc, 814 struct aead_request *req) 815 { 816 caam_unmap(dev, req->src, req->dst, 817 edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE, 818 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 819 } 820 821 static void ablkcipher_unmap(struct device *dev, 822 struct ablkcipher_edesc *edesc, 823 struct ablkcipher_request *req) 824 { 825 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 826 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 827 828 caam_unmap(dev, req->src, req->dst, 829 edesc->src_nents, edesc->dst_nents, 830 edesc->iv_dma, ivsize, edesc->iv_dir, 831 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 832 } 833 834 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 835 void *context) 836 { 837 struct aead_request *req = context; 838 struct aead_edesc *edesc; 839 840 #ifdef DEBUG 841 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 842 #endif 843 844 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 845 846 if (err) 847 caam_jr_strstatus(jrdev, err); 848 849 aead_unmap(jrdev, edesc, req); 850 851 kfree(edesc); 852 853 aead_request_complete(req, err); 854 } 855 856 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 857 void *context) 858 { 859 struct aead_request *req = context; 860 struct aead_edesc *edesc; 861 862 #ifdef DEBUG 863 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 864 #endif 865 866 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 867 868 if (err) 869 caam_jr_strstatus(jrdev, err); 870 871 aead_unmap(jrdev, edesc, req); 872 873 /* 874 * verify hw auth check passed else return -EBADMSG 875 */ 876 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) 877 err = -EBADMSG; 878 879 kfree(edesc); 880 881 aead_request_complete(req, err); 882 } 883 884 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 885 void *context) 886 { 887 struct ablkcipher_request *req = context; 888 struct ablkcipher_edesc *edesc; 889 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 890 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 891 892 #ifdef DEBUG 893 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 894 #endif 895 896 edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]); 897 898 if (err) 899 caam_jr_strstatus(jrdev, err); 900 901 #ifdef DEBUG 902 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 903 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 904 edesc->src_nents > 1 ? 100 : ivsize, 1); 905 #endif 906 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", 907 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 908 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 909 910 ablkcipher_unmap(jrdev, edesc, req); 911 912 /* 913 * The crypto API expects us to set the IV (req->info) to the last 914 * ciphertext block. This is used e.g. by the CTS mode. 915 */ 916 scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, 917 ivsize, 0); 918 919 /* In case initial IV was generated, copy it in GIVCIPHER request */ 920 if (edesc->iv_dir == DMA_FROM_DEVICE) { 921 u8 *iv; 922 struct skcipher_givcrypt_request *greq; 923 924 greq = container_of(req, struct skcipher_givcrypt_request, 925 creq); 926 iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) + 927 edesc->sec4_sg_bytes; 928 memcpy(greq->giv, iv, ivsize); 929 } 930 931 kfree(edesc); 932 933 ablkcipher_request_complete(req, err); 934 } 935 936 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 937 void *context) 938 { 939 struct ablkcipher_request *req = context; 940 struct ablkcipher_edesc *edesc; 941 #ifdef DEBUG 942 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 943 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 944 945 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 946 #endif 947 948 edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]); 949 if (err) 950 caam_jr_strstatus(jrdev, err); 951 952 #ifdef DEBUG 953 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 954 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 955 ivsize, 1); 956 #endif 957 caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ", 958 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 959 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 960 961 ablkcipher_unmap(jrdev, edesc, req); 962 kfree(edesc); 963 964 ablkcipher_request_complete(req, err); 965 } 966 967 /* 968 * Fill in aead job descriptor 969 */ 970 static void init_aead_job(struct aead_request *req, 971 struct aead_edesc *edesc, 972 bool all_contig, bool encrypt) 973 { 974 struct crypto_aead *aead = crypto_aead_reqtfm(req); 975 struct caam_ctx *ctx = crypto_aead_ctx(aead); 976 int authsize = ctx->authsize; 977 u32 *desc = edesc->hw_desc; 978 u32 out_options, in_options; 979 dma_addr_t dst_dma, src_dma; 980 int len, sec4_sg_index = 0; 981 dma_addr_t ptr; 982 u32 *sh_desc; 983 984 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; 985 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; 986 987 len = desc_len(sh_desc); 988 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 989 990 if (all_contig) { 991 src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0; 992 in_options = 0; 993 } else { 994 src_dma = edesc->sec4_sg_dma; 995 sec4_sg_index += edesc->src_nents; 996 in_options = LDST_SGF; 997 } 998 999 append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen, 1000 in_options); 1001 1002 dst_dma = src_dma; 1003 out_options = in_options; 1004 1005 if (unlikely(req->src != req->dst)) { 1006 if (edesc->dst_nents == 1) { 1007 dst_dma = sg_dma_address(req->dst); 1008 } else { 1009 dst_dma = edesc->sec4_sg_dma + 1010 sec4_sg_index * 1011 sizeof(struct sec4_sg_entry); 1012 out_options = LDST_SGF; 1013 } 1014 } 1015 1016 if (encrypt) 1017 append_seq_out_ptr(desc, dst_dma, 1018 req->assoclen + req->cryptlen + authsize, 1019 out_options); 1020 else 1021 append_seq_out_ptr(desc, dst_dma, 1022 req->assoclen + req->cryptlen - authsize, 1023 out_options); 1024 } 1025 1026 static void init_gcm_job(struct aead_request *req, 1027 struct aead_edesc *edesc, 1028 bool all_contig, bool encrypt) 1029 { 1030 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1031 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1032 unsigned int ivsize = crypto_aead_ivsize(aead); 1033 u32 *desc = edesc->hw_desc; 1034 bool generic_gcm = (ivsize == GCM_AES_IV_SIZE); 1035 unsigned int last; 1036 1037 init_aead_job(req, edesc, all_contig, encrypt); 1038 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); 1039 1040 /* BUG This should not be specific to generic GCM. */ 1041 last = 0; 1042 if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen)) 1043 last = FIFOLD_TYPE_LAST1; 1044 1045 /* Read GCM IV */ 1046 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | 1047 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last); 1048 /* Append Salt */ 1049 if (!generic_gcm) 1050 append_data(desc, ctx->key + ctx->cdata.keylen, 4); 1051 /* Append IV */ 1052 append_data(desc, req->iv, ivsize); 1053 /* End of blank commands */ 1054 } 1055 1056 static void init_authenc_job(struct aead_request *req, 1057 struct aead_edesc *edesc, 1058 bool all_contig, bool encrypt) 1059 { 1060 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1061 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), 1062 struct caam_aead_alg, aead); 1063 unsigned int ivsize = crypto_aead_ivsize(aead); 1064 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1065 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 1066 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 1067 OP_ALG_AAI_CTR_MOD128); 1068 const bool is_rfc3686 = alg->caam.rfc3686; 1069 u32 *desc = edesc->hw_desc; 1070 u32 ivoffset = 0; 1071 1072 /* 1073 * AES-CTR needs to load IV in CONTEXT1 reg 1074 * at an offset of 128bits (16bytes) 1075 * CONTEXT1[255:128] = IV 1076 */ 1077 if (ctr_mode) 1078 ivoffset = 16; 1079 1080 /* 1081 * RFC3686 specific: 1082 * CONTEXT1[255:128] = {NONCE, IV, COUNTER} 1083 */ 1084 if (is_rfc3686) 1085 ivoffset = 16 + CTR_RFC3686_NONCE_SIZE; 1086 1087 init_aead_job(req, edesc, all_contig, encrypt); 1088 1089 /* 1090 * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports 1091 * having DPOVRD as destination. 1092 */ 1093 if (ctrlpriv->era < 3) 1094 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); 1095 else 1096 append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen); 1097 1098 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) 1099 append_load_as_imm(desc, req->iv, ivsize, 1100 LDST_CLASS_1_CCB | 1101 LDST_SRCDST_BYTE_CONTEXT | 1102 (ivoffset << LDST_OFFSET_SHIFT)); 1103 } 1104 1105 /* 1106 * Fill in ablkcipher job descriptor 1107 */ 1108 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, 1109 struct ablkcipher_edesc *edesc, 1110 struct ablkcipher_request *req) 1111 { 1112 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1113 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1114 u32 *desc = edesc->hw_desc; 1115 u32 out_options = 0; 1116 dma_addr_t dst_dma; 1117 int len; 1118 1119 #ifdef DEBUG 1120 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 1121 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1122 ivsize, 1); 1123 pr_err("asked=%d, nbytes%d\n", 1124 (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes); 1125 #endif 1126 caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ", 1127 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1128 edesc->src_nents > 1 ? 100 : req->nbytes, 1); 1129 1130 len = desc_len(sh_desc); 1131 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1132 1133 append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize, 1134 LDST_SGF); 1135 1136 if (likely(req->src == req->dst)) { 1137 dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry); 1138 out_options = LDST_SGF; 1139 } else { 1140 if (edesc->dst_nents == 1) { 1141 dst_dma = sg_dma_address(req->dst); 1142 } else { 1143 dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) * 1144 sizeof(struct sec4_sg_entry); 1145 out_options = LDST_SGF; 1146 } 1147 } 1148 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); 1149 } 1150 1151 /* 1152 * Fill in ablkcipher givencrypt job descriptor 1153 */ 1154 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, 1155 struct ablkcipher_edesc *edesc, 1156 struct ablkcipher_request *req) 1157 { 1158 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1159 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1160 u32 *desc = edesc->hw_desc; 1161 u32 in_options; 1162 dma_addr_t dst_dma, src_dma; 1163 int len, sec4_sg_index = 0; 1164 1165 #ifdef DEBUG 1166 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", 1167 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1168 ivsize, 1); 1169 #endif 1170 caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", 1171 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1172 edesc->src_nents > 1 ? 100 : req->nbytes, 1); 1173 1174 len = desc_len(sh_desc); 1175 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1176 1177 if (edesc->src_nents == 1) { 1178 src_dma = sg_dma_address(req->src); 1179 in_options = 0; 1180 } else { 1181 src_dma = edesc->sec4_sg_dma; 1182 sec4_sg_index += edesc->src_nents; 1183 in_options = LDST_SGF; 1184 } 1185 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options); 1186 1187 dst_dma = edesc->sec4_sg_dma + sec4_sg_index * 1188 sizeof(struct sec4_sg_entry); 1189 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF); 1190 } 1191 1192 /* 1193 * allocate and map the aead extended descriptor 1194 */ 1195 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 1196 int desc_bytes, bool *all_contig_ptr, 1197 bool encrypt) 1198 { 1199 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1200 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1201 struct device *jrdev = ctx->jrdev; 1202 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1203 GFP_KERNEL : GFP_ATOMIC; 1204 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 1205 struct aead_edesc *edesc; 1206 int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; 1207 unsigned int authsize = ctx->authsize; 1208 1209 if (unlikely(req->dst != req->src)) { 1210 src_nents = sg_nents_for_len(req->src, req->assoclen + 1211 req->cryptlen); 1212 if (unlikely(src_nents < 0)) { 1213 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", 1214 req->assoclen + req->cryptlen); 1215 return ERR_PTR(src_nents); 1216 } 1217 1218 dst_nents = sg_nents_for_len(req->dst, req->assoclen + 1219 req->cryptlen + 1220 (encrypt ? authsize : 1221 (-authsize))); 1222 if (unlikely(dst_nents < 0)) { 1223 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", 1224 req->assoclen + req->cryptlen + 1225 (encrypt ? authsize : (-authsize))); 1226 return ERR_PTR(dst_nents); 1227 } 1228 } else { 1229 src_nents = sg_nents_for_len(req->src, req->assoclen + 1230 req->cryptlen + 1231 (encrypt ? authsize : 0)); 1232 if (unlikely(src_nents < 0)) { 1233 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", 1234 req->assoclen + req->cryptlen + 1235 (encrypt ? authsize : 0)); 1236 return ERR_PTR(src_nents); 1237 } 1238 } 1239 1240 if (likely(req->src == req->dst)) { 1241 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, 1242 DMA_BIDIRECTIONAL); 1243 if (unlikely(!mapped_src_nents)) { 1244 dev_err(jrdev, "unable to map source\n"); 1245 return ERR_PTR(-ENOMEM); 1246 } 1247 } else { 1248 /* Cover also the case of null (zero length) input data */ 1249 if (src_nents) { 1250 mapped_src_nents = dma_map_sg(jrdev, req->src, 1251 src_nents, DMA_TO_DEVICE); 1252 if (unlikely(!mapped_src_nents)) { 1253 dev_err(jrdev, "unable to map source\n"); 1254 return ERR_PTR(-ENOMEM); 1255 } 1256 } else { 1257 mapped_src_nents = 0; 1258 } 1259 1260 mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, 1261 DMA_FROM_DEVICE); 1262 if (unlikely(!mapped_dst_nents)) { 1263 dev_err(jrdev, "unable to map destination\n"); 1264 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1265 return ERR_PTR(-ENOMEM); 1266 } 1267 } 1268 1269 sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0; 1270 sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1271 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 1272 1273 /* allocate space for base edesc and hw desc commands, link tables */ 1274 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, 1275 GFP_DMA | flags); 1276 if (!edesc) { 1277 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1278 0, DMA_NONE, 0, 0); 1279 return ERR_PTR(-ENOMEM); 1280 } 1281 1282 edesc->src_nents = src_nents; 1283 edesc->dst_nents = dst_nents; 1284 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + 1285 desc_bytes; 1286 *all_contig_ptr = !(mapped_src_nents > 1); 1287 1288 sec4_sg_index = 0; 1289 if (mapped_src_nents > 1) { 1290 sg_to_sec4_sg_last(req->src, mapped_src_nents, 1291 edesc->sec4_sg + sec4_sg_index, 0); 1292 sec4_sg_index += mapped_src_nents; 1293 } 1294 if (mapped_dst_nents > 1) { 1295 sg_to_sec4_sg_last(req->dst, mapped_dst_nents, 1296 edesc->sec4_sg + sec4_sg_index, 0); 1297 } 1298 1299 if (!sec4_sg_bytes) 1300 return edesc; 1301 1302 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1303 sec4_sg_bytes, DMA_TO_DEVICE); 1304 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1305 dev_err(jrdev, "unable to map S/G table\n"); 1306 aead_unmap(jrdev, edesc, req); 1307 kfree(edesc); 1308 return ERR_PTR(-ENOMEM); 1309 } 1310 1311 edesc->sec4_sg_bytes = sec4_sg_bytes; 1312 1313 return edesc; 1314 } 1315 1316 static int gcm_encrypt(struct aead_request *req) 1317 { 1318 struct aead_edesc *edesc; 1319 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1320 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1321 struct device *jrdev = ctx->jrdev; 1322 bool all_contig; 1323 u32 *desc; 1324 int ret = 0; 1325 1326 /* allocate extended descriptor */ 1327 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true); 1328 if (IS_ERR(edesc)) 1329 return PTR_ERR(edesc); 1330 1331 /* Create and submit job descriptor */ 1332 init_gcm_job(req, edesc, all_contig, true); 1333 #ifdef DEBUG 1334 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1335 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1336 desc_bytes(edesc->hw_desc), 1); 1337 #endif 1338 1339 desc = edesc->hw_desc; 1340 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 1341 if (!ret) { 1342 ret = -EINPROGRESS; 1343 } else { 1344 aead_unmap(jrdev, edesc, req); 1345 kfree(edesc); 1346 } 1347 1348 return ret; 1349 } 1350 1351 static int ipsec_gcm_encrypt(struct aead_request *req) 1352 { 1353 if (req->assoclen < 8) 1354 return -EINVAL; 1355 1356 return gcm_encrypt(req); 1357 } 1358 1359 static int aead_encrypt(struct aead_request *req) 1360 { 1361 struct aead_edesc *edesc; 1362 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1363 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1364 struct device *jrdev = ctx->jrdev; 1365 bool all_contig; 1366 u32 *desc; 1367 int ret = 0; 1368 1369 /* allocate extended descriptor */ 1370 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, 1371 &all_contig, true); 1372 if (IS_ERR(edesc)) 1373 return PTR_ERR(edesc); 1374 1375 /* Create and submit job descriptor */ 1376 init_authenc_job(req, edesc, all_contig, true); 1377 #ifdef DEBUG 1378 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1379 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1380 desc_bytes(edesc->hw_desc), 1); 1381 #endif 1382 1383 desc = edesc->hw_desc; 1384 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 1385 if (!ret) { 1386 ret = -EINPROGRESS; 1387 } else { 1388 aead_unmap(jrdev, edesc, req); 1389 kfree(edesc); 1390 } 1391 1392 return ret; 1393 } 1394 1395 static int gcm_decrypt(struct aead_request *req) 1396 { 1397 struct aead_edesc *edesc; 1398 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1399 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1400 struct device *jrdev = ctx->jrdev; 1401 bool all_contig; 1402 u32 *desc; 1403 int ret = 0; 1404 1405 /* allocate extended descriptor */ 1406 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false); 1407 if (IS_ERR(edesc)) 1408 return PTR_ERR(edesc); 1409 1410 /* Create and submit job descriptor*/ 1411 init_gcm_job(req, edesc, all_contig, false); 1412 #ifdef DEBUG 1413 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1414 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1415 desc_bytes(edesc->hw_desc), 1); 1416 #endif 1417 1418 desc = edesc->hw_desc; 1419 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); 1420 if (!ret) { 1421 ret = -EINPROGRESS; 1422 } else { 1423 aead_unmap(jrdev, edesc, req); 1424 kfree(edesc); 1425 } 1426 1427 return ret; 1428 } 1429 1430 static int ipsec_gcm_decrypt(struct aead_request *req) 1431 { 1432 if (req->assoclen < 8) 1433 return -EINVAL; 1434 1435 return gcm_decrypt(req); 1436 } 1437 1438 static int aead_decrypt(struct aead_request *req) 1439 { 1440 struct aead_edesc *edesc; 1441 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1442 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1443 struct device *jrdev = ctx->jrdev; 1444 bool all_contig; 1445 u32 *desc; 1446 int ret = 0; 1447 1448 caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ", 1449 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1450 req->assoclen + req->cryptlen, 1); 1451 1452 /* allocate extended descriptor */ 1453 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, 1454 &all_contig, false); 1455 if (IS_ERR(edesc)) 1456 return PTR_ERR(edesc); 1457 1458 /* Create and submit job descriptor*/ 1459 init_authenc_job(req, edesc, all_contig, false); 1460 #ifdef DEBUG 1461 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1462 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1463 desc_bytes(edesc->hw_desc), 1); 1464 #endif 1465 1466 desc = edesc->hw_desc; 1467 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); 1468 if (!ret) { 1469 ret = -EINPROGRESS; 1470 } else { 1471 aead_unmap(jrdev, edesc, req); 1472 kfree(edesc); 1473 } 1474 1475 return ret; 1476 } 1477 1478 /* 1479 * allocate and map the ablkcipher extended descriptor for ablkcipher 1480 */ 1481 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request 1482 *req, int desc_bytes) 1483 { 1484 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1485 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1486 struct device *jrdev = ctx->jrdev; 1487 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1488 GFP_KERNEL : GFP_ATOMIC; 1489 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; 1490 struct ablkcipher_edesc *edesc; 1491 dma_addr_t iv_dma; 1492 u8 *iv; 1493 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1494 int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; 1495 1496 src_nents = sg_nents_for_len(req->src, req->nbytes); 1497 if (unlikely(src_nents < 0)) { 1498 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", 1499 req->nbytes); 1500 return ERR_PTR(src_nents); 1501 } 1502 1503 if (req->dst != req->src) { 1504 dst_nents = sg_nents_for_len(req->dst, req->nbytes); 1505 if (unlikely(dst_nents < 0)) { 1506 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", 1507 req->nbytes); 1508 return ERR_PTR(dst_nents); 1509 } 1510 } 1511 1512 if (likely(req->src == req->dst)) { 1513 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, 1514 DMA_BIDIRECTIONAL); 1515 if (unlikely(!mapped_src_nents)) { 1516 dev_err(jrdev, "unable to map source\n"); 1517 return ERR_PTR(-ENOMEM); 1518 } 1519 } else { 1520 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, 1521 DMA_TO_DEVICE); 1522 if (unlikely(!mapped_src_nents)) { 1523 dev_err(jrdev, "unable to map source\n"); 1524 return ERR_PTR(-ENOMEM); 1525 } 1526 1527 mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, 1528 DMA_FROM_DEVICE); 1529 if (unlikely(!mapped_dst_nents)) { 1530 dev_err(jrdev, "unable to map destination\n"); 1531 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1532 return ERR_PTR(-ENOMEM); 1533 } 1534 } 1535 1536 sec4_sg_ents = 1 + mapped_src_nents; 1537 dst_sg_idx = sec4_sg_ents; 1538 sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 1539 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); 1540 1541 /* 1542 * allocate space for base edesc and hw desc commands, link tables, IV 1543 */ 1544 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, 1545 GFP_DMA | flags); 1546 if (!edesc) { 1547 dev_err(jrdev, "could not allocate extended descriptor\n"); 1548 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1549 0, DMA_NONE, 0, 0); 1550 return ERR_PTR(-ENOMEM); 1551 } 1552 1553 edesc->src_nents = src_nents; 1554 edesc->dst_nents = dst_nents; 1555 edesc->sec4_sg_bytes = sec4_sg_bytes; 1556 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + 1557 desc_bytes; 1558 edesc->iv_dir = DMA_TO_DEVICE; 1559 1560 /* Make sure IV is located in a DMAable area */ 1561 iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; 1562 memcpy(iv, req->info, ivsize); 1563 1564 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); 1565 if (dma_mapping_error(jrdev, iv_dma)) { 1566 dev_err(jrdev, "unable to map IV\n"); 1567 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1568 0, DMA_NONE, 0, 0); 1569 kfree(edesc); 1570 return ERR_PTR(-ENOMEM); 1571 } 1572 1573 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); 1574 sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0); 1575 1576 if (mapped_dst_nents > 1) { 1577 sg_to_sec4_sg_last(req->dst, mapped_dst_nents, 1578 edesc->sec4_sg + dst_sg_idx, 0); 1579 } 1580 1581 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1582 sec4_sg_bytes, DMA_TO_DEVICE); 1583 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1584 dev_err(jrdev, "unable to map S/G table\n"); 1585 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 1586 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); 1587 kfree(edesc); 1588 return ERR_PTR(-ENOMEM); 1589 } 1590 1591 edesc->iv_dma = iv_dma; 1592 1593 #ifdef DEBUG 1594 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ", 1595 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 1596 sec4_sg_bytes, 1); 1597 #endif 1598 1599 return edesc; 1600 } 1601 1602 static int ablkcipher_encrypt(struct ablkcipher_request *req) 1603 { 1604 struct ablkcipher_edesc *edesc; 1605 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1606 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1607 struct device *jrdev = ctx->jrdev; 1608 u32 *desc; 1609 int ret = 0; 1610 1611 /* allocate extended descriptor */ 1612 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); 1613 if (IS_ERR(edesc)) 1614 return PTR_ERR(edesc); 1615 1616 /* Create and submit job descriptor*/ 1617 init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req); 1618 #ifdef DEBUG 1619 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", 1620 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1621 desc_bytes(edesc->hw_desc), 1); 1622 #endif 1623 desc = edesc->hw_desc; 1624 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); 1625 1626 if (!ret) { 1627 ret = -EINPROGRESS; 1628 } else { 1629 ablkcipher_unmap(jrdev, edesc, req); 1630 kfree(edesc); 1631 } 1632 1633 return ret; 1634 } 1635 1636 static int ablkcipher_decrypt(struct ablkcipher_request *req) 1637 { 1638 struct ablkcipher_edesc *edesc; 1639 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1640 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1641 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1642 struct device *jrdev = ctx->jrdev; 1643 u32 *desc; 1644 int ret = 0; 1645 1646 /* allocate extended descriptor */ 1647 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); 1648 if (IS_ERR(edesc)) 1649 return PTR_ERR(edesc); 1650 1651 /* 1652 * The crypto API expects us to set the IV (req->info) to the last 1653 * ciphertext block. 1654 */ 1655 scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, 1656 ivsize, 0); 1657 1658 /* Create and submit job descriptor*/ 1659 init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req); 1660 desc = edesc->hw_desc; 1661 #ifdef DEBUG 1662 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", 1663 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1664 desc_bytes(edesc->hw_desc), 1); 1665 #endif 1666 1667 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); 1668 if (!ret) { 1669 ret = -EINPROGRESS; 1670 } else { 1671 ablkcipher_unmap(jrdev, edesc, req); 1672 kfree(edesc); 1673 } 1674 1675 return ret; 1676 } 1677 1678 /* 1679 * allocate and map the ablkcipher extended descriptor 1680 * for ablkcipher givencrypt 1681 */ 1682 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( 1683 struct skcipher_givcrypt_request *greq, 1684 int desc_bytes) 1685 { 1686 struct ablkcipher_request *req = &greq->creq; 1687 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1688 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1689 struct device *jrdev = ctx->jrdev; 1690 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1691 GFP_KERNEL : GFP_ATOMIC; 1692 int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents; 1693 struct ablkcipher_edesc *edesc; 1694 dma_addr_t iv_dma; 1695 u8 *iv; 1696 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1697 int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; 1698 1699 src_nents = sg_nents_for_len(req->src, req->nbytes); 1700 if (unlikely(src_nents < 0)) { 1701 dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", 1702 req->nbytes); 1703 return ERR_PTR(src_nents); 1704 } 1705 1706 if (likely(req->src == req->dst)) { 1707 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, 1708 DMA_BIDIRECTIONAL); 1709 if (unlikely(!mapped_src_nents)) { 1710 dev_err(jrdev, "unable to map source\n"); 1711 return ERR_PTR(-ENOMEM); 1712 } 1713 1714 dst_nents = src_nents; 1715 mapped_dst_nents = src_nents; 1716 } else { 1717 mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, 1718 DMA_TO_DEVICE); 1719 if (unlikely(!mapped_src_nents)) { 1720 dev_err(jrdev, "unable to map source\n"); 1721 return ERR_PTR(-ENOMEM); 1722 } 1723 1724 dst_nents = sg_nents_for_len(req->dst, req->nbytes); 1725 if (unlikely(dst_nents < 0)) { 1726 dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", 1727 req->nbytes); 1728 return ERR_PTR(dst_nents); 1729 } 1730 1731 mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, 1732 DMA_FROM_DEVICE); 1733 if (unlikely(!mapped_dst_nents)) { 1734 dev_err(jrdev, "unable to map destination\n"); 1735 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1736 return ERR_PTR(-ENOMEM); 1737 } 1738 } 1739 1740 sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0; 1741 dst_sg_idx = sec4_sg_ents; 1742 sec4_sg_ents += 1 + mapped_dst_nents; 1743 1744 /* 1745 * allocate space for base edesc and hw desc commands, link tables, IV 1746 */ 1747 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); 1748 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize, 1749 GFP_DMA | flags); 1750 if (!edesc) { 1751 dev_err(jrdev, "could not allocate extended descriptor\n"); 1752 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1753 0, DMA_NONE, 0, 0); 1754 return ERR_PTR(-ENOMEM); 1755 } 1756 1757 edesc->src_nents = src_nents; 1758 edesc->dst_nents = dst_nents; 1759 edesc->sec4_sg_bytes = sec4_sg_bytes; 1760 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + 1761 desc_bytes; 1762 edesc->iv_dir = DMA_FROM_DEVICE; 1763 1764 /* Make sure IV is located in a DMAable area */ 1765 iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; 1766 iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE); 1767 if (dma_mapping_error(jrdev, iv_dma)) { 1768 dev_err(jrdev, "unable to map IV\n"); 1769 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1770 0, DMA_NONE, 0, 0); 1771 kfree(edesc); 1772 return ERR_PTR(-ENOMEM); 1773 } 1774 1775 if (mapped_src_nents > 1) 1776 sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg, 1777 0); 1778 1779 dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0); 1780 sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg + 1781 dst_sg_idx + 1, 0); 1782 1783 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1784 sec4_sg_bytes, DMA_TO_DEVICE); 1785 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1786 dev_err(jrdev, "unable to map S/G table\n"); 1787 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 1788 iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0); 1789 kfree(edesc); 1790 return ERR_PTR(-ENOMEM); 1791 } 1792 edesc->iv_dma = iv_dma; 1793 1794 #ifdef DEBUG 1795 print_hex_dump(KERN_ERR, 1796 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ", 1797 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 1798 sec4_sg_bytes, 1); 1799 #endif 1800 1801 return edesc; 1802 } 1803 1804 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq) 1805 { 1806 struct ablkcipher_request *req = &creq->creq; 1807 struct ablkcipher_edesc *edesc; 1808 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1809 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1810 struct device *jrdev = ctx->jrdev; 1811 u32 *desc; 1812 int ret = 0; 1813 1814 /* allocate extended descriptor */ 1815 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ); 1816 if (IS_ERR(edesc)) 1817 return PTR_ERR(edesc); 1818 1819 /* Create and submit job descriptor*/ 1820 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma, 1821 edesc, req); 1822 #ifdef DEBUG 1823 print_hex_dump(KERN_ERR, 1824 "ablkcipher jobdesc@" __stringify(__LINE__) ": ", 1825 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1826 desc_bytes(edesc->hw_desc), 1); 1827 #endif 1828 desc = edesc->hw_desc; 1829 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); 1830 1831 if (!ret) { 1832 ret = -EINPROGRESS; 1833 } else { 1834 ablkcipher_unmap(jrdev, edesc, req); 1835 kfree(edesc); 1836 } 1837 1838 return ret; 1839 } 1840 1841 #define template_aead template_u.aead 1842 #define template_ablkcipher template_u.ablkcipher 1843 struct caam_alg_template { 1844 char name[CRYPTO_MAX_ALG_NAME]; 1845 char driver_name[CRYPTO_MAX_ALG_NAME]; 1846 unsigned int blocksize; 1847 u32 type; 1848 union { 1849 struct ablkcipher_alg ablkcipher; 1850 } template_u; 1851 u32 class1_alg_type; 1852 u32 class2_alg_type; 1853 }; 1854 1855 static struct caam_alg_template driver_algs[] = { 1856 /* ablkcipher descriptor */ 1857 { 1858 .name = "cbc(aes)", 1859 .driver_name = "cbc-aes-caam", 1860 .blocksize = AES_BLOCK_SIZE, 1861 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1862 .template_ablkcipher = { 1863 .setkey = ablkcipher_setkey, 1864 .encrypt = ablkcipher_encrypt, 1865 .decrypt = ablkcipher_decrypt, 1866 .givencrypt = ablkcipher_givencrypt, 1867 .geniv = "<built-in>", 1868 .min_keysize = AES_MIN_KEY_SIZE, 1869 .max_keysize = AES_MAX_KEY_SIZE, 1870 .ivsize = AES_BLOCK_SIZE, 1871 }, 1872 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1873 }, 1874 { 1875 .name = "cbc(des3_ede)", 1876 .driver_name = "cbc-3des-caam", 1877 .blocksize = DES3_EDE_BLOCK_SIZE, 1878 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1879 .template_ablkcipher = { 1880 .setkey = ablkcipher_setkey, 1881 .encrypt = ablkcipher_encrypt, 1882 .decrypt = ablkcipher_decrypt, 1883 .givencrypt = ablkcipher_givencrypt, 1884 .geniv = "<built-in>", 1885 .min_keysize = DES3_EDE_KEY_SIZE, 1886 .max_keysize = DES3_EDE_KEY_SIZE, 1887 .ivsize = DES3_EDE_BLOCK_SIZE, 1888 }, 1889 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1890 }, 1891 { 1892 .name = "cbc(des)", 1893 .driver_name = "cbc-des-caam", 1894 .blocksize = DES_BLOCK_SIZE, 1895 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1896 .template_ablkcipher = { 1897 .setkey = ablkcipher_setkey, 1898 .encrypt = ablkcipher_encrypt, 1899 .decrypt = ablkcipher_decrypt, 1900 .givencrypt = ablkcipher_givencrypt, 1901 .geniv = "<built-in>", 1902 .min_keysize = DES_KEY_SIZE, 1903 .max_keysize = DES_KEY_SIZE, 1904 .ivsize = DES_BLOCK_SIZE, 1905 }, 1906 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 1907 }, 1908 { 1909 .name = "ctr(aes)", 1910 .driver_name = "ctr-aes-caam", 1911 .blocksize = 1, 1912 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 1913 .template_ablkcipher = { 1914 .setkey = ablkcipher_setkey, 1915 .encrypt = ablkcipher_encrypt, 1916 .decrypt = ablkcipher_decrypt, 1917 .geniv = "chainiv", 1918 .min_keysize = AES_MIN_KEY_SIZE, 1919 .max_keysize = AES_MAX_KEY_SIZE, 1920 .ivsize = AES_BLOCK_SIZE, 1921 }, 1922 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, 1923 }, 1924 { 1925 .name = "rfc3686(ctr(aes))", 1926 .driver_name = "rfc3686-ctr-aes-caam", 1927 .blocksize = 1, 1928 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1929 .template_ablkcipher = { 1930 .setkey = ablkcipher_setkey, 1931 .encrypt = ablkcipher_encrypt, 1932 .decrypt = ablkcipher_decrypt, 1933 .givencrypt = ablkcipher_givencrypt, 1934 .geniv = "<built-in>", 1935 .min_keysize = AES_MIN_KEY_SIZE + 1936 CTR_RFC3686_NONCE_SIZE, 1937 .max_keysize = AES_MAX_KEY_SIZE + 1938 CTR_RFC3686_NONCE_SIZE, 1939 .ivsize = CTR_RFC3686_IV_SIZE, 1940 }, 1941 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, 1942 }, 1943 { 1944 .name = "xts(aes)", 1945 .driver_name = "xts-aes-caam", 1946 .blocksize = AES_BLOCK_SIZE, 1947 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 1948 .template_ablkcipher = { 1949 .setkey = xts_ablkcipher_setkey, 1950 .encrypt = ablkcipher_encrypt, 1951 .decrypt = ablkcipher_decrypt, 1952 .geniv = "eseqiv", 1953 .min_keysize = 2 * AES_MIN_KEY_SIZE, 1954 .max_keysize = 2 * AES_MAX_KEY_SIZE, 1955 .ivsize = AES_BLOCK_SIZE, 1956 }, 1957 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, 1958 }, 1959 }; 1960 1961 static struct caam_aead_alg driver_aeads[] = { 1962 { 1963 .aead = { 1964 .base = { 1965 .cra_name = "rfc4106(gcm(aes))", 1966 .cra_driver_name = "rfc4106-gcm-aes-caam", 1967 .cra_blocksize = 1, 1968 }, 1969 .setkey = rfc4106_setkey, 1970 .setauthsize = rfc4106_setauthsize, 1971 .encrypt = ipsec_gcm_encrypt, 1972 .decrypt = ipsec_gcm_decrypt, 1973 .ivsize = GCM_RFC4106_IV_SIZE, 1974 .maxauthsize = AES_BLOCK_SIZE, 1975 }, 1976 .caam = { 1977 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1978 }, 1979 }, 1980 { 1981 .aead = { 1982 .base = { 1983 .cra_name = "rfc4543(gcm(aes))", 1984 .cra_driver_name = "rfc4543-gcm-aes-caam", 1985 .cra_blocksize = 1, 1986 }, 1987 .setkey = rfc4543_setkey, 1988 .setauthsize = rfc4543_setauthsize, 1989 .encrypt = ipsec_gcm_encrypt, 1990 .decrypt = ipsec_gcm_decrypt, 1991 .ivsize = GCM_RFC4543_IV_SIZE, 1992 .maxauthsize = AES_BLOCK_SIZE, 1993 }, 1994 .caam = { 1995 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 1996 }, 1997 }, 1998 /* Galois Counter Mode */ 1999 { 2000 .aead = { 2001 .base = { 2002 .cra_name = "gcm(aes)", 2003 .cra_driver_name = "gcm-aes-caam", 2004 .cra_blocksize = 1, 2005 }, 2006 .setkey = gcm_setkey, 2007 .setauthsize = gcm_setauthsize, 2008 .encrypt = gcm_encrypt, 2009 .decrypt = gcm_decrypt, 2010 .ivsize = GCM_AES_IV_SIZE, 2011 .maxauthsize = AES_BLOCK_SIZE, 2012 }, 2013 .caam = { 2014 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 2015 }, 2016 }, 2017 /* single-pass ipsec_esp descriptor */ 2018 { 2019 .aead = { 2020 .base = { 2021 .cra_name = "authenc(hmac(md5)," 2022 "ecb(cipher_null))", 2023 .cra_driver_name = "authenc-hmac-md5-" 2024 "ecb-cipher_null-caam", 2025 .cra_blocksize = NULL_BLOCK_SIZE, 2026 }, 2027 .setkey = aead_setkey, 2028 .setauthsize = aead_setauthsize, 2029 .encrypt = aead_encrypt, 2030 .decrypt = aead_decrypt, 2031 .ivsize = NULL_IV_SIZE, 2032 .maxauthsize = MD5_DIGEST_SIZE, 2033 }, 2034 .caam = { 2035 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2036 OP_ALG_AAI_HMAC_PRECOMP, 2037 }, 2038 }, 2039 { 2040 .aead = { 2041 .base = { 2042 .cra_name = "authenc(hmac(sha1)," 2043 "ecb(cipher_null))", 2044 .cra_driver_name = "authenc-hmac-sha1-" 2045 "ecb-cipher_null-caam", 2046 .cra_blocksize = NULL_BLOCK_SIZE, 2047 }, 2048 .setkey = aead_setkey, 2049 .setauthsize = aead_setauthsize, 2050 .encrypt = aead_encrypt, 2051 .decrypt = aead_decrypt, 2052 .ivsize = NULL_IV_SIZE, 2053 .maxauthsize = SHA1_DIGEST_SIZE, 2054 }, 2055 .caam = { 2056 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2057 OP_ALG_AAI_HMAC_PRECOMP, 2058 }, 2059 }, 2060 { 2061 .aead = { 2062 .base = { 2063 .cra_name = "authenc(hmac(sha224)," 2064 "ecb(cipher_null))", 2065 .cra_driver_name = "authenc-hmac-sha224-" 2066 "ecb-cipher_null-caam", 2067 .cra_blocksize = NULL_BLOCK_SIZE, 2068 }, 2069 .setkey = aead_setkey, 2070 .setauthsize = aead_setauthsize, 2071 .encrypt = aead_encrypt, 2072 .decrypt = aead_decrypt, 2073 .ivsize = NULL_IV_SIZE, 2074 .maxauthsize = SHA224_DIGEST_SIZE, 2075 }, 2076 .caam = { 2077 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2078 OP_ALG_AAI_HMAC_PRECOMP, 2079 }, 2080 }, 2081 { 2082 .aead = { 2083 .base = { 2084 .cra_name = "authenc(hmac(sha256)," 2085 "ecb(cipher_null))", 2086 .cra_driver_name = "authenc-hmac-sha256-" 2087 "ecb-cipher_null-caam", 2088 .cra_blocksize = NULL_BLOCK_SIZE, 2089 }, 2090 .setkey = aead_setkey, 2091 .setauthsize = aead_setauthsize, 2092 .encrypt = aead_encrypt, 2093 .decrypt = aead_decrypt, 2094 .ivsize = NULL_IV_SIZE, 2095 .maxauthsize = SHA256_DIGEST_SIZE, 2096 }, 2097 .caam = { 2098 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2099 OP_ALG_AAI_HMAC_PRECOMP, 2100 }, 2101 }, 2102 { 2103 .aead = { 2104 .base = { 2105 .cra_name = "authenc(hmac(sha384)," 2106 "ecb(cipher_null))", 2107 .cra_driver_name = "authenc-hmac-sha384-" 2108 "ecb-cipher_null-caam", 2109 .cra_blocksize = NULL_BLOCK_SIZE, 2110 }, 2111 .setkey = aead_setkey, 2112 .setauthsize = aead_setauthsize, 2113 .encrypt = aead_encrypt, 2114 .decrypt = aead_decrypt, 2115 .ivsize = NULL_IV_SIZE, 2116 .maxauthsize = SHA384_DIGEST_SIZE, 2117 }, 2118 .caam = { 2119 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2120 OP_ALG_AAI_HMAC_PRECOMP, 2121 }, 2122 }, 2123 { 2124 .aead = { 2125 .base = { 2126 .cra_name = "authenc(hmac(sha512)," 2127 "ecb(cipher_null))", 2128 .cra_driver_name = "authenc-hmac-sha512-" 2129 "ecb-cipher_null-caam", 2130 .cra_blocksize = NULL_BLOCK_SIZE, 2131 }, 2132 .setkey = aead_setkey, 2133 .setauthsize = aead_setauthsize, 2134 .encrypt = aead_encrypt, 2135 .decrypt = aead_decrypt, 2136 .ivsize = NULL_IV_SIZE, 2137 .maxauthsize = SHA512_DIGEST_SIZE, 2138 }, 2139 .caam = { 2140 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2141 OP_ALG_AAI_HMAC_PRECOMP, 2142 }, 2143 }, 2144 { 2145 .aead = { 2146 .base = { 2147 .cra_name = "authenc(hmac(md5),cbc(aes))", 2148 .cra_driver_name = "authenc-hmac-md5-" 2149 "cbc-aes-caam", 2150 .cra_blocksize = AES_BLOCK_SIZE, 2151 }, 2152 .setkey = aead_setkey, 2153 .setauthsize = aead_setauthsize, 2154 .encrypt = aead_encrypt, 2155 .decrypt = aead_decrypt, 2156 .ivsize = AES_BLOCK_SIZE, 2157 .maxauthsize = MD5_DIGEST_SIZE, 2158 }, 2159 .caam = { 2160 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2161 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2162 OP_ALG_AAI_HMAC_PRECOMP, 2163 }, 2164 }, 2165 { 2166 .aead = { 2167 .base = { 2168 .cra_name = "echainiv(authenc(hmac(md5)," 2169 "cbc(aes)))", 2170 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2171 "cbc-aes-caam", 2172 .cra_blocksize = AES_BLOCK_SIZE, 2173 }, 2174 .setkey = aead_setkey, 2175 .setauthsize = aead_setauthsize, 2176 .encrypt = aead_encrypt, 2177 .decrypt = aead_decrypt, 2178 .ivsize = AES_BLOCK_SIZE, 2179 .maxauthsize = MD5_DIGEST_SIZE, 2180 }, 2181 .caam = { 2182 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2183 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2184 OP_ALG_AAI_HMAC_PRECOMP, 2185 .geniv = true, 2186 }, 2187 }, 2188 { 2189 .aead = { 2190 .base = { 2191 .cra_name = "authenc(hmac(sha1),cbc(aes))", 2192 .cra_driver_name = "authenc-hmac-sha1-" 2193 "cbc-aes-caam", 2194 .cra_blocksize = AES_BLOCK_SIZE, 2195 }, 2196 .setkey = aead_setkey, 2197 .setauthsize = aead_setauthsize, 2198 .encrypt = aead_encrypt, 2199 .decrypt = aead_decrypt, 2200 .ivsize = AES_BLOCK_SIZE, 2201 .maxauthsize = SHA1_DIGEST_SIZE, 2202 }, 2203 .caam = { 2204 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2205 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2206 OP_ALG_AAI_HMAC_PRECOMP, 2207 }, 2208 }, 2209 { 2210 .aead = { 2211 .base = { 2212 .cra_name = "echainiv(authenc(hmac(sha1)," 2213 "cbc(aes)))", 2214 .cra_driver_name = "echainiv-authenc-" 2215 "hmac-sha1-cbc-aes-caam", 2216 .cra_blocksize = AES_BLOCK_SIZE, 2217 }, 2218 .setkey = aead_setkey, 2219 .setauthsize = aead_setauthsize, 2220 .encrypt = aead_encrypt, 2221 .decrypt = aead_decrypt, 2222 .ivsize = AES_BLOCK_SIZE, 2223 .maxauthsize = SHA1_DIGEST_SIZE, 2224 }, 2225 .caam = { 2226 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2227 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2228 OP_ALG_AAI_HMAC_PRECOMP, 2229 .geniv = true, 2230 }, 2231 }, 2232 { 2233 .aead = { 2234 .base = { 2235 .cra_name = "authenc(hmac(sha224),cbc(aes))", 2236 .cra_driver_name = "authenc-hmac-sha224-" 2237 "cbc-aes-caam", 2238 .cra_blocksize = AES_BLOCK_SIZE, 2239 }, 2240 .setkey = aead_setkey, 2241 .setauthsize = aead_setauthsize, 2242 .encrypt = aead_encrypt, 2243 .decrypt = aead_decrypt, 2244 .ivsize = AES_BLOCK_SIZE, 2245 .maxauthsize = SHA224_DIGEST_SIZE, 2246 }, 2247 .caam = { 2248 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2249 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2250 OP_ALG_AAI_HMAC_PRECOMP, 2251 }, 2252 }, 2253 { 2254 .aead = { 2255 .base = { 2256 .cra_name = "echainiv(authenc(hmac(sha224)," 2257 "cbc(aes)))", 2258 .cra_driver_name = "echainiv-authenc-" 2259 "hmac-sha224-cbc-aes-caam", 2260 .cra_blocksize = AES_BLOCK_SIZE, 2261 }, 2262 .setkey = aead_setkey, 2263 .setauthsize = aead_setauthsize, 2264 .encrypt = aead_encrypt, 2265 .decrypt = aead_decrypt, 2266 .ivsize = AES_BLOCK_SIZE, 2267 .maxauthsize = SHA224_DIGEST_SIZE, 2268 }, 2269 .caam = { 2270 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2271 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2272 OP_ALG_AAI_HMAC_PRECOMP, 2273 .geniv = true, 2274 }, 2275 }, 2276 { 2277 .aead = { 2278 .base = { 2279 .cra_name = "authenc(hmac(sha256),cbc(aes))", 2280 .cra_driver_name = "authenc-hmac-sha256-" 2281 "cbc-aes-caam", 2282 .cra_blocksize = AES_BLOCK_SIZE, 2283 }, 2284 .setkey = aead_setkey, 2285 .setauthsize = aead_setauthsize, 2286 .encrypt = aead_encrypt, 2287 .decrypt = aead_decrypt, 2288 .ivsize = AES_BLOCK_SIZE, 2289 .maxauthsize = SHA256_DIGEST_SIZE, 2290 }, 2291 .caam = { 2292 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2293 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2294 OP_ALG_AAI_HMAC_PRECOMP, 2295 }, 2296 }, 2297 { 2298 .aead = { 2299 .base = { 2300 .cra_name = "echainiv(authenc(hmac(sha256)," 2301 "cbc(aes)))", 2302 .cra_driver_name = "echainiv-authenc-" 2303 "hmac-sha256-cbc-aes-caam", 2304 .cra_blocksize = AES_BLOCK_SIZE, 2305 }, 2306 .setkey = aead_setkey, 2307 .setauthsize = aead_setauthsize, 2308 .encrypt = aead_encrypt, 2309 .decrypt = aead_decrypt, 2310 .ivsize = AES_BLOCK_SIZE, 2311 .maxauthsize = SHA256_DIGEST_SIZE, 2312 }, 2313 .caam = { 2314 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2315 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2316 OP_ALG_AAI_HMAC_PRECOMP, 2317 .geniv = true, 2318 }, 2319 }, 2320 { 2321 .aead = { 2322 .base = { 2323 .cra_name = "authenc(hmac(sha384),cbc(aes))", 2324 .cra_driver_name = "authenc-hmac-sha384-" 2325 "cbc-aes-caam", 2326 .cra_blocksize = AES_BLOCK_SIZE, 2327 }, 2328 .setkey = aead_setkey, 2329 .setauthsize = aead_setauthsize, 2330 .encrypt = aead_encrypt, 2331 .decrypt = aead_decrypt, 2332 .ivsize = AES_BLOCK_SIZE, 2333 .maxauthsize = SHA384_DIGEST_SIZE, 2334 }, 2335 .caam = { 2336 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2337 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2338 OP_ALG_AAI_HMAC_PRECOMP, 2339 }, 2340 }, 2341 { 2342 .aead = { 2343 .base = { 2344 .cra_name = "echainiv(authenc(hmac(sha384)," 2345 "cbc(aes)))", 2346 .cra_driver_name = "echainiv-authenc-" 2347 "hmac-sha384-cbc-aes-caam", 2348 .cra_blocksize = AES_BLOCK_SIZE, 2349 }, 2350 .setkey = aead_setkey, 2351 .setauthsize = aead_setauthsize, 2352 .encrypt = aead_encrypt, 2353 .decrypt = aead_decrypt, 2354 .ivsize = AES_BLOCK_SIZE, 2355 .maxauthsize = SHA384_DIGEST_SIZE, 2356 }, 2357 .caam = { 2358 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2359 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2360 OP_ALG_AAI_HMAC_PRECOMP, 2361 .geniv = true, 2362 }, 2363 }, 2364 { 2365 .aead = { 2366 .base = { 2367 .cra_name = "authenc(hmac(sha512),cbc(aes))", 2368 .cra_driver_name = "authenc-hmac-sha512-" 2369 "cbc-aes-caam", 2370 .cra_blocksize = AES_BLOCK_SIZE, 2371 }, 2372 .setkey = aead_setkey, 2373 .setauthsize = aead_setauthsize, 2374 .encrypt = aead_encrypt, 2375 .decrypt = aead_decrypt, 2376 .ivsize = AES_BLOCK_SIZE, 2377 .maxauthsize = SHA512_DIGEST_SIZE, 2378 }, 2379 .caam = { 2380 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2381 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2382 OP_ALG_AAI_HMAC_PRECOMP, 2383 }, 2384 }, 2385 { 2386 .aead = { 2387 .base = { 2388 .cra_name = "echainiv(authenc(hmac(sha512)," 2389 "cbc(aes)))", 2390 .cra_driver_name = "echainiv-authenc-" 2391 "hmac-sha512-cbc-aes-caam", 2392 .cra_blocksize = AES_BLOCK_SIZE, 2393 }, 2394 .setkey = aead_setkey, 2395 .setauthsize = aead_setauthsize, 2396 .encrypt = aead_encrypt, 2397 .decrypt = aead_decrypt, 2398 .ivsize = AES_BLOCK_SIZE, 2399 .maxauthsize = SHA512_DIGEST_SIZE, 2400 }, 2401 .caam = { 2402 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2403 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2404 OP_ALG_AAI_HMAC_PRECOMP, 2405 .geniv = true, 2406 }, 2407 }, 2408 { 2409 .aead = { 2410 .base = { 2411 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 2412 .cra_driver_name = "authenc-hmac-md5-" 2413 "cbc-des3_ede-caam", 2414 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2415 }, 2416 .setkey = aead_setkey, 2417 .setauthsize = aead_setauthsize, 2418 .encrypt = aead_encrypt, 2419 .decrypt = aead_decrypt, 2420 .ivsize = DES3_EDE_BLOCK_SIZE, 2421 .maxauthsize = MD5_DIGEST_SIZE, 2422 }, 2423 .caam = { 2424 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2425 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2426 OP_ALG_AAI_HMAC_PRECOMP, 2427 } 2428 }, 2429 { 2430 .aead = { 2431 .base = { 2432 .cra_name = "echainiv(authenc(hmac(md5)," 2433 "cbc(des3_ede)))", 2434 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2435 "cbc-des3_ede-caam", 2436 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2437 }, 2438 .setkey = aead_setkey, 2439 .setauthsize = aead_setauthsize, 2440 .encrypt = aead_encrypt, 2441 .decrypt = aead_decrypt, 2442 .ivsize = DES3_EDE_BLOCK_SIZE, 2443 .maxauthsize = MD5_DIGEST_SIZE, 2444 }, 2445 .caam = { 2446 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2447 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2448 OP_ALG_AAI_HMAC_PRECOMP, 2449 .geniv = true, 2450 } 2451 }, 2452 { 2453 .aead = { 2454 .base = { 2455 .cra_name = "authenc(hmac(sha1)," 2456 "cbc(des3_ede))", 2457 .cra_driver_name = "authenc-hmac-sha1-" 2458 "cbc-des3_ede-caam", 2459 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2460 }, 2461 .setkey = aead_setkey, 2462 .setauthsize = aead_setauthsize, 2463 .encrypt = aead_encrypt, 2464 .decrypt = aead_decrypt, 2465 .ivsize = DES3_EDE_BLOCK_SIZE, 2466 .maxauthsize = SHA1_DIGEST_SIZE, 2467 }, 2468 .caam = { 2469 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2470 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2471 OP_ALG_AAI_HMAC_PRECOMP, 2472 }, 2473 }, 2474 { 2475 .aead = { 2476 .base = { 2477 .cra_name = "echainiv(authenc(hmac(sha1)," 2478 "cbc(des3_ede)))", 2479 .cra_driver_name = "echainiv-authenc-" 2480 "hmac-sha1-" 2481 "cbc-des3_ede-caam", 2482 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2483 }, 2484 .setkey = aead_setkey, 2485 .setauthsize = aead_setauthsize, 2486 .encrypt = aead_encrypt, 2487 .decrypt = aead_decrypt, 2488 .ivsize = DES3_EDE_BLOCK_SIZE, 2489 .maxauthsize = SHA1_DIGEST_SIZE, 2490 }, 2491 .caam = { 2492 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2493 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2494 OP_ALG_AAI_HMAC_PRECOMP, 2495 .geniv = true, 2496 }, 2497 }, 2498 { 2499 .aead = { 2500 .base = { 2501 .cra_name = "authenc(hmac(sha224)," 2502 "cbc(des3_ede))", 2503 .cra_driver_name = "authenc-hmac-sha224-" 2504 "cbc-des3_ede-caam", 2505 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2506 }, 2507 .setkey = aead_setkey, 2508 .setauthsize = aead_setauthsize, 2509 .encrypt = aead_encrypt, 2510 .decrypt = aead_decrypt, 2511 .ivsize = DES3_EDE_BLOCK_SIZE, 2512 .maxauthsize = SHA224_DIGEST_SIZE, 2513 }, 2514 .caam = { 2515 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2516 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2517 OP_ALG_AAI_HMAC_PRECOMP, 2518 }, 2519 }, 2520 { 2521 .aead = { 2522 .base = { 2523 .cra_name = "echainiv(authenc(hmac(sha224)," 2524 "cbc(des3_ede)))", 2525 .cra_driver_name = "echainiv-authenc-" 2526 "hmac-sha224-" 2527 "cbc-des3_ede-caam", 2528 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2529 }, 2530 .setkey = aead_setkey, 2531 .setauthsize = aead_setauthsize, 2532 .encrypt = aead_encrypt, 2533 .decrypt = aead_decrypt, 2534 .ivsize = DES3_EDE_BLOCK_SIZE, 2535 .maxauthsize = SHA224_DIGEST_SIZE, 2536 }, 2537 .caam = { 2538 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2539 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2540 OP_ALG_AAI_HMAC_PRECOMP, 2541 .geniv = true, 2542 }, 2543 }, 2544 { 2545 .aead = { 2546 .base = { 2547 .cra_name = "authenc(hmac(sha256)," 2548 "cbc(des3_ede))", 2549 .cra_driver_name = "authenc-hmac-sha256-" 2550 "cbc-des3_ede-caam", 2551 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2552 }, 2553 .setkey = aead_setkey, 2554 .setauthsize = aead_setauthsize, 2555 .encrypt = aead_encrypt, 2556 .decrypt = aead_decrypt, 2557 .ivsize = DES3_EDE_BLOCK_SIZE, 2558 .maxauthsize = SHA256_DIGEST_SIZE, 2559 }, 2560 .caam = { 2561 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2562 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2563 OP_ALG_AAI_HMAC_PRECOMP, 2564 }, 2565 }, 2566 { 2567 .aead = { 2568 .base = { 2569 .cra_name = "echainiv(authenc(hmac(sha256)," 2570 "cbc(des3_ede)))", 2571 .cra_driver_name = "echainiv-authenc-" 2572 "hmac-sha256-" 2573 "cbc-des3_ede-caam", 2574 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2575 }, 2576 .setkey = aead_setkey, 2577 .setauthsize = aead_setauthsize, 2578 .encrypt = aead_encrypt, 2579 .decrypt = aead_decrypt, 2580 .ivsize = DES3_EDE_BLOCK_SIZE, 2581 .maxauthsize = SHA256_DIGEST_SIZE, 2582 }, 2583 .caam = { 2584 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2585 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2586 OP_ALG_AAI_HMAC_PRECOMP, 2587 .geniv = true, 2588 }, 2589 }, 2590 { 2591 .aead = { 2592 .base = { 2593 .cra_name = "authenc(hmac(sha384)," 2594 "cbc(des3_ede))", 2595 .cra_driver_name = "authenc-hmac-sha384-" 2596 "cbc-des3_ede-caam", 2597 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2598 }, 2599 .setkey = aead_setkey, 2600 .setauthsize = aead_setauthsize, 2601 .encrypt = aead_encrypt, 2602 .decrypt = aead_decrypt, 2603 .ivsize = DES3_EDE_BLOCK_SIZE, 2604 .maxauthsize = SHA384_DIGEST_SIZE, 2605 }, 2606 .caam = { 2607 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2608 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2609 OP_ALG_AAI_HMAC_PRECOMP, 2610 }, 2611 }, 2612 { 2613 .aead = { 2614 .base = { 2615 .cra_name = "echainiv(authenc(hmac(sha384)," 2616 "cbc(des3_ede)))", 2617 .cra_driver_name = "echainiv-authenc-" 2618 "hmac-sha384-" 2619 "cbc-des3_ede-caam", 2620 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2621 }, 2622 .setkey = aead_setkey, 2623 .setauthsize = aead_setauthsize, 2624 .encrypt = aead_encrypt, 2625 .decrypt = aead_decrypt, 2626 .ivsize = DES3_EDE_BLOCK_SIZE, 2627 .maxauthsize = SHA384_DIGEST_SIZE, 2628 }, 2629 .caam = { 2630 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2631 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2632 OP_ALG_AAI_HMAC_PRECOMP, 2633 .geniv = true, 2634 }, 2635 }, 2636 { 2637 .aead = { 2638 .base = { 2639 .cra_name = "authenc(hmac(sha512)," 2640 "cbc(des3_ede))", 2641 .cra_driver_name = "authenc-hmac-sha512-" 2642 "cbc-des3_ede-caam", 2643 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2644 }, 2645 .setkey = aead_setkey, 2646 .setauthsize = aead_setauthsize, 2647 .encrypt = aead_encrypt, 2648 .decrypt = aead_decrypt, 2649 .ivsize = DES3_EDE_BLOCK_SIZE, 2650 .maxauthsize = SHA512_DIGEST_SIZE, 2651 }, 2652 .caam = { 2653 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2654 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2655 OP_ALG_AAI_HMAC_PRECOMP, 2656 }, 2657 }, 2658 { 2659 .aead = { 2660 .base = { 2661 .cra_name = "echainiv(authenc(hmac(sha512)," 2662 "cbc(des3_ede)))", 2663 .cra_driver_name = "echainiv-authenc-" 2664 "hmac-sha512-" 2665 "cbc-des3_ede-caam", 2666 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2667 }, 2668 .setkey = aead_setkey, 2669 .setauthsize = aead_setauthsize, 2670 .encrypt = aead_encrypt, 2671 .decrypt = aead_decrypt, 2672 .ivsize = DES3_EDE_BLOCK_SIZE, 2673 .maxauthsize = SHA512_DIGEST_SIZE, 2674 }, 2675 .caam = { 2676 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2677 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2678 OP_ALG_AAI_HMAC_PRECOMP, 2679 .geniv = true, 2680 }, 2681 }, 2682 { 2683 .aead = { 2684 .base = { 2685 .cra_name = "authenc(hmac(md5),cbc(des))", 2686 .cra_driver_name = "authenc-hmac-md5-" 2687 "cbc-des-caam", 2688 .cra_blocksize = DES_BLOCK_SIZE, 2689 }, 2690 .setkey = aead_setkey, 2691 .setauthsize = aead_setauthsize, 2692 .encrypt = aead_encrypt, 2693 .decrypt = aead_decrypt, 2694 .ivsize = DES_BLOCK_SIZE, 2695 .maxauthsize = MD5_DIGEST_SIZE, 2696 }, 2697 .caam = { 2698 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2699 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2700 OP_ALG_AAI_HMAC_PRECOMP, 2701 }, 2702 }, 2703 { 2704 .aead = { 2705 .base = { 2706 .cra_name = "echainiv(authenc(hmac(md5)," 2707 "cbc(des)))", 2708 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2709 "cbc-des-caam", 2710 .cra_blocksize = DES_BLOCK_SIZE, 2711 }, 2712 .setkey = aead_setkey, 2713 .setauthsize = aead_setauthsize, 2714 .encrypt = aead_encrypt, 2715 .decrypt = aead_decrypt, 2716 .ivsize = DES_BLOCK_SIZE, 2717 .maxauthsize = MD5_DIGEST_SIZE, 2718 }, 2719 .caam = { 2720 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2721 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2722 OP_ALG_AAI_HMAC_PRECOMP, 2723 .geniv = true, 2724 }, 2725 }, 2726 { 2727 .aead = { 2728 .base = { 2729 .cra_name = "authenc(hmac(sha1),cbc(des))", 2730 .cra_driver_name = "authenc-hmac-sha1-" 2731 "cbc-des-caam", 2732 .cra_blocksize = DES_BLOCK_SIZE, 2733 }, 2734 .setkey = aead_setkey, 2735 .setauthsize = aead_setauthsize, 2736 .encrypt = aead_encrypt, 2737 .decrypt = aead_decrypt, 2738 .ivsize = DES_BLOCK_SIZE, 2739 .maxauthsize = SHA1_DIGEST_SIZE, 2740 }, 2741 .caam = { 2742 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2743 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2744 OP_ALG_AAI_HMAC_PRECOMP, 2745 }, 2746 }, 2747 { 2748 .aead = { 2749 .base = { 2750 .cra_name = "echainiv(authenc(hmac(sha1)," 2751 "cbc(des)))", 2752 .cra_driver_name = "echainiv-authenc-" 2753 "hmac-sha1-cbc-des-caam", 2754 .cra_blocksize = DES_BLOCK_SIZE, 2755 }, 2756 .setkey = aead_setkey, 2757 .setauthsize = aead_setauthsize, 2758 .encrypt = aead_encrypt, 2759 .decrypt = aead_decrypt, 2760 .ivsize = DES_BLOCK_SIZE, 2761 .maxauthsize = SHA1_DIGEST_SIZE, 2762 }, 2763 .caam = { 2764 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2765 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2766 OP_ALG_AAI_HMAC_PRECOMP, 2767 .geniv = true, 2768 }, 2769 }, 2770 { 2771 .aead = { 2772 .base = { 2773 .cra_name = "authenc(hmac(sha224),cbc(des))", 2774 .cra_driver_name = "authenc-hmac-sha224-" 2775 "cbc-des-caam", 2776 .cra_blocksize = DES_BLOCK_SIZE, 2777 }, 2778 .setkey = aead_setkey, 2779 .setauthsize = aead_setauthsize, 2780 .encrypt = aead_encrypt, 2781 .decrypt = aead_decrypt, 2782 .ivsize = DES_BLOCK_SIZE, 2783 .maxauthsize = SHA224_DIGEST_SIZE, 2784 }, 2785 .caam = { 2786 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2787 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2788 OP_ALG_AAI_HMAC_PRECOMP, 2789 }, 2790 }, 2791 { 2792 .aead = { 2793 .base = { 2794 .cra_name = "echainiv(authenc(hmac(sha224)," 2795 "cbc(des)))", 2796 .cra_driver_name = "echainiv-authenc-" 2797 "hmac-sha224-cbc-des-caam", 2798 .cra_blocksize = DES_BLOCK_SIZE, 2799 }, 2800 .setkey = aead_setkey, 2801 .setauthsize = aead_setauthsize, 2802 .encrypt = aead_encrypt, 2803 .decrypt = aead_decrypt, 2804 .ivsize = DES_BLOCK_SIZE, 2805 .maxauthsize = SHA224_DIGEST_SIZE, 2806 }, 2807 .caam = { 2808 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2809 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2810 OP_ALG_AAI_HMAC_PRECOMP, 2811 .geniv = true, 2812 }, 2813 }, 2814 { 2815 .aead = { 2816 .base = { 2817 .cra_name = "authenc(hmac(sha256),cbc(des))", 2818 .cra_driver_name = "authenc-hmac-sha256-" 2819 "cbc-des-caam", 2820 .cra_blocksize = DES_BLOCK_SIZE, 2821 }, 2822 .setkey = aead_setkey, 2823 .setauthsize = aead_setauthsize, 2824 .encrypt = aead_encrypt, 2825 .decrypt = aead_decrypt, 2826 .ivsize = DES_BLOCK_SIZE, 2827 .maxauthsize = SHA256_DIGEST_SIZE, 2828 }, 2829 .caam = { 2830 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2831 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2832 OP_ALG_AAI_HMAC_PRECOMP, 2833 }, 2834 }, 2835 { 2836 .aead = { 2837 .base = { 2838 .cra_name = "echainiv(authenc(hmac(sha256)," 2839 "cbc(des)))", 2840 .cra_driver_name = "echainiv-authenc-" 2841 "hmac-sha256-cbc-des-caam", 2842 .cra_blocksize = DES_BLOCK_SIZE, 2843 }, 2844 .setkey = aead_setkey, 2845 .setauthsize = aead_setauthsize, 2846 .encrypt = aead_encrypt, 2847 .decrypt = aead_decrypt, 2848 .ivsize = DES_BLOCK_SIZE, 2849 .maxauthsize = SHA256_DIGEST_SIZE, 2850 }, 2851 .caam = { 2852 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2853 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2854 OP_ALG_AAI_HMAC_PRECOMP, 2855 .geniv = true, 2856 }, 2857 }, 2858 { 2859 .aead = { 2860 .base = { 2861 .cra_name = "authenc(hmac(sha384),cbc(des))", 2862 .cra_driver_name = "authenc-hmac-sha384-" 2863 "cbc-des-caam", 2864 .cra_blocksize = DES_BLOCK_SIZE, 2865 }, 2866 .setkey = aead_setkey, 2867 .setauthsize = aead_setauthsize, 2868 .encrypt = aead_encrypt, 2869 .decrypt = aead_decrypt, 2870 .ivsize = DES_BLOCK_SIZE, 2871 .maxauthsize = SHA384_DIGEST_SIZE, 2872 }, 2873 .caam = { 2874 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2875 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2876 OP_ALG_AAI_HMAC_PRECOMP, 2877 }, 2878 }, 2879 { 2880 .aead = { 2881 .base = { 2882 .cra_name = "echainiv(authenc(hmac(sha384)," 2883 "cbc(des)))", 2884 .cra_driver_name = "echainiv-authenc-" 2885 "hmac-sha384-cbc-des-caam", 2886 .cra_blocksize = DES_BLOCK_SIZE, 2887 }, 2888 .setkey = aead_setkey, 2889 .setauthsize = aead_setauthsize, 2890 .encrypt = aead_encrypt, 2891 .decrypt = aead_decrypt, 2892 .ivsize = DES_BLOCK_SIZE, 2893 .maxauthsize = SHA384_DIGEST_SIZE, 2894 }, 2895 .caam = { 2896 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2897 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2898 OP_ALG_AAI_HMAC_PRECOMP, 2899 .geniv = true, 2900 }, 2901 }, 2902 { 2903 .aead = { 2904 .base = { 2905 .cra_name = "authenc(hmac(sha512),cbc(des))", 2906 .cra_driver_name = "authenc-hmac-sha512-" 2907 "cbc-des-caam", 2908 .cra_blocksize = DES_BLOCK_SIZE, 2909 }, 2910 .setkey = aead_setkey, 2911 .setauthsize = aead_setauthsize, 2912 .encrypt = aead_encrypt, 2913 .decrypt = aead_decrypt, 2914 .ivsize = DES_BLOCK_SIZE, 2915 .maxauthsize = SHA512_DIGEST_SIZE, 2916 }, 2917 .caam = { 2918 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2919 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2920 OP_ALG_AAI_HMAC_PRECOMP, 2921 }, 2922 }, 2923 { 2924 .aead = { 2925 .base = { 2926 .cra_name = "echainiv(authenc(hmac(sha512)," 2927 "cbc(des)))", 2928 .cra_driver_name = "echainiv-authenc-" 2929 "hmac-sha512-cbc-des-caam", 2930 .cra_blocksize = DES_BLOCK_SIZE, 2931 }, 2932 .setkey = aead_setkey, 2933 .setauthsize = aead_setauthsize, 2934 .encrypt = aead_encrypt, 2935 .decrypt = aead_decrypt, 2936 .ivsize = DES_BLOCK_SIZE, 2937 .maxauthsize = SHA512_DIGEST_SIZE, 2938 }, 2939 .caam = { 2940 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2941 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2942 OP_ALG_AAI_HMAC_PRECOMP, 2943 .geniv = true, 2944 }, 2945 }, 2946 { 2947 .aead = { 2948 .base = { 2949 .cra_name = "authenc(hmac(md5)," 2950 "rfc3686(ctr(aes)))", 2951 .cra_driver_name = "authenc-hmac-md5-" 2952 "rfc3686-ctr-aes-caam", 2953 .cra_blocksize = 1, 2954 }, 2955 .setkey = aead_setkey, 2956 .setauthsize = aead_setauthsize, 2957 .encrypt = aead_encrypt, 2958 .decrypt = aead_decrypt, 2959 .ivsize = CTR_RFC3686_IV_SIZE, 2960 .maxauthsize = MD5_DIGEST_SIZE, 2961 }, 2962 .caam = { 2963 .class1_alg_type = OP_ALG_ALGSEL_AES | 2964 OP_ALG_AAI_CTR_MOD128, 2965 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2966 OP_ALG_AAI_HMAC_PRECOMP, 2967 .rfc3686 = true, 2968 }, 2969 }, 2970 { 2971 .aead = { 2972 .base = { 2973 .cra_name = "seqiv(authenc(" 2974 "hmac(md5),rfc3686(ctr(aes))))", 2975 .cra_driver_name = "seqiv-authenc-hmac-md5-" 2976 "rfc3686-ctr-aes-caam", 2977 .cra_blocksize = 1, 2978 }, 2979 .setkey = aead_setkey, 2980 .setauthsize = aead_setauthsize, 2981 .encrypt = aead_encrypt, 2982 .decrypt = aead_decrypt, 2983 .ivsize = CTR_RFC3686_IV_SIZE, 2984 .maxauthsize = MD5_DIGEST_SIZE, 2985 }, 2986 .caam = { 2987 .class1_alg_type = OP_ALG_ALGSEL_AES | 2988 OP_ALG_AAI_CTR_MOD128, 2989 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2990 OP_ALG_AAI_HMAC_PRECOMP, 2991 .rfc3686 = true, 2992 .geniv = true, 2993 }, 2994 }, 2995 { 2996 .aead = { 2997 .base = { 2998 .cra_name = "authenc(hmac(sha1)," 2999 "rfc3686(ctr(aes)))", 3000 .cra_driver_name = "authenc-hmac-sha1-" 3001 "rfc3686-ctr-aes-caam", 3002 .cra_blocksize = 1, 3003 }, 3004 .setkey = aead_setkey, 3005 .setauthsize = aead_setauthsize, 3006 .encrypt = aead_encrypt, 3007 .decrypt = aead_decrypt, 3008 .ivsize = CTR_RFC3686_IV_SIZE, 3009 .maxauthsize = SHA1_DIGEST_SIZE, 3010 }, 3011 .caam = { 3012 .class1_alg_type = OP_ALG_ALGSEL_AES | 3013 OP_ALG_AAI_CTR_MOD128, 3014 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 3015 OP_ALG_AAI_HMAC_PRECOMP, 3016 .rfc3686 = true, 3017 }, 3018 }, 3019 { 3020 .aead = { 3021 .base = { 3022 .cra_name = "seqiv(authenc(" 3023 "hmac(sha1),rfc3686(ctr(aes))))", 3024 .cra_driver_name = "seqiv-authenc-hmac-sha1-" 3025 "rfc3686-ctr-aes-caam", 3026 .cra_blocksize = 1, 3027 }, 3028 .setkey = aead_setkey, 3029 .setauthsize = aead_setauthsize, 3030 .encrypt = aead_encrypt, 3031 .decrypt = aead_decrypt, 3032 .ivsize = CTR_RFC3686_IV_SIZE, 3033 .maxauthsize = SHA1_DIGEST_SIZE, 3034 }, 3035 .caam = { 3036 .class1_alg_type = OP_ALG_ALGSEL_AES | 3037 OP_ALG_AAI_CTR_MOD128, 3038 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 3039 OP_ALG_AAI_HMAC_PRECOMP, 3040 .rfc3686 = true, 3041 .geniv = true, 3042 }, 3043 }, 3044 { 3045 .aead = { 3046 .base = { 3047 .cra_name = "authenc(hmac(sha224)," 3048 "rfc3686(ctr(aes)))", 3049 .cra_driver_name = "authenc-hmac-sha224-" 3050 "rfc3686-ctr-aes-caam", 3051 .cra_blocksize = 1, 3052 }, 3053 .setkey = aead_setkey, 3054 .setauthsize = aead_setauthsize, 3055 .encrypt = aead_encrypt, 3056 .decrypt = aead_decrypt, 3057 .ivsize = CTR_RFC3686_IV_SIZE, 3058 .maxauthsize = SHA224_DIGEST_SIZE, 3059 }, 3060 .caam = { 3061 .class1_alg_type = OP_ALG_ALGSEL_AES | 3062 OP_ALG_AAI_CTR_MOD128, 3063 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 3064 OP_ALG_AAI_HMAC_PRECOMP, 3065 .rfc3686 = true, 3066 }, 3067 }, 3068 { 3069 .aead = { 3070 .base = { 3071 .cra_name = "seqiv(authenc(" 3072 "hmac(sha224),rfc3686(ctr(aes))))", 3073 .cra_driver_name = "seqiv-authenc-hmac-sha224-" 3074 "rfc3686-ctr-aes-caam", 3075 .cra_blocksize = 1, 3076 }, 3077 .setkey = aead_setkey, 3078 .setauthsize = aead_setauthsize, 3079 .encrypt = aead_encrypt, 3080 .decrypt = aead_decrypt, 3081 .ivsize = CTR_RFC3686_IV_SIZE, 3082 .maxauthsize = SHA224_DIGEST_SIZE, 3083 }, 3084 .caam = { 3085 .class1_alg_type = OP_ALG_ALGSEL_AES | 3086 OP_ALG_AAI_CTR_MOD128, 3087 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 3088 OP_ALG_AAI_HMAC_PRECOMP, 3089 .rfc3686 = true, 3090 .geniv = true, 3091 }, 3092 }, 3093 { 3094 .aead = { 3095 .base = { 3096 .cra_name = "authenc(hmac(sha256)," 3097 "rfc3686(ctr(aes)))", 3098 .cra_driver_name = "authenc-hmac-sha256-" 3099 "rfc3686-ctr-aes-caam", 3100 .cra_blocksize = 1, 3101 }, 3102 .setkey = aead_setkey, 3103 .setauthsize = aead_setauthsize, 3104 .encrypt = aead_encrypt, 3105 .decrypt = aead_decrypt, 3106 .ivsize = CTR_RFC3686_IV_SIZE, 3107 .maxauthsize = SHA256_DIGEST_SIZE, 3108 }, 3109 .caam = { 3110 .class1_alg_type = OP_ALG_ALGSEL_AES | 3111 OP_ALG_AAI_CTR_MOD128, 3112 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 3113 OP_ALG_AAI_HMAC_PRECOMP, 3114 .rfc3686 = true, 3115 }, 3116 }, 3117 { 3118 .aead = { 3119 .base = { 3120 .cra_name = "seqiv(authenc(hmac(sha256)," 3121 "rfc3686(ctr(aes))))", 3122 .cra_driver_name = "seqiv-authenc-hmac-sha256-" 3123 "rfc3686-ctr-aes-caam", 3124 .cra_blocksize = 1, 3125 }, 3126 .setkey = aead_setkey, 3127 .setauthsize = aead_setauthsize, 3128 .encrypt = aead_encrypt, 3129 .decrypt = aead_decrypt, 3130 .ivsize = CTR_RFC3686_IV_SIZE, 3131 .maxauthsize = SHA256_DIGEST_SIZE, 3132 }, 3133 .caam = { 3134 .class1_alg_type = OP_ALG_ALGSEL_AES | 3135 OP_ALG_AAI_CTR_MOD128, 3136 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 3137 OP_ALG_AAI_HMAC_PRECOMP, 3138 .rfc3686 = true, 3139 .geniv = true, 3140 }, 3141 }, 3142 { 3143 .aead = { 3144 .base = { 3145 .cra_name = "authenc(hmac(sha384)," 3146 "rfc3686(ctr(aes)))", 3147 .cra_driver_name = "authenc-hmac-sha384-" 3148 "rfc3686-ctr-aes-caam", 3149 .cra_blocksize = 1, 3150 }, 3151 .setkey = aead_setkey, 3152 .setauthsize = aead_setauthsize, 3153 .encrypt = aead_encrypt, 3154 .decrypt = aead_decrypt, 3155 .ivsize = CTR_RFC3686_IV_SIZE, 3156 .maxauthsize = SHA384_DIGEST_SIZE, 3157 }, 3158 .caam = { 3159 .class1_alg_type = OP_ALG_ALGSEL_AES | 3160 OP_ALG_AAI_CTR_MOD128, 3161 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 3162 OP_ALG_AAI_HMAC_PRECOMP, 3163 .rfc3686 = true, 3164 }, 3165 }, 3166 { 3167 .aead = { 3168 .base = { 3169 .cra_name = "seqiv(authenc(hmac(sha384)," 3170 "rfc3686(ctr(aes))))", 3171 .cra_driver_name = "seqiv-authenc-hmac-sha384-" 3172 "rfc3686-ctr-aes-caam", 3173 .cra_blocksize = 1, 3174 }, 3175 .setkey = aead_setkey, 3176 .setauthsize = aead_setauthsize, 3177 .encrypt = aead_encrypt, 3178 .decrypt = aead_decrypt, 3179 .ivsize = CTR_RFC3686_IV_SIZE, 3180 .maxauthsize = SHA384_DIGEST_SIZE, 3181 }, 3182 .caam = { 3183 .class1_alg_type = OP_ALG_ALGSEL_AES | 3184 OP_ALG_AAI_CTR_MOD128, 3185 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 3186 OP_ALG_AAI_HMAC_PRECOMP, 3187 .rfc3686 = true, 3188 .geniv = true, 3189 }, 3190 }, 3191 { 3192 .aead = { 3193 .base = { 3194 .cra_name = "authenc(hmac(sha512)," 3195 "rfc3686(ctr(aes)))", 3196 .cra_driver_name = "authenc-hmac-sha512-" 3197 "rfc3686-ctr-aes-caam", 3198 .cra_blocksize = 1, 3199 }, 3200 .setkey = aead_setkey, 3201 .setauthsize = aead_setauthsize, 3202 .encrypt = aead_encrypt, 3203 .decrypt = aead_decrypt, 3204 .ivsize = CTR_RFC3686_IV_SIZE, 3205 .maxauthsize = SHA512_DIGEST_SIZE, 3206 }, 3207 .caam = { 3208 .class1_alg_type = OP_ALG_ALGSEL_AES | 3209 OP_ALG_AAI_CTR_MOD128, 3210 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 3211 OP_ALG_AAI_HMAC_PRECOMP, 3212 .rfc3686 = true, 3213 }, 3214 }, 3215 { 3216 .aead = { 3217 .base = { 3218 .cra_name = "seqiv(authenc(hmac(sha512)," 3219 "rfc3686(ctr(aes))))", 3220 .cra_driver_name = "seqiv-authenc-hmac-sha512-" 3221 "rfc3686-ctr-aes-caam", 3222 .cra_blocksize = 1, 3223 }, 3224 .setkey = aead_setkey, 3225 .setauthsize = aead_setauthsize, 3226 .encrypt = aead_encrypt, 3227 .decrypt = aead_decrypt, 3228 .ivsize = CTR_RFC3686_IV_SIZE, 3229 .maxauthsize = SHA512_DIGEST_SIZE, 3230 }, 3231 .caam = { 3232 .class1_alg_type = OP_ALG_ALGSEL_AES | 3233 OP_ALG_AAI_CTR_MOD128, 3234 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 3235 OP_ALG_AAI_HMAC_PRECOMP, 3236 .rfc3686 = true, 3237 .geniv = true, 3238 }, 3239 }, 3240 }; 3241 3242 struct caam_crypto_alg { 3243 struct crypto_alg crypto_alg; 3244 struct list_head entry; 3245 struct caam_alg_entry caam; 3246 }; 3247 3248 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 3249 bool uses_dkp) 3250 { 3251 dma_addr_t dma_addr; 3252 struct caam_drv_private *priv; 3253 3254 ctx->jrdev = caam_jr_alloc(); 3255 if (IS_ERR(ctx->jrdev)) { 3256 pr_err("Job Ring Device allocation for transform failed\n"); 3257 return PTR_ERR(ctx->jrdev); 3258 } 3259 3260 priv = dev_get_drvdata(ctx->jrdev->parent); 3261 if (priv->era >= 6 && uses_dkp) 3262 ctx->dir = DMA_BIDIRECTIONAL; 3263 else 3264 ctx->dir = DMA_TO_DEVICE; 3265 3266 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, 3267 offsetof(struct caam_ctx, 3268 sh_desc_enc_dma), 3269 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 3270 if (dma_mapping_error(ctx->jrdev, dma_addr)) { 3271 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n"); 3272 caam_jr_free(ctx->jrdev); 3273 return -ENOMEM; 3274 } 3275 3276 ctx->sh_desc_enc_dma = dma_addr; 3277 ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx, 3278 sh_desc_dec); 3279 ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx, 3280 sh_desc_givenc); 3281 ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key); 3282 3283 /* copy descriptor header template value */ 3284 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; 3285 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; 3286 3287 return 0; 3288 } 3289 3290 static int caam_cra_init(struct crypto_tfm *tfm) 3291 { 3292 struct crypto_alg *alg = tfm->__crt_alg; 3293 struct caam_crypto_alg *caam_alg = 3294 container_of(alg, struct caam_crypto_alg, crypto_alg); 3295 struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 3296 3297 return caam_init_common(ctx, &caam_alg->caam, false); 3298 } 3299 3300 static int caam_aead_init(struct crypto_aead *tfm) 3301 { 3302 struct aead_alg *alg = crypto_aead_alg(tfm); 3303 struct caam_aead_alg *caam_alg = 3304 container_of(alg, struct caam_aead_alg, aead); 3305 struct caam_ctx *ctx = crypto_aead_ctx(tfm); 3306 3307 return caam_init_common(ctx, &caam_alg->caam, 3308 alg->setkey == aead_setkey); 3309 } 3310 3311 static void caam_exit_common(struct caam_ctx *ctx) 3312 { 3313 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, 3314 offsetof(struct caam_ctx, sh_desc_enc_dma), 3315 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 3316 caam_jr_free(ctx->jrdev); 3317 } 3318 3319 static void caam_cra_exit(struct crypto_tfm *tfm) 3320 { 3321 caam_exit_common(crypto_tfm_ctx(tfm)); 3322 } 3323 3324 static void caam_aead_exit(struct crypto_aead *tfm) 3325 { 3326 caam_exit_common(crypto_aead_ctx(tfm)); 3327 } 3328 3329 static void __exit caam_algapi_exit(void) 3330 { 3331 3332 struct caam_crypto_alg *t_alg, *n; 3333 int i; 3334 3335 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 3336 struct caam_aead_alg *t_alg = driver_aeads + i; 3337 3338 if (t_alg->registered) 3339 crypto_unregister_aead(&t_alg->aead); 3340 } 3341 3342 if (!alg_list.next) 3343 return; 3344 3345 list_for_each_entry_safe(t_alg, n, &alg_list, entry) { 3346 crypto_unregister_alg(&t_alg->crypto_alg); 3347 list_del(&t_alg->entry); 3348 kfree(t_alg); 3349 } 3350 } 3351 3352 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template 3353 *template) 3354 { 3355 struct caam_crypto_alg *t_alg; 3356 struct crypto_alg *alg; 3357 3358 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 3359 if (!t_alg) { 3360 pr_err("failed to allocate t_alg\n"); 3361 return ERR_PTR(-ENOMEM); 3362 } 3363 3364 alg = &t_alg->crypto_alg; 3365 3366 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); 3367 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 3368 template->driver_name); 3369 alg->cra_module = THIS_MODULE; 3370 alg->cra_init = caam_cra_init; 3371 alg->cra_exit = caam_cra_exit; 3372 alg->cra_priority = CAAM_CRA_PRIORITY; 3373 alg->cra_blocksize = template->blocksize; 3374 alg->cra_alignmask = 0; 3375 alg->cra_ctxsize = sizeof(struct caam_ctx); 3376 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | 3377 template->type; 3378 switch (template->type) { 3379 case CRYPTO_ALG_TYPE_GIVCIPHER: 3380 alg->cra_type = &crypto_givcipher_type; 3381 alg->cra_ablkcipher = template->template_ablkcipher; 3382 break; 3383 case CRYPTO_ALG_TYPE_ABLKCIPHER: 3384 alg->cra_type = &crypto_ablkcipher_type; 3385 alg->cra_ablkcipher = template->template_ablkcipher; 3386 break; 3387 } 3388 3389 t_alg->caam.class1_alg_type = template->class1_alg_type; 3390 t_alg->caam.class2_alg_type = template->class2_alg_type; 3391 3392 return t_alg; 3393 } 3394 3395 static void caam_aead_alg_init(struct caam_aead_alg *t_alg) 3396 { 3397 struct aead_alg *alg = &t_alg->aead; 3398 3399 alg->base.cra_module = THIS_MODULE; 3400 alg->base.cra_priority = CAAM_CRA_PRIORITY; 3401 alg->base.cra_ctxsize = sizeof(struct caam_ctx); 3402 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; 3403 3404 alg->init = caam_aead_init; 3405 alg->exit = caam_aead_exit; 3406 } 3407 3408 static int __init caam_algapi_init(void) 3409 { 3410 struct device_node *dev_node; 3411 struct platform_device *pdev; 3412 struct device *ctrldev; 3413 struct caam_drv_private *priv; 3414 int i = 0, err = 0; 3415 u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst; 3416 unsigned int md_limit = SHA512_DIGEST_SIZE; 3417 bool registered = false; 3418 3419 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 3420 if (!dev_node) { 3421 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 3422 if (!dev_node) 3423 return -ENODEV; 3424 } 3425 3426 pdev = of_find_device_by_node(dev_node); 3427 if (!pdev) { 3428 of_node_put(dev_node); 3429 return -ENODEV; 3430 } 3431 3432 ctrldev = &pdev->dev; 3433 priv = dev_get_drvdata(ctrldev); 3434 of_node_put(dev_node); 3435 3436 /* 3437 * If priv is NULL, it's probably because the caam driver wasn't 3438 * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 3439 */ 3440 if (!priv) 3441 return -ENODEV; 3442 3443 3444 INIT_LIST_HEAD(&alg_list); 3445 3446 /* 3447 * Register crypto algorithms the device supports. 3448 * First, detect presence and attributes of DES, AES, and MD blocks. 3449 */ 3450 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 3451 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 3452 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; 3453 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; 3454 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 3455 3456 /* If MD is present, limit digest size based on LP256 */ 3457 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) 3458 md_limit = SHA256_DIGEST_SIZE; 3459 3460 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 3461 struct caam_crypto_alg *t_alg; 3462 struct caam_alg_template *alg = driver_algs + i; 3463 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK; 3464 3465 /* Skip DES algorithms if not supported by device */ 3466 if (!des_inst && 3467 ((alg_sel == OP_ALG_ALGSEL_3DES) || 3468 (alg_sel == OP_ALG_ALGSEL_DES))) 3469 continue; 3470 3471 /* Skip AES algorithms if not supported by device */ 3472 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 3473 continue; 3474 3475 /* 3476 * Check support for AES modes not available 3477 * on LP devices. 3478 */ 3479 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) 3480 if ((alg->class1_alg_type & OP_ALG_AAI_MASK) == 3481 OP_ALG_AAI_XTS) 3482 continue; 3483 3484 t_alg = caam_alg_alloc(alg); 3485 if (IS_ERR(t_alg)) { 3486 err = PTR_ERR(t_alg); 3487 pr_warn("%s alg allocation failed\n", alg->driver_name); 3488 continue; 3489 } 3490 3491 err = crypto_register_alg(&t_alg->crypto_alg); 3492 if (err) { 3493 pr_warn("%s alg registration failed\n", 3494 t_alg->crypto_alg.cra_driver_name); 3495 kfree(t_alg); 3496 continue; 3497 } 3498 3499 list_add_tail(&t_alg->entry, &alg_list); 3500 registered = true; 3501 } 3502 3503 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 3504 struct caam_aead_alg *t_alg = driver_aeads + i; 3505 u32 c1_alg_sel = t_alg->caam.class1_alg_type & 3506 OP_ALG_ALGSEL_MASK; 3507 u32 c2_alg_sel = t_alg->caam.class2_alg_type & 3508 OP_ALG_ALGSEL_MASK; 3509 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; 3510 3511 /* Skip DES algorithms if not supported by device */ 3512 if (!des_inst && 3513 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || 3514 (c1_alg_sel == OP_ALG_ALGSEL_DES))) 3515 continue; 3516 3517 /* Skip AES algorithms if not supported by device */ 3518 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) 3519 continue; 3520 3521 /* 3522 * Check support for AES algorithms not available 3523 * on LP devices. 3524 */ 3525 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) 3526 if (alg_aai == OP_ALG_AAI_GCM) 3527 continue; 3528 3529 /* 3530 * Skip algorithms requiring message digests 3531 * if MD or MD size is not supported by device. 3532 */ 3533 if (c2_alg_sel && 3534 (!md_inst || (t_alg->aead.maxauthsize > md_limit))) 3535 continue; 3536 3537 caam_aead_alg_init(t_alg); 3538 3539 err = crypto_register_aead(&t_alg->aead); 3540 if (err) { 3541 pr_warn("%s alg registration failed\n", 3542 t_alg->aead.base.cra_driver_name); 3543 continue; 3544 } 3545 3546 t_alg->registered = true; 3547 registered = true; 3548 } 3549 3550 if (registered) 3551 pr_info("caam algorithms registered in /proc/crypto\n"); 3552 3553 return err; 3554 } 3555 3556 module_init(caam_algapi_init); 3557 module_exit(caam_algapi_exit); 3558 3559 MODULE_LICENSE("GPL"); 3560 MODULE_DESCRIPTION("FSL CAAM support for crypto API"); 3561 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); 3562