1 /* 2 * caam - Freescale FSL CAAM support for crypto API 3 * 4 * Copyright 2008-2011 Freescale Semiconductor, Inc. 5 * Copyright 2016 NXP 6 * 7 * Based on talitos crypto API driver. 8 * 9 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008): 10 * 11 * --------------- --------------- 12 * | JobDesc #1 |-------------------->| ShareDesc | 13 * | *(packet 1) | | (PDB) | 14 * --------------- |------------->| (hashKey) | 15 * . | | (cipherKey) | 16 * . | |-------->| (operation) | 17 * --------------- | | --------------- 18 * | JobDesc #2 |------| | 19 * | *(packet 2) | | 20 * --------------- | 21 * . | 22 * . | 23 * --------------- | 24 * | JobDesc #3 |------------ 25 * | *(packet 3) | 26 * --------------- 27 * 28 * The SharedDesc never changes for a connection unless rekeyed, but 29 * each packet will likely be in a different place. So all we need 30 * to know to process the packet is where the input is, where the 31 * output goes, and what context we want to process with. Context is 32 * in the SharedDesc, packet references in the JobDesc. 33 * 34 * So, a job desc looks like: 35 * 36 * --------------------- 37 * | Header | 38 * | ShareDesc Pointer | 39 * | SEQ_OUT_PTR | 40 * | (output buffer) | 41 * | (output length) | 42 * | SEQ_IN_PTR | 43 * | (input buffer) | 44 * | (input length) | 45 * --------------------- 46 */ 47 48 #include "compat.h" 49 50 #include "regs.h" 51 #include "intern.h" 52 #include "desc_constr.h" 53 #include "jr.h" 54 #include "error.h" 55 #include "sg_sw_sec4.h" 56 #include "key_gen.h" 57 #include "caamalg_desc.h" 58 59 /* 60 * crypto alg 61 */ 62 #define CAAM_CRA_PRIORITY 3000 63 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ 64 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ 65 CTR_RFC3686_NONCE_SIZE + \ 66 SHA512_DIGEST_SIZE * 2) 67 68 #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) 69 #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ 70 CAAM_CMD_SZ * 4) 71 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ 72 CAAM_CMD_SZ * 5) 73 74 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) 75 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) 76 77 #ifdef DEBUG 78 /* for print_hex_dumps with line references */ 79 #define debug(format, arg...) printk(format, arg) 80 #else 81 #define debug(format, arg...) 82 #endif 83 84 #ifdef DEBUG 85 #include <linux/highmem.h> 86 87 static void dbg_dump_sg(const char *level, const char *prefix_str, 88 int prefix_type, int rowsize, int groupsize, 89 struct scatterlist *sg, size_t tlen, bool ascii) 90 { 91 struct scatterlist *it; 92 void *it_page; 93 size_t len; 94 void *buf; 95 96 for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) { 97 /* 98 * make sure the scatterlist's page 99 * has a valid virtual memory mapping 100 */ 101 it_page = kmap_atomic(sg_page(it)); 102 if (unlikely(!it_page)) { 103 printk(KERN_ERR "dbg_dump_sg: kmap failed\n"); 104 return; 105 } 106 107 buf = it_page + it->offset; 108 len = min_t(size_t, tlen, it->length); 109 print_hex_dump(level, prefix_str, prefix_type, rowsize, 110 groupsize, buf, len, ascii); 111 tlen -= len; 112 113 kunmap_atomic(it_page); 114 } 115 } 116 #endif 117 118 static struct list_head alg_list; 119 120 struct caam_alg_entry { 121 int class1_alg_type; 122 int class2_alg_type; 123 bool rfc3686; 124 bool geniv; 125 }; 126 127 struct caam_aead_alg { 128 struct aead_alg aead; 129 struct caam_alg_entry caam; 130 bool registered; 131 }; 132 133 /* 134 * per-session context 135 */ 136 struct caam_ctx { 137 struct device *jrdev; 138 u32 sh_desc_enc[DESC_MAX_USED_LEN]; 139 u32 sh_desc_dec[DESC_MAX_USED_LEN]; 140 u32 sh_desc_givenc[DESC_MAX_USED_LEN]; 141 dma_addr_t sh_desc_enc_dma; 142 dma_addr_t sh_desc_dec_dma; 143 dma_addr_t sh_desc_givenc_dma; 144 u8 key[CAAM_MAX_KEY_SIZE]; 145 dma_addr_t key_dma; 146 struct alginfo adata; 147 struct alginfo cdata; 148 unsigned int authsize; 149 }; 150 151 static int aead_null_set_sh_desc(struct crypto_aead *aead) 152 { 153 struct caam_ctx *ctx = crypto_aead_ctx(aead); 154 struct device *jrdev = ctx->jrdev; 155 u32 *desc; 156 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - 157 ctx->adata.keylen_pad; 158 159 /* 160 * Job Descriptor and Shared Descriptors 161 * must all fit into the 64-word Descriptor h/w Buffer 162 */ 163 if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) { 164 ctx->adata.key_inline = true; 165 ctx->adata.key_virt = ctx->key; 166 } else { 167 ctx->adata.key_inline = false; 168 ctx->adata.key_dma = ctx->key_dma; 169 } 170 171 /* aead_encrypt shared descriptor */ 172 desc = ctx->sh_desc_enc; 173 cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize); 174 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 175 desc_bytes(desc), 176 DMA_TO_DEVICE); 177 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 178 dev_err(jrdev, "unable to map shared descriptor\n"); 179 return -ENOMEM; 180 } 181 182 /* 183 * Job Descriptor and Shared Descriptors 184 * must all fit into the 64-word Descriptor h/w Buffer 185 */ 186 if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) { 187 ctx->adata.key_inline = true; 188 ctx->adata.key_virt = ctx->key; 189 } else { 190 ctx->adata.key_inline = false; 191 ctx->adata.key_dma = ctx->key_dma; 192 } 193 194 /* aead_decrypt shared descriptor */ 195 desc = ctx->sh_desc_dec; 196 cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize); 197 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 198 desc_bytes(desc), 199 DMA_TO_DEVICE); 200 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 201 dev_err(jrdev, "unable to map shared descriptor\n"); 202 return -ENOMEM; 203 } 204 205 return 0; 206 } 207 208 static int aead_set_sh_desc(struct crypto_aead *aead) 209 { 210 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), 211 struct caam_aead_alg, aead); 212 unsigned int ivsize = crypto_aead_ivsize(aead); 213 struct caam_ctx *ctx = crypto_aead_ctx(aead); 214 struct device *jrdev = ctx->jrdev; 215 u32 ctx1_iv_off = 0; 216 u32 *desc, *nonce = NULL; 217 u32 inl_mask; 218 unsigned int data_len[2]; 219 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 220 OP_ALG_AAI_CTR_MOD128); 221 const bool is_rfc3686 = alg->caam.rfc3686; 222 223 if (!ctx->authsize) 224 return 0; 225 226 /* NULL encryption / decryption */ 227 if (!ctx->cdata.keylen) 228 return aead_null_set_sh_desc(aead); 229 230 /* 231 * AES-CTR needs to load IV in CONTEXT1 reg 232 * at an offset of 128bits (16bytes) 233 * CONTEXT1[255:128] = IV 234 */ 235 if (ctr_mode) 236 ctx1_iv_off = 16; 237 238 /* 239 * RFC3686 specific: 240 * CONTEXT1[255:128] = {NONCE, IV, COUNTER} 241 */ 242 if (is_rfc3686) { 243 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 244 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + 245 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); 246 } 247 248 data_len[0] = ctx->adata.keylen_pad; 249 data_len[1] = ctx->cdata.keylen; 250 251 if (alg->caam.geniv) 252 goto skip_enc; 253 254 /* 255 * Job Descriptor and Shared Descriptors 256 * must all fit into the 64-word Descriptor h/w Buffer 257 */ 258 if (desc_inline_query(DESC_AEAD_ENC_LEN + 259 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 260 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, 261 ARRAY_SIZE(data_len)) < 0) 262 return -EINVAL; 263 264 if (inl_mask & 1) 265 ctx->adata.key_virt = ctx->key; 266 else 267 ctx->adata.key_dma = ctx->key_dma; 268 269 if (inl_mask & 2) 270 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 271 else 272 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 273 274 ctx->adata.key_inline = !!(inl_mask & 1); 275 ctx->cdata.key_inline = !!(inl_mask & 2); 276 277 /* aead_encrypt shared descriptor */ 278 desc = ctx->sh_desc_enc; 279 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize, 280 is_rfc3686, nonce, ctx1_iv_off); 281 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 282 desc_bytes(desc), 283 DMA_TO_DEVICE); 284 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 285 dev_err(jrdev, "unable to map shared descriptor\n"); 286 return -ENOMEM; 287 } 288 289 skip_enc: 290 /* 291 * Job Descriptor and Shared Descriptors 292 * must all fit into the 64-word Descriptor h/w Buffer 293 */ 294 if (desc_inline_query(DESC_AEAD_DEC_LEN + 295 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 296 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, 297 ARRAY_SIZE(data_len)) < 0) 298 return -EINVAL; 299 300 if (inl_mask & 1) 301 ctx->adata.key_virt = ctx->key; 302 else 303 ctx->adata.key_dma = ctx->key_dma; 304 305 if (inl_mask & 2) 306 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 307 else 308 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 309 310 ctx->adata.key_inline = !!(inl_mask & 1); 311 ctx->cdata.key_inline = !!(inl_mask & 2); 312 313 /* aead_decrypt shared descriptor */ 314 desc = ctx->sh_desc_dec; 315 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, 316 ctx->authsize, alg->caam.geniv, is_rfc3686, 317 nonce, ctx1_iv_off); 318 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 319 desc_bytes(desc), 320 DMA_TO_DEVICE); 321 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 322 dev_err(jrdev, "unable to map shared descriptor\n"); 323 return -ENOMEM; 324 } 325 326 if (!alg->caam.geniv) 327 goto skip_givenc; 328 329 /* 330 * Job Descriptor and Shared Descriptors 331 * must all fit into the 64-word Descriptor h/w Buffer 332 */ 333 if (desc_inline_query(DESC_AEAD_GIVENC_LEN + 334 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), 335 AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, 336 ARRAY_SIZE(data_len)) < 0) 337 return -EINVAL; 338 339 if (inl_mask & 1) 340 ctx->adata.key_virt = ctx->key; 341 else 342 ctx->adata.key_dma = ctx->key_dma; 343 344 if (inl_mask & 2) 345 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; 346 else 347 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; 348 349 ctx->adata.key_inline = !!(inl_mask & 1); 350 ctx->cdata.key_inline = !!(inl_mask & 2); 351 352 /* aead_givencrypt shared descriptor */ 353 desc = ctx->sh_desc_enc; 354 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, 355 ctx->authsize, is_rfc3686, nonce, 356 ctx1_iv_off); 357 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 358 desc_bytes(desc), 359 DMA_TO_DEVICE); 360 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 361 dev_err(jrdev, "unable to map shared descriptor\n"); 362 return -ENOMEM; 363 } 364 365 skip_givenc: 366 return 0; 367 } 368 369 static int aead_setauthsize(struct crypto_aead *authenc, 370 unsigned int authsize) 371 { 372 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 373 374 ctx->authsize = authsize; 375 aead_set_sh_desc(authenc); 376 377 return 0; 378 } 379 380 static int gcm_set_sh_desc(struct crypto_aead *aead) 381 { 382 struct caam_ctx *ctx = crypto_aead_ctx(aead); 383 struct device *jrdev = ctx->jrdev; 384 u32 *desc; 385 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 386 ctx->cdata.keylen; 387 388 if (!ctx->cdata.keylen || !ctx->authsize) 389 return 0; 390 391 /* 392 * AES GCM encrypt shared descriptor 393 * Job Descriptor and Shared Descriptor 394 * must fit into the 64-word Descriptor h/w Buffer 395 */ 396 if (rem_bytes >= DESC_GCM_ENC_LEN) { 397 ctx->cdata.key_inline = true; 398 ctx->cdata.key_virt = ctx->key; 399 } else { 400 ctx->cdata.key_inline = false; 401 ctx->cdata.key_dma = ctx->key_dma; 402 } 403 404 desc = ctx->sh_desc_enc; 405 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize); 406 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 407 desc_bytes(desc), 408 DMA_TO_DEVICE); 409 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 410 dev_err(jrdev, "unable to map shared descriptor\n"); 411 return -ENOMEM; 412 } 413 414 /* 415 * Job Descriptor and Shared Descriptors 416 * must all fit into the 64-word Descriptor h/w Buffer 417 */ 418 if (rem_bytes >= DESC_GCM_DEC_LEN) { 419 ctx->cdata.key_inline = true; 420 ctx->cdata.key_virt = ctx->key; 421 } else { 422 ctx->cdata.key_inline = false; 423 ctx->cdata.key_dma = ctx->key_dma; 424 } 425 426 desc = ctx->sh_desc_dec; 427 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize); 428 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 429 desc_bytes(desc), 430 DMA_TO_DEVICE); 431 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 432 dev_err(jrdev, "unable to map shared descriptor\n"); 433 return -ENOMEM; 434 } 435 436 return 0; 437 } 438 439 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) 440 { 441 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 442 443 ctx->authsize = authsize; 444 gcm_set_sh_desc(authenc); 445 446 return 0; 447 } 448 449 static int rfc4106_set_sh_desc(struct crypto_aead *aead) 450 { 451 struct caam_ctx *ctx = crypto_aead_ctx(aead); 452 struct device *jrdev = ctx->jrdev; 453 u32 *desc; 454 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 455 ctx->cdata.keylen; 456 457 if (!ctx->cdata.keylen || !ctx->authsize) 458 return 0; 459 460 /* 461 * RFC4106 encrypt shared descriptor 462 * Job Descriptor and Shared Descriptor 463 * must fit into the 64-word Descriptor h/w Buffer 464 */ 465 if (rem_bytes >= DESC_RFC4106_ENC_LEN) { 466 ctx->cdata.key_inline = true; 467 ctx->cdata.key_virt = ctx->key; 468 } else { 469 ctx->cdata.key_inline = false; 470 ctx->cdata.key_dma = ctx->key_dma; 471 } 472 473 desc = ctx->sh_desc_enc; 474 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize); 475 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 476 desc_bytes(desc), 477 DMA_TO_DEVICE); 478 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 479 dev_err(jrdev, "unable to map shared descriptor\n"); 480 return -ENOMEM; 481 } 482 483 /* 484 * Job Descriptor and Shared Descriptors 485 * must all fit into the 64-word Descriptor h/w Buffer 486 */ 487 if (rem_bytes >= DESC_RFC4106_DEC_LEN) { 488 ctx->cdata.key_inline = true; 489 ctx->cdata.key_virt = ctx->key; 490 } else { 491 ctx->cdata.key_inline = false; 492 ctx->cdata.key_dma = ctx->key_dma; 493 } 494 495 desc = ctx->sh_desc_dec; 496 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize); 497 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 498 desc_bytes(desc), 499 DMA_TO_DEVICE); 500 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 501 dev_err(jrdev, "unable to map shared descriptor\n"); 502 return -ENOMEM; 503 } 504 505 return 0; 506 } 507 508 static int rfc4106_setauthsize(struct crypto_aead *authenc, 509 unsigned int authsize) 510 { 511 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 512 513 ctx->authsize = authsize; 514 rfc4106_set_sh_desc(authenc); 515 516 return 0; 517 } 518 519 static int rfc4543_set_sh_desc(struct crypto_aead *aead) 520 { 521 struct caam_ctx *ctx = crypto_aead_ctx(aead); 522 struct device *jrdev = ctx->jrdev; 523 u32 *desc; 524 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - 525 ctx->cdata.keylen; 526 527 if (!ctx->cdata.keylen || !ctx->authsize) 528 return 0; 529 530 /* 531 * RFC4543 encrypt shared descriptor 532 * Job Descriptor and Shared Descriptor 533 * must fit into the 64-word Descriptor h/w Buffer 534 */ 535 if (rem_bytes >= DESC_RFC4543_ENC_LEN) { 536 ctx->cdata.key_inline = true; 537 ctx->cdata.key_virt = ctx->key; 538 } else { 539 ctx->cdata.key_inline = false; 540 ctx->cdata.key_dma = ctx->key_dma; 541 } 542 543 desc = ctx->sh_desc_enc; 544 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize); 545 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 546 desc_bytes(desc), 547 DMA_TO_DEVICE); 548 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 549 dev_err(jrdev, "unable to map shared descriptor\n"); 550 return -ENOMEM; 551 } 552 553 /* 554 * Job Descriptor and Shared Descriptors 555 * must all fit into the 64-word Descriptor h/w Buffer 556 */ 557 if (rem_bytes >= DESC_RFC4543_DEC_LEN) { 558 ctx->cdata.key_inline = true; 559 ctx->cdata.key_virt = ctx->key; 560 } else { 561 ctx->cdata.key_inline = false; 562 ctx->cdata.key_dma = ctx->key_dma; 563 } 564 565 desc = ctx->sh_desc_dec; 566 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize); 567 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 568 desc_bytes(desc), 569 DMA_TO_DEVICE); 570 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 571 dev_err(jrdev, "unable to map shared descriptor\n"); 572 return -ENOMEM; 573 } 574 575 return 0; 576 } 577 578 static int rfc4543_setauthsize(struct crypto_aead *authenc, 579 unsigned int authsize) 580 { 581 struct caam_ctx *ctx = crypto_aead_ctx(authenc); 582 583 ctx->authsize = authsize; 584 rfc4543_set_sh_desc(authenc); 585 586 return 0; 587 } 588 589 static int aead_setkey(struct crypto_aead *aead, 590 const u8 *key, unsigned int keylen) 591 { 592 struct caam_ctx *ctx = crypto_aead_ctx(aead); 593 struct device *jrdev = ctx->jrdev; 594 struct crypto_authenc_keys keys; 595 int ret = 0; 596 597 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 598 goto badkey; 599 600 #ifdef DEBUG 601 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n", 602 keys.authkeylen + keys.enckeylen, keys.enckeylen, 603 keys.authkeylen); 604 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 605 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 606 #endif 607 608 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, 609 keys.authkeylen, CAAM_MAX_KEY_SIZE - 610 keys.enckeylen); 611 if (ret) { 612 goto badkey; 613 } 614 615 /* postpend encryption key to auth split key */ 616 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 617 618 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad + 619 keys.enckeylen, DMA_TO_DEVICE); 620 if (dma_mapping_error(jrdev, ctx->key_dma)) { 621 dev_err(jrdev, "unable to map key i/o memory\n"); 622 return -ENOMEM; 623 } 624 #ifdef DEBUG 625 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 626 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 627 ctx->adata.keylen_pad + keys.enckeylen, 1); 628 #endif 629 630 ctx->cdata.keylen = keys.enckeylen; 631 632 ret = aead_set_sh_desc(aead); 633 if (ret) { 634 dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad + 635 keys.enckeylen, DMA_TO_DEVICE); 636 } 637 638 return ret; 639 badkey: 640 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 641 return -EINVAL; 642 } 643 644 static int gcm_setkey(struct crypto_aead *aead, 645 const u8 *key, unsigned int keylen) 646 { 647 struct caam_ctx *ctx = crypto_aead_ctx(aead); 648 struct device *jrdev = ctx->jrdev; 649 int ret = 0; 650 651 #ifdef DEBUG 652 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 653 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 654 #endif 655 656 memcpy(ctx->key, key, keylen); 657 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, 658 DMA_TO_DEVICE); 659 if (dma_mapping_error(jrdev, ctx->key_dma)) { 660 dev_err(jrdev, "unable to map key i/o memory\n"); 661 return -ENOMEM; 662 } 663 ctx->cdata.keylen = keylen; 664 665 ret = gcm_set_sh_desc(aead); 666 if (ret) { 667 dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen, 668 DMA_TO_DEVICE); 669 } 670 671 return ret; 672 } 673 674 static int rfc4106_setkey(struct crypto_aead *aead, 675 const u8 *key, unsigned int keylen) 676 { 677 struct caam_ctx *ctx = crypto_aead_ctx(aead); 678 struct device *jrdev = ctx->jrdev; 679 int ret = 0; 680 681 if (keylen < 4) 682 return -EINVAL; 683 684 #ifdef DEBUG 685 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 686 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 687 #endif 688 689 memcpy(ctx->key, key, keylen); 690 691 /* 692 * The last four bytes of the key material are used as the salt value 693 * in the nonce. Update the AES key length. 694 */ 695 ctx->cdata.keylen = keylen - 4; 696 697 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen, 698 DMA_TO_DEVICE); 699 if (dma_mapping_error(jrdev, ctx->key_dma)) { 700 dev_err(jrdev, "unable to map key i/o memory\n"); 701 return -ENOMEM; 702 } 703 704 ret = rfc4106_set_sh_desc(aead); 705 if (ret) { 706 dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen, 707 DMA_TO_DEVICE); 708 } 709 710 return ret; 711 } 712 713 static int rfc4543_setkey(struct crypto_aead *aead, 714 const u8 *key, unsigned int keylen) 715 { 716 struct caam_ctx *ctx = crypto_aead_ctx(aead); 717 struct device *jrdev = ctx->jrdev; 718 int ret = 0; 719 720 if (keylen < 4) 721 return -EINVAL; 722 723 #ifdef DEBUG 724 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 725 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 726 #endif 727 728 memcpy(ctx->key, key, keylen); 729 730 /* 731 * The last four bytes of the key material are used as the salt value 732 * in the nonce. Update the AES key length. 733 */ 734 ctx->cdata.keylen = keylen - 4; 735 736 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen, 737 DMA_TO_DEVICE); 738 if (dma_mapping_error(jrdev, ctx->key_dma)) { 739 dev_err(jrdev, "unable to map key i/o memory\n"); 740 return -ENOMEM; 741 } 742 743 ret = rfc4543_set_sh_desc(aead); 744 if (ret) { 745 dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen, 746 DMA_TO_DEVICE); 747 } 748 749 return ret; 750 } 751 752 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 753 const u8 *key, unsigned int keylen) 754 { 755 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 756 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); 757 const char *alg_name = crypto_tfm_alg_name(tfm); 758 struct device *jrdev = ctx->jrdev; 759 unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 760 u32 *desc; 761 u32 ctx1_iv_off = 0; 762 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 763 OP_ALG_AAI_CTR_MOD128); 764 const bool is_rfc3686 = (ctr_mode && 765 (strstr(alg_name, "rfc3686") != NULL)); 766 767 memcpy(ctx->key, key, keylen); 768 #ifdef DEBUG 769 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 770 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 771 #endif 772 /* 773 * AES-CTR needs to load IV in CONTEXT1 reg 774 * at an offset of 128bits (16bytes) 775 * CONTEXT1[255:128] = IV 776 */ 777 if (ctr_mode) 778 ctx1_iv_off = 16; 779 780 /* 781 * RFC3686 specific: 782 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} 783 * | *key = {KEY, NONCE} 784 */ 785 if (is_rfc3686) { 786 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; 787 keylen -= CTR_RFC3686_NONCE_SIZE; 788 } 789 790 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, 791 DMA_TO_DEVICE); 792 if (dma_mapping_error(jrdev, ctx->key_dma)) { 793 dev_err(jrdev, "unable to map key i/o memory\n"); 794 return -ENOMEM; 795 } 796 ctx->cdata.keylen = keylen; 797 ctx->cdata.key_virt = ctx->key; 798 ctx->cdata.key_inline = true; 799 800 /* ablkcipher_encrypt shared descriptor */ 801 desc = ctx->sh_desc_enc; 802 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, 803 ctx1_iv_off); 804 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, 805 desc_bytes(desc), 806 DMA_TO_DEVICE); 807 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 808 dev_err(jrdev, "unable to map shared descriptor\n"); 809 return -ENOMEM; 810 } 811 812 /* ablkcipher_decrypt shared descriptor */ 813 desc = ctx->sh_desc_dec; 814 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, 815 ctx1_iv_off); 816 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, 817 desc_bytes(desc), 818 DMA_TO_DEVICE); 819 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 820 dev_err(jrdev, "unable to map shared descriptor\n"); 821 return -ENOMEM; 822 } 823 824 /* ablkcipher_givencrypt shared descriptor */ 825 desc = ctx->sh_desc_givenc; 826 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686, 827 ctx1_iv_off); 828 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, 829 desc_bytes(desc), 830 DMA_TO_DEVICE); 831 if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { 832 dev_err(jrdev, "unable to map shared descriptor\n"); 833 return -ENOMEM; 834 } 835 836 return 0; 837 } 838 839 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, 840 const u8 *key, unsigned int keylen) 841 { 842 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 843 struct device *jrdev = ctx->jrdev; 844 u32 *desc; 845 846 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { 847 crypto_ablkcipher_set_flags(ablkcipher, 848 CRYPTO_TFM_RES_BAD_KEY_LEN); 849 dev_err(jrdev, "key size mismatch\n"); 850 return -EINVAL; 851 } 852 853 memcpy(ctx->key, key, keylen); 854 ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE); 855 if (dma_mapping_error(jrdev, ctx->key_dma)) { 856 dev_err(jrdev, "unable to map key i/o memory\n"); 857 return -ENOMEM; 858 } 859 ctx->cdata.keylen = keylen; 860 ctx->cdata.key_virt = ctx->key; 861 ctx->cdata.key_inline = true; 862 863 /* xts_ablkcipher_encrypt shared descriptor */ 864 desc = ctx->sh_desc_enc; 865 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); 866 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 867 DMA_TO_DEVICE); 868 if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { 869 dev_err(jrdev, "unable to map shared descriptor\n"); 870 return -ENOMEM; 871 } 872 873 /* xts_ablkcipher_decrypt shared descriptor */ 874 desc = ctx->sh_desc_dec; 875 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); 876 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 877 DMA_TO_DEVICE); 878 if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) { 879 dma_unmap_single(jrdev, ctx->sh_desc_enc_dma, 880 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); 881 dev_err(jrdev, "unable to map shared descriptor\n"); 882 return -ENOMEM; 883 } 884 885 return 0; 886 } 887 888 /* 889 * aead_edesc - s/w-extended aead descriptor 890 * @src_nents: number of segments in input scatterlist 891 * @dst_nents: number of segments in output scatterlist 892 * @sec4_sg_bytes: length of dma mapped sec4_sg space 893 * @sec4_sg_dma: bus physical mapped address of h/w link table 894 * @sec4_sg: pointer to h/w link table 895 * @hw_desc: the h/w job descriptor followed by any referenced link tables 896 */ 897 struct aead_edesc { 898 int src_nents; 899 int dst_nents; 900 int sec4_sg_bytes; 901 dma_addr_t sec4_sg_dma; 902 struct sec4_sg_entry *sec4_sg; 903 u32 hw_desc[]; 904 }; 905 906 /* 907 * ablkcipher_edesc - s/w-extended ablkcipher descriptor 908 * @src_nents: number of segments in input scatterlist 909 * @dst_nents: number of segments in output scatterlist 910 * @iv_dma: dma address of iv for checking continuity and link table 911 * @sec4_sg_bytes: length of dma mapped sec4_sg space 912 * @sec4_sg_dma: bus physical mapped address of h/w link table 913 * @sec4_sg: pointer to h/w link table 914 * @hw_desc: the h/w job descriptor followed by any referenced link tables 915 */ 916 struct ablkcipher_edesc { 917 int src_nents; 918 int dst_nents; 919 dma_addr_t iv_dma; 920 int sec4_sg_bytes; 921 dma_addr_t sec4_sg_dma; 922 struct sec4_sg_entry *sec4_sg; 923 u32 hw_desc[0]; 924 }; 925 926 static void caam_unmap(struct device *dev, struct scatterlist *src, 927 struct scatterlist *dst, int src_nents, 928 int dst_nents, 929 dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, 930 int sec4_sg_bytes) 931 { 932 if (dst != src) { 933 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE); 934 dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE); 935 } else { 936 dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL); 937 } 938 939 if (iv_dma) 940 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 941 if (sec4_sg_bytes) 942 dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, 943 DMA_TO_DEVICE); 944 } 945 946 static void aead_unmap(struct device *dev, 947 struct aead_edesc *edesc, 948 struct aead_request *req) 949 { 950 caam_unmap(dev, req->src, req->dst, 951 edesc->src_nents, edesc->dst_nents, 0, 0, 952 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 953 } 954 955 static void ablkcipher_unmap(struct device *dev, 956 struct ablkcipher_edesc *edesc, 957 struct ablkcipher_request *req) 958 { 959 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 960 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 961 962 caam_unmap(dev, req->src, req->dst, 963 edesc->src_nents, edesc->dst_nents, 964 edesc->iv_dma, ivsize, 965 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 966 } 967 968 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 969 void *context) 970 { 971 struct aead_request *req = context; 972 struct aead_edesc *edesc; 973 974 #ifdef DEBUG 975 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 976 #endif 977 978 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 979 980 if (err) 981 caam_jr_strstatus(jrdev, err); 982 983 aead_unmap(jrdev, edesc, req); 984 985 kfree(edesc); 986 987 aead_request_complete(req, err); 988 } 989 990 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 991 void *context) 992 { 993 struct aead_request *req = context; 994 struct aead_edesc *edesc; 995 996 #ifdef DEBUG 997 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 998 #endif 999 1000 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); 1001 1002 if (err) 1003 caam_jr_strstatus(jrdev, err); 1004 1005 aead_unmap(jrdev, edesc, req); 1006 1007 /* 1008 * verify hw auth check passed else return -EBADMSG 1009 */ 1010 if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) 1011 err = -EBADMSG; 1012 1013 kfree(edesc); 1014 1015 aead_request_complete(req, err); 1016 } 1017 1018 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, 1019 void *context) 1020 { 1021 struct ablkcipher_request *req = context; 1022 struct ablkcipher_edesc *edesc; 1023 #ifdef DEBUG 1024 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1025 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1026 1027 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1028 #endif 1029 1030 edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]); 1031 1032 if (err) 1033 caam_jr_strstatus(jrdev, err); 1034 1035 #ifdef DEBUG 1036 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 1037 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1038 edesc->src_nents > 1 ? 100 : ivsize, 1); 1039 dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", 1040 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1041 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 1042 #endif 1043 1044 ablkcipher_unmap(jrdev, edesc, req); 1045 kfree(edesc); 1046 1047 ablkcipher_request_complete(req, err); 1048 } 1049 1050 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, 1051 void *context) 1052 { 1053 struct ablkcipher_request *req = context; 1054 struct ablkcipher_edesc *edesc; 1055 #ifdef DEBUG 1056 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1057 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1058 1059 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1060 #endif 1061 1062 edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]); 1063 if (err) 1064 caam_jr_strstatus(jrdev, err); 1065 1066 #ifdef DEBUG 1067 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ", 1068 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1069 ivsize, 1); 1070 dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ", 1071 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, 1072 edesc->dst_nents > 1 ? 100 : req->nbytes, 1); 1073 #endif 1074 1075 ablkcipher_unmap(jrdev, edesc, req); 1076 kfree(edesc); 1077 1078 ablkcipher_request_complete(req, err); 1079 } 1080 1081 /* 1082 * Fill in aead job descriptor 1083 */ 1084 static void init_aead_job(struct aead_request *req, 1085 struct aead_edesc *edesc, 1086 bool all_contig, bool encrypt) 1087 { 1088 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1089 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1090 int authsize = ctx->authsize; 1091 u32 *desc = edesc->hw_desc; 1092 u32 out_options, in_options; 1093 dma_addr_t dst_dma, src_dma; 1094 int len, sec4_sg_index = 0; 1095 dma_addr_t ptr; 1096 u32 *sh_desc; 1097 1098 sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; 1099 ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; 1100 1101 len = desc_len(sh_desc); 1102 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1103 1104 if (all_contig) { 1105 src_dma = sg_dma_address(req->src); 1106 in_options = 0; 1107 } else { 1108 src_dma = edesc->sec4_sg_dma; 1109 sec4_sg_index += edesc->src_nents; 1110 in_options = LDST_SGF; 1111 } 1112 1113 append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen, 1114 in_options); 1115 1116 dst_dma = src_dma; 1117 out_options = in_options; 1118 1119 if (unlikely(req->src != req->dst)) { 1120 if (!edesc->dst_nents) { 1121 dst_dma = sg_dma_address(req->dst); 1122 } else { 1123 dst_dma = edesc->sec4_sg_dma + 1124 sec4_sg_index * 1125 sizeof(struct sec4_sg_entry); 1126 out_options = LDST_SGF; 1127 } 1128 } 1129 1130 if (encrypt) 1131 append_seq_out_ptr(desc, dst_dma, 1132 req->assoclen + req->cryptlen + authsize, 1133 out_options); 1134 else 1135 append_seq_out_ptr(desc, dst_dma, 1136 req->assoclen + req->cryptlen - authsize, 1137 out_options); 1138 1139 /* REG3 = assoclen */ 1140 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); 1141 } 1142 1143 static void init_gcm_job(struct aead_request *req, 1144 struct aead_edesc *edesc, 1145 bool all_contig, bool encrypt) 1146 { 1147 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1148 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1149 unsigned int ivsize = crypto_aead_ivsize(aead); 1150 u32 *desc = edesc->hw_desc; 1151 bool generic_gcm = (ivsize == 12); 1152 unsigned int last; 1153 1154 init_aead_job(req, edesc, all_contig, encrypt); 1155 1156 /* BUG This should not be specific to generic GCM. */ 1157 last = 0; 1158 if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen)) 1159 last = FIFOLD_TYPE_LAST1; 1160 1161 /* Read GCM IV */ 1162 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | 1163 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last); 1164 /* Append Salt */ 1165 if (!generic_gcm) 1166 append_data(desc, ctx->key + ctx->cdata.keylen, 4); 1167 /* Append IV */ 1168 append_data(desc, req->iv, ivsize); 1169 /* End of blank commands */ 1170 } 1171 1172 static void init_authenc_job(struct aead_request *req, 1173 struct aead_edesc *edesc, 1174 bool all_contig, bool encrypt) 1175 { 1176 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1177 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), 1178 struct caam_aead_alg, aead); 1179 unsigned int ivsize = crypto_aead_ivsize(aead); 1180 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1181 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 1182 OP_ALG_AAI_CTR_MOD128); 1183 const bool is_rfc3686 = alg->caam.rfc3686; 1184 u32 *desc = edesc->hw_desc; 1185 u32 ivoffset = 0; 1186 1187 /* 1188 * AES-CTR needs to load IV in CONTEXT1 reg 1189 * at an offset of 128bits (16bytes) 1190 * CONTEXT1[255:128] = IV 1191 */ 1192 if (ctr_mode) 1193 ivoffset = 16; 1194 1195 /* 1196 * RFC3686 specific: 1197 * CONTEXT1[255:128] = {NONCE, IV, COUNTER} 1198 */ 1199 if (is_rfc3686) 1200 ivoffset = 16 + CTR_RFC3686_NONCE_SIZE; 1201 1202 init_aead_job(req, edesc, all_contig, encrypt); 1203 1204 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) 1205 append_load_as_imm(desc, req->iv, ivsize, 1206 LDST_CLASS_1_CCB | 1207 LDST_SRCDST_BYTE_CONTEXT | 1208 (ivoffset << LDST_OFFSET_SHIFT)); 1209 } 1210 1211 /* 1212 * Fill in ablkcipher job descriptor 1213 */ 1214 static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr, 1215 struct ablkcipher_edesc *edesc, 1216 struct ablkcipher_request *req, 1217 bool iv_contig) 1218 { 1219 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1220 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1221 u32 *desc = edesc->hw_desc; 1222 u32 out_options = 0, in_options; 1223 dma_addr_t dst_dma, src_dma; 1224 int len, sec4_sg_index = 0; 1225 1226 #ifdef DEBUG 1227 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", 1228 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1229 ivsize, 1); 1230 printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes); 1231 dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ", 1232 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1233 edesc->src_nents ? 100 : req->nbytes, 1); 1234 #endif 1235 1236 len = desc_len(sh_desc); 1237 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1238 1239 if (iv_contig) { 1240 src_dma = edesc->iv_dma; 1241 in_options = 0; 1242 } else { 1243 src_dma = edesc->sec4_sg_dma; 1244 sec4_sg_index += edesc->src_nents + 1; 1245 in_options = LDST_SGF; 1246 } 1247 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); 1248 1249 if (likely(req->src == req->dst)) { 1250 if (!edesc->src_nents && iv_contig) { 1251 dst_dma = sg_dma_address(req->src); 1252 } else { 1253 dst_dma = edesc->sec4_sg_dma + 1254 sizeof(struct sec4_sg_entry); 1255 out_options = LDST_SGF; 1256 } 1257 } else { 1258 if (!edesc->dst_nents) { 1259 dst_dma = sg_dma_address(req->dst); 1260 } else { 1261 dst_dma = edesc->sec4_sg_dma + 1262 sec4_sg_index * sizeof(struct sec4_sg_entry); 1263 out_options = LDST_SGF; 1264 } 1265 } 1266 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); 1267 } 1268 1269 /* 1270 * Fill in ablkcipher givencrypt job descriptor 1271 */ 1272 static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, 1273 struct ablkcipher_edesc *edesc, 1274 struct ablkcipher_request *req, 1275 bool iv_contig) 1276 { 1277 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1278 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1279 u32 *desc = edesc->hw_desc; 1280 u32 out_options, in_options; 1281 dma_addr_t dst_dma, src_dma; 1282 int len, sec4_sg_index = 0; 1283 1284 #ifdef DEBUG 1285 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ", 1286 DUMP_PREFIX_ADDRESS, 16, 4, req->info, 1287 ivsize, 1); 1288 dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ", 1289 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1290 edesc->src_nents ? 100 : req->nbytes, 1); 1291 #endif 1292 1293 len = desc_len(sh_desc); 1294 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); 1295 1296 if (!edesc->src_nents) { 1297 src_dma = sg_dma_address(req->src); 1298 in_options = 0; 1299 } else { 1300 src_dma = edesc->sec4_sg_dma; 1301 sec4_sg_index += edesc->src_nents; 1302 in_options = LDST_SGF; 1303 } 1304 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options); 1305 1306 if (iv_contig) { 1307 dst_dma = edesc->iv_dma; 1308 out_options = 0; 1309 } else { 1310 dst_dma = edesc->sec4_sg_dma + 1311 sec4_sg_index * sizeof(struct sec4_sg_entry); 1312 out_options = LDST_SGF; 1313 } 1314 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options); 1315 } 1316 1317 /* 1318 * allocate and map the aead extended descriptor 1319 */ 1320 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, 1321 int desc_bytes, bool *all_contig_ptr, 1322 bool encrypt) 1323 { 1324 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1325 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1326 struct device *jrdev = ctx->jrdev; 1327 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1328 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1329 int src_nents, dst_nents = 0; 1330 struct aead_edesc *edesc; 1331 int sgc; 1332 bool all_contig = true; 1333 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 1334 unsigned int authsize = ctx->authsize; 1335 1336 if (unlikely(req->dst != req->src)) { 1337 src_nents = sg_count(req->src, req->assoclen + req->cryptlen); 1338 dst_nents = sg_count(req->dst, 1339 req->assoclen + req->cryptlen + 1340 (encrypt ? authsize : (-authsize))); 1341 } else { 1342 src_nents = sg_count(req->src, 1343 req->assoclen + req->cryptlen + 1344 (encrypt ? authsize : 0)); 1345 } 1346 1347 /* Check if data are contiguous. */ 1348 all_contig = !src_nents; 1349 if (!all_contig) 1350 sec4_sg_len = src_nents; 1351 1352 sec4_sg_len += dst_nents; 1353 1354 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); 1355 1356 /* allocate space for base edesc and hw desc commands, link tables */ 1357 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, 1358 GFP_DMA | flags); 1359 if (!edesc) { 1360 dev_err(jrdev, "could not allocate extended descriptor\n"); 1361 return ERR_PTR(-ENOMEM); 1362 } 1363 1364 if (likely(req->src == req->dst)) { 1365 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1366 DMA_BIDIRECTIONAL); 1367 if (unlikely(!sgc)) { 1368 dev_err(jrdev, "unable to map source\n"); 1369 kfree(edesc); 1370 return ERR_PTR(-ENOMEM); 1371 } 1372 } else { 1373 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1374 DMA_TO_DEVICE); 1375 if (unlikely(!sgc)) { 1376 dev_err(jrdev, "unable to map source\n"); 1377 kfree(edesc); 1378 return ERR_PTR(-ENOMEM); 1379 } 1380 1381 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 1382 DMA_FROM_DEVICE); 1383 if (unlikely(!sgc)) { 1384 dev_err(jrdev, "unable to map destination\n"); 1385 dma_unmap_sg(jrdev, req->src, src_nents ? : 1, 1386 DMA_TO_DEVICE); 1387 kfree(edesc); 1388 return ERR_PTR(-ENOMEM); 1389 } 1390 } 1391 1392 edesc->src_nents = src_nents; 1393 edesc->dst_nents = dst_nents; 1394 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + 1395 desc_bytes; 1396 *all_contig_ptr = all_contig; 1397 1398 sec4_sg_index = 0; 1399 if (!all_contig) { 1400 sg_to_sec4_sg_last(req->src, src_nents, 1401 edesc->sec4_sg + sec4_sg_index, 0); 1402 sec4_sg_index += src_nents; 1403 } 1404 if (dst_nents) { 1405 sg_to_sec4_sg_last(req->dst, dst_nents, 1406 edesc->sec4_sg + sec4_sg_index, 0); 1407 } 1408 1409 if (!sec4_sg_bytes) 1410 return edesc; 1411 1412 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1413 sec4_sg_bytes, DMA_TO_DEVICE); 1414 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1415 dev_err(jrdev, "unable to map S/G table\n"); 1416 aead_unmap(jrdev, edesc, req); 1417 kfree(edesc); 1418 return ERR_PTR(-ENOMEM); 1419 } 1420 1421 edesc->sec4_sg_bytes = sec4_sg_bytes; 1422 1423 return edesc; 1424 } 1425 1426 static int gcm_encrypt(struct aead_request *req) 1427 { 1428 struct aead_edesc *edesc; 1429 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1430 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1431 struct device *jrdev = ctx->jrdev; 1432 bool all_contig; 1433 u32 *desc; 1434 int ret = 0; 1435 1436 /* allocate extended descriptor */ 1437 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true); 1438 if (IS_ERR(edesc)) 1439 return PTR_ERR(edesc); 1440 1441 /* Create and submit job descriptor */ 1442 init_gcm_job(req, edesc, all_contig, true); 1443 #ifdef DEBUG 1444 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1445 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1446 desc_bytes(edesc->hw_desc), 1); 1447 #endif 1448 1449 desc = edesc->hw_desc; 1450 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 1451 if (!ret) { 1452 ret = -EINPROGRESS; 1453 } else { 1454 aead_unmap(jrdev, edesc, req); 1455 kfree(edesc); 1456 } 1457 1458 return ret; 1459 } 1460 1461 static int ipsec_gcm_encrypt(struct aead_request *req) 1462 { 1463 if (req->assoclen < 8) 1464 return -EINVAL; 1465 1466 return gcm_encrypt(req); 1467 } 1468 1469 static int aead_encrypt(struct aead_request *req) 1470 { 1471 struct aead_edesc *edesc; 1472 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1473 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1474 struct device *jrdev = ctx->jrdev; 1475 bool all_contig; 1476 u32 *desc; 1477 int ret = 0; 1478 1479 /* allocate extended descriptor */ 1480 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, 1481 &all_contig, true); 1482 if (IS_ERR(edesc)) 1483 return PTR_ERR(edesc); 1484 1485 /* Create and submit job descriptor */ 1486 init_authenc_job(req, edesc, all_contig, true); 1487 #ifdef DEBUG 1488 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1489 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1490 desc_bytes(edesc->hw_desc), 1); 1491 #endif 1492 1493 desc = edesc->hw_desc; 1494 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); 1495 if (!ret) { 1496 ret = -EINPROGRESS; 1497 } else { 1498 aead_unmap(jrdev, edesc, req); 1499 kfree(edesc); 1500 } 1501 1502 return ret; 1503 } 1504 1505 static int gcm_decrypt(struct aead_request *req) 1506 { 1507 struct aead_edesc *edesc; 1508 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1509 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1510 struct device *jrdev = ctx->jrdev; 1511 bool all_contig; 1512 u32 *desc; 1513 int ret = 0; 1514 1515 /* allocate extended descriptor */ 1516 edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false); 1517 if (IS_ERR(edesc)) 1518 return PTR_ERR(edesc); 1519 1520 /* Create and submit job descriptor*/ 1521 init_gcm_job(req, edesc, all_contig, false); 1522 #ifdef DEBUG 1523 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1524 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1525 desc_bytes(edesc->hw_desc), 1); 1526 #endif 1527 1528 desc = edesc->hw_desc; 1529 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); 1530 if (!ret) { 1531 ret = -EINPROGRESS; 1532 } else { 1533 aead_unmap(jrdev, edesc, req); 1534 kfree(edesc); 1535 } 1536 1537 return ret; 1538 } 1539 1540 static int ipsec_gcm_decrypt(struct aead_request *req) 1541 { 1542 if (req->assoclen < 8) 1543 return -EINVAL; 1544 1545 return gcm_decrypt(req); 1546 } 1547 1548 static int aead_decrypt(struct aead_request *req) 1549 { 1550 struct aead_edesc *edesc; 1551 struct crypto_aead *aead = crypto_aead_reqtfm(req); 1552 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1553 struct device *jrdev = ctx->jrdev; 1554 bool all_contig; 1555 u32 *desc; 1556 int ret = 0; 1557 1558 #ifdef DEBUG 1559 dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ", 1560 DUMP_PREFIX_ADDRESS, 16, 4, req->src, 1561 req->assoclen + req->cryptlen, 1); 1562 #endif 1563 1564 /* allocate extended descriptor */ 1565 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, 1566 &all_contig, false); 1567 if (IS_ERR(edesc)) 1568 return PTR_ERR(edesc); 1569 1570 /* Create and submit job descriptor*/ 1571 init_authenc_job(req, edesc, all_contig, false); 1572 #ifdef DEBUG 1573 print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", 1574 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1575 desc_bytes(edesc->hw_desc), 1); 1576 #endif 1577 1578 desc = edesc->hw_desc; 1579 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); 1580 if (!ret) { 1581 ret = -EINPROGRESS; 1582 } else { 1583 aead_unmap(jrdev, edesc, req); 1584 kfree(edesc); 1585 } 1586 1587 return ret; 1588 } 1589 1590 /* 1591 * allocate and map the ablkcipher extended descriptor for ablkcipher 1592 */ 1593 static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request 1594 *req, int desc_bytes, 1595 bool *iv_contig_out) 1596 { 1597 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1598 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1599 struct device *jrdev = ctx->jrdev; 1600 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1601 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 1602 GFP_KERNEL : GFP_ATOMIC; 1603 int src_nents, dst_nents = 0, sec4_sg_bytes; 1604 struct ablkcipher_edesc *edesc; 1605 dma_addr_t iv_dma = 0; 1606 bool iv_contig = false; 1607 int sgc; 1608 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1609 int sec4_sg_index; 1610 1611 src_nents = sg_count(req->src, req->nbytes); 1612 1613 if (req->dst != req->src) 1614 dst_nents = sg_count(req->dst, req->nbytes); 1615 1616 if (likely(req->src == req->dst)) { 1617 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1618 DMA_BIDIRECTIONAL); 1619 if (unlikely(!sgc)) { 1620 dev_err(jrdev, "unable to map source\n"); 1621 return ERR_PTR(-ENOMEM); 1622 } 1623 } else { 1624 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1625 DMA_TO_DEVICE); 1626 if (unlikely(!sgc)) { 1627 dev_err(jrdev, "unable to map source\n"); 1628 return ERR_PTR(-ENOMEM); 1629 } 1630 1631 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 1632 DMA_FROM_DEVICE); 1633 if (unlikely(!sgc)) { 1634 dev_err(jrdev, "unable to map destination\n"); 1635 dma_unmap_sg(jrdev, req->src, src_nents ? : 1, 1636 DMA_TO_DEVICE); 1637 return ERR_PTR(-ENOMEM); 1638 } 1639 } 1640 1641 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); 1642 if (dma_mapping_error(jrdev, iv_dma)) { 1643 dev_err(jrdev, "unable to map IV\n"); 1644 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1645 0, 0, 0); 1646 return ERR_PTR(-ENOMEM); 1647 } 1648 1649 /* 1650 * Check if iv can be contiguous with source and destination. 1651 * If so, include it. If not, create scatterlist. 1652 */ 1653 if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src)) 1654 iv_contig = true; 1655 else 1656 src_nents = src_nents ? : 1; 1657 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * 1658 sizeof(struct sec4_sg_entry); 1659 1660 /* allocate space for base edesc and hw desc commands, link tables */ 1661 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, 1662 GFP_DMA | flags); 1663 if (!edesc) { 1664 dev_err(jrdev, "could not allocate extended descriptor\n"); 1665 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 1666 iv_dma, ivsize, 0, 0); 1667 return ERR_PTR(-ENOMEM); 1668 } 1669 1670 edesc->src_nents = src_nents; 1671 edesc->dst_nents = dst_nents; 1672 edesc->sec4_sg_bytes = sec4_sg_bytes; 1673 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + 1674 desc_bytes; 1675 1676 sec4_sg_index = 0; 1677 if (!iv_contig) { 1678 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); 1679 sg_to_sec4_sg_last(req->src, src_nents, 1680 edesc->sec4_sg + 1, 0); 1681 sec4_sg_index += 1 + src_nents; 1682 } 1683 1684 if (dst_nents) { 1685 sg_to_sec4_sg_last(req->dst, dst_nents, 1686 edesc->sec4_sg + sec4_sg_index, 0); 1687 } 1688 1689 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1690 sec4_sg_bytes, DMA_TO_DEVICE); 1691 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1692 dev_err(jrdev, "unable to map S/G table\n"); 1693 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 1694 iv_dma, ivsize, 0, 0); 1695 kfree(edesc); 1696 return ERR_PTR(-ENOMEM); 1697 } 1698 1699 edesc->iv_dma = iv_dma; 1700 1701 #ifdef DEBUG 1702 print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ", 1703 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 1704 sec4_sg_bytes, 1); 1705 #endif 1706 1707 *iv_contig_out = iv_contig; 1708 return edesc; 1709 } 1710 1711 static int ablkcipher_encrypt(struct ablkcipher_request *req) 1712 { 1713 struct ablkcipher_edesc *edesc; 1714 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1715 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1716 struct device *jrdev = ctx->jrdev; 1717 bool iv_contig; 1718 u32 *desc; 1719 int ret = 0; 1720 1721 /* allocate extended descriptor */ 1722 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * 1723 CAAM_CMD_SZ, &iv_contig); 1724 if (IS_ERR(edesc)) 1725 return PTR_ERR(edesc); 1726 1727 /* Create and submit job descriptor*/ 1728 init_ablkcipher_job(ctx->sh_desc_enc, 1729 ctx->sh_desc_enc_dma, edesc, req, iv_contig); 1730 #ifdef DEBUG 1731 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", 1732 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1733 desc_bytes(edesc->hw_desc), 1); 1734 #endif 1735 desc = edesc->hw_desc; 1736 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); 1737 1738 if (!ret) { 1739 ret = -EINPROGRESS; 1740 } else { 1741 ablkcipher_unmap(jrdev, edesc, req); 1742 kfree(edesc); 1743 } 1744 1745 return ret; 1746 } 1747 1748 static int ablkcipher_decrypt(struct ablkcipher_request *req) 1749 { 1750 struct ablkcipher_edesc *edesc; 1751 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1752 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1753 struct device *jrdev = ctx->jrdev; 1754 bool iv_contig; 1755 u32 *desc; 1756 int ret = 0; 1757 1758 /* allocate extended descriptor */ 1759 edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * 1760 CAAM_CMD_SZ, &iv_contig); 1761 if (IS_ERR(edesc)) 1762 return PTR_ERR(edesc); 1763 1764 /* Create and submit job descriptor*/ 1765 init_ablkcipher_job(ctx->sh_desc_dec, 1766 ctx->sh_desc_dec_dma, edesc, req, iv_contig); 1767 desc = edesc->hw_desc; 1768 #ifdef DEBUG 1769 print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ", 1770 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1771 desc_bytes(edesc->hw_desc), 1); 1772 #endif 1773 1774 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); 1775 if (!ret) { 1776 ret = -EINPROGRESS; 1777 } else { 1778 ablkcipher_unmap(jrdev, edesc, req); 1779 kfree(edesc); 1780 } 1781 1782 return ret; 1783 } 1784 1785 /* 1786 * allocate and map the ablkcipher extended descriptor 1787 * for ablkcipher givencrypt 1788 */ 1789 static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc( 1790 struct skcipher_givcrypt_request *greq, 1791 int desc_bytes, 1792 bool *iv_contig_out) 1793 { 1794 struct ablkcipher_request *req = &greq->creq; 1795 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1796 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1797 struct device *jrdev = ctx->jrdev; 1798 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1799 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 1800 GFP_KERNEL : GFP_ATOMIC; 1801 int src_nents, dst_nents = 0, sec4_sg_bytes; 1802 struct ablkcipher_edesc *edesc; 1803 dma_addr_t iv_dma = 0; 1804 bool iv_contig = false; 1805 int sgc; 1806 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1807 int sec4_sg_index; 1808 1809 src_nents = sg_count(req->src, req->nbytes); 1810 1811 if (unlikely(req->dst != req->src)) 1812 dst_nents = sg_count(req->dst, req->nbytes); 1813 1814 if (likely(req->src == req->dst)) { 1815 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1816 DMA_BIDIRECTIONAL); 1817 if (unlikely(!sgc)) { 1818 dev_err(jrdev, "unable to map source\n"); 1819 return ERR_PTR(-ENOMEM); 1820 } 1821 } else { 1822 sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 1823 DMA_TO_DEVICE); 1824 if (unlikely(!sgc)) { 1825 dev_err(jrdev, "unable to map source\n"); 1826 return ERR_PTR(-ENOMEM); 1827 } 1828 1829 sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 1830 DMA_FROM_DEVICE); 1831 if (unlikely(!sgc)) { 1832 dev_err(jrdev, "unable to map destination\n"); 1833 dma_unmap_sg(jrdev, req->src, src_nents ? : 1, 1834 DMA_TO_DEVICE); 1835 return ERR_PTR(-ENOMEM); 1836 } 1837 } 1838 1839 /* 1840 * Check if iv can be contiguous with source and destination. 1841 * If so, include it. If not, create scatterlist. 1842 */ 1843 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE); 1844 if (dma_mapping_error(jrdev, iv_dma)) { 1845 dev_err(jrdev, "unable to map IV\n"); 1846 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 1847 0, 0, 0); 1848 return ERR_PTR(-ENOMEM); 1849 } 1850 1851 if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst)) 1852 iv_contig = true; 1853 else 1854 dst_nents = dst_nents ? : 1; 1855 sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) * 1856 sizeof(struct sec4_sg_entry); 1857 1858 /* allocate space for base edesc and hw desc commands, link tables */ 1859 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, 1860 GFP_DMA | flags); 1861 if (!edesc) { 1862 dev_err(jrdev, "could not allocate extended descriptor\n"); 1863 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 1864 iv_dma, ivsize, 0, 0); 1865 return ERR_PTR(-ENOMEM); 1866 } 1867 1868 edesc->src_nents = src_nents; 1869 edesc->dst_nents = dst_nents; 1870 edesc->sec4_sg_bytes = sec4_sg_bytes; 1871 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + 1872 desc_bytes; 1873 1874 sec4_sg_index = 0; 1875 if (src_nents) { 1876 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); 1877 sec4_sg_index += src_nents; 1878 } 1879 1880 if (!iv_contig) { 1881 dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index, 1882 iv_dma, ivsize, 0); 1883 sec4_sg_index += 1; 1884 sg_to_sec4_sg_last(req->dst, dst_nents, 1885 edesc->sec4_sg + sec4_sg_index, 0); 1886 } 1887 1888 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1889 sec4_sg_bytes, DMA_TO_DEVICE); 1890 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1891 dev_err(jrdev, "unable to map S/G table\n"); 1892 caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 1893 iv_dma, ivsize, 0, 0); 1894 kfree(edesc); 1895 return ERR_PTR(-ENOMEM); 1896 } 1897 edesc->iv_dma = iv_dma; 1898 1899 #ifdef DEBUG 1900 print_hex_dump(KERN_ERR, 1901 "ablkcipher sec4_sg@" __stringify(__LINE__) ": ", 1902 DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, 1903 sec4_sg_bytes, 1); 1904 #endif 1905 1906 *iv_contig_out = iv_contig; 1907 return edesc; 1908 } 1909 1910 static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq) 1911 { 1912 struct ablkcipher_request *req = &creq->creq; 1913 struct ablkcipher_edesc *edesc; 1914 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1915 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1916 struct device *jrdev = ctx->jrdev; 1917 bool iv_contig; 1918 u32 *desc; 1919 int ret = 0; 1920 1921 /* allocate extended descriptor */ 1922 edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * 1923 CAAM_CMD_SZ, &iv_contig); 1924 if (IS_ERR(edesc)) 1925 return PTR_ERR(edesc); 1926 1927 /* Create and submit job descriptor*/ 1928 init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma, 1929 edesc, req, iv_contig); 1930 #ifdef DEBUG 1931 print_hex_dump(KERN_ERR, 1932 "ablkcipher jobdesc@" __stringify(__LINE__) ": ", 1933 DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, 1934 desc_bytes(edesc->hw_desc), 1); 1935 #endif 1936 desc = edesc->hw_desc; 1937 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); 1938 1939 if (!ret) { 1940 ret = -EINPROGRESS; 1941 } else { 1942 ablkcipher_unmap(jrdev, edesc, req); 1943 kfree(edesc); 1944 } 1945 1946 return ret; 1947 } 1948 1949 #define template_aead template_u.aead 1950 #define template_ablkcipher template_u.ablkcipher 1951 struct caam_alg_template { 1952 char name[CRYPTO_MAX_ALG_NAME]; 1953 char driver_name[CRYPTO_MAX_ALG_NAME]; 1954 unsigned int blocksize; 1955 u32 type; 1956 union { 1957 struct ablkcipher_alg ablkcipher; 1958 } template_u; 1959 u32 class1_alg_type; 1960 u32 class2_alg_type; 1961 }; 1962 1963 static struct caam_alg_template driver_algs[] = { 1964 /* ablkcipher descriptor */ 1965 { 1966 .name = "cbc(aes)", 1967 .driver_name = "cbc-aes-caam", 1968 .blocksize = AES_BLOCK_SIZE, 1969 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1970 .template_ablkcipher = { 1971 .setkey = ablkcipher_setkey, 1972 .encrypt = ablkcipher_encrypt, 1973 .decrypt = ablkcipher_decrypt, 1974 .givencrypt = ablkcipher_givencrypt, 1975 .geniv = "<built-in>", 1976 .min_keysize = AES_MIN_KEY_SIZE, 1977 .max_keysize = AES_MAX_KEY_SIZE, 1978 .ivsize = AES_BLOCK_SIZE, 1979 }, 1980 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 1981 }, 1982 { 1983 .name = "cbc(des3_ede)", 1984 .driver_name = "cbc-3des-caam", 1985 .blocksize = DES3_EDE_BLOCK_SIZE, 1986 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 1987 .template_ablkcipher = { 1988 .setkey = ablkcipher_setkey, 1989 .encrypt = ablkcipher_encrypt, 1990 .decrypt = ablkcipher_decrypt, 1991 .givencrypt = ablkcipher_givencrypt, 1992 .geniv = "<built-in>", 1993 .min_keysize = DES3_EDE_KEY_SIZE, 1994 .max_keysize = DES3_EDE_KEY_SIZE, 1995 .ivsize = DES3_EDE_BLOCK_SIZE, 1996 }, 1997 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 1998 }, 1999 { 2000 .name = "cbc(des)", 2001 .driver_name = "cbc-des-caam", 2002 .blocksize = DES_BLOCK_SIZE, 2003 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 2004 .template_ablkcipher = { 2005 .setkey = ablkcipher_setkey, 2006 .encrypt = ablkcipher_encrypt, 2007 .decrypt = ablkcipher_decrypt, 2008 .givencrypt = ablkcipher_givencrypt, 2009 .geniv = "<built-in>", 2010 .min_keysize = DES_KEY_SIZE, 2011 .max_keysize = DES_KEY_SIZE, 2012 .ivsize = DES_BLOCK_SIZE, 2013 }, 2014 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2015 }, 2016 { 2017 .name = "ctr(aes)", 2018 .driver_name = "ctr-aes-caam", 2019 .blocksize = 1, 2020 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 2021 .template_ablkcipher = { 2022 .setkey = ablkcipher_setkey, 2023 .encrypt = ablkcipher_encrypt, 2024 .decrypt = ablkcipher_decrypt, 2025 .geniv = "chainiv", 2026 .min_keysize = AES_MIN_KEY_SIZE, 2027 .max_keysize = AES_MAX_KEY_SIZE, 2028 .ivsize = AES_BLOCK_SIZE, 2029 }, 2030 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, 2031 }, 2032 { 2033 .name = "rfc3686(ctr(aes))", 2034 .driver_name = "rfc3686-ctr-aes-caam", 2035 .blocksize = 1, 2036 .type = CRYPTO_ALG_TYPE_GIVCIPHER, 2037 .template_ablkcipher = { 2038 .setkey = ablkcipher_setkey, 2039 .encrypt = ablkcipher_encrypt, 2040 .decrypt = ablkcipher_decrypt, 2041 .givencrypt = ablkcipher_givencrypt, 2042 .geniv = "<built-in>", 2043 .min_keysize = AES_MIN_KEY_SIZE + 2044 CTR_RFC3686_NONCE_SIZE, 2045 .max_keysize = AES_MAX_KEY_SIZE + 2046 CTR_RFC3686_NONCE_SIZE, 2047 .ivsize = CTR_RFC3686_IV_SIZE, 2048 }, 2049 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, 2050 }, 2051 { 2052 .name = "xts(aes)", 2053 .driver_name = "xts-aes-caam", 2054 .blocksize = AES_BLOCK_SIZE, 2055 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 2056 .template_ablkcipher = { 2057 .setkey = xts_ablkcipher_setkey, 2058 .encrypt = ablkcipher_encrypt, 2059 .decrypt = ablkcipher_decrypt, 2060 .geniv = "eseqiv", 2061 .min_keysize = 2 * AES_MIN_KEY_SIZE, 2062 .max_keysize = 2 * AES_MAX_KEY_SIZE, 2063 .ivsize = AES_BLOCK_SIZE, 2064 }, 2065 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, 2066 }, 2067 }; 2068 2069 static struct caam_aead_alg driver_aeads[] = { 2070 { 2071 .aead = { 2072 .base = { 2073 .cra_name = "rfc4106(gcm(aes))", 2074 .cra_driver_name = "rfc4106-gcm-aes-caam", 2075 .cra_blocksize = 1, 2076 }, 2077 .setkey = rfc4106_setkey, 2078 .setauthsize = rfc4106_setauthsize, 2079 .encrypt = ipsec_gcm_encrypt, 2080 .decrypt = ipsec_gcm_decrypt, 2081 .ivsize = 8, 2082 .maxauthsize = AES_BLOCK_SIZE, 2083 }, 2084 .caam = { 2085 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 2086 }, 2087 }, 2088 { 2089 .aead = { 2090 .base = { 2091 .cra_name = "rfc4543(gcm(aes))", 2092 .cra_driver_name = "rfc4543-gcm-aes-caam", 2093 .cra_blocksize = 1, 2094 }, 2095 .setkey = rfc4543_setkey, 2096 .setauthsize = rfc4543_setauthsize, 2097 .encrypt = ipsec_gcm_encrypt, 2098 .decrypt = ipsec_gcm_decrypt, 2099 .ivsize = 8, 2100 .maxauthsize = AES_BLOCK_SIZE, 2101 }, 2102 .caam = { 2103 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 2104 }, 2105 }, 2106 /* Galois Counter Mode */ 2107 { 2108 .aead = { 2109 .base = { 2110 .cra_name = "gcm(aes)", 2111 .cra_driver_name = "gcm-aes-caam", 2112 .cra_blocksize = 1, 2113 }, 2114 .setkey = gcm_setkey, 2115 .setauthsize = gcm_setauthsize, 2116 .encrypt = gcm_encrypt, 2117 .decrypt = gcm_decrypt, 2118 .ivsize = 12, 2119 .maxauthsize = AES_BLOCK_SIZE, 2120 }, 2121 .caam = { 2122 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, 2123 }, 2124 }, 2125 /* single-pass ipsec_esp descriptor */ 2126 { 2127 .aead = { 2128 .base = { 2129 .cra_name = "authenc(hmac(md5)," 2130 "ecb(cipher_null))", 2131 .cra_driver_name = "authenc-hmac-md5-" 2132 "ecb-cipher_null-caam", 2133 .cra_blocksize = NULL_BLOCK_SIZE, 2134 }, 2135 .setkey = aead_setkey, 2136 .setauthsize = aead_setauthsize, 2137 .encrypt = aead_encrypt, 2138 .decrypt = aead_decrypt, 2139 .ivsize = NULL_IV_SIZE, 2140 .maxauthsize = MD5_DIGEST_SIZE, 2141 }, 2142 .caam = { 2143 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2144 OP_ALG_AAI_HMAC_PRECOMP, 2145 }, 2146 }, 2147 { 2148 .aead = { 2149 .base = { 2150 .cra_name = "authenc(hmac(sha1)," 2151 "ecb(cipher_null))", 2152 .cra_driver_name = "authenc-hmac-sha1-" 2153 "ecb-cipher_null-caam", 2154 .cra_blocksize = NULL_BLOCK_SIZE, 2155 }, 2156 .setkey = aead_setkey, 2157 .setauthsize = aead_setauthsize, 2158 .encrypt = aead_encrypt, 2159 .decrypt = aead_decrypt, 2160 .ivsize = NULL_IV_SIZE, 2161 .maxauthsize = SHA1_DIGEST_SIZE, 2162 }, 2163 .caam = { 2164 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2165 OP_ALG_AAI_HMAC_PRECOMP, 2166 }, 2167 }, 2168 { 2169 .aead = { 2170 .base = { 2171 .cra_name = "authenc(hmac(sha224)," 2172 "ecb(cipher_null))", 2173 .cra_driver_name = "authenc-hmac-sha224-" 2174 "ecb-cipher_null-caam", 2175 .cra_blocksize = NULL_BLOCK_SIZE, 2176 }, 2177 .setkey = aead_setkey, 2178 .setauthsize = aead_setauthsize, 2179 .encrypt = aead_encrypt, 2180 .decrypt = aead_decrypt, 2181 .ivsize = NULL_IV_SIZE, 2182 .maxauthsize = SHA224_DIGEST_SIZE, 2183 }, 2184 .caam = { 2185 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2186 OP_ALG_AAI_HMAC_PRECOMP, 2187 }, 2188 }, 2189 { 2190 .aead = { 2191 .base = { 2192 .cra_name = "authenc(hmac(sha256)," 2193 "ecb(cipher_null))", 2194 .cra_driver_name = "authenc-hmac-sha256-" 2195 "ecb-cipher_null-caam", 2196 .cra_blocksize = NULL_BLOCK_SIZE, 2197 }, 2198 .setkey = aead_setkey, 2199 .setauthsize = aead_setauthsize, 2200 .encrypt = aead_encrypt, 2201 .decrypt = aead_decrypt, 2202 .ivsize = NULL_IV_SIZE, 2203 .maxauthsize = SHA256_DIGEST_SIZE, 2204 }, 2205 .caam = { 2206 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2207 OP_ALG_AAI_HMAC_PRECOMP, 2208 }, 2209 }, 2210 { 2211 .aead = { 2212 .base = { 2213 .cra_name = "authenc(hmac(sha384)," 2214 "ecb(cipher_null))", 2215 .cra_driver_name = "authenc-hmac-sha384-" 2216 "ecb-cipher_null-caam", 2217 .cra_blocksize = NULL_BLOCK_SIZE, 2218 }, 2219 .setkey = aead_setkey, 2220 .setauthsize = aead_setauthsize, 2221 .encrypt = aead_encrypt, 2222 .decrypt = aead_decrypt, 2223 .ivsize = NULL_IV_SIZE, 2224 .maxauthsize = SHA384_DIGEST_SIZE, 2225 }, 2226 .caam = { 2227 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2228 OP_ALG_AAI_HMAC_PRECOMP, 2229 }, 2230 }, 2231 { 2232 .aead = { 2233 .base = { 2234 .cra_name = "authenc(hmac(sha512)," 2235 "ecb(cipher_null))", 2236 .cra_driver_name = "authenc-hmac-sha512-" 2237 "ecb-cipher_null-caam", 2238 .cra_blocksize = NULL_BLOCK_SIZE, 2239 }, 2240 .setkey = aead_setkey, 2241 .setauthsize = aead_setauthsize, 2242 .encrypt = aead_encrypt, 2243 .decrypt = aead_decrypt, 2244 .ivsize = NULL_IV_SIZE, 2245 .maxauthsize = SHA512_DIGEST_SIZE, 2246 }, 2247 .caam = { 2248 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2249 OP_ALG_AAI_HMAC_PRECOMP, 2250 }, 2251 }, 2252 { 2253 .aead = { 2254 .base = { 2255 .cra_name = "authenc(hmac(md5),cbc(aes))", 2256 .cra_driver_name = "authenc-hmac-md5-" 2257 "cbc-aes-caam", 2258 .cra_blocksize = AES_BLOCK_SIZE, 2259 }, 2260 .setkey = aead_setkey, 2261 .setauthsize = aead_setauthsize, 2262 .encrypt = aead_encrypt, 2263 .decrypt = aead_decrypt, 2264 .ivsize = AES_BLOCK_SIZE, 2265 .maxauthsize = MD5_DIGEST_SIZE, 2266 }, 2267 .caam = { 2268 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2269 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2270 OP_ALG_AAI_HMAC_PRECOMP, 2271 }, 2272 }, 2273 { 2274 .aead = { 2275 .base = { 2276 .cra_name = "echainiv(authenc(hmac(md5)," 2277 "cbc(aes)))", 2278 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2279 "cbc-aes-caam", 2280 .cra_blocksize = AES_BLOCK_SIZE, 2281 }, 2282 .setkey = aead_setkey, 2283 .setauthsize = aead_setauthsize, 2284 .encrypt = aead_encrypt, 2285 .decrypt = aead_decrypt, 2286 .ivsize = AES_BLOCK_SIZE, 2287 .maxauthsize = MD5_DIGEST_SIZE, 2288 }, 2289 .caam = { 2290 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2291 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2292 OP_ALG_AAI_HMAC_PRECOMP, 2293 .geniv = true, 2294 }, 2295 }, 2296 { 2297 .aead = { 2298 .base = { 2299 .cra_name = "authenc(hmac(sha1),cbc(aes))", 2300 .cra_driver_name = "authenc-hmac-sha1-" 2301 "cbc-aes-caam", 2302 .cra_blocksize = AES_BLOCK_SIZE, 2303 }, 2304 .setkey = aead_setkey, 2305 .setauthsize = aead_setauthsize, 2306 .encrypt = aead_encrypt, 2307 .decrypt = aead_decrypt, 2308 .ivsize = AES_BLOCK_SIZE, 2309 .maxauthsize = SHA1_DIGEST_SIZE, 2310 }, 2311 .caam = { 2312 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2313 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2314 OP_ALG_AAI_HMAC_PRECOMP, 2315 }, 2316 }, 2317 { 2318 .aead = { 2319 .base = { 2320 .cra_name = "echainiv(authenc(hmac(sha1)," 2321 "cbc(aes)))", 2322 .cra_driver_name = "echainiv-authenc-" 2323 "hmac-sha1-cbc-aes-caam", 2324 .cra_blocksize = AES_BLOCK_SIZE, 2325 }, 2326 .setkey = aead_setkey, 2327 .setauthsize = aead_setauthsize, 2328 .encrypt = aead_encrypt, 2329 .decrypt = aead_decrypt, 2330 .ivsize = AES_BLOCK_SIZE, 2331 .maxauthsize = SHA1_DIGEST_SIZE, 2332 }, 2333 .caam = { 2334 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2335 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2336 OP_ALG_AAI_HMAC_PRECOMP, 2337 .geniv = true, 2338 }, 2339 }, 2340 { 2341 .aead = { 2342 .base = { 2343 .cra_name = "authenc(hmac(sha224),cbc(aes))", 2344 .cra_driver_name = "authenc-hmac-sha224-" 2345 "cbc-aes-caam", 2346 .cra_blocksize = AES_BLOCK_SIZE, 2347 }, 2348 .setkey = aead_setkey, 2349 .setauthsize = aead_setauthsize, 2350 .encrypt = aead_encrypt, 2351 .decrypt = aead_decrypt, 2352 .ivsize = AES_BLOCK_SIZE, 2353 .maxauthsize = SHA224_DIGEST_SIZE, 2354 }, 2355 .caam = { 2356 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2357 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2358 OP_ALG_AAI_HMAC_PRECOMP, 2359 }, 2360 }, 2361 { 2362 .aead = { 2363 .base = { 2364 .cra_name = "echainiv(authenc(hmac(sha224)," 2365 "cbc(aes)))", 2366 .cra_driver_name = "echainiv-authenc-" 2367 "hmac-sha224-cbc-aes-caam", 2368 .cra_blocksize = AES_BLOCK_SIZE, 2369 }, 2370 .setkey = aead_setkey, 2371 .setauthsize = aead_setauthsize, 2372 .encrypt = aead_encrypt, 2373 .decrypt = aead_decrypt, 2374 .ivsize = AES_BLOCK_SIZE, 2375 .maxauthsize = SHA224_DIGEST_SIZE, 2376 }, 2377 .caam = { 2378 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2379 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2380 OP_ALG_AAI_HMAC_PRECOMP, 2381 .geniv = true, 2382 }, 2383 }, 2384 { 2385 .aead = { 2386 .base = { 2387 .cra_name = "authenc(hmac(sha256),cbc(aes))", 2388 .cra_driver_name = "authenc-hmac-sha256-" 2389 "cbc-aes-caam", 2390 .cra_blocksize = AES_BLOCK_SIZE, 2391 }, 2392 .setkey = aead_setkey, 2393 .setauthsize = aead_setauthsize, 2394 .encrypt = aead_encrypt, 2395 .decrypt = aead_decrypt, 2396 .ivsize = AES_BLOCK_SIZE, 2397 .maxauthsize = SHA256_DIGEST_SIZE, 2398 }, 2399 .caam = { 2400 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2401 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2402 OP_ALG_AAI_HMAC_PRECOMP, 2403 }, 2404 }, 2405 { 2406 .aead = { 2407 .base = { 2408 .cra_name = "echainiv(authenc(hmac(sha256)," 2409 "cbc(aes)))", 2410 .cra_driver_name = "echainiv-authenc-" 2411 "hmac-sha256-cbc-aes-caam", 2412 .cra_blocksize = AES_BLOCK_SIZE, 2413 }, 2414 .setkey = aead_setkey, 2415 .setauthsize = aead_setauthsize, 2416 .encrypt = aead_encrypt, 2417 .decrypt = aead_decrypt, 2418 .ivsize = AES_BLOCK_SIZE, 2419 .maxauthsize = SHA256_DIGEST_SIZE, 2420 }, 2421 .caam = { 2422 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2423 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2424 OP_ALG_AAI_HMAC_PRECOMP, 2425 .geniv = true, 2426 }, 2427 }, 2428 { 2429 .aead = { 2430 .base = { 2431 .cra_name = "authenc(hmac(sha384),cbc(aes))", 2432 .cra_driver_name = "authenc-hmac-sha384-" 2433 "cbc-aes-caam", 2434 .cra_blocksize = AES_BLOCK_SIZE, 2435 }, 2436 .setkey = aead_setkey, 2437 .setauthsize = aead_setauthsize, 2438 .encrypt = aead_encrypt, 2439 .decrypt = aead_decrypt, 2440 .ivsize = AES_BLOCK_SIZE, 2441 .maxauthsize = SHA384_DIGEST_SIZE, 2442 }, 2443 .caam = { 2444 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2445 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2446 OP_ALG_AAI_HMAC_PRECOMP, 2447 }, 2448 }, 2449 { 2450 .aead = { 2451 .base = { 2452 .cra_name = "echainiv(authenc(hmac(sha384)," 2453 "cbc(aes)))", 2454 .cra_driver_name = "echainiv-authenc-" 2455 "hmac-sha384-cbc-aes-caam", 2456 .cra_blocksize = AES_BLOCK_SIZE, 2457 }, 2458 .setkey = aead_setkey, 2459 .setauthsize = aead_setauthsize, 2460 .encrypt = aead_encrypt, 2461 .decrypt = aead_decrypt, 2462 .ivsize = AES_BLOCK_SIZE, 2463 .maxauthsize = SHA384_DIGEST_SIZE, 2464 }, 2465 .caam = { 2466 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2467 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2468 OP_ALG_AAI_HMAC_PRECOMP, 2469 .geniv = true, 2470 }, 2471 }, 2472 { 2473 .aead = { 2474 .base = { 2475 .cra_name = "authenc(hmac(sha512),cbc(aes))", 2476 .cra_driver_name = "authenc-hmac-sha512-" 2477 "cbc-aes-caam", 2478 .cra_blocksize = AES_BLOCK_SIZE, 2479 }, 2480 .setkey = aead_setkey, 2481 .setauthsize = aead_setauthsize, 2482 .encrypt = aead_encrypt, 2483 .decrypt = aead_decrypt, 2484 .ivsize = AES_BLOCK_SIZE, 2485 .maxauthsize = SHA512_DIGEST_SIZE, 2486 }, 2487 .caam = { 2488 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2489 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2490 OP_ALG_AAI_HMAC_PRECOMP, 2491 }, 2492 }, 2493 { 2494 .aead = { 2495 .base = { 2496 .cra_name = "echainiv(authenc(hmac(sha512)," 2497 "cbc(aes)))", 2498 .cra_driver_name = "echainiv-authenc-" 2499 "hmac-sha512-cbc-aes-caam", 2500 .cra_blocksize = AES_BLOCK_SIZE, 2501 }, 2502 .setkey = aead_setkey, 2503 .setauthsize = aead_setauthsize, 2504 .encrypt = aead_encrypt, 2505 .decrypt = aead_decrypt, 2506 .ivsize = AES_BLOCK_SIZE, 2507 .maxauthsize = SHA512_DIGEST_SIZE, 2508 }, 2509 .caam = { 2510 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 2511 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2512 OP_ALG_AAI_HMAC_PRECOMP, 2513 .geniv = true, 2514 }, 2515 }, 2516 { 2517 .aead = { 2518 .base = { 2519 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 2520 .cra_driver_name = "authenc-hmac-md5-" 2521 "cbc-des3_ede-caam", 2522 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2523 }, 2524 .setkey = aead_setkey, 2525 .setauthsize = aead_setauthsize, 2526 .encrypt = aead_encrypt, 2527 .decrypt = aead_decrypt, 2528 .ivsize = DES3_EDE_BLOCK_SIZE, 2529 .maxauthsize = MD5_DIGEST_SIZE, 2530 }, 2531 .caam = { 2532 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2533 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2534 OP_ALG_AAI_HMAC_PRECOMP, 2535 } 2536 }, 2537 { 2538 .aead = { 2539 .base = { 2540 .cra_name = "echainiv(authenc(hmac(md5)," 2541 "cbc(des3_ede)))", 2542 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2543 "cbc-des3_ede-caam", 2544 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2545 }, 2546 .setkey = aead_setkey, 2547 .setauthsize = aead_setauthsize, 2548 .encrypt = aead_encrypt, 2549 .decrypt = aead_decrypt, 2550 .ivsize = DES3_EDE_BLOCK_SIZE, 2551 .maxauthsize = MD5_DIGEST_SIZE, 2552 }, 2553 .caam = { 2554 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2555 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2556 OP_ALG_AAI_HMAC_PRECOMP, 2557 .geniv = true, 2558 } 2559 }, 2560 { 2561 .aead = { 2562 .base = { 2563 .cra_name = "authenc(hmac(sha1)," 2564 "cbc(des3_ede))", 2565 .cra_driver_name = "authenc-hmac-sha1-" 2566 "cbc-des3_ede-caam", 2567 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2568 }, 2569 .setkey = aead_setkey, 2570 .setauthsize = aead_setauthsize, 2571 .encrypt = aead_encrypt, 2572 .decrypt = aead_decrypt, 2573 .ivsize = DES3_EDE_BLOCK_SIZE, 2574 .maxauthsize = SHA1_DIGEST_SIZE, 2575 }, 2576 .caam = { 2577 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2578 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2579 OP_ALG_AAI_HMAC_PRECOMP, 2580 }, 2581 }, 2582 { 2583 .aead = { 2584 .base = { 2585 .cra_name = "echainiv(authenc(hmac(sha1)," 2586 "cbc(des3_ede)))", 2587 .cra_driver_name = "echainiv-authenc-" 2588 "hmac-sha1-" 2589 "cbc-des3_ede-caam", 2590 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2591 }, 2592 .setkey = aead_setkey, 2593 .setauthsize = aead_setauthsize, 2594 .encrypt = aead_encrypt, 2595 .decrypt = aead_decrypt, 2596 .ivsize = DES3_EDE_BLOCK_SIZE, 2597 .maxauthsize = SHA1_DIGEST_SIZE, 2598 }, 2599 .caam = { 2600 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2601 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2602 OP_ALG_AAI_HMAC_PRECOMP, 2603 .geniv = true, 2604 }, 2605 }, 2606 { 2607 .aead = { 2608 .base = { 2609 .cra_name = "authenc(hmac(sha224)," 2610 "cbc(des3_ede))", 2611 .cra_driver_name = "authenc-hmac-sha224-" 2612 "cbc-des3_ede-caam", 2613 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2614 }, 2615 .setkey = aead_setkey, 2616 .setauthsize = aead_setauthsize, 2617 .encrypt = aead_encrypt, 2618 .decrypt = aead_decrypt, 2619 .ivsize = DES3_EDE_BLOCK_SIZE, 2620 .maxauthsize = SHA224_DIGEST_SIZE, 2621 }, 2622 .caam = { 2623 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2624 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2625 OP_ALG_AAI_HMAC_PRECOMP, 2626 }, 2627 }, 2628 { 2629 .aead = { 2630 .base = { 2631 .cra_name = "echainiv(authenc(hmac(sha224)," 2632 "cbc(des3_ede)))", 2633 .cra_driver_name = "echainiv-authenc-" 2634 "hmac-sha224-" 2635 "cbc-des3_ede-caam", 2636 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2637 }, 2638 .setkey = aead_setkey, 2639 .setauthsize = aead_setauthsize, 2640 .encrypt = aead_encrypt, 2641 .decrypt = aead_decrypt, 2642 .ivsize = DES3_EDE_BLOCK_SIZE, 2643 .maxauthsize = SHA224_DIGEST_SIZE, 2644 }, 2645 .caam = { 2646 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2647 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2648 OP_ALG_AAI_HMAC_PRECOMP, 2649 .geniv = true, 2650 }, 2651 }, 2652 { 2653 .aead = { 2654 .base = { 2655 .cra_name = "authenc(hmac(sha256)," 2656 "cbc(des3_ede))", 2657 .cra_driver_name = "authenc-hmac-sha256-" 2658 "cbc-des3_ede-caam", 2659 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2660 }, 2661 .setkey = aead_setkey, 2662 .setauthsize = aead_setauthsize, 2663 .encrypt = aead_encrypt, 2664 .decrypt = aead_decrypt, 2665 .ivsize = DES3_EDE_BLOCK_SIZE, 2666 .maxauthsize = SHA256_DIGEST_SIZE, 2667 }, 2668 .caam = { 2669 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2670 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2671 OP_ALG_AAI_HMAC_PRECOMP, 2672 }, 2673 }, 2674 { 2675 .aead = { 2676 .base = { 2677 .cra_name = "echainiv(authenc(hmac(sha256)," 2678 "cbc(des3_ede)))", 2679 .cra_driver_name = "echainiv-authenc-" 2680 "hmac-sha256-" 2681 "cbc-des3_ede-caam", 2682 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2683 }, 2684 .setkey = aead_setkey, 2685 .setauthsize = aead_setauthsize, 2686 .encrypt = aead_encrypt, 2687 .decrypt = aead_decrypt, 2688 .ivsize = DES3_EDE_BLOCK_SIZE, 2689 .maxauthsize = SHA256_DIGEST_SIZE, 2690 }, 2691 .caam = { 2692 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2693 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2694 OP_ALG_AAI_HMAC_PRECOMP, 2695 .geniv = true, 2696 }, 2697 }, 2698 { 2699 .aead = { 2700 .base = { 2701 .cra_name = "authenc(hmac(sha384)," 2702 "cbc(des3_ede))", 2703 .cra_driver_name = "authenc-hmac-sha384-" 2704 "cbc-des3_ede-caam", 2705 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2706 }, 2707 .setkey = aead_setkey, 2708 .setauthsize = aead_setauthsize, 2709 .encrypt = aead_encrypt, 2710 .decrypt = aead_decrypt, 2711 .ivsize = DES3_EDE_BLOCK_SIZE, 2712 .maxauthsize = SHA384_DIGEST_SIZE, 2713 }, 2714 .caam = { 2715 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2716 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2717 OP_ALG_AAI_HMAC_PRECOMP, 2718 }, 2719 }, 2720 { 2721 .aead = { 2722 .base = { 2723 .cra_name = "echainiv(authenc(hmac(sha384)," 2724 "cbc(des3_ede)))", 2725 .cra_driver_name = "echainiv-authenc-" 2726 "hmac-sha384-" 2727 "cbc-des3_ede-caam", 2728 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2729 }, 2730 .setkey = aead_setkey, 2731 .setauthsize = aead_setauthsize, 2732 .encrypt = aead_encrypt, 2733 .decrypt = aead_decrypt, 2734 .ivsize = DES3_EDE_BLOCK_SIZE, 2735 .maxauthsize = SHA384_DIGEST_SIZE, 2736 }, 2737 .caam = { 2738 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2739 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2740 OP_ALG_AAI_HMAC_PRECOMP, 2741 .geniv = true, 2742 }, 2743 }, 2744 { 2745 .aead = { 2746 .base = { 2747 .cra_name = "authenc(hmac(sha512)," 2748 "cbc(des3_ede))", 2749 .cra_driver_name = "authenc-hmac-sha512-" 2750 "cbc-des3_ede-caam", 2751 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2752 }, 2753 .setkey = aead_setkey, 2754 .setauthsize = aead_setauthsize, 2755 .encrypt = aead_encrypt, 2756 .decrypt = aead_decrypt, 2757 .ivsize = DES3_EDE_BLOCK_SIZE, 2758 .maxauthsize = SHA512_DIGEST_SIZE, 2759 }, 2760 .caam = { 2761 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2762 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2763 OP_ALG_AAI_HMAC_PRECOMP, 2764 }, 2765 }, 2766 { 2767 .aead = { 2768 .base = { 2769 .cra_name = "echainiv(authenc(hmac(sha512)," 2770 "cbc(des3_ede)))", 2771 .cra_driver_name = "echainiv-authenc-" 2772 "hmac-sha512-" 2773 "cbc-des3_ede-caam", 2774 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2775 }, 2776 .setkey = aead_setkey, 2777 .setauthsize = aead_setauthsize, 2778 .encrypt = aead_encrypt, 2779 .decrypt = aead_decrypt, 2780 .ivsize = DES3_EDE_BLOCK_SIZE, 2781 .maxauthsize = SHA512_DIGEST_SIZE, 2782 }, 2783 .caam = { 2784 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 2785 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 2786 OP_ALG_AAI_HMAC_PRECOMP, 2787 .geniv = true, 2788 }, 2789 }, 2790 { 2791 .aead = { 2792 .base = { 2793 .cra_name = "authenc(hmac(md5),cbc(des))", 2794 .cra_driver_name = "authenc-hmac-md5-" 2795 "cbc-des-caam", 2796 .cra_blocksize = DES_BLOCK_SIZE, 2797 }, 2798 .setkey = aead_setkey, 2799 .setauthsize = aead_setauthsize, 2800 .encrypt = aead_encrypt, 2801 .decrypt = aead_decrypt, 2802 .ivsize = DES_BLOCK_SIZE, 2803 .maxauthsize = MD5_DIGEST_SIZE, 2804 }, 2805 .caam = { 2806 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2807 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2808 OP_ALG_AAI_HMAC_PRECOMP, 2809 }, 2810 }, 2811 { 2812 .aead = { 2813 .base = { 2814 .cra_name = "echainiv(authenc(hmac(md5)," 2815 "cbc(des)))", 2816 .cra_driver_name = "echainiv-authenc-hmac-md5-" 2817 "cbc-des-caam", 2818 .cra_blocksize = DES_BLOCK_SIZE, 2819 }, 2820 .setkey = aead_setkey, 2821 .setauthsize = aead_setauthsize, 2822 .encrypt = aead_encrypt, 2823 .decrypt = aead_decrypt, 2824 .ivsize = DES_BLOCK_SIZE, 2825 .maxauthsize = MD5_DIGEST_SIZE, 2826 }, 2827 .caam = { 2828 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2829 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 2830 OP_ALG_AAI_HMAC_PRECOMP, 2831 .geniv = true, 2832 }, 2833 }, 2834 { 2835 .aead = { 2836 .base = { 2837 .cra_name = "authenc(hmac(sha1),cbc(des))", 2838 .cra_driver_name = "authenc-hmac-sha1-" 2839 "cbc-des-caam", 2840 .cra_blocksize = DES_BLOCK_SIZE, 2841 }, 2842 .setkey = aead_setkey, 2843 .setauthsize = aead_setauthsize, 2844 .encrypt = aead_encrypt, 2845 .decrypt = aead_decrypt, 2846 .ivsize = DES_BLOCK_SIZE, 2847 .maxauthsize = SHA1_DIGEST_SIZE, 2848 }, 2849 .caam = { 2850 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2851 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2852 OP_ALG_AAI_HMAC_PRECOMP, 2853 }, 2854 }, 2855 { 2856 .aead = { 2857 .base = { 2858 .cra_name = "echainiv(authenc(hmac(sha1)," 2859 "cbc(des)))", 2860 .cra_driver_name = "echainiv-authenc-" 2861 "hmac-sha1-cbc-des-caam", 2862 .cra_blocksize = DES_BLOCK_SIZE, 2863 }, 2864 .setkey = aead_setkey, 2865 .setauthsize = aead_setauthsize, 2866 .encrypt = aead_encrypt, 2867 .decrypt = aead_decrypt, 2868 .ivsize = DES_BLOCK_SIZE, 2869 .maxauthsize = SHA1_DIGEST_SIZE, 2870 }, 2871 .caam = { 2872 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2873 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 2874 OP_ALG_AAI_HMAC_PRECOMP, 2875 .geniv = true, 2876 }, 2877 }, 2878 { 2879 .aead = { 2880 .base = { 2881 .cra_name = "authenc(hmac(sha224),cbc(des))", 2882 .cra_driver_name = "authenc-hmac-sha224-" 2883 "cbc-des-caam", 2884 .cra_blocksize = DES_BLOCK_SIZE, 2885 }, 2886 .setkey = aead_setkey, 2887 .setauthsize = aead_setauthsize, 2888 .encrypt = aead_encrypt, 2889 .decrypt = aead_decrypt, 2890 .ivsize = DES_BLOCK_SIZE, 2891 .maxauthsize = SHA224_DIGEST_SIZE, 2892 }, 2893 .caam = { 2894 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2895 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2896 OP_ALG_AAI_HMAC_PRECOMP, 2897 }, 2898 }, 2899 { 2900 .aead = { 2901 .base = { 2902 .cra_name = "echainiv(authenc(hmac(sha224)," 2903 "cbc(des)))", 2904 .cra_driver_name = "echainiv-authenc-" 2905 "hmac-sha224-cbc-des-caam", 2906 .cra_blocksize = DES_BLOCK_SIZE, 2907 }, 2908 .setkey = aead_setkey, 2909 .setauthsize = aead_setauthsize, 2910 .encrypt = aead_encrypt, 2911 .decrypt = aead_decrypt, 2912 .ivsize = DES_BLOCK_SIZE, 2913 .maxauthsize = SHA224_DIGEST_SIZE, 2914 }, 2915 .caam = { 2916 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2917 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 2918 OP_ALG_AAI_HMAC_PRECOMP, 2919 .geniv = true, 2920 }, 2921 }, 2922 { 2923 .aead = { 2924 .base = { 2925 .cra_name = "authenc(hmac(sha256),cbc(des))", 2926 .cra_driver_name = "authenc-hmac-sha256-" 2927 "cbc-des-caam", 2928 .cra_blocksize = DES_BLOCK_SIZE, 2929 }, 2930 .setkey = aead_setkey, 2931 .setauthsize = aead_setauthsize, 2932 .encrypt = aead_encrypt, 2933 .decrypt = aead_decrypt, 2934 .ivsize = DES_BLOCK_SIZE, 2935 .maxauthsize = SHA256_DIGEST_SIZE, 2936 }, 2937 .caam = { 2938 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2939 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2940 OP_ALG_AAI_HMAC_PRECOMP, 2941 }, 2942 }, 2943 { 2944 .aead = { 2945 .base = { 2946 .cra_name = "echainiv(authenc(hmac(sha256)," 2947 "cbc(des)))", 2948 .cra_driver_name = "echainiv-authenc-" 2949 "hmac-sha256-cbc-des-caam", 2950 .cra_blocksize = DES_BLOCK_SIZE, 2951 }, 2952 .setkey = aead_setkey, 2953 .setauthsize = aead_setauthsize, 2954 .encrypt = aead_encrypt, 2955 .decrypt = aead_decrypt, 2956 .ivsize = DES_BLOCK_SIZE, 2957 .maxauthsize = SHA256_DIGEST_SIZE, 2958 }, 2959 .caam = { 2960 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2961 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 2962 OP_ALG_AAI_HMAC_PRECOMP, 2963 .geniv = true, 2964 }, 2965 }, 2966 { 2967 .aead = { 2968 .base = { 2969 .cra_name = "authenc(hmac(sha384),cbc(des))", 2970 .cra_driver_name = "authenc-hmac-sha384-" 2971 "cbc-des-caam", 2972 .cra_blocksize = DES_BLOCK_SIZE, 2973 }, 2974 .setkey = aead_setkey, 2975 .setauthsize = aead_setauthsize, 2976 .encrypt = aead_encrypt, 2977 .decrypt = aead_decrypt, 2978 .ivsize = DES_BLOCK_SIZE, 2979 .maxauthsize = SHA384_DIGEST_SIZE, 2980 }, 2981 .caam = { 2982 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 2983 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 2984 OP_ALG_AAI_HMAC_PRECOMP, 2985 }, 2986 }, 2987 { 2988 .aead = { 2989 .base = { 2990 .cra_name = "echainiv(authenc(hmac(sha384)," 2991 "cbc(des)))", 2992 .cra_driver_name = "echainiv-authenc-" 2993 "hmac-sha384-cbc-des-caam", 2994 .cra_blocksize = DES_BLOCK_SIZE, 2995 }, 2996 .setkey = aead_setkey, 2997 .setauthsize = aead_setauthsize, 2998 .encrypt = aead_encrypt, 2999 .decrypt = aead_decrypt, 3000 .ivsize = DES_BLOCK_SIZE, 3001 .maxauthsize = SHA384_DIGEST_SIZE, 3002 }, 3003 .caam = { 3004 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 3005 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 3006 OP_ALG_AAI_HMAC_PRECOMP, 3007 .geniv = true, 3008 }, 3009 }, 3010 { 3011 .aead = { 3012 .base = { 3013 .cra_name = "authenc(hmac(sha512),cbc(des))", 3014 .cra_driver_name = "authenc-hmac-sha512-" 3015 "cbc-des-caam", 3016 .cra_blocksize = DES_BLOCK_SIZE, 3017 }, 3018 .setkey = aead_setkey, 3019 .setauthsize = aead_setauthsize, 3020 .encrypt = aead_encrypt, 3021 .decrypt = aead_decrypt, 3022 .ivsize = DES_BLOCK_SIZE, 3023 .maxauthsize = SHA512_DIGEST_SIZE, 3024 }, 3025 .caam = { 3026 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 3027 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 3028 OP_ALG_AAI_HMAC_PRECOMP, 3029 }, 3030 }, 3031 { 3032 .aead = { 3033 .base = { 3034 .cra_name = "echainiv(authenc(hmac(sha512)," 3035 "cbc(des)))", 3036 .cra_driver_name = "echainiv-authenc-" 3037 "hmac-sha512-cbc-des-caam", 3038 .cra_blocksize = DES_BLOCK_SIZE, 3039 }, 3040 .setkey = aead_setkey, 3041 .setauthsize = aead_setauthsize, 3042 .encrypt = aead_encrypt, 3043 .decrypt = aead_decrypt, 3044 .ivsize = DES_BLOCK_SIZE, 3045 .maxauthsize = SHA512_DIGEST_SIZE, 3046 }, 3047 .caam = { 3048 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, 3049 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 3050 OP_ALG_AAI_HMAC_PRECOMP, 3051 .geniv = true, 3052 }, 3053 }, 3054 { 3055 .aead = { 3056 .base = { 3057 .cra_name = "authenc(hmac(md5)," 3058 "rfc3686(ctr(aes)))", 3059 .cra_driver_name = "authenc-hmac-md5-" 3060 "rfc3686-ctr-aes-caam", 3061 .cra_blocksize = 1, 3062 }, 3063 .setkey = aead_setkey, 3064 .setauthsize = aead_setauthsize, 3065 .encrypt = aead_encrypt, 3066 .decrypt = aead_decrypt, 3067 .ivsize = CTR_RFC3686_IV_SIZE, 3068 .maxauthsize = MD5_DIGEST_SIZE, 3069 }, 3070 .caam = { 3071 .class1_alg_type = OP_ALG_ALGSEL_AES | 3072 OP_ALG_AAI_CTR_MOD128, 3073 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 3074 OP_ALG_AAI_HMAC_PRECOMP, 3075 .rfc3686 = true, 3076 }, 3077 }, 3078 { 3079 .aead = { 3080 .base = { 3081 .cra_name = "seqiv(authenc(" 3082 "hmac(md5),rfc3686(ctr(aes))))", 3083 .cra_driver_name = "seqiv-authenc-hmac-md5-" 3084 "rfc3686-ctr-aes-caam", 3085 .cra_blocksize = 1, 3086 }, 3087 .setkey = aead_setkey, 3088 .setauthsize = aead_setauthsize, 3089 .encrypt = aead_encrypt, 3090 .decrypt = aead_decrypt, 3091 .ivsize = CTR_RFC3686_IV_SIZE, 3092 .maxauthsize = MD5_DIGEST_SIZE, 3093 }, 3094 .caam = { 3095 .class1_alg_type = OP_ALG_ALGSEL_AES | 3096 OP_ALG_AAI_CTR_MOD128, 3097 .class2_alg_type = OP_ALG_ALGSEL_MD5 | 3098 OP_ALG_AAI_HMAC_PRECOMP, 3099 .rfc3686 = true, 3100 .geniv = true, 3101 }, 3102 }, 3103 { 3104 .aead = { 3105 .base = { 3106 .cra_name = "authenc(hmac(sha1)," 3107 "rfc3686(ctr(aes)))", 3108 .cra_driver_name = "authenc-hmac-sha1-" 3109 "rfc3686-ctr-aes-caam", 3110 .cra_blocksize = 1, 3111 }, 3112 .setkey = aead_setkey, 3113 .setauthsize = aead_setauthsize, 3114 .encrypt = aead_encrypt, 3115 .decrypt = aead_decrypt, 3116 .ivsize = CTR_RFC3686_IV_SIZE, 3117 .maxauthsize = SHA1_DIGEST_SIZE, 3118 }, 3119 .caam = { 3120 .class1_alg_type = OP_ALG_ALGSEL_AES | 3121 OP_ALG_AAI_CTR_MOD128, 3122 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 3123 OP_ALG_AAI_HMAC_PRECOMP, 3124 .rfc3686 = true, 3125 }, 3126 }, 3127 { 3128 .aead = { 3129 .base = { 3130 .cra_name = "seqiv(authenc(" 3131 "hmac(sha1),rfc3686(ctr(aes))))", 3132 .cra_driver_name = "seqiv-authenc-hmac-sha1-" 3133 "rfc3686-ctr-aes-caam", 3134 .cra_blocksize = 1, 3135 }, 3136 .setkey = aead_setkey, 3137 .setauthsize = aead_setauthsize, 3138 .encrypt = aead_encrypt, 3139 .decrypt = aead_decrypt, 3140 .ivsize = CTR_RFC3686_IV_SIZE, 3141 .maxauthsize = SHA1_DIGEST_SIZE, 3142 }, 3143 .caam = { 3144 .class1_alg_type = OP_ALG_ALGSEL_AES | 3145 OP_ALG_AAI_CTR_MOD128, 3146 .class2_alg_type = OP_ALG_ALGSEL_SHA1 | 3147 OP_ALG_AAI_HMAC_PRECOMP, 3148 .rfc3686 = true, 3149 .geniv = true, 3150 }, 3151 }, 3152 { 3153 .aead = { 3154 .base = { 3155 .cra_name = "authenc(hmac(sha224)," 3156 "rfc3686(ctr(aes)))", 3157 .cra_driver_name = "authenc-hmac-sha224-" 3158 "rfc3686-ctr-aes-caam", 3159 .cra_blocksize = 1, 3160 }, 3161 .setkey = aead_setkey, 3162 .setauthsize = aead_setauthsize, 3163 .encrypt = aead_encrypt, 3164 .decrypt = aead_decrypt, 3165 .ivsize = CTR_RFC3686_IV_SIZE, 3166 .maxauthsize = SHA224_DIGEST_SIZE, 3167 }, 3168 .caam = { 3169 .class1_alg_type = OP_ALG_ALGSEL_AES | 3170 OP_ALG_AAI_CTR_MOD128, 3171 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 3172 OP_ALG_AAI_HMAC_PRECOMP, 3173 .rfc3686 = true, 3174 }, 3175 }, 3176 { 3177 .aead = { 3178 .base = { 3179 .cra_name = "seqiv(authenc(" 3180 "hmac(sha224),rfc3686(ctr(aes))))", 3181 .cra_driver_name = "seqiv-authenc-hmac-sha224-" 3182 "rfc3686-ctr-aes-caam", 3183 .cra_blocksize = 1, 3184 }, 3185 .setkey = aead_setkey, 3186 .setauthsize = aead_setauthsize, 3187 .encrypt = aead_encrypt, 3188 .decrypt = aead_decrypt, 3189 .ivsize = CTR_RFC3686_IV_SIZE, 3190 .maxauthsize = SHA224_DIGEST_SIZE, 3191 }, 3192 .caam = { 3193 .class1_alg_type = OP_ALG_ALGSEL_AES | 3194 OP_ALG_AAI_CTR_MOD128, 3195 .class2_alg_type = OP_ALG_ALGSEL_SHA224 | 3196 OP_ALG_AAI_HMAC_PRECOMP, 3197 .rfc3686 = true, 3198 .geniv = true, 3199 }, 3200 }, 3201 { 3202 .aead = { 3203 .base = { 3204 .cra_name = "authenc(hmac(sha256)," 3205 "rfc3686(ctr(aes)))", 3206 .cra_driver_name = "authenc-hmac-sha256-" 3207 "rfc3686-ctr-aes-caam", 3208 .cra_blocksize = 1, 3209 }, 3210 .setkey = aead_setkey, 3211 .setauthsize = aead_setauthsize, 3212 .encrypt = aead_encrypt, 3213 .decrypt = aead_decrypt, 3214 .ivsize = CTR_RFC3686_IV_SIZE, 3215 .maxauthsize = SHA256_DIGEST_SIZE, 3216 }, 3217 .caam = { 3218 .class1_alg_type = OP_ALG_ALGSEL_AES | 3219 OP_ALG_AAI_CTR_MOD128, 3220 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 3221 OP_ALG_AAI_HMAC_PRECOMP, 3222 .rfc3686 = true, 3223 }, 3224 }, 3225 { 3226 .aead = { 3227 .base = { 3228 .cra_name = "seqiv(authenc(hmac(sha256)," 3229 "rfc3686(ctr(aes))))", 3230 .cra_driver_name = "seqiv-authenc-hmac-sha256-" 3231 "rfc3686-ctr-aes-caam", 3232 .cra_blocksize = 1, 3233 }, 3234 .setkey = aead_setkey, 3235 .setauthsize = aead_setauthsize, 3236 .encrypt = aead_encrypt, 3237 .decrypt = aead_decrypt, 3238 .ivsize = CTR_RFC3686_IV_SIZE, 3239 .maxauthsize = SHA256_DIGEST_SIZE, 3240 }, 3241 .caam = { 3242 .class1_alg_type = OP_ALG_ALGSEL_AES | 3243 OP_ALG_AAI_CTR_MOD128, 3244 .class2_alg_type = OP_ALG_ALGSEL_SHA256 | 3245 OP_ALG_AAI_HMAC_PRECOMP, 3246 .rfc3686 = true, 3247 .geniv = true, 3248 }, 3249 }, 3250 { 3251 .aead = { 3252 .base = { 3253 .cra_name = "authenc(hmac(sha384)," 3254 "rfc3686(ctr(aes)))", 3255 .cra_driver_name = "authenc-hmac-sha384-" 3256 "rfc3686-ctr-aes-caam", 3257 .cra_blocksize = 1, 3258 }, 3259 .setkey = aead_setkey, 3260 .setauthsize = aead_setauthsize, 3261 .encrypt = aead_encrypt, 3262 .decrypt = aead_decrypt, 3263 .ivsize = CTR_RFC3686_IV_SIZE, 3264 .maxauthsize = SHA384_DIGEST_SIZE, 3265 }, 3266 .caam = { 3267 .class1_alg_type = OP_ALG_ALGSEL_AES | 3268 OP_ALG_AAI_CTR_MOD128, 3269 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 3270 OP_ALG_AAI_HMAC_PRECOMP, 3271 .rfc3686 = true, 3272 }, 3273 }, 3274 { 3275 .aead = { 3276 .base = { 3277 .cra_name = "seqiv(authenc(hmac(sha384)," 3278 "rfc3686(ctr(aes))))", 3279 .cra_driver_name = "seqiv-authenc-hmac-sha384-" 3280 "rfc3686-ctr-aes-caam", 3281 .cra_blocksize = 1, 3282 }, 3283 .setkey = aead_setkey, 3284 .setauthsize = aead_setauthsize, 3285 .encrypt = aead_encrypt, 3286 .decrypt = aead_decrypt, 3287 .ivsize = CTR_RFC3686_IV_SIZE, 3288 .maxauthsize = SHA384_DIGEST_SIZE, 3289 }, 3290 .caam = { 3291 .class1_alg_type = OP_ALG_ALGSEL_AES | 3292 OP_ALG_AAI_CTR_MOD128, 3293 .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 3294 OP_ALG_AAI_HMAC_PRECOMP, 3295 .rfc3686 = true, 3296 .geniv = true, 3297 }, 3298 }, 3299 { 3300 .aead = { 3301 .base = { 3302 .cra_name = "authenc(hmac(sha512)," 3303 "rfc3686(ctr(aes)))", 3304 .cra_driver_name = "authenc-hmac-sha512-" 3305 "rfc3686-ctr-aes-caam", 3306 .cra_blocksize = 1, 3307 }, 3308 .setkey = aead_setkey, 3309 .setauthsize = aead_setauthsize, 3310 .encrypt = aead_encrypt, 3311 .decrypt = aead_decrypt, 3312 .ivsize = CTR_RFC3686_IV_SIZE, 3313 .maxauthsize = SHA512_DIGEST_SIZE, 3314 }, 3315 .caam = { 3316 .class1_alg_type = OP_ALG_ALGSEL_AES | 3317 OP_ALG_AAI_CTR_MOD128, 3318 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 3319 OP_ALG_AAI_HMAC_PRECOMP, 3320 .rfc3686 = true, 3321 }, 3322 }, 3323 { 3324 .aead = { 3325 .base = { 3326 .cra_name = "seqiv(authenc(hmac(sha512)," 3327 "rfc3686(ctr(aes))))", 3328 .cra_driver_name = "seqiv-authenc-hmac-sha512-" 3329 "rfc3686-ctr-aes-caam", 3330 .cra_blocksize = 1, 3331 }, 3332 .setkey = aead_setkey, 3333 .setauthsize = aead_setauthsize, 3334 .encrypt = aead_encrypt, 3335 .decrypt = aead_decrypt, 3336 .ivsize = CTR_RFC3686_IV_SIZE, 3337 .maxauthsize = SHA512_DIGEST_SIZE, 3338 }, 3339 .caam = { 3340 .class1_alg_type = OP_ALG_ALGSEL_AES | 3341 OP_ALG_AAI_CTR_MOD128, 3342 .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 3343 OP_ALG_AAI_HMAC_PRECOMP, 3344 .rfc3686 = true, 3345 .geniv = true, 3346 }, 3347 }, 3348 }; 3349 3350 struct caam_crypto_alg { 3351 struct crypto_alg crypto_alg; 3352 struct list_head entry; 3353 struct caam_alg_entry caam; 3354 }; 3355 3356 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) 3357 { 3358 ctx->jrdev = caam_jr_alloc(); 3359 if (IS_ERR(ctx->jrdev)) { 3360 pr_err("Job Ring Device allocation for transform failed\n"); 3361 return PTR_ERR(ctx->jrdev); 3362 } 3363 3364 /* copy descriptor header template value */ 3365 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; 3366 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; 3367 3368 return 0; 3369 } 3370 3371 static int caam_cra_init(struct crypto_tfm *tfm) 3372 { 3373 struct crypto_alg *alg = tfm->__crt_alg; 3374 struct caam_crypto_alg *caam_alg = 3375 container_of(alg, struct caam_crypto_alg, crypto_alg); 3376 struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 3377 3378 return caam_init_common(ctx, &caam_alg->caam); 3379 } 3380 3381 static int caam_aead_init(struct crypto_aead *tfm) 3382 { 3383 struct aead_alg *alg = crypto_aead_alg(tfm); 3384 struct caam_aead_alg *caam_alg = 3385 container_of(alg, struct caam_aead_alg, aead); 3386 struct caam_ctx *ctx = crypto_aead_ctx(tfm); 3387 3388 return caam_init_common(ctx, &caam_alg->caam); 3389 } 3390 3391 static void caam_exit_common(struct caam_ctx *ctx) 3392 { 3393 if (ctx->sh_desc_enc_dma && 3394 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) 3395 dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, 3396 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE); 3397 if (ctx->sh_desc_dec_dma && 3398 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma)) 3399 dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma, 3400 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE); 3401 if (ctx->sh_desc_givenc_dma && 3402 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma)) 3403 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, 3404 desc_bytes(ctx->sh_desc_givenc), 3405 DMA_TO_DEVICE); 3406 if (ctx->key_dma && 3407 !dma_mapping_error(ctx->jrdev, ctx->key_dma)) 3408 dma_unmap_single(ctx->jrdev, ctx->key_dma, 3409 ctx->cdata.keylen + ctx->adata.keylen_pad, 3410 DMA_TO_DEVICE); 3411 3412 caam_jr_free(ctx->jrdev); 3413 } 3414 3415 static void caam_cra_exit(struct crypto_tfm *tfm) 3416 { 3417 caam_exit_common(crypto_tfm_ctx(tfm)); 3418 } 3419 3420 static void caam_aead_exit(struct crypto_aead *tfm) 3421 { 3422 caam_exit_common(crypto_aead_ctx(tfm)); 3423 } 3424 3425 static void __exit caam_algapi_exit(void) 3426 { 3427 3428 struct caam_crypto_alg *t_alg, *n; 3429 int i; 3430 3431 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 3432 struct caam_aead_alg *t_alg = driver_aeads + i; 3433 3434 if (t_alg->registered) 3435 crypto_unregister_aead(&t_alg->aead); 3436 } 3437 3438 if (!alg_list.next) 3439 return; 3440 3441 list_for_each_entry_safe(t_alg, n, &alg_list, entry) { 3442 crypto_unregister_alg(&t_alg->crypto_alg); 3443 list_del(&t_alg->entry); 3444 kfree(t_alg); 3445 } 3446 } 3447 3448 static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template 3449 *template) 3450 { 3451 struct caam_crypto_alg *t_alg; 3452 struct crypto_alg *alg; 3453 3454 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 3455 if (!t_alg) { 3456 pr_err("failed to allocate t_alg\n"); 3457 return ERR_PTR(-ENOMEM); 3458 } 3459 3460 alg = &t_alg->crypto_alg; 3461 3462 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); 3463 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 3464 template->driver_name); 3465 alg->cra_module = THIS_MODULE; 3466 alg->cra_init = caam_cra_init; 3467 alg->cra_exit = caam_cra_exit; 3468 alg->cra_priority = CAAM_CRA_PRIORITY; 3469 alg->cra_blocksize = template->blocksize; 3470 alg->cra_alignmask = 0; 3471 alg->cra_ctxsize = sizeof(struct caam_ctx); 3472 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | 3473 template->type; 3474 switch (template->type) { 3475 case CRYPTO_ALG_TYPE_GIVCIPHER: 3476 alg->cra_type = &crypto_givcipher_type; 3477 alg->cra_ablkcipher = template->template_ablkcipher; 3478 break; 3479 case CRYPTO_ALG_TYPE_ABLKCIPHER: 3480 alg->cra_type = &crypto_ablkcipher_type; 3481 alg->cra_ablkcipher = template->template_ablkcipher; 3482 break; 3483 } 3484 3485 t_alg->caam.class1_alg_type = template->class1_alg_type; 3486 t_alg->caam.class2_alg_type = template->class2_alg_type; 3487 3488 return t_alg; 3489 } 3490 3491 static void caam_aead_alg_init(struct caam_aead_alg *t_alg) 3492 { 3493 struct aead_alg *alg = &t_alg->aead; 3494 3495 alg->base.cra_module = THIS_MODULE; 3496 alg->base.cra_priority = CAAM_CRA_PRIORITY; 3497 alg->base.cra_ctxsize = sizeof(struct caam_ctx); 3498 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; 3499 3500 alg->init = caam_aead_init; 3501 alg->exit = caam_aead_exit; 3502 } 3503 3504 static int __init caam_algapi_init(void) 3505 { 3506 struct device_node *dev_node; 3507 struct platform_device *pdev; 3508 struct device *ctrldev; 3509 struct caam_drv_private *priv; 3510 int i = 0, err = 0; 3511 u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst; 3512 unsigned int md_limit = SHA512_DIGEST_SIZE; 3513 bool registered = false; 3514 3515 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 3516 if (!dev_node) { 3517 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 3518 if (!dev_node) 3519 return -ENODEV; 3520 } 3521 3522 pdev = of_find_device_by_node(dev_node); 3523 if (!pdev) { 3524 of_node_put(dev_node); 3525 return -ENODEV; 3526 } 3527 3528 ctrldev = &pdev->dev; 3529 priv = dev_get_drvdata(ctrldev); 3530 of_node_put(dev_node); 3531 3532 /* 3533 * If priv is NULL, it's probably because the caam driver wasn't 3534 * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 3535 */ 3536 if (!priv) 3537 return -ENODEV; 3538 3539 3540 INIT_LIST_HEAD(&alg_list); 3541 3542 /* 3543 * Register crypto algorithms the device supports. 3544 * First, detect presence and attributes of DES, AES, and MD blocks. 3545 */ 3546 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 3547 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 3548 des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; 3549 aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT; 3550 md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 3551 3552 /* If MD is present, limit digest size based on LP256 */ 3553 if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)) 3554 md_limit = SHA256_DIGEST_SIZE; 3555 3556 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 3557 struct caam_crypto_alg *t_alg; 3558 struct caam_alg_template *alg = driver_algs + i; 3559 u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK; 3560 3561 /* Skip DES algorithms if not supported by device */ 3562 if (!des_inst && 3563 ((alg_sel == OP_ALG_ALGSEL_3DES) || 3564 (alg_sel == OP_ALG_ALGSEL_DES))) 3565 continue; 3566 3567 /* Skip AES algorithms if not supported by device */ 3568 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 3569 continue; 3570 3571 /* 3572 * Check support for AES modes not available 3573 * on LP devices. 3574 */ 3575 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) 3576 if ((alg->class1_alg_type & OP_ALG_AAI_MASK) == 3577 OP_ALG_AAI_XTS) 3578 continue; 3579 3580 t_alg = caam_alg_alloc(alg); 3581 if (IS_ERR(t_alg)) { 3582 err = PTR_ERR(t_alg); 3583 pr_warn("%s alg allocation failed\n", alg->driver_name); 3584 continue; 3585 } 3586 3587 err = crypto_register_alg(&t_alg->crypto_alg); 3588 if (err) { 3589 pr_warn("%s alg registration failed\n", 3590 t_alg->crypto_alg.cra_driver_name); 3591 kfree(t_alg); 3592 continue; 3593 } 3594 3595 list_add_tail(&t_alg->entry, &alg_list); 3596 registered = true; 3597 } 3598 3599 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { 3600 struct caam_aead_alg *t_alg = driver_aeads + i; 3601 u32 c1_alg_sel = t_alg->caam.class1_alg_type & 3602 OP_ALG_ALGSEL_MASK; 3603 u32 c2_alg_sel = t_alg->caam.class2_alg_type & 3604 OP_ALG_ALGSEL_MASK; 3605 u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; 3606 3607 /* Skip DES algorithms if not supported by device */ 3608 if (!des_inst && 3609 ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || 3610 (c1_alg_sel == OP_ALG_ALGSEL_DES))) 3611 continue; 3612 3613 /* Skip AES algorithms if not supported by device */ 3614 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) 3615 continue; 3616 3617 /* 3618 * Check support for AES algorithms not available 3619 * on LP devices. 3620 */ 3621 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) 3622 if (alg_aai == OP_ALG_AAI_GCM) 3623 continue; 3624 3625 /* 3626 * Skip algorithms requiring message digests 3627 * if MD or MD size is not supported by device. 3628 */ 3629 if (c2_alg_sel && 3630 (!md_inst || (t_alg->aead.maxauthsize > md_limit))) 3631 continue; 3632 3633 caam_aead_alg_init(t_alg); 3634 3635 err = crypto_register_aead(&t_alg->aead); 3636 if (err) { 3637 pr_warn("%s alg registration failed\n", 3638 t_alg->aead.base.cra_driver_name); 3639 continue; 3640 } 3641 3642 t_alg->registered = true; 3643 registered = true; 3644 } 3645 3646 if (registered) 3647 pr_info("caam algorithms registered in /proc/crypto\n"); 3648 3649 return err; 3650 } 3651 3652 module_init(caam_algapi_init); 3653 module_exit(caam_algapi_exit); 3654 3655 MODULE_LICENSE("GPL"); 3656 MODULE_DESCRIPTION("FSL CAAM support for crypto API"); 3657 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC"); 3658