1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * caam - Freescale FSL CAAM support for ahash functions of crypto API 4 * 5 * Copyright 2011 Freescale Semiconductor, Inc. 6 * Copyright 2018-2019 NXP 7 * 8 * Based on caamalg.c crypto API driver. 9 * 10 * relationship of digest job descriptor or first job descriptor after init to 11 * shared descriptors: 12 * 13 * --------------- --------------- 14 * | JobDesc #1 |-------------------->| ShareDesc | 15 * | *(packet 1) | | (hashKey) | 16 * --------------- | (operation) | 17 * --------------- 18 * 19 * relationship of subsequent job descriptors to shared descriptors: 20 * 21 * --------------- --------------- 22 * | JobDesc #2 |-------------------->| ShareDesc | 23 * | *(packet 2) | |------------->| (hashKey) | 24 * --------------- | |-------->| (operation) | 25 * . | | | (load ctx2) | 26 * . | | --------------- 27 * --------------- | | 28 * | JobDesc #3 |------| | 29 * | *(packet 3) | | 30 * --------------- | 31 * . | 32 * . | 33 * --------------- | 34 * | JobDesc #4 |------------ 35 * | *(packet 4) | 36 * --------------- 37 * 38 * The SharedDesc never changes for a connection unless rekeyed, but 39 * each packet will likely be in a different place. So all we need 40 * to know to process the packet is where the input is, where the 41 * output goes, and what context we want to process with. Context is 42 * in the SharedDesc, packet references in the JobDesc. 43 * 44 * So, a job desc looks like: 45 * 46 * --------------------- 47 * | Header | 48 * | ShareDesc Pointer | 49 * | SEQ_OUT_PTR | 50 * | (output buffer) | 51 * | (output length) | 52 * | SEQ_IN_PTR | 53 * | (input buffer) | 54 * | (input length) | 55 * --------------------- 56 */ 57 58 #include "compat.h" 59 60 #include "regs.h" 61 #include "intern.h" 62 #include "desc_constr.h" 63 #include "jr.h" 64 #include "error.h" 65 #include "sg_sw_sec4.h" 66 #include "key_gen.h" 67 #include "caamhash_desc.h" 68 69 #define CAAM_CRA_PRIORITY 3000 70 71 /* max hash key is max split key size */ 72 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) 73 74 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE 75 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE 76 77 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ 78 CAAM_MAX_HASH_KEY_SIZE) 79 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) 80 81 /* caam context sizes for hashes: running digest + 8 */ 82 #define HASH_MSG_LEN 8 83 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) 84 85 static struct list_head hash_list; 86 87 /* ahash per-session context */ 88 struct caam_hash_ctx { 89 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 90 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 91 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 92 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 93 u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned; 94 dma_addr_t sh_desc_update_dma ____cacheline_aligned; 95 dma_addr_t sh_desc_update_first_dma; 96 dma_addr_t sh_desc_fin_dma; 97 dma_addr_t sh_desc_digest_dma; 98 enum dma_data_direction dir; 99 enum dma_data_direction key_dir; 100 struct device *jrdev; 101 int ctx_len; 102 struct alginfo adata; 103 }; 104 105 /* ahash state */ 106 struct caam_hash_state { 107 dma_addr_t buf_dma; 108 dma_addr_t ctx_dma; 109 int ctx_dma_len; 110 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 111 int buflen; 112 int next_buflen; 113 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; 114 int (*update)(struct ahash_request *req); 115 int (*final)(struct ahash_request *req); 116 int (*finup)(struct ahash_request *req); 117 }; 118 119 struct caam_export_state { 120 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; 121 u8 caam_ctx[MAX_CTX_LEN]; 122 int buflen; 123 int (*update)(struct ahash_request *req); 124 int (*final)(struct ahash_request *req); 125 int (*finup)(struct ahash_request *req); 126 }; 127 128 static inline bool is_cmac_aes(u32 algtype) 129 { 130 return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == 131 (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC); 132 } 133 /* Common job descriptor seq in/out ptr routines */ 134 135 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ 136 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, 137 struct caam_hash_state *state, 138 int ctx_len) 139 { 140 state->ctx_dma_len = ctx_len; 141 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, 142 ctx_len, DMA_FROM_DEVICE); 143 if (dma_mapping_error(jrdev, state->ctx_dma)) { 144 dev_err(jrdev, "unable to map ctx\n"); 145 state->ctx_dma = 0; 146 return -ENOMEM; 147 } 148 149 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); 150 151 return 0; 152 } 153 154 /* Map current buffer in state (if length > 0) and put it in link table */ 155 static inline int buf_map_to_sec4_sg(struct device *jrdev, 156 struct sec4_sg_entry *sec4_sg, 157 struct caam_hash_state *state) 158 { 159 int buflen = state->buflen; 160 161 if (!buflen) 162 return 0; 163 164 state->buf_dma = dma_map_single(jrdev, state->buf, buflen, 165 DMA_TO_DEVICE); 166 if (dma_mapping_error(jrdev, state->buf_dma)) { 167 dev_err(jrdev, "unable to map buf\n"); 168 state->buf_dma = 0; 169 return -ENOMEM; 170 } 171 172 dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0); 173 174 return 0; 175 } 176 177 /* Map state->caam_ctx, and add it to link table */ 178 static inline int ctx_map_to_sec4_sg(struct device *jrdev, 179 struct caam_hash_state *state, int ctx_len, 180 struct sec4_sg_entry *sec4_sg, u32 flag) 181 { 182 state->ctx_dma_len = ctx_len; 183 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); 184 if (dma_mapping_error(jrdev, state->ctx_dma)) { 185 dev_err(jrdev, "unable to map ctx\n"); 186 state->ctx_dma = 0; 187 return -ENOMEM; 188 } 189 190 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); 191 192 return 0; 193 } 194 195 static int ahash_set_sh_desc(struct crypto_ahash *ahash) 196 { 197 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 198 int digestsize = crypto_ahash_digestsize(ahash); 199 struct device *jrdev = ctx->jrdev; 200 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 201 u32 *desc; 202 203 ctx->adata.key_virt = ctx->key; 204 205 /* ahash_update shared descriptor */ 206 desc = ctx->sh_desc_update; 207 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, 208 ctx->ctx_len, true, ctrlpriv->era); 209 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 210 desc_bytes(desc), ctx->dir); 211 212 print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ", 213 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 214 1); 215 216 /* ahash_update_first shared descriptor */ 217 desc = ctx->sh_desc_update_first; 218 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 219 ctx->ctx_len, false, ctrlpriv->era); 220 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 221 desc_bytes(desc), ctx->dir); 222 print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__) 223 ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, 224 desc_bytes(desc), 1); 225 226 /* ahash_final shared descriptor */ 227 desc = ctx->sh_desc_fin; 228 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, 229 ctx->ctx_len, true, ctrlpriv->era); 230 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 231 desc_bytes(desc), ctx->dir); 232 233 print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ", 234 DUMP_PREFIX_ADDRESS, 16, 4, desc, 235 desc_bytes(desc), 1); 236 237 /* ahash_digest shared descriptor */ 238 desc = ctx->sh_desc_digest; 239 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, 240 ctx->ctx_len, false, ctrlpriv->era); 241 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 242 desc_bytes(desc), ctx->dir); 243 244 print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ", 245 DUMP_PREFIX_ADDRESS, 16, 4, desc, 246 desc_bytes(desc), 1); 247 248 return 0; 249 } 250 251 static int axcbc_set_sh_desc(struct crypto_ahash *ahash) 252 { 253 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 254 int digestsize = crypto_ahash_digestsize(ahash); 255 struct device *jrdev = ctx->jrdev; 256 u32 *desc; 257 258 /* shared descriptor for ahash_update */ 259 desc = ctx->sh_desc_update; 260 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, 261 ctx->ctx_len, ctx->ctx_len); 262 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 263 desc_bytes(desc), ctx->dir); 264 print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", 265 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 266 1); 267 268 /* shared descriptor for ahash_{final,finup} */ 269 desc = ctx->sh_desc_fin; 270 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, 271 digestsize, ctx->ctx_len); 272 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 273 desc_bytes(desc), ctx->dir); 274 print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", 275 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 276 1); 277 278 /* key is immediate data for INIT and INITFINAL states */ 279 ctx->adata.key_virt = ctx->key; 280 281 /* shared descriptor for first invocation of ahash_update */ 282 desc = ctx->sh_desc_update_first; 283 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 284 ctx->ctx_len); 285 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 286 desc_bytes(desc), ctx->dir); 287 print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) 288 " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, 289 desc_bytes(desc), 1); 290 291 /* shared descriptor for ahash_digest */ 292 desc = ctx->sh_desc_digest; 293 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, 294 digestsize, ctx->ctx_len); 295 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 296 desc_bytes(desc), ctx->dir); 297 print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", 298 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 299 1); 300 return 0; 301 } 302 303 static int acmac_set_sh_desc(struct crypto_ahash *ahash) 304 { 305 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 306 int digestsize = crypto_ahash_digestsize(ahash); 307 struct device *jrdev = ctx->jrdev; 308 u32 *desc; 309 310 /* shared descriptor for ahash_update */ 311 desc = ctx->sh_desc_update; 312 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, 313 ctx->ctx_len, ctx->ctx_len); 314 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 315 desc_bytes(desc), ctx->dir); 316 print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", 317 DUMP_PREFIX_ADDRESS, 16, 4, desc, 318 desc_bytes(desc), 1); 319 320 /* shared descriptor for ahash_{final,finup} */ 321 desc = ctx->sh_desc_fin; 322 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, 323 digestsize, ctx->ctx_len); 324 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 325 desc_bytes(desc), ctx->dir); 326 print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", 327 DUMP_PREFIX_ADDRESS, 16, 4, desc, 328 desc_bytes(desc), 1); 329 330 /* shared descriptor for first invocation of ahash_update */ 331 desc = ctx->sh_desc_update_first; 332 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 333 ctx->ctx_len); 334 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 335 desc_bytes(desc), ctx->dir); 336 print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) 337 " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, 338 desc_bytes(desc), 1); 339 340 /* shared descriptor for ahash_digest */ 341 desc = ctx->sh_desc_digest; 342 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, 343 digestsize, ctx->ctx_len); 344 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 345 desc_bytes(desc), ctx->dir); 346 print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", 347 DUMP_PREFIX_ADDRESS, 16, 4, desc, 348 desc_bytes(desc), 1); 349 350 return 0; 351 } 352 353 /* Digest hash size if it is too large */ 354 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, 355 u32 digestsize) 356 { 357 struct device *jrdev = ctx->jrdev; 358 u32 *desc; 359 struct split_key_result result; 360 dma_addr_t key_dma; 361 int ret; 362 363 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 364 if (!desc) { 365 dev_err(jrdev, "unable to allocate key input memory\n"); 366 return -ENOMEM; 367 } 368 369 init_job_desc(desc, 0); 370 371 key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL); 372 if (dma_mapping_error(jrdev, key_dma)) { 373 dev_err(jrdev, "unable to map key memory\n"); 374 kfree(desc); 375 return -ENOMEM; 376 } 377 378 /* Job descriptor to perform unkeyed hash on key_in */ 379 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | 380 OP_ALG_AS_INITFINAL); 381 append_seq_in_ptr(desc, key_dma, *keylen, 0); 382 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | 383 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); 384 append_seq_out_ptr(desc, key_dma, digestsize, 0); 385 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 386 LDST_SRCDST_BYTE_CONTEXT); 387 388 print_hex_dump_debug("key_in@"__stringify(__LINE__)": ", 389 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); 390 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 391 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 392 1); 393 394 result.err = 0; 395 init_completion(&result.completion); 396 397 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); 398 if (!ret) { 399 /* in progress */ 400 wait_for_completion(&result.completion); 401 ret = result.err; 402 403 print_hex_dump_debug("digested key@"__stringify(__LINE__)": ", 404 DUMP_PREFIX_ADDRESS, 16, 4, key, 405 digestsize, 1); 406 } 407 dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); 408 409 *keylen = digestsize; 410 411 kfree(desc); 412 413 return ret; 414 } 415 416 static int ahash_setkey(struct crypto_ahash *ahash, 417 const u8 *key, unsigned int keylen) 418 { 419 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 420 struct device *jrdev = ctx->jrdev; 421 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 422 int digestsize = crypto_ahash_digestsize(ahash); 423 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 424 int ret; 425 u8 *hashed_key = NULL; 426 427 dev_dbg(jrdev, "keylen %d\n", keylen); 428 429 if (keylen > blocksize) { 430 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); 431 if (!hashed_key) 432 return -ENOMEM; 433 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); 434 if (ret) 435 goto bad_free_key; 436 key = hashed_key; 437 } 438 439 /* 440 * If DKP is supported, use it in the shared descriptor to generate 441 * the split key. 442 */ 443 if (ctrlpriv->era >= 6) { 444 ctx->adata.key_inline = true; 445 ctx->adata.keylen = keylen; 446 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 447 OP_ALG_ALGSEL_MASK); 448 449 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) 450 goto bad_free_key; 451 452 memcpy(ctx->key, key, keylen); 453 454 /* 455 * In case |user key| > |derived key|, using DKP<imm,imm> 456 * would result in invalid opcodes (last bytes of user key) in 457 * the resulting descriptor. Use DKP<ptr,imm> instead => both 458 * virtual and dma key addresses are needed. 459 */ 460 if (keylen > ctx->adata.keylen_pad) 461 dma_sync_single_for_device(ctx->jrdev, 462 ctx->adata.key_dma, 463 ctx->adata.keylen_pad, 464 DMA_TO_DEVICE); 465 } else { 466 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, 467 keylen, CAAM_MAX_HASH_KEY_SIZE); 468 if (ret) 469 goto bad_free_key; 470 } 471 472 kfree(hashed_key); 473 return ahash_set_sh_desc(ahash); 474 bad_free_key: 475 kfree(hashed_key); 476 return -EINVAL; 477 } 478 479 static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, 480 unsigned int keylen) 481 { 482 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 483 struct device *jrdev = ctx->jrdev; 484 485 if (keylen != AES_KEYSIZE_128) 486 return -EINVAL; 487 488 memcpy(ctx->key, key, keylen); 489 dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen, 490 DMA_TO_DEVICE); 491 ctx->adata.keylen = keylen; 492 493 print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", 494 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1); 495 496 return axcbc_set_sh_desc(ahash); 497 } 498 499 static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, 500 unsigned int keylen) 501 { 502 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 503 int err; 504 505 err = aes_check_keylen(keylen); 506 if (err) 507 return err; 508 509 /* key is immediate data for all cmac shared descriptors */ 510 ctx->adata.key_virt = key; 511 ctx->adata.keylen = keylen; 512 513 print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ", 514 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 515 516 return acmac_set_sh_desc(ahash); 517 } 518 519 /* 520 * ahash_edesc - s/w-extended ahash descriptor 521 * @sec4_sg_dma: physical mapped address of h/w link table 522 * @src_nents: number of segments in input scatterlist 523 * @sec4_sg_bytes: length of dma mapped sec4_sg space 524 * @hw_desc: the h/w job descriptor followed by any referenced link tables 525 * @sec4_sg: h/w link table 526 */ 527 struct ahash_edesc { 528 dma_addr_t sec4_sg_dma; 529 int src_nents; 530 int sec4_sg_bytes; 531 u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned; 532 struct sec4_sg_entry sec4_sg[0]; 533 }; 534 535 static inline void ahash_unmap(struct device *dev, 536 struct ahash_edesc *edesc, 537 struct ahash_request *req, int dst_len) 538 { 539 struct caam_hash_state *state = ahash_request_ctx(req); 540 541 if (edesc->src_nents) 542 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 543 544 if (edesc->sec4_sg_bytes) 545 dma_unmap_single(dev, edesc->sec4_sg_dma, 546 edesc->sec4_sg_bytes, DMA_TO_DEVICE); 547 548 if (state->buf_dma) { 549 dma_unmap_single(dev, state->buf_dma, state->buflen, 550 DMA_TO_DEVICE); 551 state->buf_dma = 0; 552 } 553 } 554 555 static inline void ahash_unmap_ctx(struct device *dev, 556 struct ahash_edesc *edesc, 557 struct ahash_request *req, int dst_len, u32 flag) 558 { 559 struct caam_hash_state *state = ahash_request_ctx(req); 560 561 if (state->ctx_dma) { 562 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); 563 state->ctx_dma = 0; 564 } 565 ahash_unmap(dev, edesc, req, dst_len); 566 } 567 568 static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err, 569 void *context, enum dma_data_direction dir) 570 { 571 struct ahash_request *req = context; 572 struct ahash_edesc *edesc; 573 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 574 int digestsize = crypto_ahash_digestsize(ahash); 575 struct caam_hash_state *state = ahash_request_ctx(req); 576 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 577 int ecode = 0; 578 579 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 580 581 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 582 if (err) 583 ecode = caam_jr_strstatus(jrdev, err); 584 585 ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir); 586 memcpy(req->result, state->caam_ctx, digestsize); 587 kfree(edesc); 588 589 print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", 590 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 591 ctx->ctx_len, 1); 592 593 req->base.complete(&req->base, ecode); 594 } 595 596 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, 597 void *context) 598 { 599 ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE); 600 } 601 602 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, 603 void *context) 604 { 605 ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL); 606 } 607 608 static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err, 609 void *context, enum dma_data_direction dir) 610 { 611 struct ahash_request *req = context; 612 struct ahash_edesc *edesc; 613 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 614 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 615 struct caam_hash_state *state = ahash_request_ctx(req); 616 int digestsize = crypto_ahash_digestsize(ahash); 617 int ecode = 0; 618 619 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 620 621 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 622 if (err) 623 ecode = caam_jr_strstatus(jrdev, err); 624 625 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir); 626 kfree(edesc); 627 628 scatterwalk_map_and_copy(state->buf, req->src, 629 req->nbytes - state->next_buflen, 630 state->next_buflen, 0); 631 state->buflen = state->next_buflen; 632 633 print_hex_dump_debug("buf@" __stringify(__LINE__)": ", 634 DUMP_PREFIX_ADDRESS, 16, 4, state->buf, 635 state->buflen, 1); 636 637 print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", 638 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 639 ctx->ctx_len, 1); 640 if (req->result) 641 print_hex_dump_debug("result@"__stringify(__LINE__)": ", 642 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 643 digestsize, 1); 644 645 req->base.complete(&req->base, ecode); 646 } 647 648 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, 649 void *context) 650 { 651 ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL); 652 } 653 654 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, 655 void *context) 656 { 657 ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE); 658 } 659 660 /* 661 * Allocate an enhanced descriptor, which contains the hardware descriptor 662 * and space for hardware scatter table containing sg_num entries. 663 */ 664 static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, 665 int sg_num, u32 *sh_desc, 666 dma_addr_t sh_desc_dma, 667 gfp_t flags) 668 { 669 struct ahash_edesc *edesc; 670 unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); 671 672 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); 673 if (!edesc) { 674 dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); 675 return NULL; 676 } 677 678 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), 679 HDR_SHARE_DEFER | HDR_REVERSE); 680 681 return edesc; 682 } 683 684 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, 685 struct ahash_edesc *edesc, 686 struct ahash_request *req, int nents, 687 unsigned int first_sg, 688 unsigned int first_bytes, size_t to_hash) 689 { 690 dma_addr_t src_dma; 691 u32 options; 692 693 if (nents > 1 || first_sg) { 694 struct sec4_sg_entry *sg = edesc->sec4_sg; 695 unsigned int sgsize = sizeof(*sg) * 696 pad_sg_nents(first_sg + nents); 697 698 sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0); 699 700 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); 701 if (dma_mapping_error(ctx->jrdev, src_dma)) { 702 dev_err(ctx->jrdev, "unable to map S/G table\n"); 703 return -ENOMEM; 704 } 705 706 edesc->sec4_sg_bytes = sgsize; 707 edesc->sec4_sg_dma = src_dma; 708 options = LDST_SGF; 709 } else { 710 src_dma = sg_dma_address(req->src); 711 options = 0; 712 } 713 714 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, 715 options); 716 717 return 0; 718 } 719 720 /* submit update job descriptor */ 721 static int ahash_update_ctx(struct ahash_request *req) 722 { 723 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 724 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 725 struct caam_hash_state *state = ahash_request_ctx(req); 726 struct device *jrdev = ctx->jrdev; 727 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 728 GFP_KERNEL : GFP_ATOMIC; 729 u8 *buf = state->buf; 730 int *buflen = &state->buflen; 731 int *next_buflen = &state->next_buflen; 732 int blocksize = crypto_ahash_blocksize(ahash); 733 int in_len = *buflen + req->nbytes, to_hash; 734 u32 *desc; 735 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; 736 struct ahash_edesc *edesc; 737 int ret = 0; 738 739 *next_buflen = in_len & (blocksize - 1); 740 to_hash = in_len - *next_buflen; 741 742 /* 743 * For XCBC and CMAC, if to_hash is multiple of block size, 744 * keep last block in internal buffer 745 */ 746 if ((is_xcbc_aes(ctx->adata.algtype) || 747 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 748 (*next_buflen == 0)) { 749 *next_buflen = blocksize; 750 to_hash -= blocksize; 751 } 752 753 if (to_hash) { 754 int pad_nents; 755 int src_len = req->nbytes - *next_buflen; 756 757 src_nents = sg_nents_for_len(req->src, src_len); 758 if (src_nents < 0) { 759 dev_err(jrdev, "Invalid number of src SG.\n"); 760 return src_nents; 761 } 762 763 if (src_nents) { 764 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 765 DMA_TO_DEVICE); 766 if (!mapped_nents) { 767 dev_err(jrdev, "unable to DMA map source\n"); 768 return -ENOMEM; 769 } 770 } else { 771 mapped_nents = 0; 772 } 773 774 sec4_sg_src_index = 1 + (*buflen ? 1 : 0); 775 pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents); 776 sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); 777 778 /* 779 * allocate space for base edesc and hw desc commands, 780 * link tables 781 */ 782 edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update, 783 ctx->sh_desc_update_dma, flags); 784 if (!edesc) { 785 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 786 return -ENOMEM; 787 } 788 789 edesc->src_nents = src_nents; 790 edesc->sec4_sg_bytes = sec4_sg_bytes; 791 792 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 793 edesc->sec4_sg, DMA_BIDIRECTIONAL); 794 if (ret) 795 goto unmap_ctx; 796 797 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 798 if (ret) 799 goto unmap_ctx; 800 801 if (mapped_nents) 802 sg_to_sec4_sg_last(req->src, src_len, 803 edesc->sec4_sg + sec4_sg_src_index, 804 0); 805 else 806 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 807 1); 808 809 desc = edesc->hw_desc; 810 811 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 812 sec4_sg_bytes, 813 DMA_TO_DEVICE); 814 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 815 dev_err(jrdev, "unable to map S/G table\n"); 816 ret = -ENOMEM; 817 goto unmap_ctx; 818 } 819 820 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + 821 to_hash, LDST_SGF); 822 823 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 824 825 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 826 DUMP_PREFIX_ADDRESS, 16, 4, desc, 827 desc_bytes(desc), 1); 828 829 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); 830 if (ret) 831 goto unmap_ctx; 832 833 ret = -EINPROGRESS; 834 } else if (*next_buflen) { 835 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 836 req->nbytes, 0); 837 *buflen = *next_buflen; 838 839 print_hex_dump_debug("buf@" __stringify(__LINE__)": ", 840 DUMP_PREFIX_ADDRESS, 16, 4, buf, 841 *buflen, 1); 842 } 843 844 return ret; 845 unmap_ctx: 846 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 847 kfree(edesc); 848 return ret; 849 } 850 851 static int ahash_final_ctx(struct ahash_request *req) 852 { 853 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 854 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 855 struct caam_hash_state *state = ahash_request_ctx(req); 856 struct device *jrdev = ctx->jrdev; 857 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 858 GFP_KERNEL : GFP_ATOMIC; 859 int buflen = state->buflen; 860 u32 *desc; 861 int sec4_sg_bytes; 862 int digestsize = crypto_ahash_digestsize(ahash); 863 struct ahash_edesc *edesc; 864 int ret; 865 866 sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * 867 sizeof(struct sec4_sg_entry); 868 869 /* allocate space for base edesc and hw desc commands, link tables */ 870 edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin, 871 ctx->sh_desc_fin_dma, flags); 872 if (!edesc) 873 return -ENOMEM; 874 875 desc = edesc->hw_desc; 876 877 edesc->sec4_sg_bytes = sec4_sg_bytes; 878 879 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 880 edesc->sec4_sg, DMA_BIDIRECTIONAL); 881 if (ret) 882 goto unmap_ctx; 883 884 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 885 if (ret) 886 goto unmap_ctx; 887 888 sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0)); 889 890 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 891 sec4_sg_bytes, DMA_TO_DEVICE); 892 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 893 dev_err(jrdev, "unable to map S/G table\n"); 894 ret = -ENOMEM; 895 goto unmap_ctx; 896 } 897 898 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, 899 LDST_SGF); 900 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 901 902 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 903 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 904 1); 905 906 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 907 if (ret) 908 goto unmap_ctx; 909 910 return -EINPROGRESS; 911 unmap_ctx: 912 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 913 kfree(edesc); 914 return ret; 915 } 916 917 static int ahash_finup_ctx(struct ahash_request *req) 918 { 919 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 920 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 921 struct caam_hash_state *state = ahash_request_ctx(req); 922 struct device *jrdev = ctx->jrdev; 923 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 924 GFP_KERNEL : GFP_ATOMIC; 925 int buflen = state->buflen; 926 u32 *desc; 927 int sec4_sg_src_index; 928 int src_nents, mapped_nents; 929 int digestsize = crypto_ahash_digestsize(ahash); 930 struct ahash_edesc *edesc; 931 int ret; 932 933 src_nents = sg_nents_for_len(req->src, req->nbytes); 934 if (src_nents < 0) { 935 dev_err(jrdev, "Invalid number of src SG.\n"); 936 return src_nents; 937 } 938 939 if (src_nents) { 940 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 941 DMA_TO_DEVICE); 942 if (!mapped_nents) { 943 dev_err(jrdev, "unable to DMA map source\n"); 944 return -ENOMEM; 945 } 946 } else { 947 mapped_nents = 0; 948 } 949 950 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 951 952 /* allocate space for base edesc and hw desc commands, link tables */ 953 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 954 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 955 flags); 956 if (!edesc) { 957 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 958 return -ENOMEM; 959 } 960 961 desc = edesc->hw_desc; 962 963 edesc->src_nents = src_nents; 964 965 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 966 edesc->sec4_sg, DMA_BIDIRECTIONAL); 967 if (ret) 968 goto unmap_ctx; 969 970 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 971 if (ret) 972 goto unmap_ctx; 973 974 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 975 sec4_sg_src_index, ctx->ctx_len + buflen, 976 req->nbytes); 977 if (ret) 978 goto unmap_ctx; 979 980 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 981 982 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 983 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 984 1); 985 986 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 987 if (ret) 988 goto unmap_ctx; 989 990 return -EINPROGRESS; 991 unmap_ctx: 992 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 993 kfree(edesc); 994 return ret; 995 } 996 997 static int ahash_digest(struct ahash_request *req) 998 { 999 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1000 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1001 struct caam_hash_state *state = ahash_request_ctx(req); 1002 struct device *jrdev = ctx->jrdev; 1003 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1004 GFP_KERNEL : GFP_ATOMIC; 1005 u32 *desc; 1006 int digestsize = crypto_ahash_digestsize(ahash); 1007 int src_nents, mapped_nents; 1008 struct ahash_edesc *edesc; 1009 int ret; 1010 1011 state->buf_dma = 0; 1012 1013 src_nents = sg_nents_for_len(req->src, req->nbytes); 1014 if (src_nents < 0) { 1015 dev_err(jrdev, "Invalid number of src SG.\n"); 1016 return src_nents; 1017 } 1018 1019 if (src_nents) { 1020 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1021 DMA_TO_DEVICE); 1022 if (!mapped_nents) { 1023 dev_err(jrdev, "unable to map source for DMA\n"); 1024 return -ENOMEM; 1025 } 1026 } else { 1027 mapped_nents = 0; 1028 } 1029 1030 /* allocate space for base edesc and hw desc commands, link tables */ 1031 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0, 1032 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1033 flags); 1034 if (!edesc) { 1035 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1036 return -ENOMEM; 1037 } 1038 1039 edesc->src_nents = src_nents; 1040 1041 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1042 req->nbytes); 1043 if (ret) { 1044 ahash_unmap(jrdev, edesc, req, digestsize); 1045 kfree(edesc); 1046 return ret; 1047 } 1048 1049 desc = edesc->hw_desc; 1050 1051 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1052 if (ret) { 1053 ahash_unmap(jrdev, edesc, req, digestsize); 1054 kfree(edesc); 1055 return -ENOMEM; 1056 } 1057 1058 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1059 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1060 1); 1061 1062 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1063 if (!ret) { 1064 ret = -EINPROGRESS; 1065 } else { 1066 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1067 kfree(edesc); 1068 } 1069 1070 return ret; 1071 } 1072 1073 /* submit ahash final if it the first job descriptor */ 1074 static int ahash_final_no_ctx(struct ahash_request *req) 1075 { 1076 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1077 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1078 struct caam_hash_state *state = ahash_request_ctx(req); 1079 struct device *jrdev = ctx->jrdev; 1080 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1081 GFP_KERNEL : GFP_ATOMIC; 1082 u8 *buf = state->buf; 1083 int buflen = state->buflen; 1084 u32 *desc; 1085 int digestsize = crypto_ahash_digestsize(ahash); 1086 struct ahash_edesc *edesc; 1087 int ret; 1088 1089 /* allocate space for base edesc and hw desc commands, link tables */ 1090 edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest, 1091 ctx->sh_desc_digest_dma, flags); 1092 if (!edesc) 1093 return -ENOMEM; 1094 1095 desc = edesc->hw_desc; 1096 1097 if (buflen) { 1098 state->buf_dma = dma_map_single(jrdev, buf, buflen, 1099 DMA_TO_DEVICE); 1100 if (dma_mapping_error(jrdev, state->buf_dma)) { 1101 dev_err(jrdev, "unable to map src\n"); 1102 goto unmap; 1103 } 1104 1105 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1106 } 1107 1108 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1109 if (ret) 1110 goto unmap; 1111 1112 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1113 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1114 1); 1115 1116 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1117 if (!ret) { 1118 ret = -EINPROGRESS; 1119 } else { 1120 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1121 kfree(edesc); 1122 } 1123 1124 return ret; 1125 unmap: 1126 ahash_unmap(jrdev, edesc, req, digestsize); 1127 kfree(edesc); 1128 return -ENOMEM; 1129 1130 } 1131 1132 /* submit ahash update if it the first job descriptor after update */ 1133 static int ahash_update_no_ctx(struct ahash_request *req) 1134 { 1135 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1136 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1137 struct caam_hash_state *state = ahash_request_ctx(req); 1138 struct device *jrdev = ctx->jrdev; 1139 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1140 GFP_KERNEL : GFP_ATOMIC; 1141 u8 *buf = state->buf; 1142 int *buflen = &state->buflen; 1143 int *next_buflen = &state->next_buflen; 1144 int blocksize = crypto_ahash_blocksize(ahash); 1145 int in_len = *buflen + req->nbytes, to_hash; 1146 int sec4_sg_bytes, src_nents, mapped_nents; 1147 struct ahash_edesc *edesc; 1148 u32 *desc; 1149 int ret = 0; 1150 1151 *next_buflen = in_len & (blocksize - 1); 1152 to_hash = in_len - *next_buflen; 1153 1154 /* 1155 * For XCBC and CMAC, if to_hash is multiple of block size, 1156 * keep last block in internal buffer 1157 */ 1158 if ((is_xcbc_aes(ctx->adata.algtype) || 1159 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 1160 (*next_buflen == 0)) { 1161 *next_buflen = blocksize; 1162 to_hash -= blocksize; 1163 } 1164 1165 if (to_hash) { 1166 int pad_nents; 1167 int src_len = req->nbytes - *next_buflen; 1168 1169 src_nents = sg_nents_for_len(req->src, src_len); 1170 if (src_nents < 0) { 1171 dev_err(jrdev, "Invalid number of src SG.\n"); 1172 return src_nents; 1173 } 1174 1175 if (src_nents) { 1176 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1177 DMA_TO_DEVICE); 1178 if (!mapped_nents) { 1179 dev_err(jrdev, "unable to DMA map source\n"); 1180 return -ENOMEM; 1181 } 1182 } else { 1183 mapped_nents = 0; 1184 } 1185 1186 pad_nents = pad_sg_nents(1 + mapped_nents); 1187 sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); 1188 1189 /* 1190 * allocate space for base edesc and hw desc commands, 1191 * link tables 1192 */ 1193 edesc = ahash_edesc_alloc(ctx, pad_nents, 1194 ctx->sh_desc_update_first, 1195 ctx->sh_desc_update_first_dma, 1196 flags); 1197 if (!edesc) { 1198 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1199 return -ENOMEM; 1200 } 1201 1202 edesc->src_nents = src_nents; 1203 edesc->sec4_sg_bytes = sec4_sg_bytes; 1204 1205 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1206 if (ret) 1207 goto unmap_ctx; 1208 1209 sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); 1210 1211 desc = edesc->hw_desc; 1212 1213 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1214 sec4_sg_bytes, 1215 DMA_TO_DEVICE); 1216 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1217 dev_err(jrdev, "unable to map S/G table\n"); 1218 ret = -ENOMEM; 1219 goto unmap_ctx; 1220 } 1221 1222 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); 1223 1224 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1225 if (ret) 1226 goto unmap_ctx; 1227 1228 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1229 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1230 desc_bytes(desc), 1); 1231 1232 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1233 if (ret) 1234 goto unmap_ctx; 1235 1236 ret = -EINPROGRESS; 1237 state->update = ahash_update_ctx; 1238 state->finup = ahash_finup_ctx; 1239 state->final = ahash_final_ctx; 1240 } else if (*next_buflen) { 1241 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 1242 req->nbytes, 0); 1243 *buflen = *next_buflen; 1244 1245 print_hex_dump_debug("buf@" __stringify(__LINE__)": ", 1246 DUMP_PREFIX_ADDRESS, 16, 4, buf, 1247 *buflen, 1); 1248 } 1249 1250 return ret; 1251 unmap_ctx: 1252 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1253 kfree(edesc); 1254 return ret; 1255 } 1256 1257 /* submit ahash finup if it the first job descriptor after update */ 1258 static int ahash_finup_no_ctx(struct ahash_request *req) 1259 { 1260 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1261 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1262 struct caam_hash_state *state = ahash_request_ctx(req); 1263 struct device *jrdev = ctx->jrdev; 1264 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1265 GFP_KERNEL : GFP_ATOMIC; 1266 int buflen = state->buflen; 1267 u32 *desc; 1268 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; 1269 int digestsize = crypto_ahash_digestsize(ahash); 1270 struct ahash_edesc *edesc; 1271 int ret; 1272 1273 src_nents = sg_nents_for_len(req->src, req->nbytes); 1274 if (src_nents < 0) { 1275 dev_err(jrdev, "Invalid number of src SG.\n"); 1276 return src_nents; 1277 } 1278 1279 if (src_nents) { 1280 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1281 DMA_TO_DEVICE); 1282 if (!mapped_nents) { 1283 dev_err(jrdev, "unable to DMA map source\n"); 1284 return -ENOMEM; 1285 } 1286 } else { 1287 mapped_nents = 0; 1288 } 1289 1290 sec4_sg_src_index = 2; 1291 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 1292 sizeof(struct sec4_sg_entry); 1293 1294 /* allocate space for base edesc and hw desc commands, link tables */ 1295 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 1296 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1297 flags); 1298 if (!edesc) { 1299 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1300 return -ENOMEM; 1301 } 1302 1303 desc = edesc->hw_desc; 1304 1305 edesc->src_nents = src_nents; 1306 edesc->sec4_sg_bytes = sec4_sg_bytes; 1307 1308 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1309 if (ret) 1310 goto unmap; 1311 1312 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, 1313 req->nbytes); 1314 if (ret) { 1315 dev_err(jrdev, "unable to map S/G table\n"); 1316 goto unmap; 1317 } 1318 1319 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1320 if (ret) 1321 goto unmap; 1322 1323 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1324 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1325 1); 1326 1327 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1328 if (!ret) { 1329 ret = -EINPROGRESS; 1330 } else { 1331 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1332 kfree(edesc); 1333 } 1334 1335 return ret; 1336 unmap: 1337 ahash_unmap(jrdev, edesc, req, digestsize); 1338 kfree(edesc); 1339 return -ENOMEM; 1340 1341 } 1342 1343 /* submit first update job descriptor after init */ 1344 static int ahash_update_first(struct ahash_request *req) 1345 { 1346 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1347 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1348 struct caam_hash_state *state = ahash_request_ctx(req); 1349 struct device *jrdev = ctx->jrdev; 1350 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1351 GFP_KERNEL : GFP_ATOMIC; 1352 u8 *buf = state->buf; 1353 int *buflen = &state->buflen; 1354 int *next_buflen = &state->next_buflen; 1355 int to_hash; 1356 int blocksize = crypto_ahash_blocksize(ahash); 1357 u32 *desc; 1358 int src_nents, mapped_nents; 1359 struct ahash_edesc *edesc; 1360 int ret = 0; 1361 1362 *next_buflen = req->nbytes & (blocksize - 1); 1363 to_hash = req->nbytes - *next_buflen; 1364 1365 /* 1366 * For XCBC and CMAC, if to_hash is multiple of block size, 1367 * keep last block in internal buffer 1368 */ 1369 if ((is_xcbc_aes(ctx->adata.algtype) || 1370 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 1371 (*next_buflen == 0)) { 1372 *next_buflen = blocksize; 1373 to_hash -= blocksize; 1374 } 1375 1376 if (to_hash) { 1377 src_nents = sg_nents_for_len(req->src, 1378 req->nbytes - *next_buflen); 1379 if (src_nents < 0) { 1380 dev_err(jrdev, "Invalid number of src SG.\n"); 1381 return src_nents; 1382 } 1383 1384 if (src_nents) { 1385 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1386 DMA_TO_DEVICE); 1387 if (!mapped_nents) { 1388 dev_err(jrdev, "unable to map source for DMA\n"); 1389 return -ENOMEM; 1390 } 1391 } else { 1392 mapped_nents = 0; 1393 } 1394 1395 /* 1396 * allocate space for base edesc and hw desc commands, 1397 * link tables 1398 */ 1399 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 1400 mapped_nents : 0, 1401 ctx->sh_desc_update_first, 1402 ctx->sh_desc_update_first_dma, 1403 flags); 1404 if (!edesc) { 1405 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1406 return -ENOMEM; 1407 } 1408 1409 edesc->src_nents = src_nents; 1410 1411 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1412 to_hash); 1413 if (ret) 1414 goto unmap_ctx; 1415 1416 desc = edesc->hw_desc; 1417 1418 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1419 if (ret) 1420 goto unmap_ctx; 1421 1422 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", 1423 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1424 desc_bytes(desc), 1); 1425 1426 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1427 if (ret) 1428 goto unmap_ctx; 1429 1430 ret = -EINPROGRESS; 1431 state->update = ahash_update_ctx; 1432 state->finup = ahash_finup_ctx; 1433 state->final = ahash_final_ctx; 1434 } else if (*next_buflen) { 1435 state->update = ahash_update_no_ctx; 1436 state->finup = ahash_finup_no_ctx; 1437 state->final = ahash_final_no_ctx; 1438 scatterwalk_map_and_copy(buf, req->src, 0, 1439 req->nbytes, 0); 1440 *buflen = *next_buflen; 1441 1442 print_hex_dump_debug("buf@" __stringify(__LINE__)": ", 1443 DUMP_PREFIX_ADDRESS, 16, 4, buf, 1444 *buflen, 1); 1445 } 1446 1447 return ret; 1448 unmap_ctx: 1449 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1450 kfree(edesc); 1451 return ret; 1452 } 1453 1454 static int ahash_finup_first(struct ahash_request *req) 1455 { 1456 return ahash_digest(req); 1457 } 1458 1459 static int ahash_init(struct ahash_request *req) 1460 { 1461 struct caam_hash_state *state = ahash_request_ctx(req); 1462 1463 state->update = ahash_update_first; 1464 state->finup = ahash_finup_first; 1465 state->final = ahash_final_no_ctx; 1466 1467 state->ctx_dma = 0; 1468 state->ctx_dma_len = 0; 1469 state->buf_dma = 0; 1470 state->buflen = 0; 1471 state->next_buflen = 0; 1472 1473 return 0; 1474 } 1475 1476 static int ahash_update(struct ahash_request *req) 1477 { 1478 struct caam_hash_state *state = ahash_request_ctx(req); 1479 1480 return state->update(req); 1481 } 1482 1483 static int ahash_finup(struct ahash_request *req) 1484 { 1485 struct caam_hash_state *state = ahash_request_ctx(req); 1486 1487 return state->finup(req); 1488 } 1489 1490 static int ahash_final(struct ahash_request *req) 1491 { 1492 struct caam_hash_state *state = ahash_request_ctx(req); 1493 1494 return state->final(req); 1495 } 1496 1497 static int ahash_export(struct ahash_request *req, void *out) 1498 { 1499 struct caam_hash_state *state = ahash_request_ctx(req); 1500 struct caam_export_state *export = out; 1501 u8 *buf = state->buf; 1502 int len = state->buflen; 1503 1504 memcpy(export->buf, buf, len); 1505 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); 1506 export->buflen = len; 1507 export->update = state->update; 1508 export->final = state->final; 1509 export->finup = state->finup; 1510 1511 return 0; 1512 } 1513 1514 static int ahash_import(struct ahash_request *req, const void *in) 1515 { 1516 struct caam_hash_state *state = ahash_request_ctx(req); 1517 const struct caam_export_state *export = in; 1518 1519 memset(state, 0, sizeof(*state)); 1520 memcpy(state->buf, export->buf, export->buflen); 1521 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); 1522 state->buflen = export->buflen; 1523 state->update = export->update; 1524 state->final = export->final; 1525 state->finup = export->finup; 1526 1527 return 0; 1528 } 1529 1530 struct caam_hash_template { 1531 char name[CRYPTO_MAX_ALG_NAME]; 1532 char driver_name[CRYPTO_MAX_ALG_NAME]; 1533 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1534 char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1535 unsigned int blocksize; 1536 struct ahash_alg template_ahash; 1537 u32 alg_type; 1538 }; 1539 1540 /* ahash descriptors */ 1541 static struct caam_hash_template driver_hash[] = { 1542 { 1543 .name = "sha1", 1544 .driver_name = "sha1-caam", 1545 .hmac_name = "hmac(sha1)", 1546 .hmac_driver_name = "hmac-sha1-caam", 1547 .blocksize = SHA1_BLOCK_SIZE, 1548 .template_ahash = { 1549 .init = ahash_init, 1550 .update = ahash_update, 1551 .final = ahash_final, 1552 .finup = ahash_finup, 1553 .digest = ahash_digest, 1554 .export = ahash_export, 1555 .import = ahash_import, 1556 .setkey = ahash_setkey, 1557 .halg = { 1558 .digestsize = SHA1_DIGEST_SIZE, 1559 .statesize = sizeof(struct caam_export_state), 1560 }, 1561 }, 1562 .alg_type = OP_ALG_ALGSEL_SHA1, 1563 }, { 1564 .name = "sha224", 1565 .driver_name = "sha224-caam", 1566 .hmac_name = "hmac(sha224)", 1567 .hmac_driver_name = "hmac-sha224-caam", 1568 .blocksize = SHA224_BLOCK_SIZE, 1569 .template_ahash = { 1570 .init = ahash_init, 1571 .update = ahash_update, 1572 .final = ahash_final, 1573 .finup = ahash_finup, 1574 .digest = ahash_digest, 1575 .export = ahash_export, 1576 .import = ahash_import, 1577 .setkey = ahash_setkey, 1578 .halg = { 1579 .digestsize = SHA224_DIGEST_SIZE, 1580 .statesize = sizeof(struct caam_export_state), 1581 }, 1582 }, 1583 .alg_type = OP_ALG_ALGSEL_SHA224, 1584 }, { 1585 .name = "sha256", 1586 .driver_name = "sha256-caam", 1587 .hmac_name = "hmac(sha256)", 1588 .hmac_driver_name = "hmac-sha256-caam", 1589 .blocksize = SHA256_BLOCK_SIZE, 1590 .template_ahash = { 1591 .init = ahash_init, 1592 .update = ahash_update, 1593 .final = ahash_final, 1594 .finup = ahash_finup, 1595 .digest = ahash_digest, 1596 .export = ahash_export, 1597 .import = ahash_import, 1598 .setkey = ahash_setkey, 1599 .halg = { 1600 .digestsize = SHA256_DIGEST_SIZE, 1601 .statesize = sizeof(struct caam_export_state), 1602 }, 1603 }, 1604 .alg_type = OP_ALG_ALGSEL_SHA256, 1605 }, { 1606 .name = "sha384", 1607 .driver_name = "sha384-caam", 1608 .hmac_name = "hmac(sha384)", 1609 .hmac_driver_name = "hmac-sha384-caam", 1610 .blocksize = SHA384_BLOCK_SIZE, 1611 .template_ahash = { 1612 .init = ahash_init, 1613 .update = ahash_update, 1614 .final = ahash_final, 1615 .finup = ahash_finup, 1616 .digest = ahash_digest, 1617 .export = ahash_export, 1618 .import = ahash_import, 1619 .setkey = ahash_setkey, 1620 .halg = { 1621 .digestsize = SHA384_DIGEST_SIZE, 1622 .statesize = sizeof(struct caam_export_state), 1623 }, 1624 }, 1625 .alg_type = OP_ALG_ALGSEL_SHA384, 1626 }, { 1627 .name = "sha512", 1628 .driver_name = "sha512-caam", 1629 .hmac_name = "hmac(sha512)", 1630 .hmac_driver_name = "hmac-sha512-caam", 1631 .blocksize = SHA512_BLOCK_SIZE, 1632 .template_ahash = { 1633 .init = ahash_init, 1634 .update = ahash_update, 1635 .final = ahash_final, 1636 .finup = ahash_finup, 1637 .digest = ahash_digest, 1638 .export = ahash_export, 1639 .import = ahash_import, 1640 .setkey = ahash_setkey, 1641 .halg = { 1642 .digestsize = SHA512_DIGEST_SIZE, 1643 .statesize = sizeof(struct caam_export_state), 1644 }, 1645 }, 1646 .alg_type = OP_ALG_ALGSEL_SHA512, 1647 }, { 1648 .name = "md5", 1649 .driver_name = "md5-caam", 1650 .hmac_name = "hmac(md5)", 1651 .hmac_driver_name = "hmac-md5-caam", 1652 .blocksize = MD5_BLOCK_WORDS * 4, 1653 .template_ahash = { 1654 .init = ahash_init, 1655 .update = ahash_update, 1656 .final = ahash_final, 1657 .finup = ahash_finup, 1658 .digest = ahash_digest, 1659 .export = ahash_export, 1660 .import = ahash_import, 1661 .setkey = ahash_setkey, 1662 .halg = { 1663 .digestsize = MD5_DIGEST_SIZE, 1664 .statesize = sizeof(struct caam_export_state), 1665 }, 1666 }, 1667 .alg_type = OP_ALG_ALGSEL_MD5, 1668 }, { 1669 .hmac_name = "xcbc(aes)", 1670 .hmac_driver_name = "xcbc-aes-caam", 1671 .blocksize = AES_BLOCK_SIZE, 1672 .template_ahash = { 1673 .init = ahash_init, 1674 .update = ahash_update, 1675 .final = ahash_final, 1676 .finup = ahash_finup, 1677 .digest = ahash_digest, 1678 .export = ahash_export, 1679 .import = ahash_import, 1680 .setkey = axcbc_setkey, 1681 .halg = { 1682 .digestsize = AES_BLOCK_SIZE, 1683 .statesize = sizeof(struct caam_export_state), 1684 }, 1685 }, 1686 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, 1687 }, { 1688 .hmac_name = "cmac(aes)", 1689 .hmac_driver_name = "cmac-aes-caam", 1690 .blocksize = AES_BLOCK_SIZE, 1691 .template_ahash = { 1692 .init = ahash_init, 1693 .update = ahash_update, 1694 .final = ahash_final, 1695 .finup = ahash_finup, 1696 .digest = ahash_digest, 1697 .export = ahash_export, 1698 .import = ahash_import, 1699 .setkey = acmac_setkey, 1700 .halg = { 1701 .digestsize = AES_BLOCK_SIZE, 1702 .statesize = sizeof(struct caam_export_state), 1703 }, 1704 }, 1705 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, 1706 }, 1707 }; 1708 1709 struct caam_hash_alg { 1710 struct list_head entry; 1711 int alg_type; 1712 struct ahash_alg ahash_alg; 1713 }; 1714 1715 static int caam_hash_cra_init(struct crypto_tfm *tfm) 1716 { 1717 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 1718 struct crypto_alg *base = tfm->__crt_alg; 1719 struct hash_alg_common *halg = 1720 container_of(base, struct hash_alg_common, base); 1721 struct ahash_alg *alg = 1722 container_of(halg, struct ahash_alg, halg); 1723 struct caam_hash_alg *caam_hash = 1724 container_of(alg, struct caam_hash_alg, ahash_alg); 1725 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1726 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1727 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1728 HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1729 HASH_MSG_LEN + 32, 1730 HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1731 HASH_MSG_LEN + 64, 1732 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1733 dma_addr_t dma_addr; 1734 struct caam_drv_private *priv; 1735 1736 /* 1737 * Get a Job ring from Job Ring driver to ensure in-order 1738 * crypto request processing per tfm 1739 */ 1740 ctx->jrdev = caam_jr_alloc(); 1741 if (IS_ERR(ctx->jrdev)) { 1742 pr_err("Job Ring Device allocation for transform failed\n"); 1743 return PTR_ERR(ctx->jrdev); 1744 } 1745 1746 priv = dev_get_drvdata(ctx->jrdev->parent); 1747 1748 if (is_xcbc_aes(caam_hash->alg_type)) { 1749 ctx->dir = DMA_TO_DEVICE; 1750 ctx->key_dir = DMA_BIDIRECTIONAL; 1751 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 1752 ctx->ctx_len = 48; 1753 } else if (is_cmac_aes(caam_hash->alg_type)) { 1754 ctx->dir = DMA_TO_DEVICE; 1755 ctx->key_dir = DMA_NONE; 1756 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 1757 ctx->ctx_len = 32; 1758 } else { 1759 if (priv->era >= 6) { 1760 ctx->dir = DMA_BIDIRECTIONAL; 1761 ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE; 1762 } else { 1763 ctx->dir = DMA_TO_DEVICE; 1764 ctx->key_dir = DMA_NONE; 1765 } 1766 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1767 ctx->ctx_len = runninglen[(ctx->adata.algtype & 1768 OP_ALG_ALGSEL_SUBMASK) >> 1769 OP_ALG_ALGSEL_SHIFT]; 1770 } 1771 1772 if (ctx->key_dir != DMA_NONE) { 1773 ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, 1774 ARRAY_SIZE(ctx->key), 1775 ctx->key_dir, 1776 DMA_ATTR_SKIP_CPU_SYNC); 1777 if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) { 1778 dev_err(ctx->jrdev, "unable to map key\n"); 1779 caam_jr_free(ctx->jrdev); 1780 return -ENOMEM; 1781 } 1782 } 1783 1784 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, 1785 offsetof(struct caam_hash_ctx, key), 1786 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1787 if (dma_mapping_error(ctx->jrdev, dma_addr)) { 1788 dev_err(ctx->jrdev, "unable to map shared descriptors\n"); 1789 1790 if (ctx->key_dir != DMA_NONE) 1791 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, 1792 ARRAY_SIZE(ctx->key), 1793 ctx->key_dir, 1794 DMA_ATTR_SKIP_CPU_SYNC); 1795 1796 caam_jr_free(ctx->jrdev); 1797 return -ENOMEM; 1798 } 1799 1800 ctx->sh_desc_update_dma = dma_addr; 1801 ctx->sh_desc_update_first_dma = dma_addr + 1802 offsetof(struct caam_hash_ctx, 1803 sh_desc_update_first); 1804 ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, 1805 sh_desc_fin); 1806 ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, 1807 sh_desc_digest); 1808 1809 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1810 sizeof(struct caam_hash_state)); 1811 1812 /* 1813 * For keyed hash algorithms shared descriptors 1814 * will be created later in setkey() callback 1815 */ 1816 return alg->setkey ? 0 : ahash_set_sh_desc(ahash); 1817 } 1818 1819 static void caam_hash_cra_exit(struct crypto_tfm *tfm) 1820 { 1821 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1822 1823 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, 1824 offsetof(struct caam_hash_ctx, key), 1825 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1826 if (ctx->key_dir != DMA_NONE) 1827 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, 1828 ARRAY_SIZE(ctx->key), ctx->key_dir, 1829 DMA_ATTR_SKIP_CPU_SYNC); 1830 caam_jr_free(ctx->jrdev); 1831 } 1832 1833 void caam_algapi_hash_exit(void) 1834 { 1835 struct caam_hash_alg *t_alg, *n; 1836 1837 if (!hash_list.next) 1838 return; 1839 1840 list_for_each_entry_safe(t_alg, n, &hash_list, entry) { 1841 crypto_unregister_ahash(&t_alg->ahash_alg); 1842 list_del(&t_alg->entry); 1843 kfree(t_alg); 1844 } 1845 } 1846 1847 static struct caam_hash_alg * 1848 caam_hash_alloc(struct caam_hash_template *template, 1849 bool keyed) 1850 { 1851 struct caam_hash_alg *t_alg; 1852 struct ahash_alg *halg; 1853 struct crypto_alg *alg; 1854 1855 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 1856 if (!t_alg) { 1857 pr_err("failed to allocate t_alg\n"); 1858 return ERR_PTR(-ENOMEM); 1859 } 1860 1861 t_alg->ahash_alg = template->template_ahash; 1862 halg = &t_alg->ahash_alg; 1863 alg = &halg->halg.base; 1864 1865 if (keyed) { 1866 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1867 template->hmac_name); 1868 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1869 template->hmac_driver_name); 1870 } else { 1871 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1872 template->name); 1873 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1874 template->driver_name); 1875 t_alg->ahash_alg.setkey = NULL; 1876 } 1877 alg->cra_module = THIS_MODULE; 1878 alg->cra_init = caam_hash_cra_init; 1879 alg->cra_exit = caam_hash_cra_exit; 1880 alg->cra_ctxsize = sizeof(struct caam_hash_ctx); 1881 alg->cra_priority = CAAM_CRA_PRIORITY; 1882 alg->cra_blocksize = template->blocksize; 1883 alg->cra_alignmask = 0; 1884 alg->cra_flags = CRYPTO_ALG_ASYNC; 1885 1886 t_alg->alg_type = template->alg_type; 1887 1888 return t_alg; 1889 } 1890 1891 int caam_algapi_hash_init(struct device *ctrldev) 1892 { 1893 int i = 0, err = 0; 1894 struct caam_drv_private *priv = dev_get_drvdata(ctrldev); 1895 unsigned int md_limit = SHA512_DIGEST_SIZE; 1896 u32 md_inst, md_vid; 1897 1898 /* 1899 * Register crypto algorithms the device supports. First, identify 1900 * presence and attributes of MD block. 1901 */ 1902 if (priv->era < 10) { 1903 md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) & 1904 CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 1905 md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & 1906 CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 1907 } else { 1908 u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha); 1909 1910 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 1911 md_inst = mdha & CHA_VER_NUM_MASK; 1912 } 1913 1914 /* 1915 * Skip registration of any hashing algorithms if MD block 1916 * is not present. 1917 */ 1918 if (!md_inst) 1919 return 0; 1920 1921 /* Limit digest size based on LP256 */ 1922 if (md_vid == CHA_VER_VID_MD_LP256) 1923 md_limit = SHA256_DIGEST_SIZE; 1924 1925 INIT_LIST_HEAD(&hash_list); 1926 1927 /* register crypto algorithms the device supports */ 1928 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 1929 struct caam_hash_alg *t_alg; 1930 struct caam_hash_template *alg = driver_hash + i; 1931 1932 /* If MD size is not supported by device, skip registration */ 1933 if (is_mdha(alg->alg_type) && 1934 alg->template_ahash.halg.digestsize > md_limit) 1935 continue; 1936 1937 /* register hmac version */ 1938 t_alg = caam_hash_alloc(alg, true); 1939 if (IS_ERR(t_alg)) { 1940 err = PTR_ERR(t_alg); 1941 pr_warn("%s alg allocation failed\n", 1942 alg->hmac_driver_name); 1943 continue; 1944 } 1945 1946 err = crypto_register_ahash(&t_alg->ahash_alg); 1947 if (err) { 1948 pr_warn("%s alg registration failed: %d\n", 1949 t_alg->ahash_alg.halg.base.cra_driver_name, 1950 err); 1951 kfree(t_alg); 1952 } else 1953 list_add_tail(&t_alg->entry, &hash_list); 1954 1955 if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES) 1956 continue; 1957 1958 /* register unkeyed version */ 1959 t_alg = caam_hash_alloc(alg, false); 1960 if (IS_ERR(t_alg)) { 1961 err = PTR_ERR(t_alg); 1962 pr_warn("%s alg allocation failed\n", alg->driver_name); 1963 continue; 1964 } 1965 1966 err = crypto_register_ahash(&t_alg->ahash_alg); 1967 if (err) { 1968 pr_warn("%s alg registration failed: %d\n", 1969 t_alg->ahash_alg.halg.base.cra_driver_name, 1970 err); 1971 kfree(t_alg); 1972 } else 1973 list_add_tail(&t_alg->entry, &hash_list); 1974 } 1975 1976 return err; 1977 } 1978