// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
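
/*
 * Illustrative sketch, not part of the driver: the job descriptor layout
 * above maps onto the desc_constr.h helpers used throughout this file when
 * per-request descriptors are built. The DMA addresses and lengths
 * (sh_desc_dma, sh_desc_len, dst_dma, dst_len, src_dma, src_len) are assumed
 * placeholder variables, roughly:
 *
 *	u32 desc[DESC_JOB_IO_LEN / sizeof(u32)];
 *
 *	// Header + pointer to the shared descriptor (hashKey, operation)
 *	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	// SEQ_OUT_PTR: output buffer and length
 *	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *	// SEQ_IN_PTR: input buffer and length
 *	append_seq_in_ptr(desc, src_dma, src_len, 0);
 *
 * The exact order and options (e.g. LDST_SGF when the input is a sec4
 * scatter/gather table) vary per request type; see ahash_digest() and the
 * other request handlers below.
 */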
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif


static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ?
&state->buflen_0 : &state->buflen_1; 159 } 160 161 /* Common job descriptor seq in/out ptr routines */ 162 163 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ 164 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, 165 struct caam_hash_state *state, 166 int ctx_len) 167 { 168 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, 169 ctx_len, DMA_FROM_DEVICE); 170 if (dma_mapping_error(jrdev, state->ctx_dma)) { 171 dev_err(jrdev, "unable to map ctx\n"); 172 state->ctx_dma = 0; 173 return -ENOMEM; 174 } 175 176 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); 177 178 return 0; 179 } 180 181 /* Map req->result, and append seq_out_ptr command that points to it */ 182 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, 183 u8 *result, int digestsize) 184 { 185 dma_addr_t dst_dma; 186 187 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); 188 append_seq_out_ptr(desc, dst_dma, digestsize, 0); 189 190 return dst_dma; 191 } 192 193 /* Map current buffer in state (if length > 0) and put it in link table */ 194 static inline int buf_map_to_sec4_sg(struct device *jrdev, 195 struct sec4_sg_entry *sec4_sg, 196 struct caam_hash_state *state) 197 { 198 int buflen = *current_buflen(state); 199 200 if (!buflen) 201 return 0; 202 203 state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen, 204 DMA_TO_DEVICE); 205 if (dma_mapping_error(jrdev, state->buf_dma)) { 206 dev_err(jrdev, "unable to map buf\n"); 207 state->buf_dma = 0; 208 return -ENOMEM; 209 } 210 211 dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0); 212 213 return 0; 214 } 215 216 /* Map state->caam_ctx, and add it to link table */ 217 static inline int ctx_map_to_sec4_sg(struct device *jrdev, 218 struct caam_hash_state *state, int ctx_len, 219 struct sec4_sg_entry *sec4_sg, u32 flag) 220 { 221 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); 222 if (dma_mapping_error(jrdev, state->ctx_dma)) { 223 dev_err(jrdev, "unable to map ctx\n"); 224 state->ctx_dma = 0; 225 return -ENOMEM; 226 } 227 228 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); 229 230 return 0; 231 } 232 233 static int ahash_set_sh_desc(struct crypto_ahash *ahash) 234 { 235 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 236 int digestsize = crypto_ahash_digestsize(ahash); 237 struct device *jrdev = ctx->jrdev; 238 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 239 u32 *desc; 240 241 ctx->adata.key_virt = ctx->key; 242 243 /* ahash_update shared descriptor */ 244 desc = ctx->sh_desc_update; 245 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, 246 ctx->ctx_len, true, ctrlpriv->era); 247 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 248 desc_bytes(desc), ctx->dir); 249 #ifdef DEBUG 250 print_hex_dump(KERN_ERR, 251 "ahash update shdesc@"__stringify(__LINE__)": ", 252 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 253 #endif 254 255 /* ahash_update_first shared descriptor */ 256 desc = ctx->sh_desc_update_first; 257 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 258 ctx->ctx_len, false, ctrlpriv->era); 259 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 260 desc_bytes(desc), ctx->dir); 261 #ifdef DEBUG 262 print_hex_dump(KERN_ERR, 263 "ahash update first shdesc@"__stringify(__LINE__)": ", 264 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 265 #endif 266 267 /* ahash_final shared descriptor */ 268 desc = ctx->sh_desc_fin; 269 
cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, 270 ctx->ctx_len, true, ctrlpriv->era); 271 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 272 desc_bytes(desc), ctx->dir); 273 #ifdef DEBUG 274 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 275 DUMP_PREFIX_ADDRESS, 16, 4, desc, 276 desc_bytes(desc), 1); 277 #endif 278 279 /* ahash_digest shared descriptor */ 280 desc = ctx->sh_desc_digest; 281 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, 282 ctx->ctx_len, false, ctrlpriv->era); 283 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 284 desc_bytes(desc), ctx->dir); 285 #ifdef DEBUG 286 print_hex_dump(KERN_ERR, 287 "ahash digest shdesc@"__stringify(__LINE__)": ", 288 DUMP_PREFIX_ADDRESS, 16, 4, desc, 289 desc_bytes(desc), 1); 290 #endif 291 292 return 0; 293 } 294 295 /* Digest hash size if it is too large */ 296 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, 297 u32 *keylen, u8 *key_out, u32 digestsize) 298 { 299 struct device *jrdev = ctx->jrdev; 300 u32 *desc; 301 struct split_key_result result; 302 dma_addr_t src_dma, dst_dma; 303 int ret; 304 305 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 306 if (!desc) { 307 dev_err(jrdev, "unable to allocate key input memory\n"); 308 return -ENOMEM; 309 } 310 311 init_job_desc(desc, 0); 312 313 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, 314 DMA_TO_DEVICE); 315 if (dma_mapping_error(jrdev, src_dma)) { 316 dev_err(jrdev, "unable to map key input memory\n"); 317 kfree(desc); 318 return -ENOMEM; 319 } 320 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, 321 DMA_FROM_DEVICE); 322 if (dma_mapping_error(jrdev, dst_dma)) { 323 dev_err(jrdev, "unable to map key output memory\n"); 324 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 325 kfree(desc); 326 return -ENOMEM; 327 } 328 329 /* Job descriptor to perform unkeyed hash on key_in */ 330 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | 331 OP_ALG_AS_INITFINAL); 332 append_seq_in_ptr(desc, src_dma, *keylen, 0); 333 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | 334 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); 335 append_seq_out_ptr(desc, dst_dma, digestsize, 0); 336 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 337 LDST_SRCDST_BYTE_CONTEXT); 338 339 #ifdef DEBUG 340 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", 341 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); 342 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 343 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 344 #endif 345 346 result.err = 0; 347 init_completion(&result.completion); 348 349 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); 350 if (!ret) { 351 /* in progress */ 352 wait_for_completion(&result.completion); 353 ret = result.err; 354 #ifdef DEBUG 355 print_hex_dump(KERN_ERR, 356 "digested key@"__stringify(__LINE__)": ", 357 DUMP_PREFIX_ADDRESS, 16, 4, key_in, 358 digestsize, 1); 359 #endif 360 } 361 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 362 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); 363 364 *keylen = digestsize; 365 366 kfree(desc); 367 368 return ret; 369 } 370 371 static int ahash_setkey(struct crypto_ahash *ahash, 372 const u8 *key, unsigned int keylen) 373 { 374 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 375 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 376 int digestsize = crypto_ahash_digestsize(ahash); 377 struct 
caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 378 int ret; 379 u8 *hashed_key = NULL; 380 381 #ifdef DEBUG 382 printk(KERN_ERR "keylen %d\n", keylen); 383 #endif 384 385 if (keylen > blocksize) { 386 hashed_key = kmalloc_array(digestsize, 387 sizeof(*hashed_key), 388 GFP_KERNEL | GFP_DMA); 389 if (!hashed_key) 390 return -ENOMEM; 391 ret = hash_digest_key(ctx, key, &keylen, hashed_key, 392 digestsize); 393 if (ret) 394 goto bad_free_key; 395 key = hashed_key; 396 } 397 398 /* 399 * If DKP is supported, use it in the shared descriptor to generate 400 * the split key. 401 */ 402 if (ctrlpriv->era >= 6) { 403 ctx->adata.key_inline = true; 404 ctx->adata.keylen = keylen; 405 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 406 OP_ALG_ALGSEL_MASK); 407 408 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) 409 goto bad_free_key; 410 411 memcpy(ctx->key, key, keylen); 412 } else { 413 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, 414 keylen, CAAM_MAX_HASH_KEY_SIZE); 415 if (ret) 416 goto bad_free_key; 417 } 418 419 kfree(hashed_key); 420 return ahash_set_sh_desc(ahash); 421 bad_free_key: 422 kfree(hashed_key); 423 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); 424 return -EINVAL; 425 } 426 427 /* 428 * ahash_edesc - s/w-extended ahash descriptor 429 * @dst_dma: physical mapped address of req->result 430 * @sec4_sg_dma: physical mapped address of h/w link table 431 * @src_nents: number of segments in input scatterlist 432 * @sec4_sg_bytes: length of dma mapped sec4_sg space 433 * @hw_desc: the h/w job descriptor followed by any referenced link tables 434 * @sec4_sg: h/w link table 435 */ 436 struct ahash_edesc { 437 dma_addr_t dst_dma; 438 dma_addr_t sec4_sg_dma; 439 int src_nents; 440 int sec4_sg_bytes; 441 u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; 442 struct sec4_sg_entry sec4_sg[0]; 443 }; 444 445 static inline void ahash_unmap(struct device *dev, 446 struct ahash_edesc *edesc, 447 struct ahash_request *req, int dst_len) 448 { 449 struct caam_hash_state *state = ahash_request_ctx(req); 450 451 if (edesc->src_nents) 452 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 453 if (edesc->dst_dma) 454 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); 455 456 if (edesc->sec4_sg_bytes) 457 dma_unmap_single(dev, edesc->sec4_sg_dma, 458 edesc->sec4_sg_bytes, DMA_TO_DEVICE); 459 460 if (state->buf_dma) { 461 dma_unmap_single(dev, state->buf_dma, *current_buflen(state), 462 DMA_TO_DEVICE); 463 state->buf_dma = 0; 464 } 465 } 466 467 static inline void ahash_unmap_ctx(struct device *dev, 468 struct ahash_edesc *edesc, 469 struct ahash_request *req, int dst_len, u32 flag) 470 { 471 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 472 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 473 struct caam_hash_state *state = ahash_request_ctx(req); 474 475 if (state->ctx_dma) { 476 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); 477 state->ctx_dma = 0; 478 } 479 ahash_unmap(dev, edesc, req, dst_len); 480 } 481 482 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, 483 void *context) 484 { 485 struct ahash_request *req = context; 486 struct ahash_edesc *edesc; 487 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 488 int digestsize = crypto_ahash_digestsize(ahash); 489 #ifdef DEBUG 490 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 491 struct caam_hash_state *state = ahash_request_ctx(req); 492 493 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 494 
#endif 495 496 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 497 if (err) 498 caam_jr_strstatus(jrdev, err); 499 500 ahash_unmap(jrdev, edesc, req, digestsize); 501 kfree(edesc); 502 503 #ifdef DEBUG 504 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 505 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 506 ctx->ctx_len, 1); 507 if (req->result) 508 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 509 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 510 digestsize, 1); 511 #endif 512 513 req->base.complete(&req->base, err); 514 } 515 516 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, 517 void *context) 518 { 519 struct ahash_request *req = context; 520 struct ahash_edesc *edesc; 521 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 522 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 523 struct caam_hash_state *state = ahash_request_ctx(req); 524 #ifdef DEBUG 525 int digestsize = crypto_ahash_digestsize(ahash); 526 527 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 528 #endif 529 530 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 531 if (err) 532 caam_jr_strstatus(jrdev, err); 533 534 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 535 switch_buf(state); 536 kfree(edesc); 537 538 #ifdef DEBUG 539 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 540 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 541 ctx->ctx_len, 1); 542 if (req->result) 543 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 544 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 545 digestsize, 1); 546 #endif 547 548 req->base.complete(&req->base, err); 549 } 550 551 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, 552 void *context) 553 { 554 struct ahash_request *req = context; 555 struct ahash_edesc *edesc; 556 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 557 int digestsize = crypto_ahash_digestsize(ahash); 558 #ifdef DEBUG 559 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 560 struct caam_hash_state *state = ahash_request_ctx(req); 561 562 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 563 #endif 564 565 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 566 if (err) 567 caam_jr_strstatus(jrdev, err); 568 569 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); 570 kfree(edesc); 571 572 #ifdef DEBUG 573 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 574 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 575 ctx->ctx_len, 1); 576 if (req->result) 577 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 578 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 579 digestsize, 1); 580 #endif 581 582 req->base.complete(&req->base, err); 583 } 584 585 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, 586 void *context) 587 { 588 struct ahash_request *req = context; 589 struct ahash_edesc *edesc; 590 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 591 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 592 struct caam_hash_state *state = ahash_request_ctx(req); 593 #ifdef DEBUG 594 int digestsize = crypto_ahash_digestsize(ahash); 595 596 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 597 #endif 598 599 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 600 if (err) 601 caam_jr_strstatus(jrdev, err); 602 603 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); 604 switch_buf(state); 605 kfree(edesc); 606 607 #ifdef DEBUG 608 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": 
", 609 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 610 ctx->ctx_len, 1); 611 if (req->result) 612 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 613 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 614 digestsize, 1); 615 #endif 616 617 req->base.complete(&req->base, err); 618 } 619 620 /* 621 * Allocate an enhanced descriptor, which contains the hardware descriptor 622 * and space for hardware scatter table containing sg_num entries. 623 */ 624 static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, 625 int sg_num, u32 *sh_desc, 626 dma_addr_t sh_desc_dma, 627 gfp_t flags) 628 { 629 struct ahash_edesc *edesc; 630 unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); 631 632 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); 633 if (!edesc) { 634 dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); 635 return NULL; 636 } 637 638 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), 639 HDR_SHARE_DEFER | HDR_REVERSE); 640 641 return edesc; 642 } 643 644 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, 645 struct ahash_edesc *edesc, 646 struct ahash_request *req, int nents, 647 unsigned int first_sg, 648 unsigned int first_bytes, size_t to_hash) 649 { 650 dma_addr_t src_dma; 651 u32 options; 652 653 if (nents > 1 || first_sg) { 654 struct sec4_sg_entry *sg = edesc->sec4_sg; 655 unsigned int sgsize = sizeof(*sg) * (first_sg + nents); 656 657 sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); 658 659 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); 660 if (dma_mapping_error(ctx->jrdev, src_dma)) { 661 dev_err(ctx->jrdev, "unable to map S/G table\n"); 662 return -ENOMEM; 663 } 664 665 edesc->sec4_sg_bytes = sgsize; 666 edesc->sec4_sg_dma = src_dma; 667 options = LDST_SGF; 668 } else { 669 src_dma = sg_dma_address(req->src); 670 options = 0; 671 } 672 673 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, 674 options); 675 676 return 0; 677 } 678 679 /* submit update job descriptor */ 680 static int ahash_update_ctx(struct ahash_request *req) 681 { 682 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 683 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 684 struct caam_hash_state *state = ahash_request_ctx(req); 685 struct device *jrdev = ctx->jrdev; 686 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 687 GFP_KERNEL : GFP_ATOMIC; 688 u8 *buf = current_buf(state); 689 int *buflen = current_buflen(state); 690 u8 *next_buf = alt_buf(state); 691 int *next_buflen = alt_buflen(state), last_buflen; 692 int in_len = *buflen + req->nbytes, to_hash; 693 u32 *desc; 694 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; 695 struct ahash_edesc *edesc; 696 int ret = 0; 697 698 last_buflen = *next_buflen; 699 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); 700 to_hash = in_len - *next_buflen; 701 702 if (to_hash) { 703 src_nents = sg_nents_for_len(req->src, 704 req->nbytes - (*next_buflen)); 705 if (src_nents < 0) { 706 dev_err(jrdev, "Invalid number of src SG.\n"); 707 return src_nents; 708 } 709 710 if (src_nents) { 711 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 712 DMA_TO_DEVICE); 713 if (!mapped_nents) { 714 dev_err(jrdev, "unable to DMA map source\n"); 715 return -ENOMEM; 716 } 717 } else { 718 mapped_nents = 0; 719 } 720 721 sec4_sg_src_index = 1 + (*buflen ? 
1 : 0); 722 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 723 sizeof(struct sec4_sg_entry); 724 725 /* 726 * allocate space for base edesc and hw desc commands, 727 * link tables 728 */ 729 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 730 ctx->sh_desc_update, 731 ctx->sh_desc_update_dma, flags); 732 if (!edesc) { 733 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 734 return -ENOMEM; 735 } 736 737 edesc->src_nents = src_nents; 738 edesc->sec4_sg_bytes = sec4_sg_bytes; 739 740 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 741 edesc->sec4_sg, DMA_BIDIRECTIONAL); 742 if (ret) 743 goto unmap_ctx; 744 745 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 746 if (ret) 747 goto unmap_ctx; 748 749 if (mapped_nents) { 750 sg_to_sec4_sg_last(req->src, mapped_nents, 751 edesc->sec4_sg + sec4_sg_src_index, 752 0); 753 if (*next_buflen) 754 scatterwalk_map_and_copy(next_buf, req->src, 755 to_hash - *buflen, 756 *next_buflen, 0); 757 } else { 758 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 759 1); 760 } 761 762 desc = edesc->hw_desc; 763 764 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 765 sec4_sg_bytes, 766 DMA_TO_DEVICE); 767 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 768 dev_err(jrdev, "unable to map S/G table\n"); 769 ret = -ENOMEM; 770 goto unmap_ctx; 771 } 772 773 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + 774 to_hash, LDST_SGF); 775 776 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 777 778 #ifdef DEBUG 779 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 780 DUMP_PREFIX_ADDRESS, 16, 4, desc, 781 desc_bytes(desc), 1); 782 #endif 783 784 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); 785 if (ret) 786 goto unmap_ctx; 787 788 ret = -EINPROGRESS; 789 } else if (*next_buflen) { 790 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 791 req->nbytes, 0); 792 *buflen = *next_buflen; 793 *next_buflen = last_buflen; 794 } 795 #ifdef DEBUG 796 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 797 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 798 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 799 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 800 *next_buflen, 1); 801 #endif 802 803 return ret; 804 unmap_ctx: 805 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 806 kfree(edesc); 807 return ret; 808 } 809 810 static int ahash_final_ctx(struct ahash_request *req) 811 { 812 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 813 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 814 struct caam_hash_state *state = ahash_request_ctx(req); 815 struct device *jrdev = ctx->jrdev; 816 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 817 GFP_KERNEL : GFP_ATOMIC; 818 int buflen = *current_buflen(state); 819 u32 *desc; 820 int sec4_sg_bytes, sec4_sg_src_index; 821 int digestsize = crypto_ahash_digestsize(ahash); 822 struct ahash_edesc *edesc; 823 int ret; 824 825 sec4_sg_src_index = 1 + (buflen ? 
1 : 0); 826 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); 827 828 /* allocate space for base edesc and hw desc commands, link tables */ 829 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, 830 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 831 flags); 832 if (!edesc) 833 return -ENOMEM; 834 835 desc = edesc->hw_desc; 836 837 edesc->sec4_sg_bytes = sec4_sg_bytes; 838 839 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 840 edesc->sec4_sg, DMA_TO_DEVICE); 841 if (ret) 842 goto unmap_ctx; 843 844 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 845 if (ret) 846 goto unmap_ctx; 847 848 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); 849 850 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 851 sec4_sg_bytes, DMA_TO_DEVICE); 852 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 853 dev_err(jrdev, "unable to map S/G table\n"); 854 ret = -ENOMEM; 855 goto unmap_ctx; 856 } 857 858 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, 859 LDST_SGF); 860 861 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 862 digestsize); 863 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 864 dev_err(jrdev, "unable to map dst\n"); 865 ret = -ENOMEM; 866 goto unmap_ctx; 867 } 868 869 #ifdef DEBUG 870 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 871 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 872 #endif 873 874 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 875 if (ret) 876 goto unmap_ctx; 877 878 return -EINPROGRESS; 879 unmap_ctx: 880 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 881 kfree(edesc); 882 return ret; 883 } 884 885 static int ahash_finup_ctx(struct ahash_request *req) 886 { 887 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 888 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 889 struct caam_hash_state *state = ahash_request_ctx(req); 890 struct device *jrdev = ctx->jrdev; 891 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 892 GFP_KERNEL : GFP_ATOMIC; 893 int buflen = *current_buflen(state); 894 u32 *desc; 895 int sec4_sg_src_index; 896 int src_nents, mapped_nents; 897 int digestsize = crypto_ahash_digestsize(ahash); 898 struct ahash_edesc *edesc; 899 int ret; 900 901 src_nents = sg_nents_for_len(req->src, req->nbytes); 902 if (src_nents < 0) { 903 dev_err(jrdev, "Invalid number of src SG.\n"); 904 return src_nents; 905 } 906 907 if (src_nents) { 908 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 909 DMA_TO_DEVICE); 910 if (!mapped_nents) { 911 dev_err(jrdev, "unable to DMA map source\n"); 912 return -ENOMEM; 913 } 914 } else { 915 mapped_nents = 0; 916 } 917 918 sec4_sg_src_index = 1 + (buflen ? 
1 : 0); 919 920 /* allocate space for base edesc and hw desc commands, link tables */ 921 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 922 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 923 flags); 924 if (!edesc) { 925 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 926 return -ENOMEM; 927 } 928 929 desc = edesc->hw_desc; 930 931 edesc->src_nents = src_nents; 932 933 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 934 edesc->sec4_sg, DMA_TO_DEVICE); 935 if (ret) 936 goto unmap_ctx; 937 938 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 939 if (ret) 940 goto unmap_ctx; 941 942 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 943 sec4_sg_src_index, ctx->ctx_len + buflen, 944 req->nbytes); 945 if (ret) 946 goto unmap_ctx; 947 948 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 949 digestsize); 950 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 951 dev_err(jrdev, "unable to map dst\n"); 952 ret = -ENOMEM; 953 goto unmap_ctx; 954 } 955 956 #ifdef DEBUG 957 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 958 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 959 #endif 960 961 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 962 if (ret) 963 goto unmap_ctx; 964 965 return -EINPROGRESS; 966 unmap_ctx: 967 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 968 kfree(edesc); 969 return ret; 970 } 971 972 static int ahash_digest(struct ahash_request *req) 973 { 974 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 975 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 976 struct caam_hash_state *state = ahash_request_ctx(req); 977 struct device *jrdev = ctx->jrdev; 978 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 979 GFP_KERNEL : GFP_ATOMIC; 980 u32 *desc; 981 int digestsize = crypto_ahash_digestsize(ahash); 982 int src_nents, mapped_nents; 983 struct ahash_edesc *edesc; 984 int ret; 985 986 state->buf_dma = 0; 987 988 src_nents = sg_nents_for_len(req->src, req->nbytes); 989 if (src_nents < 0) { 990 dev_err(jrdev, "Invalid number of src SG.\n"); 991 return src_nents; 992 } 993 994 if (src_nents) { 995 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 996 DMA_TO_DEVICE); 997 if (!mapped_nents) { 998 dev_err(jrdev, "unable to map source for DMA\n"); 999 return -ENOMEM; 1000 } 1001 } else { 1002 mapped_nents = 0; 1003 } 1004 1005 /* allocate space for base edesc and hw desc commands, link tables */ 1006 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 
mapped_nents : 0, 1007 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1008 flags); 1009 if (!edesc) { 1010 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1011 return -ENOMEM; 1012 } 1013 1014 edesc->src_nents = src_nents; 1015 1016 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1017 req->nbytes); 1018 if (ret) { 1019 ahash_unmap(jrdev, edesc, req, digestsize); 1020 kfree(edesc); 1021 return ret; 1022 } 1023 1024 desc = edesc->hw_desc; 1025 1026 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1027 digestsize); 1028 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1029 dev_err(jrdev, "unable to map dst\n"); 1030 ahash_unmap(jrdev, edesc, req, digestsize); 1031 kfree(edesc); 1032 return -ENOMEM; 1033 } 1034 1035 #ifdef DEBUG 1036 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1037 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1038 #endif 1039 1040 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1041 if (!ret) { 1042 ret = -EINPROGRESS; 1043 } else { 1044 ahash_unmap(jrdev, edesc, req, digestsize); 1045 kfree(edesc); 1046 } 1047 1048 return ret; 1049 } 1050 1051 /* submit ahash final if it the first job descriptor */ 1052 static int ahash_final_no_ctx(struct ahash_request *req) 1053 { 1054 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1055 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1056 struct caam_hash_state *state = ahash_request_ctx(req); 1057 struct device *jrdev = ctx->jrdev; 1058 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1059 GFP_KERNEL : GFP_ATOMIC; 1060 u8 *buf = current_buf(state); 1061 int buflen = *current_buflen(state); 1062 u32 *desc; 1063 int digestsize = crypto_ahash_digestsize(ahash); 1064 struct ahash_edesc *edesc; 1065 int ret; 1066 1067 /* allocate space for base edesc and hw desc commands, link tables */ 1068 edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest, 1069 ctx->sh_desc_digest_dma, flags); 1070 if (!edesc) 1071 return -ENOMEM; 1072 1073 desc = edesc->hw_desc; 1074 1075 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 1076 if (dma_mapping_error(jrdev, state->buf_dma)) { 1077 dev_err(jrdev, "unable to map src\n"); 1078 goto unmap; 1079 } 1080 1081 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1082 1083 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1084 digestsize); 1085 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1086 dev_err(jrdev, "unable to map dst\n"); 1087 goto unmap; 1088 } 1089 1090 #ifdef DEBUG 1091 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1092 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1093 #endif 1094 1095 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1096 if (!ret) { 1097 ret = -EINPROGRESS; 1098 } else { 1099 ahash_unmap(jrdev, edesc, req, digestsize); 1100 kfree(edesc); 1101 } 1102 1103 return ret; 1104 unmap: 1105 ahash_unmap(jrdev, edesc, req, digestsize); 1106 kfree(edesc); 1107 return -ENOMEM; 1108 1109 } 1110 1111 /* submit ahash update if it the first job descriptor after update */ 1112 static int ahash_update_no_ctx(struct ahash_request *req) 1113 { 1114 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1115 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1116 struct caam_hash_state *state = ahash_request_ctx(req); 1117 struct device *jrdev = ctx->jrdev; 1118 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
1119 GFP_KERNEL : GFP_ATOMIC; 1120 u8 *buf = current_buf(state); 1121 int *buflen = current_buflen(state); 1122 u8 *next_buf = alt_buf(state); 1123 int *next_buflen = alt_buflen(state); 1124 int in_len = *buflen + req->nbytes, to_hash; 1125 int sec4_sg_bytes, src_nents, mapped_nents; 1126 struct ahash_edesc *edesc; 1127 u32 *desc; 1128 int ret = 0; 1129 1130 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); 1131 to_hash = in_len - *next_buflen; 1132 1133 if (to_hash) { 1134 src_nents = sg_nents_for_len(req->src, 1135 req->nbytes - *next_buflen); 1136 if (src_nents < 0) { 1137 dev_err(jrdev, "Invalid number of src SG.\n"); 1138 return src_nents; 1139 } 1140 1141 if (src_nents) { 1142 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1143 DMA_TO_DEVICE); 1144 if (!mapped_nents) { 1145 dev_err(jrdev, "unable to DMA map source\n"); 1146 return -ENOMEM; 1147 } 1148 } else { 1149 mapped_nents = 0; 1150 } 1151 1152 sec4_sg_bytes = (1 + mapped_nents) * 1153 sizeof(struct sec4_sg_entry); 1154 1155 /* 1156 * allocate space for base edesc and hw desc commands, 1157 * link tables 1158 */ 1159 edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, 1160 ctx->sh_desc_update_first, 1161 ctx->sh_desc_update_first_dma, 1162 flags); 1163 if (!edesc) { 1164 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1165 return -ENOMEM; 1166 } 1167 1168 edesc->src_nents = src_nents; 1169 edesc->sec4_sg_bytes = sec4_sg_bytes; 1170 1171 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1172 if (ret) 1173 goto unmap_ctx; 1174 1175 sg_to_sec4_sg_last(req->src, mapped_nents, 1176 edesc->sec4_sg + 1, 0); 1177 1178 if (*next_buflen) { 1179 scatterwalk_map_and_copy(next_buf, req->src, 1180 to_hash - *buflen, 1181 *next_buflen, 0); 1182 } 1183 1184 desc = edesc->hw_desc; 1185 1186 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1187 sec4_sg_bytes, 1188 DMA_TO_DEVICE); 1189 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1190 dev_err(jrdev, "unable to map S/G table\n"); 1191 ret = -ENOMEM; 1192 goto unmap_ctx; 1193 } 1194 1195 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); 1196 1197 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1198 if (ret) 1199 goto unmap_ctx; 1200 1201 #ifdef DEBUG 1202 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1203 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1204 desc_bytes(desc), 1); 1205 #endif 1206 1207 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1208 if (ret) 1209 goto unmap_ctx; 1210 1211 ret = -EINPROGRESS; 1212 state->update = ahash_update_ctx; 1213 state->finup = ahash_finup_ctx; 1214 state->final = ahash_final_ctx; 1215 } else if (*next_buflen) { 1216 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 1217 req->nbytes, 0); 1218 *buflen = *next_buflen; 1219 *next_buflen = 0; 1220 } 1221 #ifdef DEBUG 1222 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 1223 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1224 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1225 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1226 *next_buflen, 1); 1227 #endif 1228 1229 return ret; 1230 unmap_ctx: 1231 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1232 kfree(edesc); 1233 return ret; 1234 } 1235 1236 /* submit ahash finup if it the first job descriptor after update */ 1237 static int ahash_finup_no_ctx(struct ahash_request *req) 1238 { 1239 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1240 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1241 struct caam_hash_state 
*state = ahash_request_ctx(req); 1242 struct device *jrdev = ctx->jrdev; 1243 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1244 GFP_KERNEL : GFP_ATOMIC; 1245 int buflen = *current_buflen(state); 1246 u32 *desc; 1247 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; 1248 int digestsize = crypto_ahash_digestsize(ahash); 1249 struct ahash_edesc *edesc; 1250 int ret; 1251 1252 src_nents = sg_nents_for_len(req->src, req->nbytes); 1253 if (src_nents < 0) { 1254 dev_err(jrdev, "Invalid number of src SG.\n"); 1255 return src_nents; 1256 } 1257 1258 if (src_nents) { 1259 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1260 DMA_TO_DEVICE); 1261 if (!mapped_nents) { 1262 dev_err(jrdev, "unable to DMA map source\n"); 1263 return -ENOMEM; 1264 } 1265 } else { 1266 mapped_nents = 0; 1267 } 1268 1269 sec4_sg_src_index = 2; 1270 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 1271 sizeof(struct sec4_sg_entry); 1272 1273 /* allocate space for base edesc and hw desc commands, link tables */ 1274 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 1275 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1276 flags); 1277 if (!edesc) { 1278 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1279 return -ENOMEM; 1280 } 1281 1282 desc = edesc->hw_desc; 1283 1284 edesc->src_nents = src_nents; 1285 edesc->sec4_sg_bytes = sec4_sg_bytes; 1286 1287 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1288 if (ret) 1289 goto unmap; 1290 1291 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, 1292 req->nbytes); 1293 if (ret) { 1294 dev_err(jrdev, "unable to map S/G table\n"); 1295 goto unmap; 1296 } 1297 1298 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1299 digestsize); 1300 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1301 dev_err(jrdev, "unable to map dst\n"); 1302 goto unmap; 1303 } 1304 1305 #ifdef DEBUG 1306 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1307 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1308 #endif 1309 1310 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1311 if (!ret) { 1312 ret = -EINPROGRESS; 1313 } else { 1314 ahash_unmap(jrdev, edesc, req, digestsize); 1315 kfree(edesc); 1316 } 1317 1318 return ret; 1319 unmap: 1320 ahash_unmap(jrdev, edesc, req, digestsize); 1321 kfree(edesc); 1322 return -ENOMEM; 1323 1324 } 1325 1326 /* submit first update job descriptor after init */ 1327 static int ahash_update_first(struct ahash_request *req) 1328 { 1329 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1330 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1331 struct caam_hash_state *state = ahash_request_ctx(req); 1332 struct device *jrdev = ctx->jrdev; 1333 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
1334 GFP_KERNEL : GFP_ATOMIC; 1335 u8 *next_buf = alt_buf(state); 1336 int *next_buflen = alt_buflen(state); 1337 int to_hash; 1338 u32 *desc; 1339 int src_nents, mapped_nents; 1340 struct ahash_edesc *edesc; 1341 int ret = 0; 1342 1343 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - 1344 1); 1345 to_hash = req->nbytes - *next_buflen; 1346 1347 if (to_hash) { 1348 src_nents = sg_nents_for_len(req->src, 1349 req->nbytes - *next_buflen); 1350 if (src_nents < 0) { 1351 dev_err(jrdev, "Invalid number of src SG.\n"); 1352 return src_nents; 1353 } 1354 1355 if (src_nents) { 1356 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1357 DMA_TO_DEVICE); 1358 if (!mapped_nents) { 1359 dev_err(jrdev, "unable to map source for DMA\n"); 1360 return -ENOMEM; 1361 } 1362 } else { 1363 mapped_nents = 0; 1364 } 1365 1366 /* 1367 * allocate space for base edesc and hw desc commands, 1368 * link tables 1369 */ 1370 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 1371 mapped_nents : 0, 1372 ctx->sh_desc_update_first, 1373 ctx->sh_desc_update_first_dma, 1374 flags); 1375 if (!edesc) { 1376 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1377 return -ENOMEM; 1378 } 1379 1380 edesc->src_nents = src_nents; 1381 1382 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1383 to_hash); 1384 if (ret) 1385 goto unmap_ctx; 1386 1387 if (*next_buflen) 1388 scatterwalk_map_and_copy(next_buf, req->src, to_hash, 1389 *next_buflen, 0); 1390 1391 desc = edesc->hw_desc; 1392 1393 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1394 if (ret) 1395 goto unmap_ctx; 1396 1397 #ifdef DEBUG 1398 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1399 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1400 desc_bytes(desc), 1); 1401 #endif 1402 1403 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1404 if (ret) 1405 goto unmap_ctx; 1406 1407 ret = -EINPROGRESS; 1408 state->update = ahash_update_ctx; 1409 state->finup = ahash_finup_ctx; 1410 state->final = ahash_final_ctx; 1411 } else if (*next_buflen) { 1412 state->update = ahash_update_no_ctx; 1413 state->finup = ahash_finup_no_ctx; 1414 state->final = ahash_final_no_ctx; 1415 scatterwalk_map_and_copy(next_buf, req->src, 0, 1416 req->nbytes, 0); 1417 switch_buf(state); 1418 } 1419 #ifdef DEBUG 1420 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1421 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1422 *next_buflen, 1); 1423 #endif 1424 1425 return ret; 1426 unmap_ctx: 1427 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1428 kfree(edesc); 1429 return ret; 1430 } 1431 1432 static int ahash_finup_first(struct ahash_request *req) 1433 { 1434 return ahash_digest(req); 1435 } 1436 1437 static int ahash_init(struct ahash_request *req) 1438 { 1439 struct caam_hash_state *state = ahash_request_ctx(req); 1440 1441 state->update = ahash_update_first; 1442 state->finup = ahash_finup_first; 1443 state->final = ahash_final_no_ctx; 1444 1445 state->ctx_dma = 0; 1446 state->current_buf = 0; 1447 state->buf_dma = 0; 1448 state->buflen_0 = 0; 1449 state->buflen_1 = 0; 1450 1451 return 0; 1452 } 1453 1454 static int ahash_update(struct ahash_request *req) 1455 { 1456 struct caam_hash_state *state = ahash_request_ctx(req); 1457 1458 return state->update(req); 1459 } 1460 1461 static int ahash_finup(struct ahash_request *req) 1462 { 1463 struct caam_hash_state *state = ahash_request_ctx(req); 1464 1465 return state->finup(req); 1466 } 1467 1468 static int ahash_final(struct ahash_request *req) 1469 { 1470 struct 
caam_hash_state *state = ahash_request_ctx(req); 1471 1472 return state->final(req); 1473 } 1474 1475 static int ahash_export(struct ahash_request *req, void *out) 1476 { 1477 struct caam_hash_state *state = ahash_request_ctx(req); 1478 struct caam_export_state *export = out; 1479 int len; 1480 u8 *buf; 1481 1482 if (state->current_buf) { 1483 buf = state->buf_1; 1484 len = state->buflen_1; 1485 } else { 1486 buf = state->buf_0; 1487 len = state->buflen_0; 1488 } 1489 1490 memcpy(export->buf, buf, len); 1491 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); 1492 export->buflen = len; 1493 export->update = state->update; 1494 export->final = state->final; 1495 export->finup = state->finup; 1496 1497 return 0; 1498 } 1499 1500 static int ahash_import(struct ahash_request *req, const void *in) 1501 { 1502 struct caam_hash_state *state = ahash_request_ctx(req); 1503 const struct caam_export_state *export = in; 1504 1505 memset(state, 0, sizeof(*state)); 1506 memcpy(state->buf_0, export->buf, export->buflen); 1507 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); 1508 state->buflen_0 = export->buflen; 1509 state->update = export->update; 1510 state->final = export->final; 1511 state->finup = export->finup; 1512 1513 return 0; 1514 } 1515 1516 struct caam_hash_template { 1517 char name[CRYPTO_MAX_ALG_NAME]; 1518 char driver_name[CRYPTO_MAX_ALG_NAME]; 1519 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1520 char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1521 unsigned int blocksize; 1522 struct ahash_alg template_ahash; 1523 u32 alg_type; 1524 }; 1525 1526 /* ahash descriptors */ 1527 static struct caam_hash_template driver_hash[] = { 1528 { 1529 .name = "sha1", 1530 .driver_name = "sha1-caam", 1531 .hmac_name = "hmac(sha1)", 1532 .hmac_driver_name = "hmac-sha1-caam", 1533 .blocksize = SHA1_BLOCK_SIZE, 1534 .template_ahash = { 1535 .init = ahash_init, 1536 .update = ahash_update, 1537 .final = ahash_final, 1538 .finup = ahash_finup, 1539 .digest = ahash_digest, 1540 .export = ahash_export, 1541 .import = ahash_import, 1542 .setkey = ahash_setkey, 1543 .halg = { 1544 .digestsize = SHA1_DIGEST_SIZE, 1545 .statesize = sizeof(struct caam_export_state), 1546 }, 1547 }, 1548 .alg_type = OP_ALG_ALGSEL_SHA1, 1549 }, { 1550 .name = "sha224", 1551 .driver_name = "sha224-caam", 1552 .hmac_name = "hmac(sha224)", 1553 .hmac_driver_name = "hmac-sha224-caam", 1554 .blocksize = SHA224_BLOCK_SIZE, 1555 .template_ahash = { 1556 .init = ahash_init, 1557 .update = ahash_update, 1558 .final = ahash_final, 1559 .finup = ahash_finup, 1560 .digest = ahash_digest, 1561 .export = ahash_export, 1562 .import = ahash_import, 1563 .setkey = ahash_setkey, 1564 .halg = { 1565 .digestsize = SHA224_DIGEST_SIZE, 1566 .statesize = sizeof(struct caam_export_state), 1567 }, 1568 }, 1569 .alg_type = OP_ALG_ALGSEL_SHA224, 1570 }, { 1571 .name = "sha256", 1572 .driver_name = "sha256-caam", 1573 .hmac_name = "hmac(sha256)", 1574 .hmac_driver_name = "hmac-sha256-caam", 1575 .blocksize = SHA256_BLOCK_SIZE, 1576 .template_ahash = { 1577 .init = ahash_init, 1578 .update = ahash_update, 1579 .final = ahash_final, 1580 .finup = ahash_finup, 1581 .digest = ahash_digest, 1582 .export = ahash_export, 1583 .import = ahash_import, 1584 .setkey = ahash_setkey, 1585 .halg = { 1586 .digestsize = SHA256_DIGEST_SIZE, 1587 .statesize = sizeof(struct caam_export_state), 1588 }, 1589 }, 1590 .alg_type = OP_ALG_ALGSEL_SHA256, 1591 }, { 1592 .name = "sha384", 1593 .driver_name = "sha384-caam", 1594 .hmac_name = "hmac(sha384)", 1595 
.hmac_driver_name = "hmac-sha384-caam", 1596 .blocksize = SHA384_BLOCK_SIZE, 1597 .template_ahash = { 1598 .init = ahash_init, 1599 .update = ahash_update, 1600 .final = ahash_final, 1601 .finup = ahash_finup, 1602 .digest = ahash_digest, 1603 .export = ahash_export, 1604 .import = ahash_import, 1605 .setkey = ahash_setkey, 1606 .halg = { 1607 .digestsize = SHA384_DIGEST_SIZE, 1608 .statesize = sizeof(struct caam_export_state), 1609 }, 1610 }, 1611 .alg_type = OP_ALG_ALGSEL_SHA384, 1612 }, { 1613 .name = "sha512", 1614 .driver_name = "sha512-caam", 1615 .hmac_name = "hmac(sha512)", 1616 .hmac_driver_name = "hmac-sha512-caam", 1617 .blocksize = SHA512_BLOCK_SIZE, 1618 .template_ahash = { 1619 .init = ahash_init, 1620 .update = ahash_update, 1621 .final = ahash_final, 1622 .finup = ahash_finup, 1623 .digest = ahash_digest, 1624 .export = ahash_export, 1625 .import = ahash_import, 1626 .setkey = ahash_setkey, 1627 .halg = { 1628 .digestsize = SHA512_DIGEST_SIZE, 1629 .statesize = sizeof(struct caam_export_state), 1630 }, 1631 }, 1632 .alg_type = OP_ALG_ALGSEL_SHA512, 1633 }, { 1634 .name = "md5", 1635 .driver_name = "md5-caam", 1636 .hmac_name = "hmac(md5)", 1637 .hmac_driver_name = "hmac-md5-caam", 1638 .blocksize = MD5_BLOCK_WORDS * 4, 1639 .template_ahash = { 1640 .init = ahash_init, 1641 .update = ahash_update, 1642 .final = ahash_final, 1643 .finup = ahash_finup, 1644 .digest = ahash_digest, 1645 .export = ahash_export, 1646 .import = ahash_import, 1647 .setkey = ahash_setkey, 1648 .halg = { 1649 .digestsize = MD5_DIGEST_SIZE, 1650 .statesize = sizeof(struct caam_export_state), 1651 }, 1652 }, 1653 .alg_type = OP_ALG_ALGSEL_MD5, 1654 }, 1655 }; 1656 1657 struct caam_hash_alg { 1658 struct list_head entry; 1659 int alg_type; 1660 struct ahash_alg ahash_alg; 1661 }; 1662 1663 static int caam_hash_cra_init(struct crypto_tfm *tfm) 1664 { 1665 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 1666 struct crypto_alg *base = tfm->__crt_alg; 1667 struct hash_alg_common *halg = 1668 container_of(base, struct hash_alg_common, base); 1669 struct ahash_alg *alg = 1670 container_of(halg, struct ahash_alg, halg); 1671 struct caam_hash_alg *caam_hash = 1672 container_of(alg, struct caam_hash_alg, ahash_alg); 1673 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1674 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1675 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1676 HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1677 HASH_MSG_LEN + 32, 1678 HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1679 HASH_MSG_LEN + 64, 1680 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1681 dma_addr_t dma_addr; 1682 struct caam_drv_private *priv; 1683 1684 /* 1685 * Get a Job ring from Job Ring driver to ensure in-order 1686 * crypto request processing per tfm 1687 */ 1688 ctx->jrdev = caam_jr_alloc(); 1689 if (IS_ERR(ctx->jrdev)) { 1690 pr_err("Job Ring Device allocation for transform failed\n"); 1691 return PTR_ERR(ctx->jrdev); 1692 } 1693 1694 priv = dev_get_drvdata(ctx->jrdev->parent); 1695 ctx->dir = priv->era >= 6 ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 1696 1697 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, 1698 offsetof(struct caam_hash_ctx, 1699 sh_desc_update_dma), 1700 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1701 if (dma_mapping_error(ctx->jrdev, dma_addr)) { 1702 dev_err(ctx->jrdev, "unable to map shared descriptors\n"); 1703 caam_jr_free(ctx->jrdev); 1704 return -ENOMEM; 1705 } 1706 1707 ctx->sh_desc_update_dma = dma_addr; 1708 ctx->sh_desc_update_first_dma = dma_addr + 1709 offsetof(struct caam_hash_ctx, 1710 sh_desc_update_first); 1711 ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, 1712 sh_desc_fin); 1713 ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, 1714 sh_desc_digest); 1715 1716 /* copy descriptor header template value */ 1717 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1718 1719 ctx->ctx_len = runninglen[(ctx->adata.algtype & 1720 OP_ALG_ALGSEL_SUBMASK) >> 1721 OP_ALG_ALGSEL_SHIFT]; 1722 1723 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1724 sizeof(struct caam_hash_state)); 1725 return ahash_set_sh_desc(ahash); 1726 } 1727 1728 static void caam_hash_cra_exit(struct crypto_tfm *tfm) 1729 { 1730 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1731 1732 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, 1733 offsetof(struct caam_hash_ctx, 1734 sh_desc_update_dma), 1735 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1736 caam_jr_free(ctx->jrdev); 1737 } 1738 1739 static void __exit caam_algapi_hash_exit(void) 1740 { 1741 struct caam_hash_alg *t_alg, *n; 1742 1743 if (!hash_list.next) 1744 return; 1745 1746 list_for_each_entry_safe(t_alg, n, &hash_list, entry) { 1747 crypto_unregister_ahash(&t_alg->ahash_alg); 1748 list_del(&t_alg->entry); 1749 kfree(t_alg); 1750 } 1751 } 1752 1753 static struct caam_hash_alg * 1754 caam_hash_alloc(struct caam_hash_template *template, 1755 bool keyed) 1756 { 1757 struct caam_hash_alg *t_alg; 1758 struct ahash_alg *halg; 1759 struct crypto_alg *alg; 1760 1761 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 1762 if (!t_alg) { 1763 pr_err("failed to allocate t_alg\n"); 1764 return ERR_PTR(-ENOMEM); 1765 } 1766 1767 t_alg->ahash_alg = template->template_ahash; 1768 halg = &t_alg->ahash_alg; 1769 alg = &halg->halg.base; 1770 1771 if (keyed) { 1772 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1773 template->hmac_name); 1774 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1775 template->hmac_driver_name); 1776 } else { 1777 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1778 template->name); 1779 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1780 template->driver_name); 1781 t_alg->ahash_alg.setkey = NULL; 1782 } 1783 alg->cra_module = THIS_MODULE; 1784 alg->cra_init = caam_hash_cra_init; 1785 alg->cra_exit = caam_hash_cra_exit; 1786 alg->cra_ctxsize = sizeof(struct caam_hash_ctx); 1787 alg->cra_priority = CAAM_CRA_PRIORITY; 1788 alg->cra_blocksize = template->blocksize; 1789 alg->cra_alignmask = 0; 1790 alg->cra_flags = CRYPTO_ALG_ASYNC; 1791 1792 t_alg->alg_type = template->alg_type; 1793 1794 return t_alg; 1795 } 1796 1797 static int __init caam_algapi_hash_init(void) 1798 { 1799 struct device_node *dev_node; 1800 struct platform_device *pdev; 1801 struct device *ctrldev; 1802 int i = 0, err = 0; 1803 struct caam_drv_private *priv; 1804 unsigned int md_limit = SHA512_DIGEST_SIZE; 1805 u32 md_inst, md_vid; 1806 1807 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1808 if (!dev_node) { 1809 dev_node = of_find_compatible_node(NULL, NULL, 
"fsl,sec4.0"); 1810 if (!dev_node) 1811 return -ENODEV; 1812 } 1813 1814 pdev = of_find_device_by_node(dev_node); 1815 if (!pdev) { 1816 of_node_put(dev_node); 1817 return -ENODEV; 1818 } 1819 1820 ctrldev = &pdev->dev; 1821 priv = dev_get_drvdata(ctrldev); 1822 of_node_put(dev_node); 1823 1824 /* 1825 * If priv is NULL, it's probably because the caam driver wasn't 1826 * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 1827 */ 1828 if (!priv) 1829 return -ENODEV; 1830 1831 /* 1832 * Register crypto algorithms the device supports. First, identify 1833 * presence and attributes of MD block. 1834 */ 1835 if (priv->era < 10) { 1836 md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) & 1837 CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 1838 md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & 1839 CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 1840 } else { 1841 u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha); 1842 1843 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 1844 md_inst = mdha & CHA_VER_NUM_MASK; 1845 } 1846 1847 /* 1848 * Skip registration of any hashing algorithms if MD block 1849 * is not present. 1850 */ 1851 if (!md_inst) 1852 return -ENODEV; 1853 1854 /* Limit digest size based on LP256 */ 1855 if (md_vid == CHA_VER_VID_MD_LP256) 1856 md_limit = SHA256_DIGEST_SIZE; 1857 1858 INIT_LIST_HEAD(&hash_list); 1859 1860 /* register crypto algorithms the device supports */ 1861 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 1862 struct caam_hash_alg *t_alg; 1863 struct caam_hash_template *alg = driver_hash + i; 1864 1865 /* If MD size is not supported by device, skip registration */ 1866 if (alg->template_ahash.halg.digestsize > md_limit) 1867 continue; 1868 1869 /* register hmac version */ 1870 t_alg = caam_hash_alloc(alg, true); 1871 if (IS_ERR(t_alg)) { 1872 err = PTR_ERR(t_alg); 1873 pr_warn("%s alg allocation failed\n", alg->driver_name); 1874 continue; 1875 } 1876 1877 err = crypto_register_ahash(&t_alg->ahash_alg); 1878 if (err) { 1879 pr_warn("%s alg registration failed: %d\n", 1880 t_alg->ahash_alg.halg.base.cra_driver_name, 1881 err); 1882 kfree(t_alg); 1883 } else 1884 list_add_tail(&t_alg->entry, &hash_list); 1885 1886 /* register unkeyed version */ 1887 t_alg = caam_hash_alloc(alg, false); 1888 if (IS_ERR(t_alg)) { 1889 err = PTR_ERR(t_alg); 1890 pr_warn("%s alg allocation failed\n", alg->driver_name); 1891 continue; 1892 } 1893 1894 err = crypto_register_ahash(&t_alg->ahash_alg); 1895 if (err) { 1896 pr_warn("%s alg registration failed: %d\n", 1897 t_alg->ahash_alg.halg.base.cra_driver_name, 1898 err); 1899 kfree(t_alg); 1900 } else 1901 list_add_tail(&t_alg->entry, &hash_list); 1902 } 1903 1904 return err; 1905 } 1906 1907 module_init(caam_algapi_hash_init); 1908 module_exit(caam_algapi_hash_exit); 1909 1910 MODULE_LICENSE("GPL"); 1911 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); 1912 MODULE_AUTHOR("Freescale Semiconductor - NMG"); 1913