/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------->|  ShareDesc  |
 * | *(packet 1) |               |  (hashKey)  |
 * ---------------               | (operation) |
 *                               ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------->|  ShareDesc  |
 * | *(packet 2) |      |------->|  (hashKey)  |
 * ---------------      |    |-->| (operation) |
 *       .              |    |   | (load ctx2) |
 *       .              |    |   ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |-----------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
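
/*
 * Illustrative sketch (not part of the driver) of how a crypto API user
 * would exercise one of the transforms registered below; the algorithm
 * name comes from driver_hash[], and my_done_cb/my_ctx are hypothetical:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256-caam", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, src_sgl, digest_buf, nbytes);
 *	if (crypto_ahash_digest(req) == -EINPROGRESS)
 *		wait for my_done_cb to fire;
 */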

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
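
/*
 * The state keeps two bounce buffers and flips between them: one holds the
 * partial block carried into the current request, the other collects the
 * partial block left over for the next one. Worked example for a 64-byte
 * block size: 20 bytes buffered + 80 new bytes gives in_len = 100, so
 * next_buflen = 100 & 63 = 36 bytes are copied to the alternate buffer and
 * to_hash = 64 bytes are sent to the engine.
 */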

static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
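
/*
 * Summary of the four shared descriptors built by ahash_set_sh_desc()
 * below (arguments as passed to cnstr_shdsc_ahash()):
 *
 *   sh_desc_update:       OP_ALG_AS_UPDATE,    imports ctx, writes ctx
 *   sh_desc_update_first: OP_ALG_AS_INIT,      no ctx in,   writes ctx
 *   sh_desc_fin:          OP_ALG_AS_FINALIZE,  imports ctx, writes digest
 *   sh_desc_digest:       OP_ALG_AS_INITFINAL, no ctx in,   writes digest
 */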

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

/* Digest the key if it is too large: hash it down to digestsize bytes */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
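
/*
 * Standard HMAC key preprocessing (RFC 2104): a key longer than the block
 * size is first replaced by its digest, e.g. a 100-byte key for
 * hmac(sha256) (64-byte block) is hashed down to 32 bytes by
 * hash_digest_key() before split-key generation.
 */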

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}
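
/*
 * Job completion callbacks. Each pairs with the DMA direction the running
 * context was mapped with when the job was constructed:
 *
 *   ahash_done         - digest/final jobs without a mapped context
 *   ahash_done_bi      - update jobs, ctx mapped DMA_BIDIRECTIONAL
 *   ahash_done_ctx_src - final/finup jobs, ctx was DMA_TO_DEVICE
 *   ahash_done_ctx_dst - first-update jobs, ctx was DMA_FROM_DEVICE
 */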

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/*
 * Allocate an extended descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			sg_to_sec4_set_last(edesc->sec4_sg +
					    sec4_sg_src_index - 1);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
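
/*
 * ahash_final_ctx() issues the FINALIZE job: its sequence input is only
 * the imported running context plus any buffered partial block; no new
 * source data is mapped.
 */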

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
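
/*
 * finup = final plus trailing data. The link table built below has this
 * layout (the buffer entry is present only when buflen != 0):
 *
 *   [0] running context (ctx_len bytes)
 *   [1] buffered partial block, if any
 *   [.] mapped segments of req->src, last entry flagged as final
 */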

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
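
/*
 * One-shot digest: uses the INITFINAL shared descriptor, so no running
 * context is imported or exported; req->src is hashed straight into
 * req->result.
 */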

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
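
/*
 * Once a _no_ctx job has produced a running context, the per-request
 * method pointers are redirected to the _ctx variants (see the
 * state->update/finup/final assignments below), so later operations
 * import and export the context.
 */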

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
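
/*
 * finup with no prior context is effectively a one-shot digest over the
 * buffered bytes followed by req->src, which is why it runs on the
 * sh_desc_digest shared descriptor.
 */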

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
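
/*
 * First update after init: if at least one full block is available it is
 * hashed with the INIT descriptor and the _ctx handlers take over;
 * otherwise everything is buffered and the _no_ctx handlers are installed.
 */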

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
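
/*
 * Each driver_hash[] template below is registered twice by
 * caam_hash_alloc(): once as the keyed hmac(...) variant and once as the
 * unkeyed hash, with .setkey stripped from the latter.
 */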

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
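
/*
 * The four shared descriptor buffers sit at the start of caam_hash_ctx, so
 * caam_hash_cra_init() covers them all with a single DMA mapping of size
 * offsetof(struct caam_hash_ctx, sh_desc_update_dma); the individual *_dma
 * handles are fixed offsets into that one mapping.
 */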

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");