/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |        |---->| (operation) |
 *       .              |        |     | (load ctx2) |
 *       .              |        |     ---------------
 * ---------------      |        |
 * | JobDesc #3  |------|        |
 * | *(packet 3) |               |
 * ---------------               |
 *       .                       |
 *       .                       |
 * ---------------               |
 * | JobDesc #4  |---------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	struct device *jrdev;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/*
 * Only add the buffer to the link table if it contains data. A buffer
 * used on a previous pass may still be mapped and must be unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}
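
/*
 * Layout note (a sketch, inferred from the callers below): for jobs that
 * carry a running context, link-table entry 0 holds the context, entry 1
 * the buffered partial block (if any), and the remaining entries the
 * req->src segments.
 */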

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

/*
 * For ahash update, final and finup (import_ctx = true)
 *     import context, read and write to seqout
 * For ahash first (init) and digest (import_ctx = false)
 *     read and write to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
				     struct caam_hash_ctx *ctx, bool import_ctx)
{
	u32 op = ctx->adata.algtype;
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Append key if it has been set; ahash update excluded */
	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
		/* Skip key loading if already shared */
		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					    JUMP_COND_SHRD);

		append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
				  ctx->adata.keylen, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);

		set_jump_tgt_here(desc, skip_key_load);

		op |= OP_ALG_AAI_HMAC_PRECOMP;
	}

	/* If needed, import context from software */
	if (import_ctx)
		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
				LDST_SRCDST_BYTE_CONTEXT);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);
	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
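
/*
 * Each session thus keeps four prebuilt shared descriptors, one per
 * algorithm state: update (OP_ALG_AS_UPDATE), update_first
 * (OP_ALG_AS_INIT), final (OP_ALG_AS_FINALIZE) and digest
 * (OP_ALG_AS_INITFINAL). Per-request job descriptors only carry data
 * pointers and reference one of these.
 */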

/* Digest the key if it is too long; the digest replaces the original key */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
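
/*
 * Standard HMAC key handling: keys longer than the block size are first
 * hashed down to digestsize bytes (hash_digest_key() above), and the
 * result is then expanded into an MDHA split key in ctx->key.
 */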
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc_array(digestsize,
					   sizeof(*hashed_key),
					   GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
			    CAAM_MAX_HASH_KEY_SIZE);
	if (ret)
		goto bad_free_key;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad, 1);
#endif

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
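
/*
 * The remaining completion callbacks differ only in how the running
 * context was mapped for the job: _bi unmaps it bidirectionally (update:
 * context read and written back), _ctx_src unmaps a context that was job
 * input (final/finup), and _ctx_dst one that was job output (first
 * update after init).
 */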
static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
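
/*
 * Each request allocates one ahash_edesc; it is freed either in the
 * completion callback above or on the error paths before enqueue.
 */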

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}
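
/*
 * The update paths below only feed whole blocks to the accelerator and
 * keep the remainder in the state buffer for the next call. A worked
 * example (numbers not from the original source), for a 64-byte block
 * size: 10 bytes buffered + 100 new bytes = 110 total; next_buflen =
 * 110 & 63 = 46, so to_hash = 64 bytes go to the CAAM and 46 bytes are
 * buffered for later.
 */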
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
					  ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		desc = edesc->hw_desc;

		ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (mapped_nents) {
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
			if (*next_buflen)
				scatterwalk_map_and_copy(next_buf, req->src,
							 to_hash - *buflen,
							 *next_buflen, 0);
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
				cpu_to_caam32(SEC4_SG_LEN_FIN);
		}

		state->current_buf = !state->current_buf;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
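
/*
 * final: nothing more to read from req->src; the job hashes the imported
 * context plus any buffered bytes through the FINALIZE descriptor and
 * writes the digest to req->result. The last link-table entry is tagged
 * with SEC4_SG_LEN_FIN to terminate the input sequence.
 */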
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->src_nents = 0;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
		cpu_to_caam32(SEC4_SG_LEN_FIN);

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
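
/*
 * finup: like final, but req->src still carries data, so the link table
 * chains context + buffered bytes + the remaining source segments.
 */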
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_TO_DEVICE);
	if (ret)
		goto unmap_ctx;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);
	return ret;
}
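
/*
 * digest: one-shot hash of req->src through the INITFINAL descriptor; no
 * running context is imported or saved.
 */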
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map src\n");
		goto unmap;
	}

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
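
/*
 * The _no_ctx variants below handle the first job descriptor of a
 * session, before any running context exists in hardware. Once a first
 * update job has produced a context, the state handlers are switched
 * over to the _ctx variants above.
 */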
/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_bytes = (1 + mapped_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->dst_dma = 0;

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		state->current_buf = !state->current_buf;

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}
/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	if (dma_mapping_error(jrdev, edesc->dst_dma)) {
		dev_err(jrdev, "unable to map dst\n");
		goto unmap;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}
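
/*
 * First update after init: data goes through the INIT descriptor and the
 * resulting running context is written out to state->caam_ctx, after
 * which the _ctx handlers take over.
 */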
/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *next_buflen = state->current_buf ?
			   &state->buflen_1 : &state->buflen_0;
	int to_hash;
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->dst_dma = 0;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}
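
/*
 * export/import serialize the partial state (running context, buffered
 * bytes and the update/final/finup method pointers) into a
 * caam_export_state, sized via .statesize in the templates below, so a
 * partially hashed request can be saved and resumed.
 */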
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};
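
/*
 * Note: cra_init below covers all four shared descriptors with a single
 * DMA mapping; they are laid out contiguously at the start of
 * caam_hash_ctx, so the individual bus addresses are derived from one
 * base via offsetof().
 */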

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
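
/*
 * Module init walks driver_hash[] and registers both an hmac(...) and an
 * unkeyed variant of each template, skipping algorithms whose digest
 * size exceeds what the device's MD block supports (limited to SHA-256
 * on low-power LP256 parts).
 */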
static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");