1 /* 2 * caam - Freescale FSL CAAM support for ahash functions of crypto API 3 * 4 * Copyright 2011 Freescale Semiconductor, Inc. 5 * 6 * Based on caamalg.c crypto API driver. 7 * 8 * relationship of digest job descriptor or first job descriptor after init to 9 * shared descriptors: 10 * 11 * --------------- --------------- 12 * | JobDesc #1 |-------------------->| ShareDesc | 13 * | *(packet 1) | | (hashKey) | 14 * --------------- | (operation) | 15 * --------------- 16 * 17 * relationship of subsequent job descriptors to shared descriptors: 18 * 19 * --------------- --------------- 20 * | JobDesc #2 |-------------------->| ShareDesc | 21 * | *(packet 2) | |------------->| (hashKey) | 22 * --------------- | |-------->| (operation) | 23 * . | | | (load ctx2) | 24 * . | | --------------- 25 * --------------- | | 26 * | JobDesc #3 |------| | 27 * | *(packet 3) | | 28 * --------------- | 29 * . | 30 * . | 31 * --------------- | 32 * | JobDesc #4 |------------ 33 * | *(packet 4) | 34 * --------------- 35 * 36 * The SharedDesc never changes for a connection unless rekeyed, but 37 * each packet will likely be in a different place. So all we need 38 * to know to process the packet is where the input is, where the 39 * output goes, and what context we want to process with. Context is 40 * in the SharedDesc, packet references in the JobDesc. 41 * 42 * So, a job desc looks like: 43 * 44 * --------------------- 45 * | Header | 46 * | ShareDesc Pointer | 47 * | SEQ_OUT_PTR | 48 * | (output buffer) | 49 * | (output length) | 50 * | SEQ_IN_PTR | 51 * | (input buffer) | 52 * | (input length) | 53 * --------------------- 54 */ 55 56 #include "compat.h" 57 58 #include "regs.h" 59 #include "intern.h" 60 #include "desc_constr.h" 61 #include "jr.h" 62 #include "error.h" 63 #include "sg_sw_sec4.h" 64 #include "key_gen.h" 65 66 #define CAAM_CRA_PRIORITY 3000 67 68 /* max hash key is max split key size */ 69 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) 70 71 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE 72 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE 73 74 /* length of descriptors text */ 75 #define DESC_AHASH_BASE (3 * CAAM_CMD_SZ) 76 #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) 77 #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) 78 #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) 79 #define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) 80 #define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) 81 82 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ 83 CAAM_MAX_HASH_KEY_SIZE) 84 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) 85 86 /* caam context sizes for hashes: running digest + 8 */ 87 #define HASH_MSG_LEN 8 88 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) 89 90 #ifdef DEBUG 91 /* for print_hex_dumps with line references */ 92 #define debug(format, arg...) printk(format, arg) 93 #else 94 #define debug(format, arg...) 95 #endif 96 97 98 static struct list_head hash_list; 99 100 /* ahash per-session context */ 101 struct caam_hash_ctx { 102 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 103 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 104 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 105 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 106 u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 107 dma_addr_t sh_desc_update_dma ____cacheline_aligned; 108 dma_addr_t sh_desc_update_first_dma; 109 dma_addr_t sh_desc_fin_dma; 110 dma_addr_t sh_desc_digest_dma; 111 dma_addr_t sh_desc_finup_dma; 112 struct device *jrdev; 113 u32 alg_type; 114 u32 alg_op; 115 u8 key[CAAM_MAX_HASH_KEY_SIZE]; 116 dma_addr_t key_dma; 117 int ctx_len; 118 unsigned int split_key_len; 119 unsigned int split_key_pad_len; 120 }; 121 122 /* ahash state */ 123 struct caam_hash_state { 124 dma_addr_t buf_dma; 125 dma_addr_t ctx_dma; 126 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 127 int buflen_0; 128 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 129 int buflen_1; 130 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; 131 int (*update)(struct ahash_request *req); 132 int (*final)(struct ahash_request *req); 133 int (*finup)(struct ahash_request *req); 134 int current_buf; 135 }; 136 137 struct caam_export_state { 138 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; 139 u8 caam_ctx[MAX_CTX_LEN]; 140 int buflen; 141 int (*update)(struct ahash_request *req); 142 int (*final)(struct ahash_request *req); 143 int (*finup)(struct ahash_request *req); 144 }; 145 146 /* Common job descriptor seq in/out ptr routines */ 147 148 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ 149 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, 150 struct caam_hash_state *state, 151 int ctx_len) 152 { 153 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, 154 ctx_len, DMA_FROM_DEVICE); 155 if (dma_mapping_error(jrdev, state->ctx_dma)) { 156 dev_err(jrdev, "unable to map ctx\n"); 157 return -ENOMEM; 158 } 159 160 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); 161 162 return 0; 163 } 164 165 /* Map req->result, and append seq_out_ptr command that points to it */ 166 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, 167 u8 *result, int digestsize) 168 { 169 dma_addr_t dst_dma; 170 171 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); 172 append_seq_out_ptr(desc, dst_dma, digestsize, 0); 173 174 return dst_dma; 175 } 176 177 /* Map current buffer in state and put it in link table */ 178 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, 179 struct sec4_sg_entry *sec4_sg, 180 u8 *buf, int buflen) 181 { 182 dma_addr_t buf_dma; 183 184 buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 185 dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0); 186 187 return buf_dma; 188 } 189 190 /* 191 * Only put buffer in link table if it contains data, which is possible, 192 * since a buffer has previously been used, and needs to be unmapped, 193 */ 194 static inline dma_addr_t 195 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, 196 u8 *buf, dma_addr_t buf_dma, int buflen, 197 int last_buflen) 198 { 199 if (buf_dma && !dma_mapping_error(jrdev, buf_dma)) 200 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE); 201 if (buflen) 202 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen); 203 else 204 buf_dma = 0; 205 206 return buf_dma; 207 } 208 209 /* Map state->caam_ctx, and add it to link table */ 210 static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, 211 struct caam_hash_state *state, int ctx_len, 212 struct sec4_sg_entry *sec4_sg, u32 flag) 213 { 214 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); 215 if (dma_mapping_error(jrdev, state->ctx_dma)) { 216 dev_err(jrdev, "unable to map ctx\n"); 217 return -ENOMEM; 218 } 219 220 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); 221 222 return 0; 223 } 224 225 /* Common shared descriptor commands */ 226 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) 227 { 228 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, 229 ctx->split_key_len, CLASS_2 | 230 KEY_DEST_MDHA_SPLIT | KEY_ENC); 231 } 232 233 /* Append key if it has been set */ 234 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) 235 { 236 u32 *key_jump_cmd; 237 238 init_sh_desc(desc, HDR_SHARE_SERIAL); 239 240 if (ctx->split_key_len) { 241 /* Skip if already shared */ 242 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 243 JUMP_COND_SHRD); 244 245 append_key_ahash(desc, ctx); 246 247 set_jump_tgt_here(desc, key_jump_cmd); 248 } 249 } 250 251 /* 252 * For ahash read data from seqin following state->caam_ctx, 253 * and write resulting class2 context to seqout, which may be state->caam_ctx 254 * or req->result 255 */ 256 static inline void ahash_append_load_str(u32 *desc, int digestsize) 257 { 258 /* Calculate remaining bytes to read */ 259 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 260 261 /* Read remaining bytes */ 262 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | 263 FIFOLD_TYPE_MSG | KEY_VLF); 264 265 /* Store class2 context bytes */ 266 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 267 LDST_SRCDST_BYTE_CONTEXT); 268 } 269 270 /* 271 * For ahash update, final and finup, import context, read and write to seqout 272 */ 273 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, 274 int digestsize, 275 struct caam_hash_ctx *ctx) 276 { 277 init_sh_desc_key_ahash(desc, ctx); 278 279 /* Import context from software */ 280 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 281 LDST_CLASS_2_CCB | ctx->ctx_len); 282 283 /* Class 2 operation */ 284 append_operation(desc, op | state | OP_ALG_ENCRYPT); 285 286 /* 287 * Load from buf and/or src and write to req->result or state->context 288 */ 289 ahash_append_load_str(desc, digestsize); 290 } 291 292 /* For ahash firsts and digest, read and write to seqout */ 293 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, 294 int digestsize, struct caam_hash_ctx *ctx) 295 { 296 init_sh_desc_key_ahash(desc, ctx); 297 298 /* Class 2 operation */ 299 append_operation(desc, op | state | OP_ALG_ENCRYPT); 300 301 /* 302 * Load from buf and/or src and write to req->result or state->context 303 */ 304 ahash_append_load_str(desc, digestsize); 305 } 306 307 static int ahash_set_sh_desc(struct crypto_ahash *ahash) 308 { 309 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 310 int digestsize = crypto_ahash_digestsize(ahash); 311 struct device *jrdev = ctx->jrdev; 312 u32 have_key = 0; 313 u32 *desc; 314 315 if (ctx->split_key_len) 316 have_key = OP_ALG_AAI_HMAC_PRECOMP; 317 318 /* ahash_update shared descriptor */ 319 desc = ctx->sh_desc_update; 320 321 init_sh_desc(desc, HDR_SHARE_SERIAL); 322 323 /* Import context from software */ 324 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 325 LDST_CLASS_2_CCB | ctx->ctx_len); 326 327 /* Class 2 operation */ 328 append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | 329 OP_ALG_ENCRYPT); 330 331 /* Load data and write to result or context */ 332 ahash_append_load_str(desc, ctx->ctx_len); 333 334 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 335 DMA_TO_DEVICE); 336 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { 337 dev_err(jrdev, "unable to map shared descriptor\n"); 338 return -ENOMEM; 339 } 340 #ifdef DEBUG 341 print_hex_dump(KERN_ERR, 342 "ahash update shdesc@"__stringify(__LINE__)": ", 343 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 344 #endif 345 346 /* ahash_update_first shared descriptor */ 347 desc = ctx->sh_desc_update_first; 348 349 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, 350 ctx->ctx_len, ctx); 351 352 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, 353 desc_bytes(desc), 354 DMA_TO_DEVICE); 355 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) { 356 dev_err(jrdev, "unable to map shared descriptor\n"); 357 return -ENOMEM; 358 } 359 #ifdef DEBUG 360 print_hex_dump(KERN_ERR, 361 "ahash update first shdesc@"__stringify(__LINE__)": ", 362 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 363 #endif 364 365 /* ahash_final shared descriptor */ 366 desc = ctx->sh_desc_fin; 367 368 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, 369 OP_ALG_AS_FINALIZE, digestsize, ctx); 370 371 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 372 DMA_TO_DEVICE); 373 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { 374 dev_err(jrdev, "unable to map shared descriptor\n"); 375 return -ENOMEM; 376 } 377 #ifdef DEBUG 378 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 379 DUMP_PREFIX_ADDRESS, 16, 4, desc, 380 desc_bytes(desc), 1); 381 #endif 382 383 /* ahash_finup shared descriptor */ 384 desc = ctx->sh_desc_finup; 385 386 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, 387 OP_ALG_AS_FINALIZE, digestsize, ctx); 388 389 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 390 DMA_TO_DEVICE); 391 if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) { 392 dev_err(jrdev, "unable to map shared descriptor\n"); 393 return -ENOMEM; 394 } 395 #ifdef DEBUG 396 print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ", 397 DUMP_PREFIX_ADDRESS, 16, 4, desc, 398 desc_bytes(desc), 1); 399 #endif 400 401 /* ahash_digest shared descriptor */ 402 desc = ctx->sh_desc_digest; 403 404 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, 405 digestsize, ctx); 406 407 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, 408 desc_bytes(desc), 409 DMA_TO_DEVICE); 410 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) { 411 dev_err(jrdev, "unable to map shared descriptor\n"); 412 return -ENOMEM; 413 } 414 #ifdef DEBUG 415 print_hex_dump(KERN_ERR, 416 "ahash digest shdesc@"__stringify(__LINE__)": ", 417 DUMP_PREFIX_ADDRESS, 16, 4, desc, 418 desc_bytes(desc), 1); 419 #endif 420 421 return 0; 422 } 423 424 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, 425 u32 keylen) 426 { 427 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, 428 ctx->split_key_pad_len, key_in, keylen, 429 ctx->alg_op); 430 } 431 432 /* Digest hash size if it is too large */ 433 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, 434 u32 *keylen, u8 *key_out, u32 digestsize) 435 { 436 struct device *jrdev = ctx->jrdev; 437 u32 *desc; 438 struct split_key_result result; 439 dma_addr_t src_dma, dst_dma; 440 int ret; 441 442 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 443 if (!desc) { 444 dev_err(jrdev, "unable to allocate key input memory\n"); 445 return -ENOMEM; 446 } 447 448 init_job_desc(desc, 0); 449 450 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, 451 DMA_TO_DEVICE); 452 if (dma_mapping_error(jrdev, src_dma)) { 453 dev_err(jrdev, "unable to map key input memory\n"); 454 kfree(desc); 455 return -ENOMEM; 456 } 457 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, 458 DMA_FROM_DEVICE); 459 if (dma_mapping_error(jrdev, dst_dma)) { 460 dev_err(jrdev, "unable to map key output memory\n"); 461 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 462 kfree(desc); 463 return -ENOMEM; 464 } 465 466 /* Job descriptor to perform unkeyed hash on key_in */ 467 append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | 468 OP_ALG_AS_INITFINAL); 469 append_seq_in_ptr(desc, src_dma, *keylen, 0); 470 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | 471 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); 472 append_seq_out_ptr(desc, dst_dma, digestsize, 0); 473 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 474 LDST_SRCDST_BYTE_CONTEXT); 475 476 #ifdef DEBUG 477 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", 478 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); 479 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 480 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 481 #endif 482 483 result.err = 0; 484 init_completion(&result.completion); 485 486 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); 487 if (!ret) { 488 /* in progress */ 489 wait_for_completion_interruptible(&result.completion); 490 ret = result.err; 491 #ifdef DEBUG 492 print_hex_dump(KERN_ERR, 493 "digested key@"__stringify(__LINE__)": ", 494 DUMP_PREFIX_ADDRESS, 16, 4, key_in, 495 digestsize, 1); 496 #endif 497 } 498 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 499 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); 500 501 *keylen = digestsize; 502 503 kfree(desc); 504 505 return ret; 506 } 507 508 static int ahash_setkey(struct crypto_ahash *ahash, 509 const u8 *key, unsigned int keylen) 510 { 511 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ 512 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; 513 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 514 struct device *jrdev = ctx->jrdev; 515 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 516 int digestsize = crypto_ahash_digestsize(ahash); 517 int ret; 518 u8 *hashed_key = NULL; 519 520 #ifdef DEBUG 521 printk(KERN_ERR "keylen %d\n", keylen); 522 #endif 523 524 if (keylen > blocksize) { 525 hashed_key = kmalloc_array(digestsize, 526 sizeof(*hashed_key), 527 GFP_KERNEL | GFP_DMA); 528 if (!hashed_key) 529 return -ENOMEM; 530 ret = hash_digest_key(ctx, key, &keylen, hashed_key, 531 digestsize); 532 if (ret) 533 goto bad_free_key; 534 key = hashed_key; 535 } 536 537 /* Pick class 2 key length from algorithm submask */ 538 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> 539 OP_ALG_ALGSEL_SHIFT] * 2; 540 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); 541 542 #ifdef DEBUG 543 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", 544 ctx->split_key_len, ctx->split_key_pad_len); 545 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 546 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 547 #endif 548 549 ret = gen_split_hash_key(ctx, key, keylen); 550 if (ret) 551 goto bad_free_key; 552 553 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, 554 DMA_TO_DEVICE); 555 if (dma_mapping_error(jrdev, ctx->key_dma)) { 556 dev_err(jrdev, "unable to map key i/o memory\n"); 557 ret = -ENOMEM; 558 goto error_free_key; 559 } 560 #ifdef DEBUG 561 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 562 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 563 ctx->split_key_pad_len, 1); 564 #endif 565 566 ret = ahash_set_sh_desc(ahash); 567 if (ret) { 568 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, 569 DMA_TO_DEVICE); 570 } 571 error_free_key: 572 kfree(hashed_key); 573 return ret; 574 bad_free_key: 575 kfree(hashed_key); 576 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); 577 return -EINVAL; 578 } 579 580 /* 581 * ahash_edesc - s/w-extended ahash descriptor 582 * @dst_dma: physical mapped address of req->result 583 * @sec4_sg_dma: physical mapped address of h/w link table 584 * @src_nents: number of segments in input scatterlist 585 * @sec4_sg_bytes: length of dma mapped sec4_sg space 586 * @hw_desc: the h/w job descriptor followed by any referenced link tables 587 * @sec4_sg: h/w link table 588 */ 589 struct ahash_edesc { 590 dma_addr_t dst_dma; 591 dma_addr_t sec4_sg_dma; 592 int src_nents; 593 int sec4_sg_bytes; 594 u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; 595 struct sec4_sg_entry sec4_sg[0]; 596 }; 597 598 static inline void ahash_unmap(struct device *dev, 599 struct ahash_edesc *edesc, 600 struct ahash_request *req, int dst_len) 601 { 602 if (edesc->src_nents) 603 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 604 if (edesc->dst_dma) 605 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); 606 607 if (edesc->sec4_sg_bytes) 608 dma_unmap_single(dev, edesc->sec4_sg_dma, 609 edesc->sec4_sg_bytes, DMA_TO_DEVICE); 610 } 611 612 static inline void ahash_unmap_ctx(struct device *dev, 613 struct ahash_edesc *edesc, 614 struct ahash_request *req, int dst_len, u32 flag) 615 { 616 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 617 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 618 struct caam_hash_state *state = ahash_request_ctx(req); 619 620 if (state->ctx_dma) 621 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); 622 ahash_unmap(dev, edesc, req, dst_len); 623 } 624 625 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, 626 void *context) 627 { 628 struct ahash_request *req = context; 629 struct ahash_edesc *edesc; 630 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 631 int digestsize = crypto_ahash_digestsize(ahash); 632 #ifdef DEBUG 633 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 634 struct caam_hash_state *state = ahash_request_ctx(req); 635 636 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 637 #endif 638 639 edesc = (struct ahash_edesc *)((char *)desc - 640 offsetof(struct ahash_edesc, hw_desc)); 641 if (err) 642 caam_jr_strstatus(jrdev, err); 643 644 ahash_unmap(jrdev, edesc, req, digestsize); 645 kfree(edesc); 646 647 #ifdef DEBUG 648 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 649 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 650 ctx->ctx_len, 1); 651 if (req->result) 652 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 653 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 654 digestsize, 1); 655 #endif 656 657 req->base.complete(&req->base, err); 658 } 659 660 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, 661 void *context) 662 { 663 struct ahash_request *req = context; 664 struct ahash_edesc *edesc; 665 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 666 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 667 #ifdef DEBUG 668 struct caam_hash_state *state = ahash_request_ctx(req); 669 int digestsize = crypto_ahash_digestsize(ahash); 670 671 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 672 #endif 673 674 edesc = (struct ahash_edesc *)((char *)desc - 675 offsetof(struct ahash_edesc, hw_desc)); 676 if (err) 677 caam_jr_strstatus(jrdev, err); 678 679 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 680 kfree(edesc); 681 682 #ifdef DEBUG 683 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 684 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 685 ctx->ctx_len, 1); 686 if (req->result) 687 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 688 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 689 digestsize, 1); 690 #endif 691 692 req->base.complete(&req->base, err); 693 } 694 695 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, 696 void *context) 697 { 698 struct ahash_request *req = context; 699 struct ahash_edesc *edesc; 700 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 701 int digestsize = crypto_ahash_digestsize(ahash); 702 #ifdef DEBUG 703 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 704 struct caam_hash_state *state = ahash_request_ctx(req); 705 706 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 707 #endif 708 709 edesc = (struct ahash_edesc *)((char *)desc - 710 offsetof(struct ahash_edesc, hw_desc)); 711 if (err) 712 caam_jr_strstatus(jrdev, err); 713 714 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); 715 kfree(edesc); 716 717 #ifdef DEBUG 718 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 719 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 720 ctx->ctx_len, 1); 721 if (req->result) 722 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 723 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 724 digestsize, 1); 725 #endif 726 727 req->base.complete(&req->base, err); 728 } 729 730 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, 731 void *context) 732 { 733 struct ahash_request *req = context; 734 struct ahash_edesc *edesc; 735 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 736 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 737 #ifdef DEBUG 738 struct caam_hash_state *state = ahash_request_ctx(req); 739 int digestsize = crypto_ahash_digestsize(ahash); 740 741 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 742 #endif 743 744 edesc = (struct ahash_edesc *)((char *)desc - 745 offsetof(struct ahash_edesc, hw_desc)); 746 if (err) 747 caam_jr_strstatus(jrdev, err); 748 749 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); 750 kfree(edesc); 751 752 #ifdef DEBUG 753 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 754 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 755 ctx->ctx_len, 1); 756 if (req->result) 757 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 758 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 759 digestsize, 1); 760 #endif 761 762 req->base.complete(&req->base, err); 763 } 764 765 /* 766 * Allocate an enhanced descriptor, which contains the hardware descriptor 767 * and space for hardware scatter table containing sg_num entries. 768 */ 769 static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, 770 int sg_num, u32 *sh_desc, 771 dma_addr_t sh_desc_dma, 772 gfp_t flags) 773 { 774 struct ahash_edesc *edesc; 775 unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); 776 777 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); 778 if (!edesc) { 779 dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); 780 return NULL; 781 } 782 783 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), 784 HDR_SHARE_DEFER | HDR_REVERSE); 785 786 return edesc; 787 } 788 789 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, 790 struct ahash_edesc *edesc, 791 struct ahash_request *req, int nents, 792 unsigned int first_sg, 793 unsigned int first_bytes, size_t to_hash) 794 { 795 dma_addr_t src_dma; 796 u32 options; 797 798 if (nents > 1 || first_sg) { 799 struct sec4_sg_entry *sg = edesc->sec4_sg; 800 unsigned int sgsize = sizeof(*sg) * (first_sg + nents); 801 802 sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); 803 804 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); 805 if (dma_mapping_error(ctx->jrdev, src_dma)) { 806 dev_err(ctx->jrdev, "unable to map S/G table\n"); 807 return -ENOMEM; 808 } 809 810 edesc->sec4_sg_bytes = sgsize; 811 edesc->sec4_sg_dma = src_dma; 812 options = LDST_SGF; 813 } else { 814 src_dma = sg_dma_address(req->src); 815 options = 0; 816 } 817 818 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, 819 options); 820 821 return 0; 822 } 823 824 /* submit update job descriptor */ 825 static int ahash_update_ctx(struct ahash_request *req) 826 { 827 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 828 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 829 struct caam_hash_state *state = ahash_request_ctx(req); 830 struct device *jrdev = ctx->jrdev; 831 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 832 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 833 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 834 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; 835 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; 836 int *next_buflen = state->current_buf ? &state->buflen_0 : 837 &state->buflen_1, last_buflen; 838 int in_len = *buflen + req->nbytes, to_hash; 839 u32 *desc; 840 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; 841 struct ahash_edesc *edesc; 842 int ret = 0; 843 844 last_buflen = *next_buflen; 845 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); 846 to_hash = in_len - *next_buflen; 847 848 if (to_hash) { 849 src_nents = sg_nents_for_len(req->src, 850 req->nbytes - (*next_buflen)); 851 if (src_nents < 0) { 852 dev_err(jrdev, "Invalid number of src SG.\n"); 853 return src_nents; 854 } 855 856 if (src_nents) { 857 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 858 DMA_TO_DEVICE); 859 if (!mapped_nents) { 860 dev_err(jrdev, "unable to DMA map source\n"); 861 return -ENOMEM; 862 } 863 } else { 864 mapped_nents = 0; 865 } 866 867 sec4_sg_src_index = 1 + (*buflen ? 1 : 0); 868 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 869 sizeof(struct sec4_sg_entry); 870 871 /* 872 * allocate space for base edesc and hw desc commands, 873 * link tables 874 */ 875 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 876 ctx->sh_desc_update, 877 ctx->sh_desc_update_dma, flags); 878 if (!edesc) { 879 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 880 return -ENOMEM; 881 } 882 883 edesc->src_nents = src_nents; 884 edesc->sec4_sg_bytes = sec4_sg_bytes; 885 886 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 887 edesc->sec4_sg, DMA_BIDIRECTIONAL); 888 if (ret) 889 goto unmap_ctx; 890 891 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, 892 edesc->sec4_sg + 1, 893 buf, state->buf_dma, 894 *buflen, last_buflen); 895 896 if (mapped_nents) { 897 sg_to_sec4_sg_last(req->src, mapped_nents, 898 edesc->sec4_sg + sec4_sg_src_index, 899 0); 900 if (*next_buflen) 901 scatterwalk_map_and_copy(next_buf, req->src, 902 to_hash - *buflen, 903 *next_buflen, 0); 904 } else { 905 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= 906 cpu_to_caam32(SEC4_SG_LEN_FIN); 907 } 908 909 state->current_buf = !state->current_buf; 910 911 desc = edesc->hw_desc; 912 913 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 914 sec4_sg_bytes, 915 DMA_TO_DEVICE); 916 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 917 dev_err(jrdev, "unable to map S/G table\n"); 918 ret = -ENOMEM; 919 goto unmap_ctx; 920 } 921 922 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + 923 to_hash, LDST_SGF); 924 925 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 926 927 #ifdef DEBUG 928 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 929 DUMP_PREFIX_ADDRESS, 16, 4, desc, 930 desc_bytes(desc), 1); 931 #endif 932 933 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); 934 if (ret) 935 goto unmap_ctx; 936 937 ret = -EINPROGRESS; 938 } else if (*next_buflen) { 939 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 940 req->nbytes, 0); 941 *buflen = *next_buflen; 942 *next_buflen = last_buflen; 943 } 944 #ifdef DEBUG 945 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 946 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 947 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 948 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 949 *next_buflen, 1); 950 #endif 951 952 return ret; 953 unmap_ctx: 954 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 955 kfree(edesc); 956 return ret; 957 } 958 959 static int ahash_final_ctx(struct ahash_request *req) 960 { 961 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 962 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 963 struct caam_hash_state *state = ahash_request_ctx(req); 964 struct device *jrdev = ctx->jrdev; 965 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 966 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 967 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 968 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 969 int last_buflen = state->current_buf ? state->buflen_0 : 970 state->buflen_1; 971 u32 *desc; 972 int sec4_sg_bytes, sec4_sg_src_index; 973 int digestsize = crypto_ahash_digestsize(ahash); 974 struct ahash_edesc *edesc; 975 int ret; 976 977 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 978 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); 979 980 /* allocate space for base edesc and hw desc commands, link tables */ 981 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, 982 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 983 flags); 984 if (!edesc) 985 return -ENOMEM; 986 987 desc = edesc->hw_desc; 988 989 edesc->sec4_sg_bytes = sec4_sg_bytes; 990 edesc->src_nents = 0; 991 992 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 993 edesc->sec4_sg, DMA_TO_DEVICE); 994 if (ret) 995 goto unmap_ctx; 996 997 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, 998 buf, state->buf_dma, buflen, 999 last_buflen); 1000 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= 1001 cpu_to_caam32(SEC4_SG_LEN_FIN); 1002 1003 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1004 sec4_sg_bytes, DMA_TO_DEVICE); 1005 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1006 dev_err(jrdev, "unable to map S/G table\n"); 1007 ret = -ENOMEM; 1008 goto unmap_ctx; 1009 } 1010 1011 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, 1012 LDST_SGF); 1013 1014 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1015 digestsize); 1016 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1017 dev_err(jrdev, "unable to map dst\n"); 1018 ret = -ENOMEM; 1019 goto unmap_ctx; 1020 } 1021 1022 #ifdef DEBUG 1023 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1024 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1025 #endif 1026 1027 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 1028 if (ret) 1029 goto unmap_ctx; 1030 1031 return -EINPROGRESS; 1032 unmap_ctx: 1033 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1034 kfree(edesc); 1035 return ret; 1036 } 1037 1038 static int ahash_finup_ctx(struct ahash_request *req) 1039 { 1040 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1041 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1042 struct caam_hash_state *state = ahash_request_ctx(req); 1043 struct device *jrdev = ctx->jrdev; 1044 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1045 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1046 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1047 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 1048 int last_buflen = state->current_buf ? state->buflen_0 : 1049 state->buflen_1; 1050 u32 *desc; 1051 int sec4_sg_src_index; 1052 int src_nents, mapped_nents; 1053 int digestsize = crypto_ahash_digestsize(ahash); 1054 struct ahash_edesc *edesc; 1055 int ret; 1056 1057 src_nents = sg_nents_for_len(req->src, req->nbytes); 1058 if (src_nents < 0) { 1059 dev_err(jrdev, "Invalid number of src SG.\n"); 1060 return src_nents; 1061 } 1062 1063 if (src_nents) { 1064 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1065 DMA_TO_DEVICE); 1066 if (!mapped_nents) { 1067 dev_err(jrdev, "unable to DMA map source\n"); 1068 return -ENOMEM; 1069 } 1070 } else { 1071 mapped_nents = 0; 1072 } 1073 1074 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 1075 1076 /* allocate space for base edesc and hw desc commands, link tables */ 1077 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 1078 ctx->sh_desc_finup, ctx->sh_desc_finup_dma, 1079 flags); 1080 if (!edesc) { 1081 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1082 return -ENOMEM; 1083 } 1084 1085 desc = edesc->hw_desc; 1086 1087 edesc->src_nents = src_nents; 1088 1089 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 1090 edesc->sec4_sg, DMA_TO_DEVICE); 1091 if (ret) 1092 goto unmap_ctx; 1093 1094 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, 1095 buf, state->buf_dma, buflen, 1096 last_buflen); 1097 1098 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1099 sec4_sg_src_index, ctx->ctx_len + buflen, 1100 req->nbytes); 1101 if (ret) 1102 goto unmap_ctx; 1103 1104 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1105 digestsize); 1106 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1107 dev_err(jrdev, "unable to map dst\n"); 1108 ret = -ENOMEM; 1109 goto unmap_ctx; 1110 } 1111 1112 #ifdef DEBUG 1113 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1114 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1115 #endif 1116 1117 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 1118 if (ret) 1119 goto unmap_ctx; 1120 1121 return -EINPROGRESS; 1122 unmap_ctx: 1123 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1124 kfree(edesc); 1125 return ret; 1126 } 1127 1128 static int ahash_digest(struct ahash_request *req) 1129 { 1130 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1131 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1132 struct device *jrdev = ctx->jrdev; 1133 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1134 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1135 u32 *desc; 1136 int digestsize = crypto_ahash_digestsize(ahash); 1137 int src_nents, mapped_nents; 1138 struct ahash_edesc *edesc; 1139 int ret; 1140 1141 src_nents = sg_nents_for_len(req->src, req->nbytes); 1142 if (src_nents < 0) { 1143 dev_err(jrdev, "Invalid number of src SG.\n"); 1144 return src_nents; 1145 } 1146 1147 if (src_nents) { 1148 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1149 DMA_TO_DEVICE); 1150 if (!mapped_nents) { 1151 dev_err(jrdev, "unable to map source for DMA\n"); 1152 return -ENOMEM; 1153 } 1154 } else { 1155 mapped_nents = 0; 1156 } 1157 1158 /* allocate space for base edesc and hw desc commands, link tables */ 1159 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0, 1160 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1161 flags); 1162 if (!edesc) { 1163 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1164 return -ENOMEM; 1165 } 1166 1167 edesc->src_nents = src_nents; 1168 1169 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1170 req->nbytes); 1171 if (ret) { 1172 ahash_unmap(jrdev, edesc, req, digestsize); 1173 kfree(edesc); 1174 return ret; 1175 } 1176 1177 desc = edesc->hw_desc; 1178 1179 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1180 digestsize); 1181 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1182 dev_err(jrdev, "unable to map dst\n"); 1183 ahash_unmap(jrdev, edesc, req, digestsize); 1184 kfree(edesc); 1185 return -ENOMEM; 1186 } 1187 1188 #ifdef DEBUG 1189 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1190 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1191 #endif 1192 1193 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1194 if (!ret) { 1195 ret = -EINPROGRESS; 1196 } else { 1197 ahash_unmap(jrdev, edesc, req, digestsize); 1198 kfree(edesc); 1199 } 1200 1201 return ret; 1202 } 1203 1204 /* submit ahash final if it the first job descriptor */ 1205 static int ahash_final_no_ctx(struct ahash_request *req) 1206 { 1207 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1208 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1209 struct caam_hash_state *state = ahash_request_ctx(req); 1210 struct device *jrdev = ctx->jrdev; 1211 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1212 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1213 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1214 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 1215 u32 *desc; 1216 int digestsize = crypto_ahash_digestsize(ahash); 1217 struct ahash_edesc *edesc; 1218 int ret; 1219 1220 /* allocate space for base edesc and hw desc commands, link tables */ 1221 edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest, 1222 ctx->sh_desc_digest_dma, flags); 1223 if (!edesc) 1224 return -ENOMEM; 1225 1226 desc = edesc->hw_desc; 1227 1228 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 1229 if (dma_mapping_error(jrdev, state->buf_dma)) { 1230 dev_err(jrdev, "unable to map src\n"); 1231 goto unmap; 1232 } 1233 1234 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1235 1236 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1237 digestsize); 1238 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1239 dev_err(jrdev, "unable to map dst\n"); 1240 goto unmap; 1241 } 1242 edesc->src_nents = 0; 1243 1244 #ifdef DEBUG 1245 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1246 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1247 #endif 1248 1249 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1250 if (!ret) { 1251 ret = -EINPROGRESS; 1252 } else { 1253 ahash_unmap(jrdev, edesc, req, digestsize); 1254 kfree(edesc); 1255 } 1256 1257 return ret; 1258 unmap: 1259 ahash_unmap(jrdev, edesc, req, digestsize); 1260 kfree(edesc); 1261 return -ENOMEM; 1262 1263 } 1264 1265 /* submit ahash update if it the first job descriptor after update */ 1266 static int ahash_update_no_ctx(struct ahash_request *req) 1267 { 1268 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1269 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1270 struct caam_hash_state *state = ahash_request_ctx(req); 1271 struct device *jrdev = ctx->jrdev; 1272 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1273 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1274 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1275 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; 1276 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; 1277 int *next_buflen = state->current_buf ? &state->buflen_0 : 1278 &state->buflen_1; 1279 int in_len = *buflen + req->nbytes, to_hash; 1280 int sec4_sg_bytes, src_nents, mapped_nents; 1281 struct ahash_edesc *edesc; 1282 u32 *desc; 1283 int ret = 0; 1284 1285 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); 1286 to_hash = in_len - *next_buflen; 1287 1288 if (to_hash) { 1289 src_nents = sg_nents_for_len(req->src, 1290 req->nbytes - *next_buflen); 1291 if (src_nents < 0) { 1292 dev_err(jrdev, "Invalid number of src SG.\n"); 1293 return src_nents; 1294 } 1295 1296 if (src_nents) { 1297 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1298 DMA_TO_DEVICE); 1299 if (!mapped_nents) { 1300 dev_err(jrdev, "unable to DMA map source\n"); 1301 return -ENOMEM; 1302 } 1303 } else { 1304 mapped_nents = 0; 1305 } 1306 1307 sec4_sg_bytes = (1 + mapped_nents) * 1308 sizeof(struct sec4_sg_entry); 1309 1310 /* 1311 * allocate space for base edesc and hw desc commands, 1312 * link tables 1313 */ 1314 edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, 1315 ctx->sh_desc_update_first, 1316 ctx->sh_desc_update_first_dma, 1317 flags); 1318 if (!edesc) { 1319 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1320 return -ENOMEM; 1321 } 1322 1323 edesc->src_nents = src_nents; 1324 edesc->sec4_sg_bytes = sec4_sg_bytes; 1325 edesc->dst_dma = 0; 1326 1327 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, 1328 buf, *buflen); 1329 sg_to_sec4_sg_last(req->src, mapped_nents, 1330 edesc->sec4_sg + 1, 0); 1331 1332 if (*next_buflen) { 1333 scatterwalk_map_and_copy(next_buf, req->src, 1334 to_hash - *buflen, 1335 *next_buflen, 0); 1336 } 1337 1338 state->current_buf = !state->current_buf; 1339 1340 desc = edesc->hw_desc; 1341 1342 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1343 sec4_sg_bytes, 1344 DMA_TO_DEVICE); 1345 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1346 dev_err(jrdev, "unable to map S/G table\n"); 1347 ret = -ENOMEM; 1348 goto unmap_ctx; 1349 } 1350 1351 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); 1352 1353 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1354 if (ret) 1355 goto unmap_ctx; 1356 1357 #ifdef DEBUG 1358 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1359 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1360 desc_bytes(desc), 1); 1361 #endif 1362 1363 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1364 if (ret) 1365 goto unmap_ctx; 1366 1367 ret = -EINPROGRESS; 1368 state->update = ahash_update_ctx; 1369 state->finup = ahash_finup_ctx; 1370 state->final = ahash_final_ctx; 1371 } else if (*next_buflen) { 1372 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 1373 req->nbytes, 0); 1374 *buflen = *next_buflen; 1375 *next_buflen = 0; 1376 } 1377 #ifdef DEBUG 1378 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 1379 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1380 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1381 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1382 *next_buflen, 1); 1383 #endif 1384 1385 return ret; 1386 unmap_ctx: 1387 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1388 kfree(edesc); 1389 return ret; 1390 } 1391 1392 /* submit ahash finup if it the first job descriptor after update */ 1393 static int ahash_finup_no_ctx(struct ahash_request *req) 1394 { 1395 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1396 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1397 struct caam_hash_state *state = ahash_request_ctx(req); 1398 struct device *jrdev = ctx->jrdev; 1399 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1400 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1401 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1402 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 1403 int last_buflen = state->current_buf ? state->buflen_0 : 1404 state->buflen_1; 1405 u32 *desc; 1406 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; 1407 int digestsize = crypto_ahash_digestsize(ahash); 1408 struct ahash_edesc *edesc; 1409 int ret; 1410 1411 src_nents = sg_nents_for_len(req->src, req->nbytes); 1412 if (src_nents < 0) { 1413 dev_err(jrdev, "Invalid number of src SG.\n"); 1414 return src_nents; 1415 } 1416 1417 if (src_nents) { 1418 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1419 DMA_TO_DEVICE); 1420 if (!mapped_nents) { 1421 dev_err(jrdev, "unable to DMA map source\n"); 1422 return -ENOMEM; 1423 } 1424 } else { 1425 mapped_nents = 0; 1426 } 1427 1428 sec4_sg_src_index = 2; 1429 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 1430 sizeof(struct sec4_sg_entry); 1431 1432 /* allocate space for base edesc and hw desc commands, link tables */ 1433 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 1434 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1435 flags); 1436 if (!edesc) { 1437 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1438 return -ENOMEM; 1439 } 1440 1441 desc = edesc->hw_desc; 1442 1443 edesc->src_nents = src_nents; 1444 edesc->sec4_sg_bytes = sec4_sg_bytes; 1445 1446 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, 1447 state->buf_dma, buflen, 1448 last_buflen); 1449 1450 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, 1451 req->nbytes); 1452 if (ret) { 1453 dev_err(jrdev, "unable to map S/G table\n"); 1454 goto unmap; 1455 } 1456 1457 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1458 digestsize); 1459 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1460 dev_err(jrdev, "unable to map dst\n"); 1461 goto unmap; 1462 } 1463 1464 #ifdef DEBUG 1465 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1466 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1467 #endif 1468 1469 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1470 if (!ret) { 1471 ret = -EINPROGRESS; 1472 } else { 1473 ahash_unmap(jrdev, edesc, req, digestsize); 1474 kfree(edesc); 1475 } 1476 1477 return ret; 1478 unmap: 1479 ahash_unmap(jrdev, edesc, req, digestsize); 1480 kfree(edesc); 1481 return -ENOMEM; 1482 1483 } 1484 1485 /* submit first update job descriptor after init */ 1486 static int ahash_update_first(struct ahash_request *req) 1487 { 1488 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1489 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1490 struct caam_hash_state *state = ahash_request_ctx(req); 1491 struct device *jrdev = ctx->jrdev; 1492 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1493 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1494 u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0; 1495 int *next_buflen = state->current_buf ? 1496 &state->buflen_1 : &state->buflen_0; 1497 int to_hash; 1498 u32 *desc; 1499 int src_nents, mapped_nents; 1500 struct ahash_edesc *edesc; 1501 int ret = 0; 1502 1503 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - 1504 1); 1505 to_hash = req->nbytes - *next_buflen; 1506 1507 if (to_hash) { 1508 src_nents = sg_nents_for_len(req->src, 1509 req->nbytes - *next_buflen); 1510 if (src_nents < 0) { 1511 dev_err(jrdev, "Invalid number of src SG.\n"); 1512 return src_nents; 1513 } 1514 1515 if (src_nents) { 1516 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1517 DMA_TO_DEVICE); 1518 if (!mapped_nents) { 1519 dev_err(jrdev, "unable to map source for DMA\n"); 1520 return -ENOMEM; 1521 } 1522 } else { 1523 mapped_nents = 0; 1524 } 1525 1526 /* 1527 * allocate space for base edesc and hw desc commands, 1528 * link tables 1529 */ 1530 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 1531 mapped_nents : 0, 1532 ctx->sh_desc_update_first, 1533 ctx->sh_desc_update_first_dma, 1534 flags); 1535 if (!edesc) { 1536 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1537 return -ENOMEM; 1538 } 1539 1540 edesc->src_nents = src_nents; 1541 edesc->dst_dma = 0; 1542 1543 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1544 to_hash); 1545 if (ret) 1546 goto unmap_ctx; 1547 1548 if (*next_buflen) 1549 scatterwalk_map_and_copy(next_buf, req->src, to_hash, 1550 *next_buflen, 0); 1551 1552 desc = edesc->hw_desc; 1553 1554 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1555 if (ret) 1556 goto unmap_ctx; 1557 1558 #ifdef DEBUG 1559 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1560 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1561 desc_bytes(desc), 1); 1562 #endif 1563 1564 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1565 if (ret) 1566 goto unmap_ctx; 1567 1568 ret = -EINPROGRESS; 1569 state->update = ahash_update_ctx; 1570 state->finup = ahash_finup_ctx; 1571 state->final = ahash_final_ctx; 1572 } else if (*next_buflen) { 1573 state->update = ahash_update_no_ctx; 1574 state->finup = ahash_finup_no_ctx; 1575 state->final = ahash_final_no_ctx; 1576 scatterwalk_map_and_copy(next_buf, req->src, 0, 1577 req->nbytes, 0); 1578 } 1579 #ifdef DEBUG 1580 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1581 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1582 *next_buflen, 1); 1583 #endif 1584 1585 return ret; 1586 unmap_ctx: 1587 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1588 kfree(edesc); 1589 return ret; 1590 } 1591 1592 static int ahash_finup_first(struct ahash_request *req) 1593 { 1594 return ahash_digest(req); 1595 } 1596 1597 static int ahash_init(struct ahash_request *req) 1598 { 1599 struct caam_hash_state *state = ahash_request_ctx(req); 1600 1601 state->update = ahash_update_first; 1602 state->finup = ahash_finup_first; 1603 state->final = ahash_final_no_ctx; 1604 1605 state->current_buf = 0; 1606 state->buf_dma = 0; 1607 state->buflen_0 = 0; 1608 state->buflen_1 = 0; 1609 1610 return 0; 1611 } 1612 1613 static int ahash_update(struct ahash_request *req) 1614 { 1615 struct caam_hash_state *state = ahash_request_ctx(req); 1616 1617 return state->update(req); 1618 } 1619 1620 static int ahash_finup(struct ahash_request *req) 1621 { 1622 struct caam_hash_state *state = ahash_request_ctx(req); 1623 1624 return state->finup(req); 1625 } 1626 1627 static int ahash_final(struct ahash_request *req) 1628 { 1629 struct caam_hash_state *state = ahash_request_ctx(req); 1630 1631 return state->final(req); 1632 } 1633 1634 static int ahash_export(struct ahash_request *req, void *out) 1635 { 1636 struct caam_hash_state *state = ahash_request_ctx(req); 1637 struct caam_export_state *export = out; 1638 int len; 1639 u8 *buf; 1640 1641 if (state->current_buf) { 1642 buf = state->buf_1; 1643 len = state->buflen_1; 1644 } else { 1645 buf = state->buf_0; 1646 len = state->buflen_0; 1647 } 1648 1649 memcpy(export->buf, buf, len); 1650 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); 1651 export->buflen = len; 1652 export->update = state->update; 1653 export->final = state->final; 1654 export->finup = state->finup; 1655 1656 return 0; 1657 } 1658 1659 static int ahash_import(struct ahash_request *req, const void *in) 1660 { 1661 struct caam_hash_state *state = ahash_request_ctx(req); 1662 const struct caam_export_state *export = in; 1663 1664 memset(state, 0, sizeof(*state)); 1665 memcpy(state->buf_0, export->buf, export->buflen); 1666 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); 1667 state->buflen_0 = export->buflen; 1668 state->update = export->update; 1669 state->final = export->final; 1670 state->finup = export->finup; 1671 1672 return 0; 1673 } 1674 1675 struct caam_hash_template { 1676 char name[CRYPTO_MAX_ALG_NAME]; 1677 char driver_name[CRYPTO_MAX_ALG_NAME]; 1678 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1679 char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1680 unsigned int blocksize; 1681 struct ahash_alg template_ahash; 1682 u32 alg_type; 1683 u32 alg_op; 1684 }; 1685 1686 /* ahash descriptors */ 1687 static struct caam_hash_template driver_hash[] = { 1688 { 1689 .name = "sha1", 1690 .driver_name = "sha1-caam", 1691 .hmac_name = "hmac(sha1)", 1692 .hmac_driver_name = "hmac-sha1-caam", 1693 .blocksize = SHA1_BLOCK_SIZE, 1694 .template_ahash = { 1695 .init = ahash_init, 1696 .update = ahash_update, 1697 .final = ahash_final, 1698 .finup = ahash_finup, 1699 .digest = ahash_digest, 1700 .export = ahash_export, 1701 .import = ahash_import, 1702 .setkey = ahash_setkey, 1703 .halg = { 1704 .digestsize = SHA1_DIGEST_SIZE, 1705 .statesize = sizeof(struct caam_export_state), 1706 }, 1707 }, 1708 .alg_type = OP_ALG_ALGSEL_SHA1, 1709 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 1710 }, { 1711 .name = "sha224", 1712 .driver_name = "sha224-caam", 1713 .hmac_name = "hmac(sha224)", 1714 .hmac_driver_name = "hmac-sha224-caam", 1715 .blocksize = SHA224_BLOCK_SIZE, 1716 .template_ahash = { 1717 .init = ahash_init, 1718 .update = ahash_update, 1719 .final = ahash_final, 1720 .finup = ahash_finup, 1721 .digest = ahash_digest, 1722 .export = ahash_export, 1723 .import = ahash_import, 1724 .setkey = ahash_setkey, 1725 .halg = { 1726 .digestsize = SHA224_DIGEST_SIZE, 1727 .statesize = sizeof(struct caam_export_state), 1728 }, 1729 }, 1730 .alg_type = OP_ALG_ALGSEL_SHA224, 1731 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, 1732 }, { 1733 .name = "sha256", 1734 .driver_name = "sha256-caam", 1735 .hmac_name = "hmac(sha256)", 1736 .hmac_driver_name = "hmac-sha256-caam", 1737 .blocksize = SHA256_BLOCK_SIZE, 1738 .template_ahash = { 1739 .init = ahash_init, 1740 .update = ahash_update, 1741 .final = ahash_final, 1742 .finup = ahash_finup, 1743 .digest = ahash_digest, 1744 .export = ahash_export, 1745 .import = ahash_import, 1746 .setkey = ahash_setkey, 1747 .halg = { 1748 .digestsize = SHA256_DIGEST_SIZE, 1749 .statesize = sizeof(struct caam_export_state), 1750 }, 1751 }, 1752 .alg_type = OP_ALG_ALGSEL_SHA256, 1753 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, 1754 }, { 1755 .name = "sha384", 1756 .driver_name = "sha384-caam", 1757 .hmac_name = "hmac(sha384)", 1758 .hmac_driver_name = "hmac-sha384-caam", 1759 .blocksize = SHA384_BLOCK_SIZE, 1760 .template_ahash = { 1761 .init = ahash_init, 1762 .update = ahash_update, 1763 .final = ahash_final, 1764 .finup = ahash_finup, 1765 .digest = ahash_digest, 1766 .export = ahash_export, 1767 .import = ahash_import, 1768 .setkey = ahash_setkey, 1769 .halg = { 1770 .digestsize = SHA384_DIGEST_SIZE, 1771 .statesize = sizeof(struct caam_export_state), 1772 }, 1773 }, 1774 .alg_type = OP_ALG_ALGSEL_SHA384, 1775 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, 1776 }, { 1777 .name = "sha512", 1778 .driver_name = "sha512-caam", 1779 .hmac_name = "hmac(sha512)", 1780 .hmac_driver_name = "hmac-sha512-caam", 1781 .blocksize = SHA512_BLOCK_SIZE, 1782 .template_ahash = { 1783 .init = ahash_init, 1784 .update = ahash_update, 1785 .final = ahash_final, 1786 .finup = ahash_finup, 1787 .digest = ahash_digest, 1788 .export = ahash_export, 1789 .import = ahash_import, 1790 .setkey = ahash_setkey, 1791 .halg = { 1792 .digestsize = SHA512_DIGEST_SIZE, 1793 .statesize = sizeof(struct caam_export_state), 1794 }, 1795 }, 1796 .alg_type = OP_ALG_ALGSEL_SHA512, 1797 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 1798 }, { 1799 .name = "md5", 1800 .driver_name = "md5-caam", 1801 .hmac_name = "hmac(md5)", 1802 .hmac_driver_name = "hmac-md5-caam", 1803 .blocksize = MD5_BLOCK_WORDS * 4, 1804 .template_ahash = { 1805 .init = ahash_init, 1806 .update = ahash_update, 1807 .final = ahash_final, 1808 .finup = ahash_finup, 1809 .digest = ahash_digest, 1810 .export = ahash_export, 1811 .import = ahash_import, 1812 .setkey = ahash_setkey, 1813 .halg = { 1814 .digestsize = MD5_DIGEST_SIZE, 1815 .statesize = sizeof(struct caam_export_state), 1816 }, 1817 }, 1818 .alg_type = OP_ALG_ALGSEL_MD5, 1819 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 1820 }, 1821 }; 1822 1823 struct caam_hash_alg { 1824 struct list_head entry; 1825 int alg_type; 1826 int alg_op; 1827 struct ahash_alg ahash_alg; 1828 }; 1829 1830 static int caam_hash_cra_init(struct crypto_tfm *tfm) 1831 { 1832 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 1833 struct crypto_alg *base = tfm->__crt_alg; 1834 struct hash_alg_common *halg = 1835 container_of(base, struct hash_alg_common, base); 1836 struct ahash_alg *alg = 1837 container_of(halg, struct ahash_alg, halg); 1838 struct caam_hash_alg *caam_hash = 1839 container_of(alg, struct caam_hash_alg, ahash_alg); 1840 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1841 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1842 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1843 HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1844 HASH_MSG_LEN + 32, 1845 HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1846 HASH_MSG_LEN + 64, 1847 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1848 1849 /* 1850 * Get a Job ring from Job Ring driver to ensure in-order 1851 * crypto request processing per tfm 1852 */ 1853 ctx->jrdev = caam_jr_alloc(); 1854 if (IS_ERR(ctx->jrdev)) { 1855 pr_err("Job Ring Device allocation for transform failed\n"); 1856 return PTR_ERR(ctx->jrdev); 1857 } 1858 /* copy descriptor header template value */ 1859 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1860 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; 1861 1862 ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> 1863 OP_ALG_ALGSEL_SHIFT]; 1864 1865 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1866 sizeof(struct caam_hash_state)); 1867 return ahash_set_sh_desc(ahash); 1868 } 1869 1870 static void caam_hash_cra_exit(struct crypto_tfm *tfm) 1871 { 1872 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1873 1874 if (ctx->sh_desc_update_dma && 1875 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) 1876 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, 1877 desc_bytes(ctx->sh_desc_update), 1878 DMA_TO_DEVICE); 1879 if (ctx->sh_desc_update_first_dma && 1880 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) 1881 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, 1882 desc_bytes(ctx->sh_desc_update_first), 1883 DMA_TO_DEVICE); 1884 if (ctx->sh_desc_fin_dma && 1885 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) 1886 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, 1887 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE); 1888 if (ctx->sh_desc_digest_dma && 1889 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) 1890 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, 1891 desc_bytes(ctx->sh_desc_digest), 1892 DMA_TO_DEVICE); 1893 if (ctx->sh_desc_finup_dma && 1894 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) 1895 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, 1896 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); 1897 1898 caam_jr_free(ctx->jrdev); 1899 } 1900 1901 static void __exit caam_algapi_hash_exit(void) 1902 { 1903 struct caam_hash_alg *t_alg, *n; 1904 1905 if (!hash_list.next) 1906 return; 1907 1908 list_for_each_entry_safe(t_alg, n, &hash_list, entry) { 1909 crypto_unregister_ahash(&t_alg->ahash_alg); 1910 list_del(&t_alg->entry); 1911 kfree(t_alg); 1912 } 1913 } 1914 1915 static struct caam_hash_alg * 1916 caam_hash_alloc(struct caam_hash_template *template, 1917 bool keyed) 1918 { 1919 struct caam_hash_alg *t_alg; 1920 struct ahash_alg *halg; 1921 struct crypto_alg *alg; 1922 1923 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 1924 if (!t_alg) { 1925 pr_err("failed to allocate t_alg\n"); 1926 return ERR_PTR(-ENOMEM); 1927 } 1928 1929 t_alg->ahash_alg = template->template_ahash; 1930 halg = &t_alg->ahash_alg; 1931 alg = &halg->halg.base; 1932 1933 if (keyed) { 1934 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1935 template->hmac_name); 1936 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1937 template->hmac_driver_name); 1938 } else { 1939 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1940 template->name); 1941 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1942 template->driver_name); 1943 t_alg->ahash_alg.setkey = NULL; 1944 } 1945 alg->cra_module = THIS_MODULE; 1946 alg->cra_init = caam_hash_cra_init; 1947 alg->cra_exit = caam_hash_cra_exit; 1948 alg->cra_ctxsize = sizeof(struct caam_hash_ctx); 1949 alg->cra_priority = CAAM_CRA_PRIORITY; 1950 alg->cra_blocksize = template->blocksize; 1951 alg->cra_alignmask = 0; 1952 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH; 1953 alg->cra_type = &crypto_ahash_type; 1954 1955 t_alg->alg_type = template->alg_type; 1956 t_alg->alg_op = template->alg_op; 1957 1958 return t_alg; 1959 } 1960 1961 static int __init caam_algapi_hash_init(void) 1962 { 1963 struct device_node *dev_node; 1964 struct platform_device *pdev; 1965 struct device *ctrldev; 1966 int i = 0, err = 0; 1967 struct caam_drv_private *priv; 1968 unsigned int md_limit = SHA512_DIGEST_SIZE; 1969 u32 cha_inst, cha_vid; 1970 1971 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1972 if (!dev_node) { 1973 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 1974 if (!dev_node) 1975 return -ENODEV; 1976 } 1977 1978 pdev = of_find_device_by_node(dev_node); 1979 if (!pdev) { 1980 of_node_put(dev_node); 1981 return -ENODEV; 1982 } 1983 1984 ctrldev = &pdev->dev; 1985 priv = dev_get_drvdata(ctrldev); 1986 of_node_put(dev_node); 1987 1988 /* 1989 * If priv is NULL, it's probably because the caam driver wasn't 1990 * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 1991 */ 1992 if (!priv) 1993 return -ENODEV; 1994 1995 /* 1996 * Register crypto algorithms the device supports. First, identify 1997 * presence and attributes of MD block. 1998 */ 1999 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 2000 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 2001 2002 /* 2003 * Skip registration of any hashing algorithms if MD block 2004 * is not present. 2005 */ 2006 if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT)) 2007 return -ENODEV; 2008 2009 /* Limit digest size based on LP256 */ 2010 if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256) 2011 md_limit = SHA256_DIGEST_SIZE; 2012 2013 INIT_LIST_HEAD(&hash_list); 2014 2015 /* register crypto algorithms the device supports */ 2016 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 2017 struct caam_hash_alg *t_alg; 2018 struct caam_hash_template *alg = driver_hash + i; 2019 2020 /* If MD size is not supported by device, skip registration */ 2021 if (alg->template_ahash.halg.digestsize > md_limit) 2022 continue; 2023 2024 /* register hmac version */ 2025 t_alg = caam_hash_alloc(alg, true); 2026 if (IS_ERR(t_alg)) { 2027 err = PTR_ERR(t_alg); 2028 pr_warn("%s alg allocation failed\n", alg->driver_name); 2029 continue; 2030 } 2031 2032 err = crypto_register_ahash(&t_alg->ahash_alg); 2033 if (err) { 2034 pr_warn("%s alg registration failed: %d\n", 2035 t_alg->ahash_alg.halg.base.cra_driver_name, 2036 err); 2037 kfree(t_alg); 2038 } else 2039 list_add_tail(&t_alg->entry, &hash_list); 2040 2041 /* register unkeyed version */ 2042 t_alg = caam_hash_alloc(alg, false); 2043 if (IS_ERR(t_alg)) { 2044 err = PTR_ERR(t_alg); 2045 pr_warn("%s alg allocation failed\n", alg->driver_name); 2046 continue; 2047 } 2048 2049 err = crypto_register_ahash(&t_alg->ahash_alg); 2050 if (err) { 2051 pr_warn("%s alg registration failed: %d\n", 2052 t_alg->ahash_alg.halg.base.cra_driver_name, 2053 err); 2054 kfree(t_alg); 2055 } else 2056 list_add_tail(&t_alg->entry, &hash_list); 2057 } 2058 2059 return err; 2060 } 2061 2062 module_init(caam_algapi_hash_init); 2063 module_exit(caam_algapi_hash_exit); 2064 2065 MODULE_LICENSE("GPL"); 2066 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); 2067 MODULE_AUTHOR("Freescale Semiconductor - NMG"); 2068