1 /* 2 * caam - Freescale FSL CAAM support for ahash functions of crypto API 3 * 4 * Copyright 2011 Freescale Semiconductor, Inc. 5 * 6 * Based on caamalg.c crypto API driver. 7 * 8 * relationship of digest job descriptor or first job descriptor after init to 9 * shared descriptors: 10 * 11 * --------------- --------------- 12 * | JobDesc #1 |-------------------->| ShareDesc | 13 * | *(packet 1) | | (hashKey) | 14 * --------------- | (operation) | 15 * --------------- 16 * 17 * relationship of subsequent job descriptors to shared descriptors: 18 * 19 * --------------- --------------- 20 * | JobDesc #2 |-------------------->| ShareDesc | 21 * | *(packet 2) | |------------->| (hashKey) | 22 * --------------- | |-------->| (operation) | 23 * . | | | (load ctx2) | 24 * . | | --------------- 25 * --------------- | | 26 * | JobDesc #3 |------| | 27 * | *(packet 3) | | 28 * --------------- | 29 * . | 30 * . | 31 * --------------- | 32 * | JobDesc #4 |------------ 33 * | *(packet 4) | 34 * --------------- 35 * 36 * The SharedDesc never changes for a connection unless rekeyed, but 37 * each packet will likely be in a different place. So all we need 38 * to know to process the packet is where the input is, where the 39 * output goes, and what context we want to process with. Context is 40 * in the SharedDesc, packet references in the JobDesc. 41 * 42 * So, a job desc looks like: 43 * 44 * --------------------- 45 * | Header | 46 * | ShareDesc Pointer | 47 * | SEQ_OUT_PTR | 48 * | (output buffer) | 49 * | (output length) | 50 * | SEQ_IN_PTR | 51 * | (input buffer) | 52 * | (input length) | 53 * --------------------- 54 */ 55 56 #include "compat.h" 57 58 #include "regs.h" 59 #include "intern.h" 60 #include "desc_constr.h" 61 #include "jr.h" 62 #include "error.h" 63 #include "sg_sw_sec4.h" 64 #include "key_gen.h" 65 66 #define CAAM_CRA_PRIORITY 3000 67 68 /* max hash key is max split key size */ 69 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) 70 71 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE 72 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE 73 74 /* length of descriptors text */ 75 #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ) 76 #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) 77 #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) 78 #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) 79 #define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) 80 #define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) 81 82 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ 83 CAAM_MAX_HASH_KEY_SIZE) 84 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) 85 86 /* caam context sizes for hashes: running digest + 8 */ 87 #define HASH_MSG_LEN 8 88 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) 89 90 #ifdef DEBUG 91 /* for print_hex_dumps with line references */ 92 #define debug(format, arg...) printk(format, arg) 93 #else 94 #define debug(format, arg...) 95 #endif 96 97 98 static struct list_head hash_list; 99 100 /* ahash per-session context */ 101 struct caam_hash_ctx { 102 struct device *jrdev; 103 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN]; 104 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN]; 105 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN]; 106 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN]; 107 u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN]; 108 dma_addr_t sh_desc_update_dma; 109 dma_addr_t sh_desc_update_first_dma; 110 dma_addr_t sh_desc_fin_dma; 111 dma_addr_t sh_desc_digest_dma; 112 dma_addr_t sh_desc_finup_dma; 113 u32 alg_type; 114 u32 alg_op; 115 u8 key[CAAM_MAX_HASH_KEY_SIZE]; 116 dma_addr_t key_dma; 117 int ctx_len; 118 unsigned int split_key_len; 119 unsigned int split_key_pad_len; 120 }; 121 122 /* ahash state */ 123 struct caam_hash_state { 124 dma_addr_t buf_dma; 125 dma_addr_t ctx_dma; 126 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 127 int buflen_0; 128 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 129 int buflen_1; 130 u8 caam_ctx[MAX_CTX_LEN]; 131 int (*update)(struct ahash_request *req); 132 int (*final)(struct ahash_request *req); 133 int (*finup)(struct ahash_request *req); 134 int current_buf; 135 }; 136 137 /* Common job descriptor seq in/out ptr routines */ 138 139 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ 140 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, 141 struct caam_hash_state *state, 142 int ctx_len) 143 { 144 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, 145 ctx_len, DMA_FROM_DEVICE); 146 if (dma_mapping_error(jrdev, state->ctx_dma)) { 147 dev_err(jrdev, "unable to map ctx\n"); 148 return -ENOMEM; 149 } 150 151 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); 152 153 return 0; 154 } 155 156 /* Map req->result, and append seq_out_ptr command that points to it */ 157 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, 158 u8 *result, int digestsize) 159 { 160 dma_addr_t dst_dma; 161 162 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); 163 append_seq_out_ptr(desc, dst_dma, digestsize, 0); 164 165 return dst_dma; 166 } 167 168 /* Map current buffer in state and put it in link table */ 169 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, 170 struct sec4_sg_entry *sec4_sg, 171 u8 *buf, int buflen) 172 { 173 dma_addr_t buf_dma; 174 175 buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 176 dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0); 177 178 return buf_dma; 179 } 180 181 /* Map req->src and put it in link table */ 182 static inline void src_map_to_sec4_sg(struct device *jrdev, 183 struct scatterlist *src, int src_nents, 184 struct sec4_sg_entry *sec4_sg, 185 bool chained) 186 { 187 dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained); 188 sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0); 189 } 190 191 /* 192 * Only put buffer in link table if it contains data, which is possible, 193 * since a buffer has previously been used, and needs to be unmapped, 194 */ 195 static inline dma_addr_t 196 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, 197 u8 *buf, dma_addr_t buf_dma, int buflen, 198 int last_buflen) 199 { 200 if (buf_dma && !dma_mapping_error(jrdev, buf_dma)) 201 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE); 202 if (buflen) 203 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen); 204 else 205 buf_dma = 0; 206 207 return buf_dma; 208 } 209 210 /* Map state->caam_ctx, and add it to link table */ 211 static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, 212 struct caam_hash_state *state, int ctx_len, 213 struct sec4_sg_entry *sec4_sg, u32 flag) 214 { 215 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); 216 if (dma_mapping_error(jrdev, state->ctx_dma)) { 217 dev_err(jrdev, "unable to map ctx\n"); 218 return -ENOMEM; 219 } 220 221 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); 222 223 return 0; 224 } 225 226 /* Common shared descriptor commands */ 227 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) 228 { 229 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, 230 ctx->split_key_len, CLASS_2 | 231 KEY_DEST_MDHA_SPLIT | KEY_ENC); 232 } 233 234 /* Append key if it has been set */ 235 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) 236 { 237 u32 *key_jump_cmd; 238 239 init_sh_desc(desc, HDR_SHARE_SERIAL); 240 241 if (ctx->split_key_len) { 242 /* Skip if already shared */ 243 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 244 JUMP_COND_SHRD); 245 246 append_key_ahash(desc, ctx); 247 248 set_jump_tgt_here(desc, key_jump_cmd); 249 } 250 251 /* Propagate errors from shared to job descriptor */ 252 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); 253 } 254 255 /* 256 * For ahash read data from seqin following state->caam_ctx, 257 * and write resulting class2 context to seqout, which may be state->caam_ctx 258 * or req->result 259 */ 260 static inline void ahash_append_load_str(u32 *desc, int digestsize) 261 { 262 /* Calculate remaining bytes to read */ 263 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 264 265 /* Read remaining bytes */ 266 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | 267 FIFOLD_TYPE_MSG | KEY_VLF); 268 269 /* Store class2 context bytes */ 270 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 271 LDST_SRCDST_BYTE_CONTEXT); 272 } 273 274 /* 275 * For ahash update, final and finup, import context, read and write to seqout 276 */ 277 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, 278 int digestsize, 279 struct caam_hash_ctx *ctx) 280 { 281 init_sh_desc_key_ahash(desc, ctx); 282 283 /* Import context from software */ 284 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 285 LDST_CLASS_2_CCB | ctx->ctx_len); 286 287 /* Class 2 operation */ 288 append_operation(desc, op | state | OP_ALG_ENCRYPT); 289 290 /* 291 * Load from buf and/or src and write to req->result or state->context 292 */ 293 ahash_append_load_str(desc, digestsize); 294 } 295 296 /* For ahash firsts and digest, read and write to seqout */ 297 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, 298 int digestsize, struct caam_hash_ctx *ctx) 299 { 300 init_sh_desc_key_ahash(desc, ctx); 301 302 /* Class 2 operation */ 303 append_operation(desc, op | state | OP_ALG_ENCRYPT); 304 305 /* 306 * Load from buf and/or src and write to req->result or state->context 307 */ 308 ahash_append_load_str(desc, digestsize); 309 } 310 311 static int ahash_set_sh_desc(struct crypto_ahash *ahash) 312 { 313 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 314 int digestsize = crypto_ahash_digestsize(ahash); 315 struct device *jrdev = ctx->jrdev; 316 u32 have_key = 0; 317 u32 *desc; 318 319 if (ctx->split_key_len) 320 have_key = OP_ALG_AAI_HMAC_PRECOMP; 321 322 /* ahash_update shared descriptor */ 323 desc = ctx->sh_desc_update; 324 325 init_sh_desc(desc, HDR_SHARE_SERIAL); 326 327 /* Import context from software */ 328 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 329 LDST_CLASS_2_CCB | ctx->ctx_len); 330 331 /* Class 2 operation */ 332 append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | 333 OP_ALG_ENCRYPT); 334 335 /* Load data and write to result or context */ 336 ahash_append_load_str(desc, ctx->ctx_len); 337 338 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 339 DMA_TO_DEVICE); 340 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { 341 dev_err(jrdev, "unable to map shared descriptor\n"); 342 return -ENOMEM; 343 } 344 #ifdef DEBUG 345 print_hex_dump(KERN_ERR, 346 "ahash update shdesc@"__stringify(__LINE__)": ", 347 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 348 #endif 349 350 /* ahash_update_first shared descriptor */ 351 desc = ctx->sh_desc_update_first; 352 353 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, 354 ctx->ctx_len, ctx); 355 356 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, 357 desc_bytes(desc), 358 DMA_TO_DEVICE); 359 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) { 360 dev_err(jrdev, "unable to map shared descriptor\n"); 361 return -ENOMEM; 362 } 363 #ifdef DEBUG 364 print_hex_dump(KERN_ERR, 365 "ahash update first shdesc@"__stringify(__LINE__)": ", 366 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 367 #endif 368 369 /* ahash_final shared descriptor */ 370 desc = ctx->sh_desc_fin; 371 372 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, 373 OP_ALG_AS_FINALIZE, digestsize, ctx); 374 375 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 376 DMA_TO_DEVICE); 377 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { 378 dev_err(jrdev, "unable to map shared descriptor\n"); 379 return -ENOMEM; 380 } 381 #ifdef DEBUG 382 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 383 DUMP_PREFIX_ADDRESS, 16, 4, desc, 384 desc_bytes(desc), 1); 385 #endif 386 387 /* ahash_finup shared descriptor */ 388 desc = ctx->sh_desc_finup; 389 390 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, 391 OP_ALG_AS_FINALIZE, digestsize, ctx); 392 393 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 394 DMA_TO_DEVICE); 395 if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) { 396 dev_err(jrdev, "unable to map shared descriptor\n"); 397 return -ENOMEM; 398 } 399 #ifdef DEBUG 400 print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ", 401 DUMP_PREFIX_ADDRESS, 16, 4, desc, 402 desc_bytes(desc), 1); 403 #endif 404 405 /* ahash_digest shared descriptor */ 406 desc = ctx->sh_desc_digest; 407 408 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, 409 digestsize, ctx); 410 411 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, 412 desc_bytes(desc), 413 DMA_TO_DEVICE); 414 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) { 415 dev_err(jrdev, "unable to map shared descriptor\n"); 416 return -ENOMEM; 417 } 418 #ifdef DEBUG 419 print_hex_dump(KERN_ERR, 420 "ahash digest shdesc@"__stringify(__LINE__)": ", 421 DUMP_PREFIX_ADDRESS, 16, 4, desc, 422 desc_bytes(desc), 1); 423 #endif 424 425 return 0; 426 } 427 428 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, 429 u32 keylen) 430 { 431 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, 432 ctx->split_key_pad_len, key_in, keylen, 433 ctx->alg_op); 434 } 435 436 /* Digest hash size if it is too large */ 437 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, 438 u32 *keylen, u8 *key_out, u32 digestsize) 439 { 440 struct device *jrdev = ctx->jrdev; 441 u32 *desc; 442 struct split_key_result result; 443 dma_addr_t src_dma, dst_dma; 444 int ret = 0; 445 446 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 447 if (!desc) { 448 dev_err(jrdev, "unable to allocate key input memory\n"); 449 return -ENOMEM; 450 } 451 452 init_job_desc(desc, 0); 453 454 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, 455 DMA_TO_DEVICE); 456 if (dma_mapping_error(jrdev, src_dma)) { 457 dev_err(jrdev, "unable to map key input memory\n"); 458 kfree(desc); 459 return -ENOMEM; 460 } 461 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, 462 DMA_FROM_DEVICE); 463 if (dma_mapping_error(jrdev, dst_dma)) { 464 dev_err(jrdev, "unable to map key output memory\n"); 465 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 466 kfree(desc); 467 return -ENOMEM; 468 } 469 470 /* Job descriptor to perform unkeyed hash on key_in */ 471 append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | 472 OP_ALG_AS_INITFINAL); 473 append_seq_in_ptr(desc, src_dma, *keylen, 0); 474 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | 475 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); 476 append_seq_out_ptr(desc, dst_dma, digestsize, 0); 477 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 478 LDST_SRCDST_BYTE_CONTEXT); 479 480 #ifdef DEBUG 481 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", 482 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); 483 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 484 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 485 #endif 486 487 result.err = 0; 488 init_completion(&result.completion); 489 490 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); 491 if (!ret) { 492 /* in progress */ 493 wait_for_completion_interruptible(&result.completion); 494 ret = result.err; 495 #ifdef DEBUG 496 print_hex_dump(KERN_ERR, 497 "digested key@"__stringify(__LINE__)": ", 498 DUMP_PREFIX_ADDRESS, 16, 4, key_in, 499 digestsize, 1); 500 #endif 501 } 502 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 503 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); 504 505 *keylen = digestsize; 506 507 kfree(desc); 508 509 return ret; 510 } 511 512 static int ahash_setkey(struct crypto_ahash *ahash, 513 const u8 *key, unsigned int keylen) 514 { 515 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ 516 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; 517 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 518 struct device *jrdev = ctx->jrdev; 519 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 520 int digestsize = crypto_ahash_digestsize(ahash); 521 int ret = 0; 522 u8 *hashed_key = NULL; 523 524 #ifdef DEBUG 525 printk(KERN_ERR "keylen %d\n", keylen); 526 #endif 527 528 if (keylen > blocksize) { 529 hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL | 530 GFP_DMA); 531 if (!hashed_key) 532 return -ENOMEM; 533 ret = hash_digest_key(ctx, key, &keylen, hashed_key, 534 digestsize); 535 if (ret) 536 goto badkey; 537 key = hashed_key; 538 } 539 540 /* Pick class 2 key length from algorithm submask */ 541 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> 542 OP_ALG_ALGSEL_SHIFT] * 2; 543 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); 544 545 #ifdef DEBUG 546 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", 547 ctx->split_key_len, ctx->split_key_pad_len); 548 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 549 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 550 #endif 551 552 ret = gen_split_hash_key(ctx, key, keylen); 553 if (ret) 554 goto badkey; 555 556 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, 557 DMA_TO_DEVICE); 558 if (dma_mapping_error(jrdev, ctx->key_dma)) { 559 dev_err(jrdev, "unable to map key i/o memory\n"); 560 ret = -ENOMEM; 561 goto map_err; 562 } 563 #ifdef DEBUG 564 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 565 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 566 ctx->split_key_pad_len, 1); 567 #endif 568 569 ret = ahash_set_sh_desc(ahash); 570 if (ret) { 571 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, 572 DMA_TO_DEVICE); 573 } 574 575 map_err: 576 kfree(hashed_key); 577 return ret; 578 badkey: 579 kfree(hashed_key); 580 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); 581 return -EINVAL; 582 } 583 584 /* 585 * ahash_edesc - s/w-extended ahash descriptor 586 * @dst_dma: physical mapped address of req->result 587 * @sec4_sg_dma: physical mapped address of h/w link table 588 * @chained: if source is chained 589 * @src_nents: number of segments in input scatterlist 590 * @sec4_sg_bytes: length of dma mapped sec4_sg space 591 * @sec4_sg: pointer to h/w link table 592 * @hw_desc: the h/w job descriptor followed by any referenced link tables 593 */ 594 struct ahash_edesc { 595 dma_addr_t dst_dma; 596 dma_addr_t sec4_sg_dma; 597 bool chained; 598 int src_nents; 599 int sec4_sg_bytes; 600 struct sec4_sg_entry *sec4_sg; 601 u32 hw_desc[0]; 602 }; 603 604 static inline void ahash_unmap(struct device *dev, 605 struct ahash_edesc *edesc, 606 struct ahash_request *req, int dst_len) 607 { 608 if (edesc->src_nents) 609 dma_unmap_sg_chained(dev, req->src, edesc->src_nents, 610 DMA_TO_DEVICE, edesc->chained); 611 if (edesc->dst_dma) 612 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); 613 614 if (edesc->sec4_sg_bytes) 615 dma_unmap_single(dev, edesc->sec4_sg_dma, 616 edesc->sec4_sg_bytes, DMA_TO_DEVICE); 617 } 618 619 static inline void ahash_unmap_ctx(struct device *dev, 620 struct ahash_edesc *edesc, 621 struct ahash_request *req, int dst_len, u32 flag) 622 { 623 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 624 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 625 struct caam_hash_state *state = ahash_request_ctx(req); 626 627 if (state->ctx_dma) 628 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); 629 ahash_unmap(dev, edesc, req, dst_len); 630 } 631 632 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, 633 void *context) 634 { 635 struct ahash_request *req = context; 636 struct ahash_edesc *edesc; 637 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 638 int digestsize = crypto_ahash_digestsize(ahash); 639 #ifdef DEBUG 640 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 641 struct caam_hash_state *state = ahash_request_ctx(req); 642 643 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 644 #endif 645 646 edesc = (struct ahash_edesc *)((char *)desc - 647 offsetof(struct ahash_edesc, hw_desc)); 648 if (err) 649 caam_jr_strstatus(jrdev, err); 650 651 ahash_unmap(jrdev, edesc, req, digestsize); 652 kfree(edesc); 653 654 #ifdef DEBUG 655 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 656 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 657 ctx->ctx_len, 1); 658 if (req->result) 659 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 660 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 661 digestsize, 1); 662 #endif 663 664 req->base.complete(&req->base, err); 665 } 666 667 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, 668 void *context) 669 { 670 struct ahash_request *req = context; 671 struct ahash_edesc *edesc; 672 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 673 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 674 #ifdef DEBUG 675 struct caam_hash_state *state = ahash_request_ctx(req); 676 int digestsize = crypto_ahash_digestsize(ahash); 677 678 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 679 #endif 680 681 edesc = (struct ahash_edesc *)((char *)desc - 682 offsetof(struct ahash_edesc, hw_desc)); 683 if (err) 684 caam_jr_strstatus(jrdev, err); 685 686 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 687 kfree(edesc); 688 689 #ifdef DEBUG 690 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 691 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 692 ctx->ctx_len, 1); 693 if (req->result) 694 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 695 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 696 digestsize, 1); 697 #endif 698 699 req->base.complete(&req->base, err); 700 } 701 702 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, 703 void *context) 704 { 705 struct ahash_request *req = context; 706 struct ahash_edesc *edesc; 707 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 708 int digestsize = crypto_ahash_digestsize(ahash); 709 #ifdef DEBUG 710 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 711 struct caam_hash_state *state = ahash_request_ctx(req); 712 713 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 714 #endif 715 716 edesc = (struct ahash_edesc *)((char *)desc - 717 offsetof(struct ahash_edesc, hw_desc)); 718 if (err) 719 caam_jr_strstatus(jrdev, err); 720 721 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); 722 kfree(edesc); 723 724 #ifdef DEBUG 725 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 726 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 727 ctx->ctx_len, 1); 728 if (req->result) 729 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 730 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 731 digestsize, 1); 732 #endif 733 734 req->base.complete(&req->base, err); 735 } 736 737 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, 738 void *context) 739 { 740 struct ahash_request *req = context; 741 struct ahash_edesc *edesc; 742 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 743 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 744 #ifdef DEBUG 745 struct caam_hash_state *state = ahash_request_ctx(req); 746 int digestsize = crypto_ahash_digestsize(ahash); 747 748 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 749 #endif 750 751 edesc = (struct ahash_edesc *)((char *)desc - 752 offsetof(struct ahash_edesc, hw_desc)); 753 if (err) 754 caam_jr_strstatus(jrdev, err); 755 756 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); 757 kfree(edesc); 758 759 #ifdef DEBUG 760 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 761 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 762 ctx->ctx_len, 1); 763 if (req->result) 764 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 765 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 766 digestsize, 1); 767 #endif 768 769 req->base.complete(&req->base, err); 770 } 771 772 /* submit update job descriptor */ 773 static int ahash_update_ctx(struct ahash_request *req) 774 { 775 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 776 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 777 struct caam_hash_state *state = ahash_request_ctx(req); 778 struct device *jrdev = ctx->jrdev; 779 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 780 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 781 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 782 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; 783 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; 784 int *next_buflen = state->current_buf ? &state->buflen_0 : 785 &state->buflen_1, last_buflen; 786 int in_len = *buflen + req->nbytes, to_hash; 787 u32 *sh_desc = ctx->sh_desc_update, *desc; 788 dma_addr_t ptr = ctx->sh_desc_update_dma; 789 int src_nents, sec4_sg_bytes, sec4_sg_src_index; 790 struct ahash_edesc *edesc; 791 bool chained = false; 792 int ret = 0; 793 int sh_len; 794 795 last_buflen = *next_buflen; 796 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); 797 to_hash = in_len - *next_buflen; 798 799 if (to_hash) { 800 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), 801 &chained); 802 sec4_sg_src_index = 1 + (*buflen ? 1 : 0); 803 sec4_sg_bytes = (sec4_sg_src_index + src_nents) * 804 sizeof(struct sec4_sg_entry); 805 806 /* 807 * allocate space for base edesc and hw desc commands, 808 * link tables 809 */ 810 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + 811 sec4_sg_bytes, GFP_DMA | flags); 812 if (!edesc) { 813 dev_err(jrdev, 814 "could not allocate extended descriptor\n"); 815 return -ENOMEM; 816 } 817 818 edesc->src_nents = src_nents; 819 edesc->chained = chained; 820 edesc->sec4_sg_bytes = sec4_sg_bytes; 821 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 822 DESC_JOB_IO_LEN; 823 824 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 825 edesc->sec4_sg, DMA_BIDIRECTIONAL); 826 if (ret) 827 return ret; 828 829 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, 830 edesc->sec4_sg + 1, 831 buf, state->buf_dma, 832 *buflen, last_buflen); 833 834 if (src_nents) { 835 src_map_to_sec4_sg(jrdev, req->src, src_nents, 836 edesc->sec4_sg + sec4_sg_src_index, 837 chained); 838 if (*next_buflen) { 839 scatterwalk_map_and_copy(next_buf, req->src, 840 to_hash - *buflen, 841 *next_buflen, 0); 842 state->current_buf = !state->current_buf; 843 } 844 } else { 845 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= 846 SEC4_SG_LEN_FIN; 847 } 848 849 sh_len = desc_len(sh_desc); 850 desc = edesc->hw_desc; 851 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | 852 HDR_REVERSE); 853 854 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 855 sec4_sg_bytes, 856 DMA_TO_DEVICE); 857 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 858 dev_err(jrdev, "unable to map S/G table\n"); 859 return -ENOMEM; 860 } 861 862 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + 863 to_hash, LDST_SGF); 864 865 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 866 867 #ifdef DEBUG 868 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 869 DUMP_PREFIX_ADDRESS, 16, 4, desc, 870 desc_bytes(desc), 1); 871 #endif 872 873 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); 874 if (!ret) { 875 ret = -EINPROGRESS; 876 } else { 877 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, 878 DMA_BIDIRECTIONAL); 879 kfree(edesc); 880 } 881 } else if (*next_buflen) { 882 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 883 req->nbytes, 0); 884 *buflen = *next_buflen; 885 *next_buflen = last_buflen; 886 } 887 #ifdef DEBUG 888 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 889 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 890 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 891 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 892 *next_buflen, 1); 893 #endif 894 895 return ret; 896 } 897 898 static int ahash_final_ctx(struct ahash_request *req) 899 { 900 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 901 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 902 struct caam_hash_state *state = ahash_request_ctx(req); 903 struct device *jrdev = ctx->jrdev; 904 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 905 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 906 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 907 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 908 int last_buflen = state->current_buf ? state->buflen_0 : 909 state->buflen_1; 910 u32 *sh_desc = ctx->sh_desc_fin, *desc; 911 dma_addr_t ptr = ctx->sh_desc_fin_dma; 912 int sec4_sg_bytes; 913 int digestsize = crypto_ahash_digestsize(ahash); 914 struct ahash_edesc *edesc; 915 int ret = 0; 916 int sh_len; 917 918 sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); 919 920 /* allocate space for base edesc and hw desc commands, link tables */ 921 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + 922 sec4_sg_bytes, GFP_DMA | flags); 923 if (!edesc) { 924 dev_err(jrdev, "could not allocate extended descriptor\n"); 925 return -ENOMEM; 926 } 927 928 sh_len = desc_len(sh_desc); 929 desc = edesc->hw_desc; 930 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); 931 932 edesc->sec4_sg_bytes = sec4_sg_bytes; 933 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 934 DESC_JOB_IO_LEN; 935 edesc->src_nents = 0; 936 937 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 938 edesc->sec4_sg, DMA_TO_DEVICE); 939 if (ret) 940 return ret; 941 942 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, 943 buf, state->buf_dma, buflen, 944 last_buflen); 945 (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; 946 947 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 948 sec4_sg_bytes, DMA_TO_DEVICE); 949 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 950 dev_err(jrdev, "unable to map S/G table\n"); 951 return -ENOMEM; 952 } 953 954 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, 955 LDST_SGF); 956 957 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 958 digestsize); 959 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 960 dev_err(jrdev, "unable to map dst\n"); 961 return -ENOMEM; 962 } 963 964 #ifdef DEBUG 965 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 966 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 967 #endif 968 969 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 970 if (!ret) { 971 ret = -EINPROGRESS; 972 } else { 973 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 974 kfree(edesc); 975 } 976 977 return ret; 978 } 979 980 static int ahash_finup_ctx(struct ahash_request *req) 981 { 982 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 983 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 984 struct caam_hash_state *state = ahash_request_ctx(req); 985 struct device *jrdev = ctx->jrdev; 986 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 987 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 988 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 989 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 990 int last_buflen = state->current_buf ? state->buflen_0 : 991 state->buflen_1; 992 u32 *sh_desc = ctx->sh_desc_finup, *desc; 993 dma_addr_t ptr = ctx->sh_desc_finup_dma; 994 int sec4_sg_bytes, sec4_sg_src_index; 995 int src_nents; 996 int digestsize = crypto_ahash_digestsize(ahash); 997 struct ahash_edesc *edesc; 998 bool chained = false; 999 int ret = 0; 1000 int sh_len; 1001 1002 src_nents = __sg_count(req->src, req->nbytes, &chained); 1003 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 1004 sec4_sg_bytes = (sec4_sg_src_index + src_nents) * 1005 sizeof(struct sec4_sg_entry); 1006 1007 /* allocate space for base edesc and hw desc commands, link tables */ 1008 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + 1009 sec4_sg_bytes, GFP_DMA | flags); 1010 if (!edesc) { 1011 dev_err(jrdev, "could not allocate extended descriptor\n"); 1012 return -ENOMEM; 1013 } 1014 1015 sh_len = desc_len(sh_desc); 1016 desc = edesc->hw_desc; 1017 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); 1018 1019 edesc->src_nents = src_nents; 1020 edesc->chained = chained; 1021 edesc->sec4_sg_bytes = sec4_sg_bytes; 1022 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1023 DESC_JOB_IO_LEN; 1024 1025 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 1026 edesc->sec4_sg, DMA_TO_DEVICE); 1027 if (ret) 1028 return ret; 1029 1030 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, 1031 buf, state->buf_dma, buflen, 1032 last_buflen); 1033 1034 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1035 sec4_sg_src_index, chained); 1036 1037 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1038 sec4_sg_bytes, DMA_TO_DEVICE); 1039 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1040 dev_err(jrdev, "unable to map S/G table\n"); 1041 return -ENOMEM; 1042 } 1043 1044 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + 1045 buflen + req->nbytes, LDST_SGF); 1046 1047 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1048 digestsize); 1049 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1050 dev_err(jrdev, "unable to map dst\n"); 1051 return -ENOMEM; 1052 } 1053 1054 #ifdef DEBUG 1055 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1056 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1057 #endif 1058 1059 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 1060 if (!ret) { 1061 ret = -EINPROGRESS; 1062 } else { 1063 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1064 kfree(edesc); 1065 } 1066 1067 return ret; 1068 } 1069 1070 static int ahash_digest(struct ahash_request *req) 1071 { 1072 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1073 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1074 struct device *jrdev = ctx->jrdev; 1075 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1076 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1077 u32 *sh_desc = ctx->sh_desc_digest, *desc; 1078 dma_addr_t ptr = ctx->sh_desc_digest_dma; 1079 int digestsize = crypto_ahash_digestsize(ahash); 1080 int src_nents, sec4_sg_bytes; 1081 dma_addr_t src_dma; 1082 struct ahash_edesc *edesc; 1083 bool chained = false; 1084 int ret = 0; 1085 u32 options; 1086 int sh_len; 1087 1088 src_nents = sg_count(req->src, req->nbytes, &chained); 1089 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE, 1090 chained); 1091 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); 1092 1093 /* allocate space for base edesc and hw desc commands, link tables */ 1094 edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes + 1095 DESC_JOB_IO_LEN, GFP_DMA | flags); 1096 if (!edesc) { 1097 dev_err(jrdev, "could not allocate extended descriptor\n"); 1098 return -ENOMEM; 1099 } 1100 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1101 DESC_JOB_IO_LEN; 1102 edesc->sec4_sg_bytes = sec4_sg_bytes; 1103 edesc->src_nents = src_nents; 1104 edesc->chained = chained; 1105 1106 sh_len = desc_len(sh_desc); 1107 desc = edesc->hw_desc; 1108 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); 1109 1110 if (src_nents) { 1111 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); 1112 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1113 sec4_sg_bytes, DMA_TO_DEVICE); 1114 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1115 dev_err(jrdev, "unable to map S/G table\n"); 1116 return -ENOMEM; 1117 } 1118 src_dma = edesc->sec4_sg_dma; 1119 options = LDST_SGF; 1120 } else { 1121 src_dma = sg_dma_address(req->src); 1122 options = 0; 1123 } 1124 append_seq_in_ptr(desc, src_dma, req->nbytes, options); 1125 1126 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1127 digestsize); 1128 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1129 dev_err(jrdev, "unable to map dst\n"); 1130 return -ENOMEM; 1131 } 1132 1133 #ifdef DEBUG 1134 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1135 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1136 #endif 1137 1138 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1139 if (!ret) { 1140 ret = -EINPROGRESS; 1141 } else { 1142 ahash_unmap(jrdev, edesc, req, digestsize); 1143 kfree(edesc); 1144 } 1145 1146 return ret; 1147 } 1148 1149 /* submit ahash final if it the first job descriptor */ 1150 static int ahash_final_no_ctx(struct ahash_request *req) 1151 { 1152 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1153 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1154 struct caam_hash_state *state = ahash_request_ctx(req); 1155 struct device *jrdev = ctx->jrdev; 1156 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1157 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1158 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1159 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 1160 u32 *sh_desc = ctx->sh_desc_digest, *desc; 1161 dma_addr_t ptr = ctx->sh_desc_digest_dma; 1162 int digestsize = crypto_ahash_digestsize(ahash); 1163 struct ahash_edesc *edesc; 1164 int ret = 0; 1165 int sh_len; 1166 1167 /* allocate space for base edesc and hw desc commands, link tables */ 1168 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN, 1169 GFP_DMA | flags); 1170 if (!edesc) { 1171 dev_err(jrdev, "could not allocate extended descriptor\n"); 1172 return -ENOMEM; 1173 } 1174 1175 sh_len = desc_len(sh_desc); 1176 desc = edesc->hw_desc; 1177 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); 1178 1179 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 1180 if (dma_mapping_error(jrdev, state->buf_dma)) { 1181 dev_err(jrdev, "unable to map src\n"); 1182 return -ENOMEM; 1183 } 1184 1185 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1186 1187 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1188 digestsize); 1189 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1190 dev_err(jrdev, "unable to map dst\n"); 1191 return -ENOMEM; 1192 } 1193 edesc->src_nents = 0; 1194 1195 #ifdef DEBUG 1196 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1197 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1198 #endif 1199 1200 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1201 if (!ret) { 1202 ret = -EINPROGRESS; 1203 } else { 1204 ahash_unmap(jrdev, edesc, req, digestsize); 1205 kfree(edesc); 1206 } 1207 1208 return ret; 1209 } 1210 1211 /* submit ahash update if it the first job descriptor after update */ 1212 static int ahash_update_no_ctx(struct ahash_request *req) 1213 { 1214 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1215 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1216 struct caam_hash_state *state = ahash_request_ctx(req); 1217 struct device *jrdev = ctx->jrdev; 1218 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1219 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1220 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1221 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; 1222 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; 1223 int *next_buflen = state->current_buf ? &state->buflen_0 : 1224 &state->buflen_1; 1225 int in_len = *buflen + req->nbytes, to_hash; 1226 int sec4_sg_bytes, src_nents; 1227 struct ahash_edesc *edesc; 1228 u32 *desc, *sh_desc = ctx->sh_desc_update_first; 1229 dma_addr_t ptr = ctx->sh_desc_update_first_dma; 1230 bool chained = false; 1231 int ret = 0; 1232 int sh_len; 1233 1234 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); 1235 to_hash = in_len - *next_buflen; 1236 1237 if (to_hash) { 1238 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), 1239 &chained); 1240 sec4_sg_bytes = (1 + src_nents) * 1241 sizeof(struct sec4_sg_entry); 1242 1243 /* 1244 * allocate space for base edesc and hw desc commands, 1245 * link tables 1246 */ 1247 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + 1248 sec4_sg_bytes, GFP_DMA | flags); 1249 if (!edesc) { 1250 dev_err(jrdev, 1251 "could not allocate extended descriptor\n"); 1252 return -ENOMEM; 1253 } 1254 1255 edesc->src_nents = src_nents; 1256 edesc->chained = chained; 1257 edesc->sec4_sg_bytes = sec4_sg_bytes; 1258 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1259 DESC_JOB_IO_LEN; 1260 edesc->dst_dma = 0; 1261 1262 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, 1263 buf, *buflen); 1264 src_map_to_sec4_sg(jrdev, req->src, src_nents, 1265 edesc->sec4_sg + 1, chained); 1266 if (*next_buflen) { 1267 scatterwalk_map_and_copy(next_buf, req->src, 1268 to_hash - *buflen, 1269 *next_buflen, 0); 1270 state->current_buf = !state->current_buf; 1271 } 1272 1273 sh_len = desc_len(sh_desc); 1274 desc = edesc->hw_desc; 1275 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | 1276 HDR_REVERSE); 1277 1278 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1279 sec4_sg_bytes, 1280 DMA_TO_DEVICE); 1281 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1282 dev_err(jrdev, "unable to map S/G table\n"); 1283 return -ENOMEM; 1284 } 1285 1286 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); 1287 1288 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1289 if (ret) 1290 return ret; 1291 1292 #ifdef DEBUG 1293 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1294 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1295 desc_bytes(desc), 1); 1296 #endif 1297 1298 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1299 if (!ret) { 1300 ret = -EINPROGRESS; 1301 state->update = ahash_update_ctx; 1302 state->finup = ahash_finup_ctx; 1303 state->final = ahash_final_ctx; 1304 } else { 1305 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, 1306 DMA_TO_DEVICE); 1307 kfree(edesc); 1308 } 1309 } else if (*next_buflen) { 1310 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 1311 req->nbytes, 0); 1312 *buflen = *next_buflen; 1313 *next_buflen = 0; 1314 } 1315 #ifdef DEBUG 1316 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 1317 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1318 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1319 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1320 *next_buflen, 1); 1321 #endif 1322 1323 return ret; 1324 } 1325 1326 /* submit ahash finup if it the first job descriptor after update */ 1327 static int ahash_finup_no_ctx(struct ahash_request *req) 1328 { 1329 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1330 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1331 struct caam_hash_state *state = ahash_request_ctx(req); 1332 struct device *jrdev = ctx->jrdev; 1333 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1334 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1335 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1336 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 1337 int last_buflen = state->current_buf ? state->buflen_0 : 1338 state->buflen_1; 1339 u32 *sh_desc = ctx->sh_desc_digest, *desc; 1340 dma_addr_t ptr = ctx->sh_desc_digest_dma; 1341 int sec4_sg_bytes, sec4_sg_src_index, src_nents; 1342 int digestsize = crypto_ahash_digestsize(ahash); 1343 struct ahash_edesc *edesc; 1344 bool chained = false; 1345 int sh_len; 1346 int ret = 0; 1347 1348 src_nents = __sg_count(req->src, req->nbytes, &chained); 1349 sec4_sg_src_index = 2; 1350 sec4_sg_bytes = (sec4_sg_src_index + src_nents) * 1351 sizeof(struct sec4_sg_entry); 1352 1353 /* allocate space for base edesc and hw desc commands, link tables */ 1354 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + 1355 sec4_sg_bytes, GFP_DMA | flags); 1356 if (!edesc) { 1357 dev_err(jrdev, "could not allocate extended descriptor\n"); 1358 return -ENOMEM; 1359 } 1360 1361 sh_len = desc_len(sh_desc); 1362 desc = edesc->hw_desc; 1363 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); 1364 1365 edesc->src_nents = src_nents; 1366 edesc->chained = chained; 1367 edesc->sec4_sg_bytes = sec4_sg_bytes; 1368 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1369 DESC_JOB_IO_LEN; 1370 1371 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, 1372 state->buf_dma, buflen, 1373 last_buflen); 1374 1375 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, 1376 chained); 1377 1378 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1379 sec4_sg_bytes, DMA_TO_DEVICE); 1380 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1381 dev_err(jrdev, "unable to map S/G table\n"); 1382 return -ENOMEM; 1383 } 1384 1385 append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + 1386 req->nbytes, LDST_SGF); 1387 1388 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1389 digestsize); 1390 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1391 dev_err(jrdev, "unable to map dst\n"); 1392 return -ENOMEM; 1393 } 1394 1395 #ifdef DEBUG 1396 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1397 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1398 #endif 1399 1400 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1401 if (!ret) { 1402 ret = -EINPROGRESS; 1403 } else { 1404 ahash_unmap(jrdev, edesc, req, digestsize); 1405 kfree(edesc); 1406 } 1407 1408 return ret; 1409 } 1410 1411 /* submit first update job descriptor after init */ 1412 static int ahash_update_first(struct ahash_request *req) 1413 { 1414 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1415 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1416 struct caam_hash_state *state = ahash_request_ctx(req); 1417 struct device *jrdev = ctx->jrdev; 1418 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1419 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1420 u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0; 1421 int *next_buflen = state->current_buf ? 1422 &state->buflen_1 : &state->buflen_0; 1423 int to_hash; 1424 u32 *sh_desc = ctx->sh_desc_update_first, *desc; 1425 dma_addr_t ptr = ctx->sh_desc_update_first_dma; 1426 int sec4_sg_bytes, src_nents; 1427 dma_addr_t src_dma; 1428 u32 options; 1429 struct ahash_edesc *edesc; 1430 bool chained = false; 1431 int ret = 0; 1432 int sh_len; 1433 1434 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - 1435 1); 1436 to_hash = req->nbytes - *next_buflen; 1437 1438 if (to_hash) { 1439 src_nents = sg_count(req->src, req->nbytes - (*next_buflen), 1440 &chained); 1441 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1442 DMA_TO_DEVICE, chained); 1443 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); 1444 1445 /* 1446 * allocate space for base edesc and hw desc commands, 1447 * link tables 1448 */ 1449 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + 1450 sec4_sg_bytes, GFP_DMA | flags); 1451 if (!edesc) { 1452 dev_err(jrdev, 1453 "could not allocate extended descriptor\n"); 1454 return -ENOMEM; 1455 } 1456 1457 edesc->src_nents = src_nents; 1458 edesc->chained = chained; 1459 edesc->sec4_sg_bytes = sec4_sg_bytes; 1460 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1461 DESC_JOB_IO_LEN; 1462 edesc->dst_dma = 0; 1463 1464 if (src_nents) { 1465 sg_to_sec4_sg_last(req->src, src_nents, 1466 edesc->sec4_sg, 0); 1467 edesc->sec4_sg_dma = dma_map_single(jrdev, 1468 edesc->sec4_sg, 1469 sec4_sg_bytes, 1470 DMA_TO_DEVICE); 1471 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1472 dev_err(jrdev, "unable to map S/G table\n"); 1473 return -ENOMEM; 1474 } 1475 src_dma = edesc->sec4_sg_dma; 1476 options = LDST_SGF; 1477 } else { 1478 src_dma = sg_dma_address(req->src); 1479 options = 0; 1480 } 1481 1482 if (*next_buflen) 1483 scatterwalk_map_and_copy(next_buf, req->src, to_hash, 1484 *next_buflen, 0); 1485 1486 sh_len = desc_len(sh_desc); 1487 desc = edesc->hw_desc; 1488 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | 1489 HDR_REVERSE); 1490 1491 append_seq_in_ptr(desc, src_dma, to_hash, options); 1492 1493 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1494 if (ret) 1495 return ret; 1496 1497 #ifdef DEBUG 1498 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1499 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1500 desc_bytes(desc), 1); 1501 #endif 1502 1503 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, 1504 req); 1505 if (!ret) { 1506 ret = -EINPROGRESS; 1507 state->update = ahash_update_ctx; 1508 state->finup = ahash_finup_ctx; 1509 state->final = ahash_final_ctx; 1510 } else { 1511 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, 1512 DMA_TO_DEVICE); 1513 kfree(edesc); 1514 } 1515 } else if (*next_buflen) { 1516 state->update = ahash_update_no_ctx; 1517 state->finup = ahash_finup_no_ctx; 1518 state->final = ahash_final_no_ctx; 1519 scatterwalk_map_and_copy(next_buf, req->src, 0, 1520 req->nbytes, 0); 1521 } 1522 #ifdef DEBUG 1523 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1524 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1525 *next_buflen, 1); 1526 #endif 1527 1528 return ret; 1529 } 1530 1531 static int ahash_finup_first(struct ahash_request *req) 1532 { 1533 return ahash_digest(req); 1534 } 1535 1536 static int ahash_init(struct ahash_request *req) 1537 { 1538 struct caam_hash_state *state = ahash_request_ctx(req); 1539 1540 state->update = ahash_update_first; 1541 state->finup = ahash_finup_first; 1542 state->final = ahash_final_no_ctx; 1543 1544 state->current_buf = 0; 1545 state->buf_dma = 0; 1546 1547 return 0; 1548 } 1549 1550 static int ahash_update(struct ahash_request *req) 1551 { 1552 struct caam_hash_state *state = ahash_request_ctx(req); 1553 1554 return state->update(req); 1555 } 1556 1557 static int ahash_finup(struct ahash_request *req) 1558 { 1559 struct caam_hash_state *state = ahash_request_ctx(req); 1560 1561 return state->finup(req); 1562 } 1563 1564 static int ahash_final(struct ahash_request *req) 1565 { 1566 struct caam_hash_state *state = ahash_request_ctx(req); 1567 1568 return state->final(req); 1569 } 1570 1571 static int ahash_export(struct ahash_request *req, void *out) 1572 { 1573 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1574 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1575 struct caam_hash_state *state = ahash_request_ctx(req); 1576 1577 memcpy(out, ctx, sizeof(struct caam_hash_ctx)); 1578 memcpy(out + sizeof(struct caam_hash_ctx), state, 1579 sizeof(struct caam_hash_state)); 1580 return 0; 1581 } 1582 1583 static int ahash_import(struct ahash_request *req, const void *in) 1584 { 1585 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1586 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1587 struct caam_hash_state *state = ahash_request_ctx(req); 1588 1589 memcpy(ctx, in, sizeof(struct caam_hash_ctx)); 1590 memcpy(state, in + sizeof(struct caam_hash_ctx), 1591 sizeof(struct caam_hash_state)); 1592 return 0; 1593 } 1594 1595 struct caam_hash_template { 1596 char name[CRYPTO_MAX_ALG_NAME]; 1597 char driver_name[CRYPTO_MAX_ALG_NAME]; 1598 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1599 char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1600 unsigned int blocksize; 1601 struct ahash_alg template_ahash; 1602 u32 alg_type; 1603 u32 alg_op; 1604 }; 1605 1606 /* ahash descriptors */ 1607 static struct caam_hash_template driver_hash[] = { 1608 { 1609 .name = "sha1", 1610 .driver_name = "sha1-caam", 1611 .hmac_name = "hmac(sha1)", 1612 .hmac_driver_name = "hmac-sha1-caam", 1613 .blocksize = SHA1_BLOCK_SIZE, 1614 .template_ahash = { 1615 .init = ahash_init, 1616 .update = ahash_update, 1617 .final = ahash_final, 1618 .finup = ahash_finup, 1619 .digest = ahash_digest, 1620 .export = ahash_export, 1621 .import = ahash_import, 1622 .setkey = ahash_setkey, 1623 .halg = { 1624 .digestsize = SHA1_DIGEST_SIZE, 1625 }, 1626 }, 1627 .alg_type = OP_ALG_ALGSEL_SHA1, 1628 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 1629 }, { 1630 .name = "sha224", 1631 .driver_name = "sha224-caam", 1632 .hmac_name = "hmac(sha224)", 1633 .hmac_driver_name = "hmac-sha224-caam", 1634 .blocksize = SHA224_BLOCK_SIZE, 1635 .template_ahash = { 1636 .init = ahash_init, 1637 .update = ahash_update, 1638 .final = ahash_final, 1639 .finup = ahash_finup, 1640 .digest = ahash_digest, 1641 .export = ahash_export, 1642 .import = ahash_import, 1643 .setkey = ahash_setkey, 1644 .halg = { 1645 .digestsize = SHA224_DIGEST_SIZE, 1646 }, 1647 }, 1648 .alg_type = OP_ALG_ALGSEL_SHA224, 1649 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, 1650 }, { 1651 .name = "sha256", 1652 .driver_name = "sha256-caam", 1653 .hmac_name = "hmac(sha256)", 1654 .hmac_driver_name = "hmac-sha256-caam", 1655 .blocksize = SHA256_BLOCK_SIZE, 1656 .template_ahash = { 1657 .init = ahash_init, 1658 .update = ahash_update, 1659 .final = ahash_final, 1660 .finup = ahash_finup, 1661 .digest = ahash_digest, 1662 .export = ahash_export, 1663 .import = ahash_import, 1664 .setkey = ahash_setkey, 1665 .halg = { 1666 .digestsize = SHA256_DIGEST_SIZE, 1667 }, 1668 }, 1669 .alg_type = OP_ALG_ALGSEL_SHA256, 1670 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, 1671 }, { 1672 .name = "sha384", 1673 .driver_name = "sha384-caam", 1674 .hmac_name = "hmac(sha384)", 1675 .hmac_driver_name = "hmac-sha384-caam", 1676 .blocksize = SHA384_BLOCK_SIZE, 1677 .template_ahash = { 1678 .init = ahash_init, 1679 .update = ahash_update, 1680 .final = ahash_final, 1681 .finup = ahash_finup, 1682 .digest = ahash_digest, 1683 .export = ahash_export, 1684 .import = ahash_import, 1685 .setkey = ahash_setkey, 1686 .halg = { 1687 .digestsize = SHA384_DIGEST_SIZE, 1688 }, 1689 }, 1690 .alg_type = OP_ALG_ALGSEL_SHA384, 1691 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, 1692 }, { 1693 .name = "sha512", 1694 .driver_name = "sha512-caam", 1695 .hmac_name = "hmac(sha512)", 1696 .hmac_driver_name = "hmac-sha512-caam", 1697 .blocksize = SHA512_BLOCK_SIZE, 1698 .template_ahash = { 1699 .init = ahash_init, 1700 .update = ahash_update, 1701 .final = ahash_final, 1702 .finup = ahash_finup, 1703 .digest = ahash_digest, 1704 .export = ahash_export, 1705 .import = ahash_import, 1706 .setkey = ahash_setkey, 1707 .halg = { 1708 .digestsize = SHA512_DIGEST_SIZE, 1709 }, 1710 }, 1711 .alg_type = OP_ALG_ALGSEL_SHA512, 1712 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 1713 }, { 1714 .name = "md5", 1715 .driver_name = "md5-caam", 1716 .hmac_name = "hmac(md5)", 1717 .hmac_driver_name = "hmac-md5-caam", 1718 .blocksize = MD5_BLOCK_WORDS * 4, 1719 .template_ahash = { 1720 .init = ahash_init, 1721 .update = ahash_update, 1722 .final = ahash_final, 1723 .finup = ahash_finup, 1724 .digest = ahash_digest, 1725 .export = ahash_export, 1726 .import = ahash_import, 1727 .setkey = ahash_setkey, 1728 .halg = { 1729 .digestsize = MD5_DIGEST_SIZE, 1730 }, 1731 }, 1732 .alg_type = OP_ALG_ALGSEL_MD5, 1733 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 1734 }, 1735 }; 1736 1737 struct caam_hash_alg { 1738 struct list_head entry; 1739 int alg_type; 1740 int alg_op; 1741 struct ahash_alg ahash_alg; 1742 }; 1743 1744 static int caam_hash_cra_init(struct crypto_tfm *tfm) 1745 { 1746 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 1747 struct crypto_alg *base = tfm->__crt_alg; 1748 struct hash_alg_common *halg = 1749 container_of(base, struct hash_alg_common, base); 1750 struct ahash_alg *alg = 1751 container_of(halg, struct ahash_alg, halg); 1752 struct caam_hash_alg *caam_hash = 1753 container_of(alg, struct caam_hash_alg, ahash_alg); 1754 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1755 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1756 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1757 HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1758 HASH_MSG_LEN + 32, 1759 HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1760 HASH_MSG_LEN + 64, 1761 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1762 int ret = 0; 1763 1764 /* 1765 * Get a Job ring from Job Ring driver to ensure in-order 1766 * crypto request processing per tfm 1767 */ 1768 ctx->jrdev = caam_jr_alloc(); 1769 if (IS_ERR(ctx->jrdev)) { 1770 pr_err("Job Ring Device allocation for transform failed\n"); 1771 return PTR_ERR(ctx->jrdev); 1772 } 1773 /* copy descriptor header template value */ 1774 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1775 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; 1776 1777 ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> 1778 OP_ALG_ALGSEL_SHIFT]; 1779 1780 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1781 sizeof(struct caam_hash_state)); 1782 1783 ret = ahash_set_sh_desc(ahash); 1784 1785 return ret; 1786 } 1787 1788 static void caam_hash_cra_exit(struct crypto_tfm *tfm) 1789 { 1790 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1791 1792 if (ctx->sh_desc_update_dma && 1793 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) 1794 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, 1795 desc_bytes(ctx->sh_desc_update), 1796 DMA_TO_DEVICE); 1797 if (ctx->sh_desc_update_first_dma && 1798 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) 1799 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, 1800 desc_bytes(ctx->sh_desc_update_first), 1801 DMA_TO_DEVICE); 1802 if (ctx->sh_desc_fin_dma && 1803 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) 1804 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, 1805 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE); 1806 if (ctx->sh_desc_digest_dma && 1807 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) 1808 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, 1809 desc_bytes(ctx->sh_desc_digest), 1810 DMA_TO_DEVICE); 1811 if (ctx->sh_desc_finup_dma && 1812 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) 1813 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, 1814 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); 1815 1816 caam_jr_free(ctx->jrdev); 1817 } 1818 1819 static void __exit caam_algapi_hash_exit(void) 1820 { 1821 struct caam_hash_alg *t_alg, *n; 1822 1823 if (!hash_list.next) 1824 return; 1825 1826 list_for_each_entry_safe(t_alg, n, &hash_list, entry) { 1827 crypto_unregister_ahash(&t_alg->ahash_alg); 1828 list_del(&t_alg->entry); 1829 kfree(t_alg); 1830 } 1831 } 1832 1833 static struct caam_hash_alg * 1834 caam_hash_alloc(struct caam_hash_template *template, 1835 bool keyed) 1836 { 1837 struct caam_hash_alg *t_alg; 1838 struct ahash_alg *halg; 1839 struct crypto_alg *alg; 1840 1841 t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); 1842 if (!t_alg) { 1843 pr_err("failed to allocate t_alg\n"); 1844 return ERR_PTR(-ENOMEM); 1845 } 1846 1847 t_alg->ahash_alg = template->template_ahash; 1848 halg = &t_alg->ahash_alg; 1849 alg = &halg->halg.base; 1850 1851 if (keyed) { 1852 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1853 template->hmac_name); 1854 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1855 template->hmac_driver_name); 1856 } else { 1857 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1858 template->name); 1859 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1860 template->driver_name); 1861 } 1862 alg->cra_module = THIS_MODULE; 1863 alg->cra_init = caam_hash_cra_init; 1864 alg->cra_exit = caam_hash_cra_exit; 1865 alg->cra_ctxsize = sizeof(struct caam_hash_ctx); 1866 alg->cra_priority = CAAM_CRA_PRIORITY; 1867 alg->cra_blocksize = template->blocksize; 1868 alg->cra_alignmask = 0; 1869 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH; 1870 alg->cra_type = &crypto_ahash_type; 1871 1872 t_alg->alg_type = template->alg_type; 1873 t_alg->alg_op = template->alg_op; 1874 1875 return t_alg; 1876 } 1877 1878 static int __init caam_algapi_hash_init(void) 1879 { 1880 struct device_node *dev_node; 1881 struct platform_device *pdev; 1882 struct device *ctrldev; 1883 void *priv; 1884 int i = 0, err = 0; 1885 1886 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1887 if (!dev_node) { 1888 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 1889 if (!dev_node) 1890 return -ENODEV; 1891 } 1892 1893 pdev = of_find_device_by_node(dev_node); 1894 if (!pdev) { 1895 of_node_put(dev_node); 1896 return -ENODEV; 1897 } 1898 1899 ctrldev = &pdev->dev; 1900 priv = dev_get_drvdata(ctrldev); 1901 of_node_put(dev_node); 1902 1903 /* 1904 * If priv is NULL, it's probably because the caam driver wasn't 1905 * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 1906 */ 1907 if (!priv) 1908 return -ENODEV; 1909 1910 INIT_LIST_HEAD(&hash_list); 1911 1912 /* register crypto algorithms the device supports */ 1913 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 1914 /* TODO: check if h/w supports alg */ 1915 struct caam_hash_alg *t_alg; 1916 1917 /* register hmac version */ 1918 t_alg = caam_hash_alloc(&driver_hash[i], true); 1919 if (IS_ERR(t_alg)) { 1920 err = PTR_ERR(t_alg); 1921 pr_warn("%s alg allocation failed\n", 1922 driver_hash[i].driver_name); 1923 continue; 1924 } 1925 1926 err = crypto_register_ahash(&t_alg->ahash_alg); 1927 if (err) { 1928 pr_warn("%s alg registration failed\n", 1929 t_alg->ahash_alg.halg.base.cra_driver_name); 1930 kfree(t_alg); 1931 } else 1932 list_add_tail(&t_alg->entry, &hash_list); 1933 1934 /* register unkeyed version */ 1935 t_alg = caam_hash_alloc(&driver_hash[i], false); 1936 if (IS_ERR(t_alg)) { 1937 err = PTR_ERR(t_alg); 1938 pr_warn("%s alg allocation failed\n", 1939 driver_hash[i].driver_name); 1940 continue; 1941 } 1942 1943 err = crypto_register_ahash(&t_alg->ahash_alg); 1944 if (err) { 1945 pr_warn("%s alg registration failed\n", 1946 t_alg->ahash_alg.halg.base.cra_driver_name); 1947 kfree(t_alg); 1948 } else 1949 list_add_tail(&t_alg->entry, &hash_list); 1950 } 1951 1952 return err; 1953 } 1954 1955 module_init(caam_algapi_hash_init); 1956 module_exit(caam_algapi_hash_exit); 1957 1958 MODULE_LICENSE("GPL"); 1959 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); 1960 MODULE_AUTHOR("Freescale Semiconductor - NMG"); 1961