1 /* 2 * caam - Freescale FSL CAAM support for ahash functions of crypto API 3 * 4 * Copyright 2011 Freescale Semiconductor, Inc. 5 * 6 * Based on caamalg.c crypto API driver. 7 * 8 * relationship of digest job descriptor or first job descriptor after init to 9 * shared descriptors: 10 * 11 * --------------- --------------- 12 * | JobDesc #1 |-------------------->| ShareDesc | 13 * | *(packet 1) | | (hashKey) | 14 * --------------- | (operation) | 15 * --------------- 16 * 17 * relationship of subsequent job descriptors to shared descriptors: 18 * 19 * --------------- --------------- 20 * | JobDesc #2 |-------------------->| ShareDesc | 21 * | *(packet 2) | |------------->| (hashKey) | 22 * --------------- | |-------->| (operation) | 23 * . | | | (load ctx2) | 24 * . | | --------------- 25 * --------------- | | 26 * | JobDesc #3 |------| | 27 * | *(packet 3) | | 28 * --------------- | 29 * . | 30 * . | 31 * --------------- | 32 * | JobDesc #4 |------------ 33 * | *(packet 4) | 34 * --------------- 35 * 36 * The SharedDesc never changes for a connection unless rekeyed, but 37 * each packet will likely be in a different place. So all we need 38 * to know to process the packet is where the input is, where the 39 * output goes, and what context we want to process with. Context is 40 * in the SharedDesc, packet references in the JobDesc. 41 * 42 * So, a job desc looks like: 43 * 44 * --------------------- 45 * | Header | 46 * | ShareDesc Pointer | 47 * | SEQ_OUT_PTR | 48 * | (output buffer) | 49 * | (output length) | 50 * | SEQ_IN_PTR | 51 * | (input buffer) | 52 * | (input length) | 53 * --------------------- 54 */ 55 56 #include "compat.h" 57 58 #include "regs.h" 59 #include "intern.h" 60 #include "desc_constr.h" 61 #include "jr.h" 62 #include "error.h" 63 #include "sg_sw_sec4.h" 64 #include "key_gen.h" 65 66 #define CAAM_CRA_PRIORITY 3000 67 68 /* max hash key is max split key size */ 69 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) 70 71 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE 72 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE 73 74 /* length of descriptors text */ 75 #define DESC_AHASH_BASE (3 * CAAM_CMD_SZ) 76 #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ) 77 #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) 78 #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) 79 #define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) 80 #define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) 81 82 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ 83 CAAM_MAX_HASH_KEY_SIZE) 84 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) 85 86 /* caam context sizes for hashes: running digest + 8 */ 87 #define HASH_MSG_LEN 8 88 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) 89 90 #ifdef DEBUG 91 /* for print_hex_dumps with line references */ 92 #define debug(format, arg...) printk(format, arg) 93 #else 94 #define debug(format, arg...) 95 #endif 96 97 98 static struct list_head hash_list; 99 100 /* ahash per-session context */ 101 struct caam_hash_ctx { 102 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 103 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 104 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 105 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 106 dma_addr_t sh_desc_update_dma ____cacheline_aligned; 107 dma_addr_t sh_desc_update_first_dma; 108 dma_addr_t sh_desc_fin_dma; 109 dma_addr_t sh_desc_digest_dma; 110 struct device *jrdev; 111 u32 alg_type; 112 u32 alg_op; 113 u8 key[CAAM_MAX_HASH_KEY_SIZE]; 114 dma_addr_t key_dma; 115 int ctx_len; 116 unsigned int split_key_len; 117 unsigned int split_key_pad_len; 118 }; 119 120 /* ahash state */ 121 struct caam_hash_state { 122 dma_addr_t buf_dma; 123 dma_addr_t ctx_dma; 124 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 125 int buflen_0; 126 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 127 int buflen_1; 128 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; 129 int (*update)(struct ahash_request *req); 130 int (*final)(struct ahash_request *req); 131 int (*finup)(struct ahash_request *req); 132 int current_buf; 133 }; 134 135 struct caam_export_state { 136 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; 137 u8 caam_ctx[MAX_CTX_LEN]; 138 int buflen; 139 int (*update)(struct ahash_request *req); 140 int (*final)(struct ahash_request *req); 141 int (*finup)(struct ahash_request *req); 142 }; 143 144 /* Common job descriptor seq in/out ptr routines */ 145 146 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ 147 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, 148 struct caam_hash_state *state, 149 int ctx_len) 150 { 151 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, 152 ctx_len, DMA_FROM_DEVICE); 153 if (dma_mapping_error(jrdev, state->ctx_dma)) { 154 dev_err(jrdev, "unable to map ctx\n"); 155 return -ENOMEM; 156 } 157 158 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); 159 160 return 0; 161 } 162 163 /* Map req->result, and append seq_out_ptr command that points to it */ 164 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, 165 u8 *result, int digestsize) 166 { 167 dma_addr_t dst_dma; 168 169 dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); 170 append_seq_out_ptr(desc, dst_dma, digestsize, 0); 171 172 return dst_dma; 173 } 174 175 /* Map current buffer in state and put it in link table */ 176 static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev, 177 struct sec4_sg_entry *sec4_sg, 178 u8 *buf, int buflen) 179 { 180 dma_addr_t buf_dma; 181 182 buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 183 dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0); 184 185 return buf_dma; 186 } 187 188 /* 189 * Only put buffer in link table if it contains data, which is possible, 190 * since a buffer has previously been used, and needs to be unmapped, 191 */ 192 static inline dma_addr_t 193 try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, 194 u8 *buf, dma_addr_t buf_dma, int buflen, 195 int last_buflen) 196 { 197 if (buf_dma && !dma_mapping_error(jrdev, buf_dma)) 198 dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE); 199 if (buflen) 200 buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen); 201 else 202 buf_dma = 0; 203 204 return buf_dma; 205 } 206 207 /* Map state->caam_ctx, and add it to link table */ 208 static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, 209 struct caam_hash_state *state, int ctx_len, 210 struct sec4_sg_entry *sec4_sg, u32 flag) 211 { 212 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); 213 if (dma_mapping_error(jrdev, state->ctx_dma)) { 214 dev_err(jrdev, "unable to map ctx\n"); 215 return -ENOMEM; 216 } 217 218 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); 219 220 return 0; 221 } 222 223 /* Common shared descriptor commands */ 224 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) 225 { 226 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, 227 ctx->split_key_len, CLASS_2 | 228 KEY_DEST_MDHA_SPLIT | KEY_ENC); 229 } 230 231 /* Append key if it has been set */ 232 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) 233 { 234 u32 *key_jump_cmd; 235 236 init_sh_desc(desc, HDR_SHARE_SERIAL); 237 238 if (ctx->split_key_len) { 239 /* Skip if already shared */ 240 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 241 JUMP_COND_SHRD); 242 243 append_key_ahash(desc, ctx); 244 245 set_jump_tgt_here(desc, key_jump_cmd); 246 } 247 } 248 249 /* 250 * For ahash read data from seqin following state->caam_ctx, 251 * and write resulting class2 context to seqout, which may be state->caam_ctx 252 * or req->result 253 */ 254 static inline void ahash_append_load_str(u32 *desc, int digestsize) 255 { 256 /* Calculate remaining bytes to read */ 257 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); 258 259 /* Read remaining bytes */ 260 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | 261 FIFOLD_TYPE_MSG | KEY_VLF); 262 263 /* Store class2 context bytes */ 264 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 265 LDST_SRCDST_BYTE_CONTEXT); 266 } 267 268 /* 269 * For ahash update, final and finup, import context, read and write to seqout 270 */ 271 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, 272 int digestsize, 273 struct caam_hash_ctx *ctx) 274 { 275 init_sh_desc_key_ahash(desc, ctx); 276 277 /* Import context from software */ 278 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 279 LDST_CLASS_2_CCB | ctx->ctx_len); 280 281 /* Class 2 operation */ 282 append_operation(desc, op | state | OP_ALG_ENCRYPT); 283 284 /* 285 * Load from buf and/or src and write to req->result or state->context 286 */ 287 ahash_append_load_str(desc, digestsize); 288 } 289 290 /* For ahash firsts and digest, read and write to seqout */ 291 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, 292 int digestsize, struct caam_hash_ctx *ctx) 293 { 294 init_sh_desc_key_ahash(desc, ctx); 295 296 /* Class 2 operation */ 297 append_operation(desc, op | state | OP_ALG_ENCRYPT); 298 299 /* 300 * Load from buf and/or src and write to req->result or state->context 301 */ 302 ahash_append_load_str(desc, digestsize); 303 } 304 305 static int ahash_set_sh_desc(struct crypto_ahash *ahash) 306 { 307 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 308 int digestsize = crypto_ahash_digestsize(ahash); 309 struct device *jrdev = ctx->jrdev; 310 u32 have_key = 0; 311 u32 *desc; 312 313 if (ctx->split_key_len) 314 have_key = OP_ALG_AAI_HMAC_PRECOMP; 315 316 /* ahash_update shared descriptor */ 317 desc = ctx->sh_desc_update; 318 319 init_sh_desc(desc, HDR_SHARE_SERIAL); 320 321 /* Import context from software */ 322 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | 323 LDST_CLASS_2_CCB | ctx->ctx_len); 324 325 /* Class 2 operation */ 326 append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | 327 OP_ALG_ENCRYPT); 328 329 /* Load data and write to result or context */ 330 ahash_append_load_str(desc, ctx->ctx_len); 331 332 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 333 DMA_TO_DEVICE); 334 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { 335 dev_err(jrdev, "unable to map shared descriptor\n"); 336 return -ENOMEM; 337 } 338 #ifdef DEBUG 339 print_hex_dump(KERN_ERR, 340 "ahash update shdesc@"__stringify(__LINE__)": ", 341 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 342 #endif 343 344 /* ahash_update_first shared descriptor */ 345 desc = ctx->sh_desc_update_first; 346 347 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, 348 ctx->ctx_len, ctx); 349 350 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, 351 desc_bytes(desc), 352 DMA_TO_DEVICE); 353 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) { 354 dev_err(jrdev, "unable to map shared descriptor\n"); 355 return -ENOMEM; 356 } 357 #ifdef DEBUG 358 print_hex_dump(KERN_ERR, 359 "ahash update first shdesc@"__stringify(__LINE__)": ", 360 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 361 #endif 362 363 /* ahash_final shared descriptor */ 364 desc = ctx->sh_desc_fin; 365 366 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, 367 OP_ALG_AS_FINALIZE, digestsize, ctx); 368 369 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), 370 DMA_TO_DEVICE); 371 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { 372 dev_err(jrdev, "unable to map shared descriptor\n"); 373 return -ENOMEM; 374 } 375 #ifdef DEBUG 376 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 377 DUMP_PREFIX_ADDRESS, 16, 4, desc, 378 desc_bytes(desc), 1); 379 #endif 380 381 /* ahash_digest shared descriptor */ 382 desc = ctx->sh_desc_digest; 383 384 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, 385 digestsize, ctx); 386 387 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, 388 desc_bytes(desc), 389 DMA_TO_DEVICE); 390 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) { 391 dev_err(jrdev, "unable to map shared descriptor\n"); 392 return -ENOMEM; 393 } 394 #ifdef DEBUG 395 print_hex_dump(KERN_ERR, 396 "ahash digest shdesc@"__stringify(__LINE__)": ", 397 DUMP_PREFIX_ADDRESS, 16, 4, desc, 398 desc_bytes(desc), 1); 399 #endif 400 401 return 0; 402 } 403 404 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, 405 u32 keylen) 406 { 407 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, 408 ctx->split_key_pad_len, key_in, keylen, 409 ctx->alg_op); 410 } 411 412 /* Digest hash size if it is too large */ 413 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, 414 u32 *keylen, u8 *key_out, u32 digestsize) 415 { 416 struct device *jrdev = ctx->jrdev; 417 u32 *desc; 418 struct split_key_result result; 419 dma_addr_t src_dma, dst_dma; 420 int ret; 421 422 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 423 if (!desc) { 424 dev_err(jrdev, "unable to allocate key input memory\n"); 425 return -ENOMEM; 426 } 427 428 init_job_desc(desc, 0); 429 430 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, 431 DMA_TO_DEVICE); 432 if (dma_mapping_error(jrdev, src_dma)) { 433 dev_err(jrdev, "unable to map key input memory\n"); 434 kfree(desc); 435 return -ENOMEM; 436 } 437 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, 438 DMA_FROM_DEVICE); 439 if (dma_mapping_error(jrdev, dst_dma)) { 440 dev_err(jrdev, "unable to map key output memory\n"); 441 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 442 kfree(desc); 443 return -ENOMEM; 444 } 445 446 /* Job descriptor to perform unkeyed hash on key_in */ 447 append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | 448 OP_ALG_AS_INITFINAL); 449 append_seq_in_ptr(desc, src_dma, *keylen, 0); 450 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | 451 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); 452 append_seq_out_ptr(desc, dst_dma, digestsize, 0); 453 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 454 LDST_SRCDST_BYTE_CONTEXT); 455 456 #ifdef DEBUG 457 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", 458 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); 459 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 460 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 461 #endif 462 463 result.err = 0; 464 init_completion(&result.completion); 465 466 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); 467 if (!ret) { 468 /* in progress */ 469 wait_for_completion_interruptible(&result.completion); 470 ret = result.err; 471 #ifdef DEBUG 472 print_hex_dump(KERN_ERR, 473 "digested key@"__stringify(__LINE__)": ", 474 DUMP_PREFIX_ADDRESS, 16, 4, key_in, 475 digestsize, 1); 476 #endif 477 } 478 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 479 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); 480 481 *keylen = digestsize; 482 483 kfree(desc); 484 485 return ret; 486 } 487 488 static int ahash_setkey(struct crypto_ahash *ahash, 489 const u8 *key, unsigned int keylen) 490 { 491 /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ 492 static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; 493 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 494 struct device *jrdev = ctx->jrdev; 495 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 496 int digestsize = crypto_ahash_digestsize(ahash); 497 int ret; 498 u8 *hashed_key = NULL; 499 500 #ifdef DEBUG 501 printk(KERN_ERR "keylen %d\n", keylen); 502 #endif 503 504 if (keylen > blocksize) { 505 hashed_key = kmalloc_array(digestsize, 506 sizeof(*hashed_key), 507 GFP_KERNEL | GFP_DMA); 508 if (!hashed_key) 509 return -ENOMEM; 510 ret = hash_digest_key(ctx, key, &keylen, hashed_key, 511 digestsize); 512 if (ret) 513 goto bad_free_key; 514 key = hashed_key; 515 } 516 517 /* Pick class 2 key length from algorithm submask */ 518 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> 519 OP_ALG_ALGSEL_SHIFT] * 2; 520 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); 521 522 #ifdef DEBUG 523 printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n", 524 ctx->split_key_len, ctx->split_key_pad_len); 525 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 526 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 527 #endif 528 529 ret = gen_split_hash_key(ctx, key, keylen); 530 if (ret) 531 goto bad_free_key; 532 533 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, 534 DMA_TO_DEVICE); 535 if (dma_mapping_error(jrdev, ctx->key_dma)) { 536 dev_err(jrdev, "unable to map key i/o memory\n"); 537 ret = -ENOMEM; 538 goto error_free_key; 539 } 540 #ifdef DEBUG 541 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 542 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 543 ctx->split_key_pad_len, 1); 544 #endif 545 546 ret = ahash_set_sh_desc(ahash); 547 if (ret) { 548 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, 549 DMA_TO_DEVICE); 550 } 551 error_free_key: 552 kfree(hashed_key); 553 return ret; 554 bad_free_key: 555 kfree(hashed_key); 556 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); 557 return -EINVAL; 558 } 559 560 /* 561 * ahash_edesc - s/w-extended ahash descriptor 562 * @dst_dma: physical mapped address of req->result 563 * @sec4_sg_dma: physical mapped address of h/w link table 564 * @src_nents: number of segments in input scatterlist 565 * @sec4_sg_bytes: length of dma mapped sec4_sg space 566 * @hw_desc: the h/w job descriptor followed by any referenced link tables 567 * @sec4_sg: h/w link table 568 */ 569 struct ahash_edesc { 570 dma_addr_t dst_dma; 571 dma_addr_t sec4_sg_dma; 572 int src_nents; 573 int sec4_sg_bytes; 574 u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; 575 struct sec4_sg_entry sec4_sg[0]; 576 }; 577 578 static inline void ahash_unmap(struct device *dev, 579 struct ahash_edesc *edesc, 580 struct ahash_request *req, int dst_len) 581 { 582 if (edesc->src_nents) 583 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 584 if (edesc->dst_dma) 585 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); 586 587 if (edesc->sec4_sg_bytes) 588 dma_unmap_single(dev, edesc->sec4_sg_dma, 589 edesc->sec4_sg_bytes, DMA_TO_DEVICE); 590 } 591 592 static inline void ahash_unmap_ctx(struct device *dev, 593 struct ahash_edesc *edesc, 594 struct ahash_request *req, int dst_len, u32 flag) 595 { 596 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 597 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 598 struct caam_hash_state *state = ahash_request_ctx(req); 599 600 if (state->ctx_dma) 601 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); 602 ahash_unmap(dev, edesc, req, dst_len); 603 } 604 605 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, 606 void *context) 607 { 608 struct ahash_request *req = context; 609 struct ahash_edesc *edesc; 610 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 611 int digestsize = crypto_ahash_digestsize(ahash); 612 #ifdef DEBUG 613 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 614 struct caam_hash_state *state = ahash_request_ctx(req); 615 616 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 617 #endif 618 619 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 620 if (err) 621 caam_jr_strstatus(jrdev, err); 622 623 ahash_unmap(jrdev, edesc, req, digestsize); 624 kfree(edesc); 625 626 #ifdef DEBUG 627 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 628 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 629 ctx->ctx_len, 1); 630 if (req->result) 631 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 632 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 633 digestsize, 1); 634 #endif 635 636 req->base.complete(&req->base, err); 637 } 638 639 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, 640 void *context) 641 { 642 struct ahash_request *req = context; 643 struct ahash_edesc *edesc; 644 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 645 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 646 #ifdef DEBUG 647 struct caam_hash_state *state = ahash_request_ctx(req); 648 int digestsize = crypto_ahash_digestsize(ahash); 649 650 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 651 #endif 652 653 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 654 if (err) 655 caam_jr_strstatus(jrdev, err); 656 657 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 658 kfree(edesc); 659 660 #ifdef DEBUG 661 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 662 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 663 ctx->ctx_len, 1); 664 if (req->result) 665 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 666 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 667 digestsize, 1); 668 #endif 669 670 req->base.complete(&req->base, err); 671 } 672 673 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, 674 void *context) 675 { 676 struct ahash_request *req = context; 677 struct ahash_edesc *edesc; 678 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 679 int digestsize = crypto_ahash_digestsize(ahash); 680 #ifdef DEBUG 681 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 682 struct caam_hash_state *state = ahash_request_ctx(req); 683 684 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 685 #endif 686 687 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 688 if (err) 689 caam_jr_strstatus(jrdev, err); 690 691 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); 692 kfree(edesc); 693 694 #ifdef DEBUG 695 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 696 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 697 ctx->ctx_len, 1); 698 if (req->result) 699 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 700 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 701 digestsize, 1); 702 #endif 703 704 req->base.complete(&req->base, err); 705 } 706 707 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, 708 void *context) 709 { 710 struct ahash_request *req = context; 711 struct ahash_edesc *edesc; 712 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 713 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 714 #ifdef DEBUG 715 struct caam_hash_state *state = ahash_request_ctx(req); 716 int digestsize = crypto_ahash_digestsize(ahash); 717 718 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 719 #endif 720 721 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 722 if (err) 723 caam_jr_strstatus(jrdev, err); 724 725 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); 726 kfree(edesc); 727 728 #ifdef DEBUG 729 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 730 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 731 ctx->ctx_len, 1); 732 if (req->result) 733 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 734 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 735 digestsize, 1); 736 #endif 737 738 req->base.complete(&req->base, err); 739 } 740 741 /* 742 * Allocate an enhanced descriptor, which contains the hardware descriptor 743 * and space for hardware scatter table containing sg_num entries. 744 */ 745 static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, 746 int sg_num, u32 *sh_desc, 747 dma_addr_t sh_desc_dma, 748 gfp_t flags) 749 { 750 struct ahash_edesc *edesc; 751 unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); 752 753 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); 754 if (!edesc) { 755 dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); 756 return NULL; 757 } 758 759 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), 760 HDR_SHARE_DEFER | HDR_REVERSE); 761 762 return edesc; 763 } 764 765 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, 766 struct ahash_edesc *edesc, 767 struct ahash_request *req, int nents, 768 unsigned int first_sg, 769 unsigned int first_bytes, size_t to_hash) 770 { 771 dma_addr_t src_dma; 772 u32 options; 773 774 if (nents > 1 || first_sg) { 775 struct sec4_sg_entry *sg = edesc->sec4_sg; 776 unsigned int sgsize = sizeof(*sg) * (first_sg + nents); 777 778 sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); 779 780 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); 781 if (dma_mapping_error(ctx->jrdev, src_dma)) { 782 dev_err(ctx->jrdev, "unable to map S/G table\n"); 783 return -ENOMEM; 784 } 785 786 edesc->sec4_sg_bytes = sgsize; 787 edesc->sec4_sg_dma = src_dma; 788 options = LDST_SGF; 789 } else { 790 src_dma = sg_dma_address(req->src); 791 options = 0; 792 } 793 794 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, 795 options); 796 797 return 0; 798 } 799 800 /* submit update job descriptor */ 801 static int ahash_update_ctx(struct ahash_request *req) 802 { 803 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 804 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 805 struct caam_hash_state *state = ahash_request_ctx(req); 806 struct device *jrdev = ctx->jrdev; 807 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 808 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 809 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 810 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; 811 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; 812 int *next_buflen = state->current_buf ? &state->buflen_0 : 813 &state->buflen_1, last_buflen; 814 int in_len = *buflen + req->nbytes, to_hash; 815 u32 *desc; 816 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; 817 struct ahash_edesc *edesc; 818 int ret = 0; 819 820 last_buflen = *next_buflen; 821 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); 822 to_hash = in_len - *next_buflen; 823 824 if (to_hash) { 825 src_nents = sg_nents_for_len(req->src, 826 req->nbytes - (*next_buflen)); 827 if (src_nents < 0) { 828 dev_err(jrdev, "Invalid number of src SG.\n"); 829 return src_nents; 830 } 831 832 if (src_nents) { 833 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 834 DMA_TO_DEVICE); 835 if (!mapped_nents) { 836 dev_err(jrdev, "unable to DMA map source\n"); 837 return -ENOMEM; 838 } 839 } else { 840 mapped_nents = 0; 841 } 842 843 sec4_sg_src_index = 1 + (*buflen ? 1 : 0); 844 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 845 sizeof(struct sec4_sg_entry); 846 847 /* 848 * allocate space for base edesc and hw desc commands, 849 * link tables 850 */ 851 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 852 ctx->sh_desc_update, 853 ctx->sh_desc_update_dma, flags); 854 if (!edesc) { 855 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 856 return -ENOMEM; 857 } 858 859 edesc->src_nents = src_nents; 860 edesc->sec4_sg_bytes = sec4_sg_bytes; 861 862 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 863 edesc->sec4_sg, DMA_BIDIRECTIONAL); 864 if (ret) 865 goto unmap_ctx; 866 867 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, 868 edesc->sec4_sg + 1, 869 buf, state->buf_dma, 870 *buflen, last_buflen); 871 872 if (mapped_nents) { 873 sg_to_sec4_sg_last(req->src, mapped_nents, 874 edesc->sec4_sg + sec4_sg_src_index, 875 0); 876 if (*next_buflen) 877 scatterwalk_map_and_copy(next_buf, req->src, 878 to_hash - *buflen, 879 *next_buflen, 0); 880 } else { 881 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= 882 cpu_to_caam32(SEC4_SG_LEN_FIN); 883 } 884 885 state->current_buf = !state->current_buf; 886 887 desc = edesc->hw_desc; 888 889 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 890 sec4_sg_bytes, 891 DMA_TO_DEVICE); 892 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 893 dev_err(jrdev, "unable to map S/G table\n"); 894 ret = -ENOMEM; 895 goto unmap_ctx; 896 } 897 898 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + 899 to_hash, LDST_SGF); 900 901 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 902 903 #ifdef DEBUG 904 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 905 DUMP_PREFIX_ADDRESS, 16, 4, desc, 906 desc_bytes(desc), 1); 907 #endif 908 909 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); 910 if (ret) 911 goto unmap_ctx; 912 913 ret = -EINPROGRESS; 914 } else if (*next_buflen) { 915 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 916 req->nbytes, 0); 917 *buflen = *next_buflen; 918 *next_buflen = last_buflen; 919 } 920 #ifdef DEBUG 921 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 922 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 923 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 924 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 925 *next_buflen, 1); 926 #endif 927 928 return ret; 929 unmap_ctx: 930 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 931 kfree(edesc); 932 return ret; 933 } 934 935 static int ahash_final_ctx(struct ahash_request *req) 936 { 937 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 938 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 939 struct caam_hash_state *state = ahash_request_ctx(req); 940 struct device *jrdev = ctx->jrdev; 941 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 942 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 943 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 944 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 945 int last_buflen = state->current_buf ? state->buflen_0 : 946 state->buflen_1; 947 u32 *desc; 948 int sec4_sg_bytes, sec4_sg_src_index; 949 int digestsize = crypto_ahash_digestsize(ahash); 950 struct ahash_edesc *edesc; 951 int ret; 952 953 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 954 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); 955 956 /* allocate space for base edesc and hw desc commands, link tables */ 957 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, 958 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 959 flags); 960 if (!edesc) 961 return -ENOMEM; 962 963 desc = edesc->hw_desc; 964 965 edesc->sec4_sg_bytes = sec4_sg_bytes; 966 edesc->src_nents = 0; 967 968 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 969 edesc->sec4_sg, DMA_TO_DEVICE); 970 if (ret) 971 goto unmap_ctx; 972 973 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, 974 buf, state->buf_dma, buflen, 975 last_buflen); 976 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= 977 cpu_to_caam32(SEC4_SG_LEN_FIN); 978 979 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 980 sec4_sg_bytes, DMA_TO_DEVICE); 981 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 982 dev_err(jrdev, "unable to map S/G table\n"); 983 ret = -ENOMEM; 984 goto unmap_ctx; 985 } 986 987 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, 988 LDST_SGF); 989 990 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 991 digestsize); 992 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 993 dev_err(jrdev, "unable to map dst\n"); 994 ret = -ENOMEM; 995 goto unmap_ctx; 996 } 997 998 #ifdef DEBUG 999 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1000 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1001 #endif 1002 1003 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 1004 if (ret) 1005 goto unmap_ctx; 1006 1007 return -EINPROGRESS; 1008 unmap_ctx: 1009 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1010 kfree(edesc); 1011 return ret; 1012 } 1013 1014 static int ahash_finup_ctx(struct ahash_request *req) 1015 { 1016 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1017 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1018 struct caam_hash_state *state = ahash_request_ctx(req); 1019 struct device *jrdev = ctx->jrdev; 1020 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1021 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1022 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1023 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 1024 int last_buflen = state->current_buf ? state->buflen_0 : 1025 state->buflen_1; 1026 u32 *desc; 1027 int sec4_sg_src_index; 1028 int src_nents, mapped_nents; 1029 int digestsize = crypto_ahash_digestsize(ahash); 1030 struct ahash_edesc *edesc; 1031 int ret; 1032 1033 src_nents = sg_nents_for_len(req->src, req->nbytes); 1034 if (src_nents < 0) { 1035 dev_err(jrdev, "Invalid number of src SG.\n"); 1036 return src_nents; 1037 } 1038 1039 if (src_nents) { 1040 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1041 DMA_TO_DEVICE); 1042 if (!mapped_nents) { 1043 dev_err(jrdev, "unable to DMA map source\n"); 1044 return -ENOMEM; 1045 } 1046 } else { 1047 mapped_nents = 0; 1048 } 1049 1050 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 1051 1052 /* allocate space for base edesc and hw desc commands, link tables */ 1053 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 1054 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 1055 flags); 1056 if (!edesc) { 1057 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1058 return -ENOMEM; 1059 } 1060 1061 desc = edesc->hw_desc; 1062 1063 edesc->src_nents = src_nents; 1064 1065 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 1066 edesc->sec4_sg, DMA_TO_DEVICE); 1067 if (ret) 1068 goto unmap_ctx; 1069 1070 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, 1071 buf, state->buf_dma, buflen, 1072 last_buflen); 1073 1074 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1075 sec4_sg_src_index, ctx->ctx_len + buflen, 1076 req->nbytes); 1077 if (ret) 1078 goto unmap_ctx; 1079 1080 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1081 digestsize); 1082 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1083 dev_err(jrdev, "unable to map dst\n"); 1084 ret = -ENOMEM; 1085 goto unmap_ctx; 1086 } 1087 1088 #ifdef DEBUG 1089 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1090 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1091 #endif 1092 1093 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 1094 if (ret) 1095 goto unmap_ctx; 1096 1097 return -EINPROGRESS; 1098 unmap_ctx: 1099 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1100 kfree(edesc); 1101 return ret; 1102 } 1103 1104 static int ahash_digest(struct ahash_request *req) 1105 { 1106 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1107 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1108 struct device *jrdev = ctx->jrdev; 1109 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1110 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1111 u32 *desc; 1112 int digestsize = crypto_ahash_digestsize(ahash); 1113 int src_nents, mapped_nents; 1114 struct ahash_edesc *edesc; 1115 int ret; 1116 1117 src_nents = sg_nents_for_len(req->src, req->nbytes); 1118 if (src_nents < 0) { 1119 dev_err(jrdev, "Invalid number of src SG.\n"); 1120 return src_nents; 1121 } 1122 1123 if (src_nents) { 1124 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1125 DMA_TO_DEVICE); 1126 if (!mapped_nents) { 1127 dev_err(jrdev, "unable to map source for DMA\n"); 1128 return -ENOMEM; 1129 } 1130 } else { 1131 mapped_nents = 0; 1132 } 1133 1134 /* allocate space for base edesc and hw desc commands, link tables */ 1135 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0, 1136 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1137 flags); 1138 if (!edesc) { 1139 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1140 return -ENOMEM; 1141 } 1142 1143 edesc->src_nents = src_nents; 1144 1145 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1146 req->nbytes); 1147 if (ret) { 1148 ahash_unmap(jrdev, edesc, req, digestsize); 1149 kfree(edesc); 1150 return ret; 1151 } 1152 1153 desc = edesc->hw_desc; 1154 1155 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1156 digestsize); 1157 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1158 dev_err(jrdev, "unable to map dst\n"); 1159 ahash_unmap(jrdev, edesc, req, digestsize); 1160 kfree(edesc); 1161 return -ENOMEM; 1162 } 1163 1164 #ifdef DEBUG 1165 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1166 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1167 #endif 1168 1169 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1170 if (!ret) { 1171 ret = -EINPROGRESS; 1172 } else { 1173 ahash_unmap(jrdev, edesc, req, digestsize); 1174 kfree(edesc); 1175 } 1176 1177 return ret; 1178 } 1179 1180 /* submit ahash final if it the first job descriptor */ 1181 static int ahash_final_no_ctx(struct ahash_request *req) 1182 { 1183 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1184 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1185 struct caam_hash_state *state = ahash_request_ctx(req); 1186 struct device *jrdev = ctx->jrdev; 1187 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1188 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1189 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1190 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 1191 u32 *desc; 1192 int digestsize = crypto_ahash_digestsize(ahash); 1193 struct ahash_edesc *edesc; 1194 int ret; 1195 1196 /* allocate space for base edesc and hw desc commands, link tables */ 1197 edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest, 1198 ctx->sh_desc_digest_dma, flags); 1199 if (!edesc) 1200 return -ENOMEM; 1201 1202 desc = edesc->hw_desc; 1203 1204 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 1205 if (dma_mapping_error(jrdev, state->buf_dma)) { 1206 dev_err(jrdev, "unable to map src\n"); 1207 goto unmap; 1208 } 1209 1210 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1211 1212 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1213 digestsize); 1214 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1215 dev_err(jrdev, "unable to map dst\n"); 1216 goto unmap; 1217 } 1218 edesc->src_nents = 0; 1219 1220 #ifdef DEBUG 1221 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1222 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1223 #endif 1224 1225 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1226 if (!ret) { 1227 ret = -EINPROGRESS; 1228 } else { 1229 ahash_unmap(jrdev, edesc, req, digestsize); 1230 kfree(edesc); 1231 } 1232 1233 return ret; 1234 unmap: 1235 ahash_unmap(jrdev, edesc, req, digestsize); 1236 kfree(edesc); 1237 return -ENOMEM; 1238 1239 } 1240 1241 /* submit ahash update if it the first job descriptor after update */ 1242 static int ahash_update_no_ctx(struct ahash_request *req) 1243 { 1244 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1245 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1246 struct caam_hash_state *state = ahash_request_ctx(req); 1247 struct device *jrdev = ctx->jrdev; 1248 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1249 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1250 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1251 int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0; 1252 u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1; 1253 int *next_buflen = state->current_buf ? &state->buflen_0 : 1254 &state->buflen_1; 1255 int in_len = *buflen + req->nbytes, to_hash; 1256 int sec4_sg_bytes, src_nents, mapped_nents; 1257 struct ahash_edesc *edesc; 1258 u32 *desc; 1259 int ret = 0; 1260 1261 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); 1262 to_hash = in_len - *next_buflen; 1263 1264 if (to_hash) { 1265 src_nents = sg_nents_for_len(req->src, 1266 req->nbytes - *next_buflen); 1267 if (src_nents < 0) { 1268 dev_err(jrdev, "Invalid number of src SG.\n"); 1269 return src_nents; 1270 } 1271 1272 if (src_nents) { 1273 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1274 DMA_TO_DEVICE); 1275 if (!mapped_nents) { 1276 dev_err(jrdev, "unable to DMA map source\n"); 1277 return -ENOMEM; 1278 } 1279 } else { 1280 mapped_nents = 0; 1281 } 1282 1283 sec4_sg_bytes = (1 + mapped_nents) * 1284 sizeof(struct sec4_sg_entry); 1285 1286 /* 1287 * allocate space for base edesc and hw desc commands, 1288 * link tables 1289 */ 1290 edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, 1291 ctx->sh_desc_update_first, 1292 ctx->sh_desc_update_first_dma, 1293 flags); 1294 if (!edesc) { 1295 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1296 return -ENOMEM; 1297 } 1298 1299 edesc->src_nents = src_nents; 1300 edesc->sec4_sg_bytes = sec4_sg_bytes; 1301 edesc->dst_dma = 0; 1302 1303 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, 1304 buf, *buflen); 1305 sg_to_sec4_sg_last(req->src, mapped_nents, 1306 edesc->sec4_sg + 1, 0); 1307 1308 if (*next_buflen) { 1309 scatterwalk_map_and_copy(next_buf, req->src, 1310 to_hash - *buflen, 1311 *next_buflen, 0); 1312 } 1313 1314 state->current_buf = !state->current_buf; 1315 1316 desc = edesc->hw_desc; 1317 1318 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1319 sec4_sg_bytes, 1320 DMA_TO_DEVICE); 1321 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1322 dev_err(jrdev, "unable to map S/G table\n"); 1323 ret = -ENOMEM; 1324 goto unmap_ctx; 1325 } 1326 1327 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); 1328 1329 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1330 if (ret) 1331 goto unmap_ctx; 1332 1333 #ifdef DEBUG 1334 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1335 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1336 desc_bytes(desc), 1); 1337 #endif 1338 1339 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1340 if (ret) 1341 goto unmap_ctx; 1342 1343 ret = -EINPROGRESS; 1344 state->update = ahash_update_ctx; 1345 state->finup = ahash_finup_ctx; 1346 state->final = ahash_final_ctx; 1347 } else if (*next_buflen) { 1348 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 1349 req->nbytes, 0); 1350 *buflen = *next_buflen; 1351 *next_buflen = 0; 1352 } 1353 #ifdef DEBUG 1354 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 1355 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1356 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1357 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1358 *next_buflen, 1); 1359 #endif 1360 1361 return ret; 1362 unmap_ctx: 1363 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1364 kfree(edesc); 1365 return ret; 1366 } 1367 1368 /* submit ahash finup if it the first job descriptor after update */ 1369 static int ahash_finup_no_ctx(struct ahash_request *req) 1370 { 1371 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1372 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1373 struct caam_hash_state *state = ahash_request_ctx(req); 1374 struct device *jrdev = ctx->jrdev; 1375 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1376 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1377 u8 *buf = state->current_buf ? state->buf_1 : state->buf_0; 1378 int buflen = state->current_buf ? state->buflen_1 : state->buflen_0; 1379 int last_buflen = state->current_buf ? state->buflen_0 : 1380 state->buflen_1; 1381 u32 *desc; 1382 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; 1383 int digestsize = crypto_ahash_digestsize(ahash); 1384 struct ahash_edesc *edesc; 1385 int ret; 1386 1387 src_nents = sg_nents_for_len(req->src, req->nbytes); 1388 if (src_nents < 0) { 1389 dev_err(jrdev, "Invalid number of src SG.\n"); 1390 return src_nents; 1391 } 1392 1393 if (src_nents) { 1394 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1395 DMA_TO_DEVICE); 1396 if (!mapped_nents) { 1397 dev_err(jrdev, "unable to DMA map source\n"); 1398 return -ENOMEM; 1399 } 1400 } else { 1401 mapped_nents = 0; 1402 } 1403 1404 sec4_sg_src_index = 2; 1405 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 1406 sizeof(struct sec4_sg_entry); 1407 1408 /* allocate space for base edesc and hw desc commands, link tables */ 1409 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 1410 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1411 flags); 1412 if (!edesc) { 1413 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1414 return -ENOMEM; 1415 } 1416 1417 desc = edesc->hw_desc; 1418 1419 edesc->src_nents = src_nents; 1420 edesc->sec4_sg_bytes = sec4_sg_bytes; 1421 1422 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf, 1423 state->buf_dma, buflen, 1424 last_buflen); 1425 1426 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, 1427 req->nbytes); 1428 if (ret) { 1429 dev_err(jrdev, "unable to map S/G table\n"); 1430 goto unmap; 1431 } 1432 1433 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1434 digestsize); 1435 if (dma_mapping_error(jrdev, edesc->dst_dma)) { 1436 dev_err(jrdev, "unable to map dst\n"); 1437 goto unmap; 1438 } 1439 1440 #ifdef DEBUG 1441 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1442 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1443 #endif 1444 1445 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1446 if (!ret) { 1447 ret = -EINPROGRESS; 1448 } else { 1449 ahash_unmap(jrdev, edesc, req, digestsize); 1450 kfree(edesc); 1451 } 1452 1453 return ret; 1454 unmap: 1455 ahash_unmap(jrdev, edesc, req, digestsize); 1456 kfree(edesc); 1457 return -ENOMEM; 1458 1459 } 1460 1461 /* submit first update job descriptor after init */ 1462 static int ahash_update_first(struct ahash_request *req) 1463 { 1464 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1465 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1466 struct caam_hash_state *state = ahash_request_ctx(req); 1467 struct device *jrdev = ctx->jrdev; 1468 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1469 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 1470 u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0; 1471 int *next_buflen = state->current_buf ? 1472 &state->buflen_1 : &state->buflen_0; 1473 int to_hash; 1474 u32 *desc; 1475 int src_nents, mapped_nents; 1476 struct ahash_edesc *edesc; 1477 int ret = 0; 1478 1479 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - 1480 1); 1481 to_hash = req->nbytes - *next_buflen; 1482 1483 if (to_hash) { 1484 src_nents = sg_nents_for_len(req->src, 1485 req->nbytes - *next_buflen); 1486 if (src_nents < 0) { 1487 dev_err(jrdev, "Invalid number of src SG.\n"); 1488 return src_nents; 1489 } 1490 1491 if (src_nents) { 1492 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1493 DMA_TO_DEVICE); 1494 if (!mapped_nents) { 1495 dev_err(jrdev, "unable to map source for DMA\n"); 1496 return -ENOMEM; 1497 } 1498 } else { 1499 mapped_nents = 0; 1500 } 1501 1502 /* 1503 * allocate space for base edesc and hw desc commands, 1504 * link tables 1505 */ 1506 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 1507 mapped_nents : 0, 1508 ctx->sh_desc_update_first, 1509 ctx->sh_desc_update_first_dma, 1510 flags); 1511 if (!edesc) { 1512 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1513 return -ENOMEM; 1514 } 1515 1516 edesc->src_nents = src_nents; 1517 edesc->dst_dma = 0; 1518 1519 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1520 to_hash); 1521 if (ret) 1522 goto unmap_ctx; 1523 1524 if (*next_buflen) 1525 scatterwalk_map_and_copy(next_buf, req->src, to_hash, 1526 *next_buflen, 0); 1527 1528 desc = edesc->hw_desc; 1529 1530 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1531 if (ret) 1532 goto unmap_ctx; 1533 1534 #ifdef DEBUG 1535 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1536 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1537 desc_bytes(desc), 1); 1538 #endif 1539 1540 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1541 if (ret) 1542 goto unmap_ctx; 1543 1544 ret = -EINPROGRESS; 1545 state->update = ahash_update_ctx; 1546 state->finup = ahash_finup_ctx; 1547 state->final = ahash_final_ctx; 1548 } else if (*next_buflen) { 1549 state->update = ahash_update_no_ctx; 1550 state->finup = ahash_finup_no_ctx; 1551 state->final = ahash_final_no_ctx; 1552 scatterwalk_map_and_copy(next_buf, req->src, 0, 1553 req->nbytes, 0); 1554 } 1555 #ifdef DEBUG 1556 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1557 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1558 *next_buflen, 1); 1559 #endif 1560 1561 return ret; 1562 unmap_ctx: 1563 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1564 kfree(edesc); 1565 return ret; 1566 } 1567 1568 static int ahash_finup_first(struct ahash_request *req) 1569 { 1570 return ahash_digest(req); 1571 } 1572 1573 static int ahash_init(struct ahash_request *req) 1574 { 1575 struct caam_hash_state *state = ahash_request_ctx(req); 1576 1577 state->update = ahash_update_first; 1578 state->finup = ahash_finup_first; 1579 state->final = ahash_final_no_ctx; 1580 1581 state->current_buf = 0; 1582 state->buf_dma = 0; 1583 state->buflen_0 = 0; 1584 state->buflen_1 = 0; 1585 1586 return 0; 1587 } 1588 1589 static int ahash_update(struct ahash_request *req) 1590 { 1591 struct caam_hash_state *state = ahash_request_ctx(req); 1592 1593 return state->update(req); 1594 } 1595 1596 static int ahash_finup(struct ahash_request *req) 1597 { 1598 struct caam_hash_state *state = ahash_request_ctx(req); 1599 1600 return state->finup(req); 1601 } 1602 1603 static int ahash_final(struct ahash_request *req) 1604 { 1605 struct caam_hash_state *state = ahash_request_ctx(req); 1606 1607 return state->final(req); 1608 } 1609 1610 static int ahash_export(struct ahash_request *req, void *out) 1611 { 1612 struct caam_hash_state *state = ahash_request_ctx(req); 1613 struct caam_export_state *export = out; 1614 int len; 1615 u8 *buf; 1616 1617 if (state->current_buf) { 1618 buf = state->buf_1; 1619 len = state->buflen_1; 1620 } else { 1621 buf = state->buf_0; 1622 len = state->buflen_0; 1623 } 1624 1625 memcpy(export->buf, buf, len); 1626 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); 1627 export->buflen = len; 1628 export->update = state->update; 1629 export->final = state->final; 1630 export->finup = state->finup; 1631 1632 return 0; 1633 } 1634 1635 static int ahash_import(struct ahash_request *req, const void *in) 1636 { 1637 struct caam_hash_state *state = ahash_request_ctx(req); 1638 const struct caam_export_state *export = in; 1639 1640 memset(state, 0, sizeof(*state)); 1641 memcpy(state->buf_0, export->buf, export->buflen); 1642 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); 1643 state->buflen_0 = export->buflen; 1644 state->update = export->update; 1645 state->final = export->final; 1646 state->finup = export->finup; 1647 1648 return 0; 1649 } 1650 1651 struct caam_hash_template { 1652 char name[CRYPTO_MAX_ALG_NAME]; 1653 char driver_name[CRYPTO_MAX_ALG_NAME]; 1654 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1655 char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1656 unsigned int blocksize; 1657 struct ahash_alg template_ahash; 1658 u32 alg_type; 1659 u32 alg_op; 1660 }; 1661 1662 /* ahash descriptors */ 1663 static struct caam_hash_template driver_hash[] = { 1664 { 1665 .name = "sha1", 1666 .driver_name = "sha1-caam", 1667 .hmac_name = "hmac(sha1)", 1668 .hmac_driver_name = "hmac-sha1-caam", 1669 .blocksize = SHA1_BLOCK_SIZE, 1670 .template_ahash = { 1671 .init = ahash_init, 1672 .update = ahash_update, 1673 .final = ahash_final, 1674 .finup = ahash_finup, 1675 .digest = ahash_digest, 1676 .export = ahash_export, 1677 .import = ahash_import, 1678 .setkey = ahash_setkey, 1679 .halg = { 1680 .digestsize = SHA1_DIGEST_SIZE, 1681 .statesize = sizeof(struct caam_export_state), 1682 }, 1683 }, 1684 .alg_type = OP_ALG_ALGSEL_SHA1, 1685 .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC, 1686 }, { 1687 .name = "sha224", 1688 .driver_name = "sha224-caam", 1689 .hmac_name = "hmac(sha224)", 1690 .hmac_driver_name = "hmac-sha224-caam", 1691 .blocksize = SHA224_BLOCK_SIZE, 1692 .template_ahash = { 1693 .init = ahash_init, 1694 .update = ahash_update, 1695 .final = ahash_final, 1696 .finup = ahash_finup, 1697 .digest = ahash_digest, 1698 .export = ahash_export, 1699 .import = ahash_import, 1700 .setkey = ahash_setkey, 1701 .halg = { 1702 .digestsize = SHA224_DIGEST_SIZE, 1703 .statesize = sizeof(struct caam_export_state), 1704 }, 1705 }, 1706 .alg_type = OP_ALG_ALGSEL_SHA224, 1707 .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC, 1708 }, { 1709 .name = "sha256", 1710 .driver_name = "sha256-caam", 1711 .hmac_name = "hmac(sha256)", 1712 .hmac_driver_name = "hmac-sha256-caam", 1713 .blocksize = SHA256_BLOCK_SIZE, 1714 .template_ahash = { 1715 .init = ahash_init, 1716 .update = ahash_update, 1717 .final = ahash_final, 1718 .finup = ahash_finup, 1719 .digest = ahash_digest, 1720 .export = ahash_export, 1721 .import = ahash_import, 1722 .setkey = ahash_setkey, 1723 .halg = { 1724 .digestsize = SHA256_DIGEST_SIZE, 1725 .statesize = sizeof(struct caam_export_state), 1726 }, 1727 }, 1728 .alg_type = OP_ALG_ALGSEL_SHA256, 1729 .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC, 1730 }, { 1731 .name = "sha384", 1732 .driver_name = "sha384-caam", 1733 .hmac_name = "hmac(sha384)", 1734 .hmac_driver_name = "hmac-sha384-caam", 1735 .blocksize = SHA384_BLOCK_SIZE, 1736 .template_ahash = { 1737 .init = ahash_init, 1738 .update = ahash_update, 1739 .final = ahash_final, 1740 .finup = ahash_finup, 1741 .digest = ahash_digest, 1742 .export = ahash_export, 1743 .import = ahash_import, 1744 .setkey = ahash_setkey, 1745 .halg = { 1746 .digestsize = SHA384_DIGEST_SIZE, 1747 .statesize = sizeof(struct caam_export_state), 1748 }, 1749 }, 1750 .alg_type = OP_ALG_ALGSEL_SHA384, 1751 .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC, 1752 }, { 1753 .name = "sha512", 1754 .driver_name = "sha512-caam", 1755 .hmac_name = "hmac(sha512)", 1756 .hmac_driver_name = "hmac-sha512-caam", 1757 .blocksize = SHA512_BLOCK_SIZE, 1758 .template_ahash = { 1759 .init = ahash_init, 1760 .update = ahash_update, 1761 .final = ahash_final, 1762 .finup = ahash_finup, 1763 .digest = ahash_digest, 1764 .export = ahash_export, 1765 .import = ahash_import, 1766 .setkey = ahash_setkey, 1767 .halg = { 1768 .digestsize = SHA512_DIGEST_SIZE, 1769 .statesize = sizeof(struct caam_export_state), 1770 }, 1771 }, 1772 .alg_type = OP_ALG_ALGSEL_SHA512, 1773 .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, 1774 }, { 1775 .name = "md5", 1776 .driver_name = "md5-caam", 1777 .hmac_name = "hmac(md5)", 1778 .hmac_driver_name = "hmac-md5-caam", 1779 .blocksize = MD5_BLOCK_WORDS * 4, 1780 .template_ahash = { 1781 .init = ahash_init, 1782 .update = ahash_update, 1783 .final = ahash_final, 1784 .finup = ahash_finup, 1785 .digest = ahash_digest, 1786 .export = ahash_export, 1787 .import = ahash_import, 1788 .setkey = ahash_setkey, 1789 .halg = { 1790 .digestsize = MD5_DIGEST_SIZE, 1791 .statesize = sizeof(struct caam_export_state), 1792 }, 1793 }, 1794 .alg_type = OP_ALG_ALGSEL_MD5, 1795 .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC, 1796 }, 1797 }; 1798 1799 struct caam_hash_alg { 1800 struct list_head entry; 1801 int alg_type; 1802 int alg_op; 1803 struct ahash_alg ahash_alg; 1804 }; 1805 1806 static int caam_hash_cra_init(struct crypto_tfm *tfm) 1807 { 1808 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 1809 struct crypto_alg *base = tfm->__crt_alg; 1810 struct hash_alg_common *halg = 1811 container_of(base, struct hash_alg_common, base); 1812 struct ahash_alg *alg = 1813 container_of(halg, struct ahash_alg, halg); 1814 struct caam_hash_alg *caam_hash = 1815 container_of(alg, struct caam_hash_alg, ahash_alg); 1816 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1817 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1818 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1819 HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1820 HASH_MSG_LEN + 32, 1821 HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1822 HASH_MSG_LEN + 64, 1823 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1824 1825 /* 1826 * Get a Job ring from Job Ring driver to ensure in-order 1827 * crypto request processing per tfm 1828 */ 1829 ctx->jrdev = caam_jr_alloc(); 1830 if (IS_ERR(ctx->jrdev)) { 1831 pr_err("Job Ring Device allocation for transform failed\n"); 1832 return PTR_ERR(ctx->jrdev); 1833 } 1834 /* copy descriptor header template value */ 1835 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1836 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; 1837 1838 ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> 1839 OP_ALG_ALGSEL_SHIFT]; 1840 1841 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1842 sizeof(struct caam_hash_state)); 1843 return ahash_set_sh_desc(ahash); 1844 } 1845 1846 static void caam_hash_cra_exit(struct crypto_tfm *tfm) 1847 { 1848 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1849 1850 if (ctx->sh_desc_update_dma && 1851 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) 1852 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, 1853 desc_bytes(ctx->sh_desc_update), 1854 DMA_TO_DEVICE); 1855 if (ctx->sh_desc_update_first_dma && 1856 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) 1857 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, 1858 desc_bytes(ctx->sh_desc_update_first), 1859 DMA_TO_DEVICE); 1860 if (ctx->sh_desc_fin_dma && 1861 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) 1862 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, 1863 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE); 1864 if (ctx->sh_desc_digest_dma && 1865 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) 1866 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, 1867 desc_bytes(ctx->sh_desc_digest), 1868 DMA_TO_DEVICE); 1869 1870 caam_jr_free(ctx->jrdev); 1871 } 1872 1873 static void __exit caam_algapi_hash_exit(void) 1874 { 1875 struct caam_hash_alg *t_alg, *n; 1876 1877 if (!hash_list.next) 1878 return; 1879 1880 list_for_each_entry_safe(t_alg, n, &hash_list, entry) { 1881 crypto_unregister_ahash(&t_alg->ahash_alg); 1882 list_del(&t_alg->entry); 1883 kfree(t_alg); 1884 } 1885 } 1886 1887 static struct caam_hash_alg * 1888 caam_hash_alloc(struct caam_hash_template *template, 1889 bool keyed) 1890 { 1891 struct caam_hash_alg *t_alg; 1892 struct ahash_alg *halg; 1893 struct crypto_alg *alg; 1894 1895 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 1896 if (!t_alg) { 1897 pr_err("failed to allocate t_alg\n"); 1898 return ERR_PTR(-ENOMEM); 1899 } 1900 1901 t_alg->ahash_alg = template->template_ahash; 1902 halg = &t_alg->ahash_alg; 1903 alg = &halg->halg.base; 1904 1905 if (keyed) { 1906 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1907 template->hmac_name); 1908 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1909 template->hmac_driver_name); 1910 } else { 1911 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1912 template->name); 1913 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1914 template->driver_name); 1915 t_alg->ahash_alg.setkey = NULL; 1916 } 1917 alg->cra_module = THIS_MODULE; 1918 alg->cra_init = caam_hash_cra_init; 1919 alg->cra_exit = caam_hash_cra_exit; 1920 alg->cra_ctxsize = sizeof(struct caam_hash_ctx); 1921 alg->cra_priority = CAAM_CRA_PRIORITY; 1922 alg->cra_blocksize = template->blocksize; 1923 alg->cra_alignmask = 0; 1924 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH; 1925 alg->cra_type = &crypto_ahash_type; 1926 1927 t_alg->alg_type = template->alg_type; 1928 t_alg->alg_op = template->alg_op; 1929 1930 return t_alg; 1931 } 1932 1933 static int __init caam_algapi_hash_init(void) 1934 { 1935 struct device_node *dev_node; 1936 struct platform_device *pdev; 1937 struct device *ctrldev; 1938 int i = 0, err = 0; 1939 struct caam_drv_private *priv; 1940 unsigned int md_limit = SHA512_DIGEST_SIZE; 1941 u32 cha_inst, cha_vid; 1942 1943 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1944 if (!dev_node) { 1945 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 1946 if (!dev_node) 1947 return -ENODEV; 1948 } 1949 1950 pdev = of_find_device_by_node(dev_node); 1951 if (!pdev) { 1952 of_node_put(dev_node); 1953 return -ENODEV; 1954 } 1955 1956 ctrldev = &pdev->dev; 1957 priv = dev_get_drvdata(ctrldev); 1958 of_node_put(dev_node); 1959 1960 /* 1961 * If priv is NULL, it's probably because the caam driver wasn't 1962 * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 1963 */ 1964 if (!priv) 1965 return -ENODEV; 1966 1967 /* 1968 * Register crypto algorithms the device supports. First, identify 1969 * presence and attributes of MD block. 1970 */ 1971 cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); 1972 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); 1973 1974 /* 1975 * Skip registration of any hashing algorithms if MD block 1976 * is not present. 1977 */ 1978 if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT)) 1979 return -ENODEV; 1980 1981 /* Limit digest size based on LP256 */ 1982 if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256) 1983 md_limit = SHA256_DIGEST_SIZE; 1984 1985 INIT_LIST_HEAD(&hash_list); 1986 1987 /* register crypto algorithms the device supports */ 1988 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 1989 struct caam_hash_alg *t_alg; 1990 struct caam_hash_template *alg = driver_hash + i; 1991 1992 /* If MD size is not supported by device, skip registration */ 1993 if (alg->template_ahash.halg.digestsize > md_limit) 1994 continue; 1995 1996 /* register hmac version */ 1997 t_alg = caam_hash_alloc(alg, true); 1998 if (IS_ERR(t_alg)) { 1999 err = PTR_ERR(t_alg); 2000 pr_warn("%s alg allocation failed\n", alg->driver_name); 2001 continue; 2002 } 2003 2004 err = crypto_register_ahash(&t_alg->ahash_alg); 2005 if (err) { 2006 pr_warn("%s alg registration failed: %d\n", 2007 t_alg->ahash_alg.halg.base.cra_driver_name, 2008 err); 2009 kfree(t_alg); 2010 } else 2011 list_add_tail(&t_alg->entry, &hash_list); 2012 2013 /* register unkeyed version */ 2014 t_alg = caam_hash_alloc(alg, false); 2015 if (IS_ERR(t_alg)) { 2016 err = PTR_ERR(t_alg); 2017 pr_warn("%s alg allocation failed\n", alg->driver_name); 2018 continue; 2019 } 2020 2021 err = crypto_register_ahash(&t_alg->ahash_alg); 2022 if (err) { 2023 pr_warn("%s alg registration failed: %d\n", 2024 t_alg->ahash_alg.halg.base.cra_driver_name, 2025 err); 2026 kfree(t_alg); 2027 } else 2028 list_add_tail(&t_alg->entry, &hash_list); 2029 } 2030 2031 return err; 2032 } 2033 2034 module_init(caam_algapi_hash_init); 2035 module_exit(caam_algapi_hash_exit); 2036 2037 MODULE_LICENSE("GPL"); 2038 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); 2039 MODULE_AUTHOR("Freescale Semiconductor - NMG"); 2040