1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * caam - Freescale FSL CAAM support for ahash functions of crypto API 4 * 5 * Copyright 2011 Freescale Semiconductor, Inc. 6 * Copyright 2018-2019 NXP 7 * 8 * Based on caamalg.c crypto API driver. 9 * 10 * relationship of digest job descriptor or first job descriptor after init to 11 * shared descriptors: 12 * 13 * --------------- --------------- 14 * | JobDesc #1 |-------------------->| ShareDesc | 15 * | *(packet 1) | | (hashKey) | 16 * --------------- | (operation) | 17 * --------------- 18 * 19 * relationship of subsequent job descriptors to shared descriptors: 20 * 21 * --------------- --------------- 22 * | JobDesc #2 |-------------------->| ShareDesc | 23 * | *(packet 2) | |------------->| (hashKey) | 24 * --------------- | |-------->| (operation) | 25 * . | | | (load ctx2) | 26 * . | | --------------- 27 * --------------- | | 28 * | JobDesc #3 |------| | 29 * | *(packet 3) | | 30 * --------------- | 31 * . | 32 * . | 33 * --------------- | 34 * | JobDesc #4 |------------ 35 * | *(packet 4) | 36 * --------------- 37 * 38 * The SharedDesc never changes for a connection unless rekeyed, but 39 * each packet will likely be in a different place. So all we need 40 * to know to process the packet is where the input is, where the 41 * output goes, and what context we want to process with. Context is 42 * in the SharedDesc, packet references in the JobDesc. 43 * 44 * So, a job desc looks like: 45 * 46 * --------------------- 47 * | Header | 48 * | ShareDesc Pointer | 49 * | SEQ_OUT_PTR | 50 * | (output buffer) | 51 * | (output length) | 52 * | SEQ_IN_PTR | 53 * | (input buffer) | 54 * | (input length) | 55 * --------------------- 56 */ 57 58 #include "compat.h" 59 60 #include "regs.h" 61 #include "intern.h" 62 #include "desc_constr.h" 63 #include "jr.h" 64 #include "error.h" 65 #include "sg_sw_sec4.h" 66 #include "key_gen.h" 67 #include "caamhash_desc.h" 68 69 #define CAAM_CRA_PRIORITY 3000 70 71 /* max hash key is max split key size */ 72 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) 73 74 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE 75 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE 76 77 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ 78 CAAM_MAX_HASH_KEY_SIZE) 79 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) 80 81 /* caam context sizes for hashes: running digest + 8 */ 82 #define HASH_MSG_LEN 8 83 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) 84 85 #ifdef DEBUG 86 /* for print_hex_dumps with line references */ 87 #define debug(format, arg...) printk(format, arg) 88 #else 89 #define debug(format, arg...) 90 #endif 91 92 93 static struct list_head hash_list; 94 95 /* ahash per-session context */ 96 struct caam_hash_ctx { 97 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 98 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 99 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 100 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 101 u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned; 102 dma_addr_t sh_desc_update_dma ____cacheline_aligned; 103 dma_addr_t sh_desc_update_first_dma; 104 dma_addr_t sh_desc_fin_dma; 105 dma_addr_t sh_desc_digest_dma; 106 dma_addr_t key_dma; 107 enum dma_data_direction dir; 108 struct device *jrdev; 109 int ctx_len; 110 struct alginfo adata; 111 }; 112 113 /* ahash state */ 114 struct caam_hash_state { 115 dma_addr_t buf_dma; 116 dma_addr_t ctx_dma; 117 int ctx_dma_len; 118 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 119 int buflen_0; 120 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 121 int buflen_1; 122 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; 123 int (*update)(struct ahash_request *req); 124 int (*final)(struct ahash_request *req); 125 int (*finup)(struct ahash_request *req); 126 int current_buf; 127 }; 128 129 struct caam_export_state { 130 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; 131 u8 caam_ctx[MAX_CTX_LEN]; 132 int buflen; 133 int (*update)(struct ahash_request *req); 134 int (*final)(struct ahash_request *req); 135 int (*finup)(struct ahash_request *req); 136 }; 137 138 static inline void switch_buf(struct caam_hash_state *state) 139 { 140 state->current_buf ^= 1; 141 } 142 143 static inline u8 *current_buf(struct caam_hash_state *state) 144 { 145 return state->current_buf ? state->buf_1 : state->buf_0; 146 } 147 148 static inline u8 *alt_buf(struct caam_hash_state *state) 149 { 150 return state->current_buf ? state->buf_0 : state->buf_1; 151 } 152 153 static inline int *current_buflen(struct caam_hash_state *state) 154 { 155 return state->current_buf ? &state->buflen_1 : &state->buflen_0; 156 } 157 158 static inline int *alt_buflen(struct caam_hash_state *state) 159 { 160 return state->current_buf ? &state->buflen_0 : &state->buflen_1; 161 } 162 163 static inline bool is_cmac_aes(u32 algtype) 164 { 165 return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == 166 (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC); 167 } 168 /* Common job descriptor seq in/out ptr routines */ 169 170 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ 171 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, 172 struct caam_hash_state *state, 173 int ctx_len) 174 { 175 state->ctx_dma_len = ctx_len; 176 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, 177 ctx_len, DMA_FROM_DEVICE); 178 if (dma_mapping_error(jrdev, state->ctx_dma)) { 179 dev_err(jrdev, "unable to map ctx\n"); 180 state->ctx_dma = 0; 181 return -ENOMEM; 182 } 183 184 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); 185 186 return 0; 187 } 188 189 /* Map current buffer in state (if length > 0) and put it in link table */ 190 static inline int buf_map_to_sec4_sg(struct device *jrdev, 191 struct sec4_sg_entry *sec4_sg, 192 struct caam_hash_state *state) 193 { 194 int buflen = *current_buflen(state); 195 196 if (!buflen) 197 return 0; 198 199 state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen, 200 DMA_TO_DEVICE); 201 if (dma_mapping_error(jrdev, state->buf_dma)) { 202 dev_err(jrdev, "unable to map buf\n"); 203 state->buf_dma = 0; 204 return -ENOMEM; 205 } 206 207 dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0); 208 209 return 0; 210 } 211 212 /* Map state->caam_ctx, and add it to link table */ 213 static inline int ctx_map_to_sec4_sg(struct device *jrdev, 214 struct caam_hash_state *state, int ctx_len, 215 struct sec4_sg_entry *sec4_sg, u32 flag) 216 { 217 state->ctx_dma_len = ctx_len; 218 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); 219 if (dma_mapping_error(jrdev, state->ctx_dma)) { 220 dev_err(jrdev, "unable to map ctx\n"); 221 state->ctx_dma = 0; 222 return -ENOMEM; 223 } 224 225 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); 226 227 return 0; 228 } 229 230 static int ahash_set_sh_desc(struct crypto_ahash *ahash) 231 { 232 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 233 int digestsize = crypto_ahash_digestsize(ahash); 234 struct device *jrdev = ctx->jrdev; 235 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 236 u32 *desc; 237 238 ctx->adata.key_virt = ctx->key; 239 240 /* ahash_update shared descriptor */ 241 desc = ctx->sh_desc_update; 242 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, 243 ctx->ctx_len, true, ctrlpriv->era); 244 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 245 desc_bytes(desc), ctx->dir); 246 #ifdef DEBUG 247 print_hex_dump(KERN_ERR, 248 "ahash update shdesc@"__stringify(__LINE__)": ", 249 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 250 #endif 251 252 /* ahash_update_first shared descriptor */ 253 desc = ctx->sh_desc_update_first; 254 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 255 ctx->ctx_len, false, ctrlpriv->era); 256 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 257 desc_bytes(desc), ctx->dir); 258 #ifdef DEBUG 259 print_hex_dump(KERN_ERR, 260 "ahash update first shdesc@"__stringify(__LINE__)": ", 261 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 262 #endif 263 264 /* ahash_final shared descriptor */ 265 desc = ctx->sh_desc_fin; 266 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, 267 ctx->ctx_len, true, ctrlpriv->era); 268 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 269 desc_bytes(desc), ctx->dir); 270 #ifdef DEBUG 271 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 272 DUMP_PREFIX_ADDRESS, 16, 4, desc, 273 desc_bytes(desc), 1); 274 #endif 275 276 /* ahash_digest shared descriptor */ 277 desc = ctx->sh_desc_digest; 278 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, 279 ctx->ctx_len, false, ctrlpriv->era); 280 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 281 desc_bytes(desc), ctx->dir); 282 #ifdef DEBUG 283 print_hex_dump(KERN_ERR, 284 "ahash digest shdesc@"__stringify(__LINE__)": ", 285 DUMP_PREFIX_ADDRESS, 16, 4, desc, 286 desc_bytes(desc), 1); 287 #endif 288 289 return 0; 290 } 291 292 static int axcbc_set_sh_desc(struct crypto_ahash *ahash) 293 { 294 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 295 int digestsize = crypto_ahash_digestsize(ahash); 296 struct device *jrdev = ctx->jrdev; 297 u32 *desc; 298 299 /* key is loaded from memory for UPDATE and FINALIZE states */ 300 ctx->adata.key_dma = ctx->key_dma; 301 302 /* shared descriptor for ahash_update */ 303 desc = ctx->sh_desc_update; 304 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, 305 ctx->ctx_len, ctx->ctx_len, 0); 306 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 307 desc_bytes(desc), ctx->dir); 308 print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", 309 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 310 1); 311 312 /* shared descriptor for ahash_{final,finup} */ 313 desc = ctx->sh_desc_fin; 314 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, 315 digestsize, ctx->ctx_len, 0); 316 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 317 desc_bytes(desc), ctx->dir); 318 print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", 319 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 320 1); 321 322 /* key is immediate data for INIT and INITFINAL states */ 323 ctx->adata.key_virt = ctx->key; 324 325 /* shared descriptor for first invocation of ahash_update */ 326 desc = ctx->sh_desc_update_first; 327 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 328 ctx->ctx_len, ctx->key_dma); 329 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 330 desc_bytes(desc), ctx->dir); 331 print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ", 332 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 333 1); 334 335 /* shared descriptor for ahash_digest */ 336 desc = ctx->sh_desc_digest; 337 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, 338 digestsize, ctx->ctx_len, 0); 339 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 340 desc_bytes(desc), ctx->dir); 341 print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", 342 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 343 1); 344 return 0; 345 } 346 347 static int acmac_set_sh_desc(struct crypto_ahash *ahash) 348 { 349 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 350 int digestsize = crypto_ahash_digestsize(ahash); 351 struct device *jrdev = ctx->jrdev; 352 u32 *desc; 353 354 /* shared descriptor for ahash_update */ 355 desc = ctx->sh_desc_update; 356 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, 357 ctx->ctx_len, ctx->ctx_len, 0); 358 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 359 desc_bytes(desc), ctx->dir); 360 print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", 361 DUMP_PREFIX_ADDRESS, 16, 4, desc, 362 desc_bytes(desc), 1); 363 364 /* shared descriptor for ahash_{final,finup} */ 365 desc = ctx->sh_desc_fin; 366 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, 367 digestsize, ctx->ctx_len, 0); 368 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 369 desc_bytes(desc), ctx->dir); 370 print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", 371 DUMP_PREFIX_ADDRESS, 16, 4, desc, 372 desc_bytes(desc), 1); 373 374 /* shared descriptor for first invocation of ahash_update */ 375 desc = ctx->sh_desc_update_first; 376 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 377 ctx->ctx_len, 0); 378 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 379 desc_bytes(desc), ctx->dir); 380 print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ", 381 DUMP_PREFIX_ADDRESS, 16, 4, desc, 382 desc_bytes(desc), 1); 383 384 /* shared descriptor for ahash_digest */ 385 desc = ctx->sh_desc_digest; 386 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, 387 digestsize, ctx->ctx_len, 0); 388 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 389 desc_bytes(desc), ctx->dir); 390 print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", 391 DUMP_PREFIX_ADDRESS, 16, 4, desc, 392 desc_bytes(desc), 1); 393 394 return 0; 395 } 396 397 /* Digest hash size if it is too large */ 398 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, 399 u32 *keylen, u8 *key_out, u32 digestsize) 400 { 401 struct device *jrdev = ctx->jrdev; 402 u32 *desc; 403 struct split_key_result result; 404 dma_addr_t src_dma, dst_dma; 405 int ret; 406 407 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 408 if (!desc) { 409 dev_err(jrdev, "unable to allocate key input memory\n"); 410 return -ENOMEM; 411 } 412 413 init_job_desc(desc, 0); 414 415 src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, 416 DMA_TO_DEVICE); 417 if (dma_mapping_error(jrdev, src_dma)) { 418 dev_err(jrdev, "unable to map key input memory\n"); 419 kfree(desc); 420 return -ENOMEM; 421 } 422 dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, 423 DMA_FROM_DEVICE); 424 if (dma_mapping_error(jrdev, dst_dma)) { 425 dev_err(jrdev, "unable to map key output memory\n"); 426 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 427 kfree(desc); 428 return -ENOMEM; 429 } 430 431 /* Job descriptor to perform unkeyed hash on key_in */ 432 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | 433 OP_ALG_AS_INITFINAL); 434 append_seq_in_ptr(desc, src_dma, *keylen, 0); 435 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | 436 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); 437 append_seq_out_ptr(desc, dst_dma, digestsize, 0); 438 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 439 LDST_SRCDST_BYTE_CONTEXT); 440 441 #ifdef DEBUG 442 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", 443 DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); 444 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 445 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 446 #endif 447 448 result.err = 0; 449 init_completion(&result.completion); 450 451 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); 452 if (!ret) { 453 /* in progress */ 454 wait_for_completion(&result.completion); 455 ret = result.err; 456 #ifdef DEBUG 457 print_hex_dump(KERN_ERR, 458 "digested key@"__stringify(__LINE__)": ", 459 DUMP_PREFIX_ADDRESS, 16, 4, key_in, 460 digestsize, 1); 461 #endif 462 } 463 dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); 464 dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); 465 466 *keylen = digestsize; 467 468 kfree(desc); 469 470 return ret; 471 } 472 473 static int ahash_setkey(struct crypto_ahash *ahash, 474 const u8 *key, unsigned int keylen) 475 { 476 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 477 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 478 int digestsize = crypto_ahash_digestsize(ahash); 479 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 480 int ret; 481 u8 *hashed_key = NULL; 482 483 #ifdef DEBUG 484 printk(KERN_ERR "keylen %d\n", keylen); 485 #endif 486 487 if (keylen > blocksize) { 488 hashed_key = kmalloc_array(digestsize, 489 sizeof(*hashed_key), 490 GFP_KERNEL | GFP_DMA); 491 if (!hashed_key) 492 return -ENOMEM; 493 ret = hash_digest_key(ctx, key, &keylen, hashed_key, 494 digestsize); 495 if (ret) 496 goto bad_free_key; 497 key = hashed_key; 498 } 499 500 /* 501 * If DKP is supported, use it in the shared descriptor to generate 502 * the split key. 503 */ 504 if (ctrlpriv->era >= 6) { 505 ctx->adata.key_inline = true; 506 ctx->adata.keylen = keylen; 507 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 508 OP_ALG_ALGSEL_MASK); 509 510 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) 511 goto bad_free_key; 512 513 memcpy(ctx->key, key, keylen); 514 } else { 515 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, 516 keylen, CAAM_MAX_HASH_KEY_SIZE); 517 if (ret) 518 goto bad_free_key; 519 } 520 521 kfree(hashed_key); 522 return ahash_set_sh_desc(ahash); 523 bad_free_key: 524 kfree(hashed_key); 525 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); 526 return -EINVAL; 527 } 528 529 static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, 530 unsigned int keylen) 531 { 532 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 533 struct device *jrdev = ctx->jrdev; 534 535 memcpy(ctx->key, key, keylen); 536 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); 537 ctx->adata.keylen = keylen; 538 539 print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", 540 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1); 541 542 return axcbc_set_sh_desc(ahash); 543 } 544 545 static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, 546 unsigned int keylen) 547 { 548 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 549 550 /* key is immediate data for all cmac shared descriptors */ 551 ctx->adata.key_virt = key; 552 ctx->adata.keylen = keylen; 553 554 print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ", 555 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 556 557 return acmac_set_sh_desc(ahash); 558 } 559 560 /* 561 * ahash_edesc - s/w-extended ahash descriptor 562 * @sec4_sg_dma: physical mapped address of h/w link table 563 * @src_nents: number of segments in input scatterlist 564 * @sec4_sg_bytes: length of dma mapped sec4_sg space 565 * @hw_desc: the h/w job descriptor followed by any referenced link tables 566 * @sec4_sg: h/w link table 567 */ 568 struct ahash_edesc { 569 dma_addr_t sec4_sg_dma; 570 int src_nents; 571 int sec4_sg_bytes; 572 u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; 573 struct sec4_sg_entry sec4_sg[0]; 574 }; 575 576 static inline void ahash_unmap(struct device *dev, 577 struct ahash_edesc *edesc, 578 struct ahash_request *req, int dst_len) 579 { 580 struct caam_hash_state *state = ahash_request_ctx(req); 581 582 if (edesc->src_nents) 583 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 584 585 if (edesc->sec4_sg_bytes) 586 dma_unmap_single(dev, edesc->sec4_sg_dma, 587 edesc->sec4_sg_bytes, DMA_TO_DEVICE); 588 589 if (state->buf_dma) { 590 dma_unmap_single(dev, state->buf_dma, *current_buflen(state), 591 DMA_TO_DEVICE); 592 state->buf_dma = 0; 593 } 594 } 595 596 static inline void ahash_unmap_ctx(struct device *dev, 597 struct ahash_edesc *edesc, 598 struct ahash_request *req, int dst_len, u32 flag) 599 { 600 struct caam_hash_state *state = ahash_request_ctx(req); 601 602 if (state->ctx_dma) { 603 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); 604 state->ctx_dma = 0; 605 } 606 ahash_unmap(dev, edesc, req, dst_len); 607 } 608 609 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, 610 void *context) 611 { 612 struct ahash_request *req = context; 613 struct ahash_edesc *edesc; 614 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 615 int digestsize = crypto_ahash_digestsize(ahash); 616 struct caam_hash_state *state = ahash_request_ctx(req); 617 #ifdef DEBUG 618 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 619 620 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 621 #endif 622 623 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 624 if (err) 625 caam_jr_strstatus(jrdev, err); 626 627 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 628 memcpy(req->result, state->caam_ctx, digestsize); 629 kfree(edesc); 630 631 #ifdef DEBUG 632 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 633 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 634 ctx->ctx_len, 1); 635 #endif 636 637 req->base.complete(&req->base, err); 638 } 639 640 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, 641 void *context) 642 { 643 struct ahash_request *req = context; 644 struct ahash_edesc *edesc; 645 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 646 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 647 struct caam_hash_state *state = ahash_request_ctx(req); 648 #ifdef DEBUG 649 int digestsize = crypto_ahash_digestsize(ahash); 650 651 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 652 #endif 653 654 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 655 if (err) 656 caam_jr_strstatus(jrdev, err); 657 658 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 659 switch_buf(state); 660 kfree(edesc); 661 662 #ifdef DEBUG 663 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 664 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 665 ctx->ctx_len, 1); 666 if (req->result) 667 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 668 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 669 digestsize, 1); 670 #endif 671 672 req->base.complete(&req->base, err); 673 } 674 675 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, 676 void *context) 677 { 678 struct ahash_request *req = context; 679 struct ahash_edesc *edesc; 680 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 681 int digestsize = crypto_ahash_digestsize(ahash); 682 struct caam_hash_state *state = ahash_request_ctx(req); 683 #ifdef DEBUG 684 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 685 686 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 687 #endif 688 689 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 690 if (err) 691 caam_jr_strstatus(jrdev, err); 692 693 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 694 memcpy(req->result, state->caam_ctx, digestsize); 695 kfree(edesc); 696 697 #ifdef DEBUG 698 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 699 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 700 ctx->ctx_len, 1); 701 #endif 702 703 req->base.complete(&req->base, err); 704 } 705 706 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, 707 void *context) 708 { 709 struct ahash_request *req = context; 710 struct ahash_edesc *edesc; 711 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 712 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 713 struct caam_hash_state *state = ahash_request_ctx(req); 714 #ifdef DEBUG 715 int digestsize = crypto_ahash_digestsize(ahash); 716 717 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 718 #endif 719 720 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 721 if (err) 722 caam_jr_strstatus(jrdev, err); 723 724 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); 725 switch_buf(state); 726 kfree(edesc); 727 728 #ifdef DEBUG 729 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 730 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 731 ctx->ctx_len, 1); 732 if (req->result) 733 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 734 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 735 digestsize, 1); 736 #endif 737 738 req->base.complete(&req->base, err); 739 } 740 741 /* 742 * Allocate an enhanced descriptor, which contains the hardware descriptor 743 * and space for hardware scatter table containing sg_num entries. 744 */ 745 static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, 746 int sg_num, u32 *sh_desc, 747 dma_addr_t sh_desc_dma, 748 gfp_t flags) 749 { 750 struct ahash_edesc *edesc; 751 unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); 752 753 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); 754 if (!edesc) { 755 dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); 756 return NULL; 757 } 758 759 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), 760 HDR_SHARE_DEFER | HDR_REVERSE); 761 762 return edesc; 763 } 764 765 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, 766 struct ahash_edesc *edesc, 767 struct ahash_request *req, int nents, 768 unsigned int first_sg, 769 unsigned int first_bytes, size_t to_hash) 770 { 771 dma_addr_t src_dma; 772 u32 options; 773 774 if (nents > 1 || first_sg) { 775 struct sec4_sg_entry *sg = edesc->sec4_sg; 776 unsigned int sgsize = sizeof(*sg) * (first_sg + nents); 777 778 sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); 779 780 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); 781 if (dma_mapping_error(ctx->jrdev, src_dma)) { 782 dev_err(ctx->jrdev, "unable to map S/G table\n"); 783 return -ENOMEM; 784 } 785 786 edesc->sec4_sg_bytes = sgsize; 787 edesc->sec4_sg_dma = src_dma; 788 options = LDST_SGF; 789 } else { 790 src_dma = sg_dma_address(req->src); 791 options = 0; 792 } 793 794 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, 795 options); 796 797 return 0; 798 } 799 800 /* submit update job descriptor */ 801 static int ahash_update_ctx(struct ahash_request *req) 802 { 803 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 804 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 805 struct caam_hash_state *state = ahash_request_ctx(req); 806 struct device *jrdev = ctx->jrdev; 807 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 808 GFP_KERNEL : GFP_ATOMIC; 809 u8 *buf = current_buf(state); 810 int *buflen = current_buflen(state); 811 u8 *next_buf = alt_buf(state); 812 int blocksize = crypto_ahash_blocksize(ahash); 813 int *next_buflen = alt_buflen(state), last_buflen; 814 int in_len = *buflen + req->nbytes, to_hash; 815 u32 *desc; 816 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; 817 struct ahash_edesc *edesc; 818 int ret = 0; 819 820 last_buflen = *next_buflen; 821 *next_buflen = in_len & (blocksize - 1); 822 to_hash = in_len - *next_buflen; 823 824 /* 825 * For XCBC and CMAC, if to_hash is multiple of block size, 826 * keep last block in internal buffer 827 */ 828 if ((is_xcbc_aes(ctx->adata.algtype) || 829 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 830 (*next_buflen == 0)) { 831 *next_buflen = blocksize; 832 to_hash -= blocksize; 833 } 834 835 if (to_hash) { 836 src_nents = sg_nents_for_len(req->src, 837 req->nbytes - (*next_buflen)); 838 if (src_nents < 0) { 839 dev_err(jrdev, "Invalid number of src SG.\n"); 840 return src_nents; 841 } 842 843 if (src_nents) { 844 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 845 DMA_TO_DEVICE); 846 if (!mapped_nents) { 847 dev_err(jrdev, "unable to DMA map source\n"); 848 return -ENOMEM; 849 } 850 } else { 851 mapped_nents = 0; 852 } 853 854 sec4_sg_src_index = 1 + (*buflen ? 1 : 0); 855 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 856 sizeof(struct sec4_sg_entry); 857 858 /* 859 * allocate space for base edesc and hw desc commands, 860 * link tables 861 */ 862 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 863 ctx->sh_desc_update, 864 ctx->sh_desc_update_dma, flags); 865 if (!edesc) { 866 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 867 return -ENOMEM; 868 } 869 870 edesc->src_nents = src_nents; 871 edesc->sec4_sg_bytes = sec4_sg_bytes; 872 873 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 874 edesc->sec4_sg, DMA_BIDIRECTIONAL); 875 if (ret) 876 goto unmap_ctx; 877 878 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 879 if (ret) 880 goto unmap_ctx; 881 882 if (mapped_nents) { 883 sg_to_sec4_sg_last(req->src, mapped_nents, 884 edesc->sec4_sg + sec4_sg_src_index, 885 0); 886 if (*next_buflen) 887 scatterwalk_map_and_copy(next_buf, req->src, 888 to_hash - *buflen, 889 *next_buflen, 0); 890 } else { 891 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 892 1); 893 } 894 895 desc = edesc->hw_desc; 896 897 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 898 sec4_sg_bytes, 899 DMA_TO_DEVICE); 900 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 901 dev_err(jrdev, "unable to map S/G table\n"); 902 ret = -ENOMEM; 903 goto unmap_ctx; 904 } 905 906 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + 907 to_hash, LDST_SGF); 908 909 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 910 911 #ifdef DEBUG 912 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 913 DUMP_PREFIX_ADDRESS, 16, 4, desc, 914 desc_bytes(desc), 1); 915 #endif 916 917 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); 918 if (ret) 919 goto unmap_ctx; 920 921 ret = -EINPROGRESS; 922 } else if (*next_buflen) { 923 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 924 req->nbytes, 0); 925 *buflen = *next_buflen; 926 *next_buflen = last_buflen; 927 } 928 #ifdef DEBUG 929 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 930 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 931 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 932 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 933 *next_buflen, 1); 934 #endif 935 936 return ret; 937 unmap_ctx: 938 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 939 kfree(edesc); 940 return ret; 941 } 942 943 static int ahash_final_ctx(struct ahash_request *req) 944 { 945 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 946 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 947 struct caam_hash_state *state = ahash_request_ctx(req); 948 struct device *jrdev = ctx->jrdev; 949 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 950 GFP_KERNEL : GFP_ATOMIC; 951 int buflen = *current_buflen(state); 952 u32 *desc; 953 int sec4_sg_bytes, sec4_sg_src_index; 954 int digestsize = crypto_ahash_digestsize(ahash); 955 struct ahash_edesc *edesc; 956 int ret; 957 958 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 959 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); 960 961 /* allocate space for base edesc and hw desc commands, link tables */ 962 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, 963 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 964 flags); 965 if (!edesc) 966 return -ENOMEM; 967 968 desc = edesc->hw_desc; 969 970 edesc->sec4_sg_bytes = sec4_sg_bytes; 971 972 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 973 edesc->sec4_sg, DMA_BIDIRECTIONAL); 974 if (ret) 975 goto unmap_ctx; 976 977 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 978 if (ret) 979 goto unmap_ctx; 980 981 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); 982 983 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 984 sec4_sg_bytes, DMA_TO_DEVICE); 985 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 986 dev_err(jrdev, "unable to map S/G table\n"); 987 ret = -ENOMEM; 988 goto unmap_ctx; 989 } 990 991 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, 992 LDST_SGF); 993 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 994 995 #ifdef DEBUG 996 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 997 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 998 #endif 999 1000 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 1001 if (ret) 1002 goto unmap_ctx; 1003 1004 return -EINPROGRESS; 1005 unmap_ctx: 1006 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 1007 kfree(edesc); 1008 return ret; 1009 } 1010 1011 static int ahash_finup_ctx(struct ahash_request *req) 1012 { 1013 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1014 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1015 struct caam_hash_state *state = ahash_request_ctx(req); 1016 struct device *jrdev = ctx->jrdev; 1017 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1018 GFP_KERNEL : GFP_ATOMIC; 1019 int buflen = *current_buflen(state); 1020 u32 *desc; 1021 int sec4_sg_src_index; 1022 int src_nents, mapped_nents; 1023 int digestsize = crypto_ahash_digestsize(ahash); 1024 struct ahash_edesc *edesc; 1025 int ret; 1026 1027 src_nents = sg_nents_for_len(req->src, req->nbytes); 1028 if (src_nents < 0) { 1029 dev_err(jrdev, "Invalid number of src SG.\n"); 1030 return src_nents; 1031 } 1032 1033 if (src_nents) { 1034 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1035 DMA_TO_DEVICE); 1036 if (!mapped_nents) { 1037 dev_err(jrdev, "unable to DMA map source\n"); 1038 return -ENOMEM; 1039 } 1040 } else { 1041 mapped_nents = 0; 1042 } 1043 1044 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 1045 1046 /* allocate space for base edesc and hw desc commands, link tables */ 1047 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 1048 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 1049 flags); 1050 if (!edesc) { 1051 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1052 return -ENOMEM; 1053 } 1054 1055 desc = edesc->hw_desc; 1056 1057 edesc->src_nents = src_nents; 1058 1059 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 1060 edesc->sec4_sg, DMA_BIDIRECTIONAL); 1061 if (ret) 1062 goto unmap_ctx; 1063 1064 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 1065 if (ret) 1066 goto unmap_ctx; 1067 1068 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1069 sec4_sg_src_index, ctx->ctx_len + buflen, 1070 req->nbytes); 1071 if (ret) 1072 goto unmap_ctx; 1073 1074 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 1075 1076 #ifdef DEBUG 1077 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1078 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1079 #endif 1080 1081 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 1082 if (ret) 1083 goto unmap_ctx; 1084 1085 return -EINPROGRESS; 1086 unmap_ctx: 1087 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 1088 kfree(edesc); 1089 return ret; 1090 } 1091 1092 static int ahash_digest(struct ahash_request *req) 1093 { 1094 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1095 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1096 struct caam_hash_state *state = ahash_request_ctx(req); 1097 struct device *jrdev = ctx->jrdev; 1098 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1099 GFP_KERNEL : GFP_ATOMIC; 1100 u32 *desc; 1101 int digestsize = crypto_ahash_digestsize(ahash); 1102 int src_nents, mapped_nents; 1103 struct ahash_edesc *edesc; 1104 int ret; 1105 1106 state->buf_dma = 0; 1107 1108 src_nents = sg_nents_for_len(req->src, req->nbytes); 1109 if (src_nents < 0) { 1110 dev_err(jrdev, "Invalid number of src SG.\n"); 1111 return src_nents; 1112 } 1113 1114 if (src_nents) { 1115 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1116 DMA_TO_DEVICE); 1117 if (!mapped_nents) { 1118 dev_err(jrdev, "unable to map source for DMA\n"); 1119 return -ENOMEM; 1120 } 1121 } else { 1122 mapped_nents = 0; 1123 } 1124 1125 /* allocate space for base edesc and hw desc commands, link tables */ 1126 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0, 1127 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1128 flags); 1129 if (!edesc) { 1130 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1131 return -ENOMEM; 1132 } 1133 1134 edesc->src_nents = src_nents; 1135 1136 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1137 req->nbytes); 1138 if (ret) { 1139 ahash_unmap(jrdev, edesc, req, digestsize); 1140 kfree(edesc); 1141 return ret; 1142 } 1143 1144 desc = edesc->hw_desc; 1145 1146 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1147 if (ret) { 1148 ahash_unmap(jrdev, edesc, req, digestsize); 1149 kfree(edesc); 1150 return -ENOMEM; 1151 } 1152 1153 #ifdef DEBUG 1154 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1155 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1156 #endif 1157 1158 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1159 if (!ret) { 1160 ret = -EINPROGRESS; 1161 } else { 1162 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1163 kfree(edesc); 1164 } 1165 1166 return ret; 1167 } 1168 1169 /* submit ahash final if it the first job descriptor */ 1170 static int ahash_final_no_ctx(struct ahash_request *req) 1171 { 1172 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1173 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1174 struct caam_hash_state *state = ahash_request_ctx(req); 1175 struct device *jrdev = ctx->jrdev; 1176 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1177 GFP_KERNEL : GFP_ATOMIC; 1178 u8 *buf = current_buf(state); 1179 int buflen = *current_buflen(state); 1180 u32 *desc; 1181 int digestsize = crypto_ahash_digestsize(ahash); 1182 struct ahash_edesc *edesc; 1183 int ret; 1184 1185 /* allocate space for base edesc and hw desc commands, link tables */ 1186 edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest, 1187 ctx->sh_desc_digest_dma, flags); 1188 if (!edesc) 1189 return -ENOMEM; 1190 1191 desc = edesc->hw_desc; 1192 1193 if (buflen) { 1194 state->buf_dma = dma_map_single(jrdev, buf, buflen, 1195 DMA_TO_DEVICE); 1196 if (dma_mapping_error(jrdev, state->buf_dma)) { 1197 dev_err(jrdev, "unable to map src\n"); 1198 goto unmap; 1199 } 1200 1201 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1202 } 1203 1204 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1205 if (ret) 1206 goto unmap; 1207 1208 #ifdef DEBUG 1209 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1210 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1211 #endif 1212 1213 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1214 if (!ret) { 1215 ret = -EINPROGRESS; 1216 } else { 1217 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1218 kfree(edesc); 1219 } 1220 1221 return ret; 1222 unmap: 1223 ahash_unmap(jrdev, edesc, req, digestsize); 1224 kfree(edesc); 1225 return -ENOMEM; 1226 1227 } 1228 1229 /* submit ahash update if it the first job descriptor after update */ 1230 static int ahash_update_no_ctx(struct ahash_request *req) 1231 { 1232 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1233 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1234 struct caam_hash_state *state = ahash_request_ctx(req); 1235 struct device *jrdev = ctx->jrdev; 1236 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1237 GFP_KERNEL : GFP_ATOMIC; 1238 u8 *buf = current_buf(state); 1239 int *buflen = current_buflen(state); 1240 int blocksize = crypto_ahash_blocksize(ahash); 1241 u8 *next_buf = alt_buf(state); 1242 int *next_buflen = alt_buflen(state); 1243 int in_len = *buflen + req->nbytes, to_hash; 1244 int sec4_sg_bytes, src_nents, mapped_nents; 1245 struct ahash_edesc *edesc; 1246 u32 *desc; 1247 int ret = 0; 1248 1249 *next_buflen = in_len & (blocksize - 1); 1250 to_hash = in_len - *next_buflen; 1251 1252 /* 1253 * For XCBC and CMAC, if to_hash is multiple of block size, 1254 * keep last block in internal buffer 1255 */ 1256 if ((is_xcbc_aes(ctx->adata.algtype) || 1257 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 1258 (*next_buflen == 0)) { 1259 *next_buflen = blocksize; 1260 to_hash -= blocksize; 1261 } 1262 1263 if (to_hash) { 1264 src_nents = sg_nents_for_len(req->src, 1265 req->nbytes - *next_buflen); 1266 if (src_nents < 0) { 1267 dev_err(jrdev, "Invalid number of src SG.\n"); 1268 return src_nents; 1269 } 1270 1271 if (src_nents) { 1272 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1273 DMA_TO_DEVICE); 1274 if (!mapped_nents) { 1275 dev_err(jrdev, "unable to DMA map source\n"); 1276 return -ENOMEM; 1277 } 1278 } else { 1279 mapped_nents = 0; 1280 } 1281 1282 sec4_sg_bytes = (1 + mapped_nents) * 1283 sizeof(struct sec4_sg_entry); 1284 1285 /* 1286 * allocate space for base edesc and hw desc commands, 1287 * link tables 1288 */ 1289 edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, 1290 ctx->sh_desc_update_first, 1291 ctx->sh_desc_update_first_dma, 1292 flags); 1293 if (!edesc) { 1294 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1295 return -ENOMEM; 1296 } 1297 1298 edesc->src_nents = src_nents; 1299 edesc->sec4_sg_bytes = sec4_sg_bytes; 1300 1301 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1302 if (ret) 1303 goto unmap_ctx; 1304 1305 sg_to_sec4_sg_last(req->src, mapped_nents, 1306 edesc->sec4_sg + 1, 0); 1307 1308 if (*next_buflen) { 1309 scatterwalk_map_and_copy(next_buf, req->src, 1310 to_hash - *buflen, 1311 *next_buflen, 0); 1312 } 1313 1314 desc = edesc->hw_desc; 1315 1316 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1317 sec4_sg_bytes, 1318 DMA_TO_DEVICE); 1319 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1320 dev_err(jrdev, "unable to map S/G table\n"); 1321 ret = -ENOMEM; 1322 goto unmap_ctx; 1323 } 1324 1325 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); 1326 1327 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1328 if (ret) 1329 goto unmap_ctx; 1330 1331 #ifdef DEBUG 1332 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1333 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1334 desc_bytes(desc), 1); 1335 #endif 1336 1337 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1338 if (ret) 1339 goto unmap_ctx; 1340 1341 ret = -EINPROGRESS; 1342 state->update = ahash_update_ctx; 1343 state->finup = ahash_finup_ctx; 1344 state->final = ahash_final_ctx; 1345 } else if (*next_buflen) { 1346 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 1347 req->nbytes, 0); 1348 *buflen = *next_buflen; 1349 *next_buflen = 0; 1350 } 1351 #ifdef DEBUG 1352 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 1353 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1354 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1355 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1356 *next_buflen, 1); 1357 #endif 1358 1359 return ret; 1360 unmap_ctx: 1361 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1362 kfree(edesc); 1363 return ret; 1364 } 1365 1366 /* submit ahash finup if it the first job descriptor after update */ 1367 static int ahash_finup_no_ctx(struct ahash_request *req) 1368 { 1369 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1370 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1371 struct caam_hash_state *state = ahash_request_ctx(req); 1372 struct device *jrdev = ctx->jrdev; 1373 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1374 GFP_KERNEL : GFP_ATOMIC; 1375 int buflen = *current_buflen(state); 1376 u32 *desc; 1377 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; 1378 int digestsize = crypto_ahash_digestsize(ahash); 1379 struct ahash_edesc *edesc; 1380 int ret; 1381 1382 src_nents = sg_nents_for_len(req->src, req->nbytes); 1383 if (src_nents < 0) { 1384 dev_err(jrdev, "Invalid number of src SG.\n"); 1385 return src_nents; 1386 } 1387 1388 if (src_nents) { 1389 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1390 DMA_TO_DEVICE); 1391 if (!mapped_nents) { 1392 dev_err(jrdev, "unable to DMA map source\n"); 1393 return -ENOMEM; 1394 } 1395 } else { 1396 mapped_nents = 0; 1397 } 1398 1399 sec4_sg_src_index = 2; 1400 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 1401 sizeof(struct sec4_sg_entry); 1402 1403 /* allocate space for base edesc and hw desc commands, link tables */ 1404 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 1405 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1406 flags); 1407 if (!edesc) { 1408 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1409 return -ENOMEM; 1410 } 1411 1412 desc = edesc->hw_desc; 1413 1414 edesc->src_nents = src_nents; 1415 edesc->sec4_sg_bytes = sec4_sg_bytes; 1416 1417 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1418 if (ret) 1419 goto unmap; 1420 1421 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, 1422 req->nbytes); 1423 if (ret) { 1424 dev_err(jrdev, "unable to map S/G table\n"); 1425 goto unmap; 1426 } 1427 1428 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1429 if (ret) 1430 goto unmap; 1431 1432 #ifdef DEBUG 1433 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1434 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1435 #endif 1436 1437 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1438 if (!ret) { 1439 ret = -EINPROGRESS; 1440 } else { 1441 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1442 kfree(edesc); 1443 } 1444 1445 return ret; 1446 unmap: 1447 ahash_unmap(jrdev, edesc, req, digestsize); 1448 kfree(edesc); 1449 return -ENOMEM; 1450 1451 } 1452 1453 /* submit first update job descriptor after init */ 1454 static int ahash_update_first(struct ahash_request *req) 1455 { 1456 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1457 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1458 struct caam_hash_state *state = ahash_request_ctx(req); 1459 struct device *jrdev = ctx->jrdev; 1460 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1461 GFP_KERNEL : GFP_ATOMIC; 1462 u8 *next_buf = alt_buf(state); 1463 int *next_buflen = alt_buflen(state); 1464 int to_hash; 1465 int blocksize = crypto_ahash_blocksize(ahash); 1466 u32 *desc; 1467 int src_nents, mapped_nents; 1468 struct ahash_edesc *edesc; 1469 int ret = 0; 1470 1471 *next_buflen = req->nbytes & (blocksize - 1); 1472 to_hash = req->nbytes - *next_buflen; 1473 1474 /* 1475 * For XCBC and CMAC, if to_hash is multiple of block size, 1476 * keep last block in internal buffer 1477 */ 1478 if ((is_xcbc_aes(ctx->adata.algtype) || 1479 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 1480 (*next_buflen == 0)) { 1481 *next_buflen = blocksize; 1482 to_hash -= blocksize; 1483 } 1484 1485 if (to_hash) { 1486 src_nents = sg_nents_for_len(req->src, 1487 req->nbytes - *next_buflen); 1488 if (src_nents < 0) { 1489 dev_err(jrdev, "Invalid number of src SG.\n"); 1490 return src_nents; 1491 } 1492 1493 if (src_nents) { 1494 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1495 DMA_TO_DEVICE); 1496 if (!mapped_nents) { 1497 dev_err(jrdev, "unable to map source for DMA\n"); 1498 return -ENOMEM; 1499 } 1500 } else { 1501 mapped_nents = 0; 1502 } 1503 1504 /* 1505 * allocate space for base edesc and hw desc commands, 1506 * link tables 1507 */ 1508 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 1509 mapped_nents : 0, 1510 ctx->sh_desc_update_first, 1511 ctx->sh_desc_update_first_dma, 1512 flags); 1513 if (!edesc) { 1514 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1515 return -ENOMEM; 1516 } 1517 1518 edesc->src_nents = src_nents; 1519 1520 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1521 to_hash); 1522 if (ret) 1523 goto unmap_ctx; 1524 1525 if (*next_buflen) 1526 scatterwalk_map_and_copy(next_buf, req->src, to_hash, 1527 *next_buflen, 0); 1528 1529 desc = edesc->hw_desc; 1530 1531 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1532 if (ret) 1533 goto unmap_ctx; 1534 1535 #ifdef DEBUG 1536 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1537 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1538 desc_bytes(desc), 1); 1539 #endif 1540 1541 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1542 if (ret) 1543 goto unmap_ctx; 1544 1545 ret = -EINPROGRESS; 1546 state->update = ahash_update_ctx; 1547 state->finup = ahash_finup_ctx; 1548 state->final = ahash_final_ctx; 1549 } else if (*next_buflen) { 1550 state->update = ahash_update_no_ctx; 1551 state->finup = ahash_finup_no_ctx; 1552 state->final = ahash_final_no_ctx; 1553 scatterwalk_map_and_copy(next_buf, req->src, 0, 1554 req->nbytes, 0); 1555 switch_buf(state); 1556 } 1557 #ifdef DEBUG 1558 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1559 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1560 *next_buflen, 1); 1561 #endif 1562 1563 return ret; 1564 unmap_ctx: 1565 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1566 kfree(edesc); 1567 return ret; 1568 } 1569 1570 static int ahash_finup_first(struct ahash_request *req) 1571 { 1572 return ahash_digest(req); 1573 } 1574 1575 static int ahash_init(struct ahash_request *req) 1576 { 1577 struct caam_hash_state *state = ahash_request_ctx(req); 1578 1579 state->update = ahash_update_first; 1580 state->finup = ahash_finup_first; 1581 state->final = ahash_final_no_ctx; 1582 1583 state->ctx_dma = 0; 1584 state->ctx_dma_len = 0; 1585 state->current_buf = 0; 1586 state->buf_dma = 0; 1587 state->buflen_0 = 0; 1588 state->buflen_1 = 0; 1589 1590 return 0; 1591 } 1592 1593 static int ahash_update(struct ahash_request *req) 1594 { 1595 struct caam_hash_state *state = ahash_request_ctx(req); 1596 1597 return state->update(req); 1598 } 1599 1600 static int ahash_finup(struct ahash_request *req) 1601 { 1602 struct caam_hash_state *state = ahash_request_ctx(req); 1603 1604 return state->finup(req); 1605 } 1606 1607 static int ahash_final(struct ahash_request *req) 1608 { 1609 struct caam_hash_state *state = ahash_request_ctx(req); 1610 1611 return state->final(req); 1612 } 1613 1614 static int ahash_export(struct ahash_request *req, void *out) 1615 { 1616 struct caam_hash_state *state = ahash_request_ctx(req); 1617 struct caam_export_state *export = out; 1618 int len; 1619 u8 *buf; 1620 1621 if (state->current_buf) { 1622 buf = state->buf_1; 1623 len = state->buflen_1; 1624 } else { 1625 buf = state->buf_0; 1626 len = state->buflen_0; 1627 } 1628 1629 memcpy(export->buf, buf, len); 1630 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); 1631 export->buflen = len; 1632 export->update = state->update; 1633 export->final = state->final; 1634 export->finup = state->finup; 1635 1636 return 0; 1637 } 1638 1639 static int ahash_import(struct ahash_request *req, const void *in) 1640 { 1641 struct caam_hash_state *state = ahash_request_ctx(req); 1642 const struct caam_export_state *export = in; 1643 1644 memset(state, 0, sizeof(*state)); 1645 memcpy(state->buf_0, export->buf, export->buflen); 1646 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); 1647 state->buflen_0 = export->buflen; 1648 state->update = export->update; 1649 state->final = export->final; 1650 state->finup = export->finup; 1651 1652 return 0; 1653 } 1654 1655 struct caam_hash_template { 1656 char name[CRYPTO_MAX_ALG_NAME]; 1657 char driver_name[CRYPTO_MAX_ALG_NAME]; 1658 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1659 char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1660 unsigned int blocksize; 1661 struct ahash_alg template_ahash; 1662 u32 alg_type; 1663 }; 1664 1665 /* ahash descriptors */ 1666 static struct caam_hash_template driver_hash[] = { 1667 { 1668 .name = "sha1", 1669 .driver_name = "sha1-caam", 1670 .hmac_name = "hmac(sha1)", 1671 .hmac_driver_name = "hmac-sha1-caam", 1672 .blocksize = SHA1_BLOCK_SIZE, 1673 .template_ahash = { 1674 .init = ahash_init, 1675 .update = ahash_update, 1676 .final = ahash_final, 1677 .finup = ahash_finup, 1678 .digest = ahash_digest, 1679 .export = ahash_export, 1680 .import = ahash_import, 1681 .setkey = ahash_setkey, 1682 .halg = { 1683 .digestsize = SHA1_DIGEST_SIZE, 1684 .statesize = sizeof(struct caam_export_state), 1685 }, 1686 }, 1687 .alg_type = OP_ALG_ALGSEL_SHA1, 1688 }, { 1689 .name = "sha224", 1690 .driver_name = "sha224-caam", 1691 .hmac_name = "hmac(sha224)", 1692 .hmac_driver_name = "hmac-sha224-caam", 1693 .blocksize = SHA224_BLOCK_SIZE, 1694 .template_ahash = { 1695 .init = ahash_init, 1696 .update = ahash_update, 1697 .final = ahash_final, 1698 .finup = ahash_finup, 1699 .digest = ahash_digest, 1700 .export = ahash_export, 1701 .import = ahash_import, 1702 .setkey = ahash_setkey, 1703 .halg = { 1704 .digestsize = SHA224_DIGEST_SIZE, 1705 .statesize = sizeof(struct caam_export_state), 1706 }, 1707 }, 1708 .alg_type = OP_ALG_ALGSEL_SHA224, 1709 }, { 1710 .name = "sha256", 1711 .driver_name = "sha256-caam", 1712 .hmac_name = "hmac(sha256)", 1713 .hmac_driver_name = "hmac-sha256-caam", 1714 .blocksize = SHA256_BLOCK_SIZE, 1715 .template_ahash = { 1716 .init = ahash_init, 1717 .update = ahash_update, 1718 .final = ahash_final, 1719 .finup = ahash_finup, 1720 .digest = ahash_digest, 1721 .export = ahash_export, 1722 .import = ahash_import, 1723 .setkey = ahash_setkey, 1724 .halg = { 1725 .digestsize = SHA256_DIGEST_SIZE, 1726 .statesize = sizeof(struct caam_export_state), 1727 }, 1728 }, 1729 .alg_type = OP_ALG_ALGSEL_SHA256, 1730 }, { 1731 .name = "sha384", 1732 .driver_name = "sha384-caam", 1733 .hmac_name = "hmac(sha384)", 1734 .hmac_driver_name = "hmac-sha384-caam", 1735 .blocksize = SHA384_BLOCK_SIZE, 1736 .template_ahash = { 1737 .init = ahash_init, 1738 .update = ahash_update, 1739 .final = ahash_final, 1740 .finup = ahash_finup, 1741 .digest = ahash_digest, 1742 .export = ahash_export, 1743 .import = ahash_import, 1744 .setkey = ahash_setkey, 1745 .halg = { 1746 .digestsize = SHA384_DIGEST_SIZE, 1747 .statesize = sizeof(struct caam_export_state), 1748 }, 1749 }, 1750 .alg_type = OP_ALG_ALGSEL_SHA384, 1751 }, { 1752 .name = "sha512", 1753 .driver_name = "sha512-caam", 1754 .hmac_name = "hmac(sha512)", 1755 .hmac_driver_name = "hmac-sha512-caam", 1756 .blocksize = SHA512_BLOCK_SIZE, 1757 .template_ahash = { 1758 .init = ahash_init, 1759 .update = ahash_update, 1760 .final = ahash_final, 1761 .finup = ahash_finup, 1762 .digest = ahash_digest, 1763 .export = ahash_export, 1764 .import = ahash_import, 1765 .setkey = ahash_setkey, 1766 .halg = { 1767 .digestsize = SHA512_DIGEST_SIZE, 1768 .statesize = sizeof(struct caam_export_state), 1769 }, 1770 }, 1771 .alg_type = OP_ALG_ALGSEL_SHA512, 1772 }, { 1773 .name = "md5", 1774 .driver_name = "md5-caam", 1775 .hmac_name = "hmac(md5)", 1776 .hmac_driver_name = "hmac-md5-caam", 1777 .blocksize = MD5_BLOCK_WORDS * 4, 1778 .template_ahash = { 1779 .init = ahash_init, 1780 .update = ahash_update, 1781 .final = ahash_final, 1782 .finup = ahash_finup, 1783 .digest = ahash_digest, 1784 .export = ahash_export, 1785 .import = ahash_import, 1786 .setkey = ahash_setkey, 1787 .halg = { 1788 .digestsize = MD5_DIGEST_SIZE, 1789 .statesize = sizeof(struct caam_export_state), 1790 }, 1791 }, 1792 .alg_type = OP_ALG_ALGSEL_MD5, 1793 }, { 1794 .hmac_name = "xcbc(aes)", 1795 .hmac_driver_name = "xcbc-aes-caam", 1796 .blocksize = AES_BLOCK_SIZE, 1797 .template_ahash = { 1798 .init = ahash_init, 1799 .update = ahash_update, 1800 .final = ahash_final, 1801 .finup = ahash_finup, 1802 .digest = ahash_digest, 1803 .export = ahash_export, 1804 .import = ahash_import, 1805 .setkey = axcbc_setkey, 1806 .halg = { 1807 .digestsize = AES_BLOCK_SIZE, 1808 .statesize = sizeof(struct caam_export_state), 1809 }, 1810 }, 1811 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, 1812 }, { 1813 .hmac_name = "cmac(aes)", 1814 .hmac_driver_name = "cmac-aes-caam", 1815 .blocksize = AES_BLOCK_SIZE, 1816 .template_ahash = { 1817 .init = ahash_init, 1818 .update = ahash_update, 1819 .final = ahash_final, 1820 .finup = ahash_finup, 1821 .digest = ahash_digest, 1822 .export = ahash_export, 1823 .import = ahash_import, 1824 .setkey = acmac_setkey, 1825 .halg = { 1826 .digestsize = AES_BLOCK_SIZE, 1827 .statesize = sizeof(struct caam_export_state), 1828 }, 1829 }, 1830 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, 1831 }, 1832 }; 1833 1834 struct caam_hash_alg { 1835 struct list_head entry; 1836 int alg_type; 1837 struct ahash_alg ahash_alg; 1838 }; 1839 1840 static int caam_hash_cra_init(struct crypto_tfm *tfm) 1841 { 1842 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 1843 struct crypto_alg *base = tfm->__crt_alg; 1844 struct hash_alg_common *halg = 1845 container_of(base, struct hash_alg_common, base); 1846 struct ahash_alg *alg = 1847 container_of(halg, struct ahash_alg, halg); 1848 struct caam_hash_alg *caam_hash = 1849 container_of(alg, struct caam_hash_alg, ahash_alg); 1850 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1851 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1852 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1853 HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1854 HASH_MSG_LEN + 32, 1855 HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1856 HASH_MSG_LEN + 64, 1857 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1858 dma_addr_t dma_addr; 1859 struct caam_drv_private *priv; 1860 1861 /* 1862 * Get a Job ring from Job Ring driver to ensure in-order 1863 * crypto request processing per tfm 1864 */ 1865 ctx->jrdev = caam_jr_alloc(); 1866 if (IS_ERR(ctx->jrdev)) { 1867 pr_err("Job Ring Device allocation for transform failed\n"); 1868 return PTR_ERR(ctx->jrdev); 1869 } 1870 1871 priv = dev_get_drvdata(ctx->jrdev->parent); 1872 1873 if (is_xcbc_aes(caam_hash->alg_type)) { 1874 ctx->dir = DMA_TO_DEVICE; 1875 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 1876 ctx->ctx_len = 48; 1877 1878 ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, 1879 ARRAY_SIZE(ctx->key), 1880 DMA_BIDIRECTIONAL, 1881 DMA_ATTR_SKIP_CPU_SYNC); 1882 if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { 1883 dev_err(ctx->jrdev, "unable to map key\n"); 1884 caam_jr_free(ctx->jrdev); 1885 return -ENOMEM; 1886 } 1887 } else if (is_cmac_aes(caam_hash->alg_type)) { 1888 ctx->dir = DMA_TO_DEVICE; 1889 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 1890 ctx->ctx_len = 32; 1891 } else { 1892 ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 1893 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1894 ctx->ctx_len = runninglen[(ctx->adata.algtype & 1895 OP_ALG_ALGSEL_SUBMASK) >> 1896 OP_ALG_ALGSEL_SHIFT]; 1897 } 1898 1899 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, 1900 offsetof(struct caam_hash_ctx, key), 1901 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1902 if (dma_mapping_error(ctx->jrdev, dma_addr)) { 1903 dev_err(ctx->jrdev, "unable to map shared descriptors\n"); 1904 1905 if (is_xcbc_aes(caam_hash->alg_type)) 1906 dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, 1907 ARRAY_SIZE(ctx->key), 1908 DMA_BIDIRECTIONAL, 1909 DMA_ATTR_SKIP_CPU_SYNC); 1910 1911 caam_jr_free(ctx->jrdev); 1912 return -ENOMEM; 1913 } 1914 1915 ctx->sh_desc_update_dma = dma_addr; 1916 ctx->sh_desc_update_first_dma = dma_addr + 1917 offsetof(struct caam_hash_ctx, 1918 sh_desc_update_first); 1919 ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, 1920 sh_desc_fin); 1921 ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, 1922 sh_desc_digest); 1923 1924 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1925 sizeof(struct caam_hash_state)); 1926 1927 /* 1928 * For keyed hash algorithms shared descriptors 1929 * will be created later in setkey() callback 1930 */ 1931 return alg->setkey ? 0 : ahash_set_sh_desc(ahash); 1932 } 1933 1934 static void caam_hash_cra_exit(struct crypto_tfm *tfm) 1935 { 1936 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1937 1938 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, 1939 offsetof(struct caam_hash_ctx, key), 1940 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1941 if (is_xcbc_aes(ctx->adata.algtype)) 1942 dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, 1943 ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL, 1944 DMA_ATTR_SKIP_CPU_SYNC); 1945 caam_jr_free(ctx->jrdev); 1946 } 1947 1948 static void __exit caam_algapi_hash_exit(void) 1949 { 1950 struct caam_hash_alg *t_alg, *n; 1951 1952 if (!hash_list.next) 1953 return; 1954 1955 list_for_each_entry_safe(t_alg, n, &hash_list, entry) { 1956 crypto_unregister_ahash(&t_alg->ahash_alg); 1957 list_del(&t_alg->entry); 1958 kfree(t_alg); 1959 } 1960 } 1961 1962 static struct caam_hash_alg * 1963 caam_hash_alloc(struct caam_hash_template *template, 1964 bool keyed) 1965 { 1966 struct caam_hash_alg *t_alg; 1967 struct ahash_alg *halg; 1968 struct crypto_alg *alg; 1969 1970 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); 1971 if (!t_alg) { 1972 pr_err("failed to allocate t_alg\n"); 1973 return ERR_PTR(-ENOMEM); 1974 } 1975 1976 t_alg->ahash_alg = template->template_ahash; 1977 halg = &t_alg->ahash_alg; 1978 alg = &halg->halg.base; 1979 1980 if (keyed) { 1981 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1982 template->hmac_name); 1983 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1984 template->hmac_driver_name); 1985 } else { 1986 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 1987 template->name); 1988 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 1989 template->driver_name); 1990 t_alg->ahash_alg.setkey = NULL; 1991 } 1992 alg->cra_module = THIS_MODULE; 1993 alg->cra_init = caam_hash_cra_init; 1994 alg->cra_exit = caam_hash_cra_exit; 1995 alg->cra_ctxsize = sizeof(struct caam_hash_ctx); 1996 alg->cra_priority = CAAM_CRA_PRIORITY; 1997 alg->cra_blocksize = template->blocksize; 1998 alg->cra_alignmask = 0; 1999 alg->cra_flags = CRYPTO_ALG_ASYNC; 2000 2001 t_alg->alg_type = template->alg_type; 2002 2003 return t_alg; 2004 } 2005 2006 static int __init caam_algapi_hash_init(void) 2007 { 2008 struct device_node *dev_node; 2009 struct platform_device *pdev; 2010 struct device *ctrldev; 2011 int i = 0, err = 0; 2012 struct caam_drv_private *priv; 2013 unsigned int md_limit = SHA512_DIGEST_SIZE; 2014 u32 md_inst, md_vid; 2015 2016 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2017 if (!dev_node) { 2018 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 2019 if (!dev_node) 2020 return -ENODEV; 2021 } 2022 2023 pdev = of_find_device_by_node(dev_node); 2024 if (!pdev) { 2025 of_node_put(dev_node); 2026 return -ENODEV; 2027 } 2028 2029 ctrldev = &pdev->dev; 2030 priv = dev_get_drvdata(ctrldev); 2031 of_node_put(dev_node); 2032 2033 /* 2034 * If priv is NULL, it's probably because the caam driver wasn't 2035 * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 2036 */ 2037 if (!priv) 2038 return -ENODEV; 2039 2040 /* 2041 * Register crypto algorithms the device supports. First, identify 2042 * presence and attributes of MD block. 2043 */ 2044 if (priv->era < 10) { 2045 md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) & 2046 CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2047 md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & 2048 CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; 2049 } else { 2050 u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha); 2051 2052 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; 2053 md_inst = mdha & CHA_VER_NUM_MASK; 2054 } 2055 2056 /* 2057 * Skip registration of any hashing algorithms if MD block 2058 * is not present. 2059 */ 2060 if (!md_inst) 2061 return -ENODEV; 2062 2063 /* Limit digest size based on LP256 */ 2064 if (md_vid == CHA_VER_VID_MD_LP256) 2065 md_limit = SHA256_DIGEST_SIZE; 2066 2067 INIT_LIST_HEAD(&hash_list); 2068 2069 /* register crypto algorithms the device supports */ 2070 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { 2071 struct caam_hash_alg *t_alg; 2072 struct caam_hash_template *alg = driver_hash + i; 2073 2074 /* If MD size is not supported by device, skip registration */ 2075 if (is_mdha(alg->alg_type) && 2076 alg->template_ahash.halg.digestsize > md_limit) 2077 continue; 2078 2079 /* register hmac version */ 2080 t_alg = caam_hash_alloc(alg, true); 2081 if (IS_ERR(t_alg)) { 2082 err = PTR_ERR(t_alg); 2083 pr_warn("%s alg allocation failed\n", 2084 alg->hmac_driver_name); 2085 continue; 2086 } 2087 2088 err = crypto_register_ahash(&t_alg->ahash_alg); 2089 if (err) { 2090 pr_warn("%s alg registration failed: %d\n", 2091 t_alg->ahash_alg.halg.base.cra_driver_name, 2092 err); 2093 kfree(t_alg); 2094 } else 2095 list_add_tail(&t_alg->entry, &hash_list); 2096 2097 if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES) 2098 continue; 2099 2100 /* register unkeyed version */ 2101 t_alg = caam_hash_alloc(alg, false); 2102 if (IS_ERR(t_alg)) { 2103 err = PTR_ERR(t_alg); 2104 pr_warn("%s alg allocation failed\n", alg->driver_name); 2105 continue; 2106 } 2107 2108 err = crypto_register_ahash(&t_alg->ahash_alg); 2109 if (err) { 2110 pr_warn("%s alg registration failed: %d\n", 2111 t_alg->ahash_alg.halg.base.cra_driver_name, 2112 err); 2113 kfree(t_alg); 2114 } else 2115 list_add_tail(&t_alg->entry, &hash_list); 2116 } 2117 2118 return err; 2119 } 2120 2121 module_init(caam_algapi_hash_init); 2122 module_exit(caam_algapi_hash_exit); 2123 2124 MODULE_LICENSE("GPL"); 2125 MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API"); 2126 MODULE_AUTHOR("Freescale Semiconductor - NMG"); 2127