1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * caam - Freescale FSL CAAM support for ahash functions of crypto API 4 * 5 * Copyright 2011 Freescale Semiconductor, Inc. 6 * Copyright 2018-2019 NXP 7 * 8 * Based on caamalg.c crypto API driver. 9 * 10 * relationship of digest job descriptor or first job descriptor after init to 11 * shared descriptors: 12 * 13 * --------------- --------------- 14 * | JobDesc #1 |-------------------->| ShareDesc | 15 * | *(packet 1) | | (hashKey) | 16 * --------------- | (operation) | 17 * --------------- 18 * 19 * relationship of subsequent job descriptors to shared descriptors: 20 * 21 * --------------- --------------- 22 * | JobDesc #2 |-------------------->| ShareDesc | 23 * | *(packet 2) | |------------->| (hashKey) | 24 * --------------- | |-------->| (operation) | 25 * . | | | (load ctx2) | 26 * . | | --------------- 27 * --------------- | | 28 * | JobDesc #3 |------| | 29 * | *(packet 3) | | 30 * --------------- | 31 * . | 32 * . | 33 * --------------- | 34 * | JobDesc #4 |------------ 35 * | *(packet 4) | 36 * --------------- 37 * 38 * The SharedDesc never changes for a connection unless rekeyed, but 39 * each packet will likely be in a different place. So all we need 40 * to know to process the packet is where the input is, where the 41 * output goes, and what context we want to process with. Context is 42 * in the SharedDesc, packet references in the JobDesc. 43 * 44 * So, a job desc looks like: 45 * 46 * --------------------- 47 * | Header | 48 * | ShareDesc Pointer | 49 * | SEQ_OUT_PTR | 50 * | (output buffer) | 51 * | (output length) | 52 * | SEQ_IN_PTR | 53 * | (input buffer) | 54 * | (input length) | 55 * --------------------- 56 */ 57 58 #include "compat.h" 59 60 #include "regs.h" 61 #include "intern.h" 62 #include "desc_constr.h" 63 #include "jr.h" 64 #include "error.h" 65 #include "sg_sw_sec4.h" 66 #include "key_gen.h" 67 #include "caamhash_desc.h" 68 69 #define CAAM_CRA_PRIORITY 3000 70 71 /* max hash key is max split key size */ 72 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) 73 74 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE 75 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE 76 77 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ 78 CAAM_MAX_HASH_KEY_SIZE) 79 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) 80 81 /* caam context sizes for hashes: running digest + 8 */ 82 #define HASH_MSG_LEN 8 83 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) 84 85 #ifdef DEBUG 86 /* for print_hex_dumps with line references */ 87 #define debug(format, arg...) printk(format, arg) 88 #else 89 #define debug(format, arg...) 
90 #endif 91 92 93 static struct list_head hash_list; 94 95 /* ahash per-session context */ 96 struct caam_hash_ctx { 97 u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 98 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 99 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 100 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; 101 u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned; 102 dma_addr_t sh_desc_update_dma ____cacheline_aligned; 103 dma_addr_t sh_desc_update_first_dma; 104 dma_addr_t sh_desc_fin_dma; 105 dma_addr_t sh_desc_digest_dma; 106 dma_addr_t key_dma; 107 enum dma_data_direction dir; 108 struct device *jrdev; 109 int ctx_len; 110 struct alginfo adata; 111 }; 112 113 /* ahash state */ 114 struct caam_hash_state { 115 dma_addr_t buf_dma; 116 dma_addr_t ctx_dma; 117 int ctx_dma_len; 118 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 119 int buflen_0; 120 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; 121 int buflen_1; 122 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; 123 int (*update)(struct ahash_request *req); 124 int (*final)(struct ahash_request *req); 125 int (*finup)(struct ahash_request *req); 126 int current_buf; 127 }; 128 129 struct caam_export_state { 130 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; 131 u8 caam_ctx[MAX_CTX_LEN]; 132 int buflen; 133 int (*update)(struct ahash_request *req); 134 int (*final)(struct ahash_request *req); 135 int (*finup)(struct ahash_request *req); 136 }; 137 138 static inline void switch_buf(struct caam_hash_state *state) 139 { 140 state->current_buf ^= 1; 141 } 142 143 static inline u8 *current_buf(struct caam_hash_state *state) 144 { 145 return state->current_buf ? state->buf_1 : state->buf_0; 146 } 147 148 static inline u8 *alt_buf(struct caam_hash_state *state) 149 { 150 return state->current_buf ? state->buf_0 : state->buf_1; 151 } 152 153 static inline int *current_buflen(struct caam_hash_state *state) 154 { 155 return state->current_buf ? &state->buflen_1 : &state->buflen_0; 156 } 157 158 static inline int *alt_buflen(struct caam_hash_state *state) 159 { 160 return state->current_buf ? 
&state->buflen_0 : &state->buflen_1; 161 } 162 163 static inline bool is_cmac_aes(u32 algtype) 164 { 165 return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == 166 (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC); 167 } 168 /* Common job descriptor seq in/out ptr routines */ 169 170 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ 171 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, 172 struct caam_hash_state *state, 173 int ctx_len) 174 { 175 state->ctx_dma_len = ctx_len; 176 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, 177 ctx_len, DMA_FROM_DEVICE); 178 if (dma_mapping_error(jrdev, state->ctx_dma)) { 179 dev_err(jrdev, "unable to map ctx\n"); 180 state->ctx_dma = 0; 181 return -ENOMEM; 182 } 183 184 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); 185 186 return 0; 187 } 188 189 /* Map current buffer in state (if length > 0) and put it in link table */ 190 static inline int buf_map_to_sec4_sg(struct device *jrdev, 191 struct sec4_sg_entry *sec4_sg, 192 struct caam_hash_state *state) 193 { 194 int buflen = *current_buflen(state); 195 196 if (!buflen) 197 return 0; 198 199 state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen, 200 DMA_TO_DEVICE); 201 if (dma_mapping_error(jrdev, state->buf_dma)) { 202 dev_err(jrdev, "unable to map buf\n"); 203 state->buf_dma = 0; 204 return -ENOMEM; 205 } 206 207 dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0); 208 209 return 0; 210 } 211 212 /* Map state->caam_ctx, and add it to link table */ 213 static inline int ctx_map_to_sec4_sg(struct device *jrdev, 214 struct caam_hash_state *state, int ctx_len, 215 struct sec4_sg_entry *sec4_sg, u32 flag) 216 { 217 state->ctx_dma_len = ctx_len; 218 state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); 219 if (dma_mapping_error(jrdev, state->ctx_dma)) { 220 dev_err(jrdev, "unable to map ctx\n"); 221 state->ctx_dma = 0; 222 return -ENOMEM; 223 } 224 225 dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); 226 227 return 0; 228 } 229 230 static int ahash_set_sh_desc(struct crypto_ahash *ahash) 231 { 232 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 233 int digestsize = crypto_ahash_digestsize(ahash); 234 struct device *jrdev = ctx->jrdev; 235 struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 236 u32 *desc; 237 238 ctx->adata.key_virt = ctx->key; 239 240 /* ahash_update shared descriptor */ 241 desc = ctx->sh_desc_update; 242 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, 243 ctx->ctx_len, true, ctrlpriv->era); 244 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 245 desc_bytes(desc), ctx->dir); 246 #ifdef DEBUG 247 print_hex_dump(KERN_ERR, 248 "ahash update shdesc@"__stringify(__LINE__)": ", 249 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 250 #endif 251 252 /* ahash_update_first shared descriptor */ 253 desc = ctx->sh_desc_update_first; 254 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 255 ctx->ctx_len, false, ctrlpriv->era); 256 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 257 desc_bytes(desc), ctx->dir); 258 #ifdef DEBUG 259 print_hex_dump(KERN_ERR, 260 "ahash update first shdesc@"__stringify(__LINE__)": ", 261 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 262 #endif 263 264 /* ahash_final shared descriptor */ 265 desc = ctx->sh_desc_fin; 266 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, 267 ctx->ctx_len, true, ctrlpriv->era); 268 dma_sync_single_for_device(jrdev, 
ctx->sh_desc_fin_dma, 269 desc_bytes(desc), ctx->dir); 270 #ifdef DEBUG 271 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 272 DUMP_PREFIX_ADDRESS, 16, 4, desc, 273 desc_bytes(desc), 1); 274 #endif 275 276 /* ahash_digest shared descriptor */ 277 desc = ctx->sh_desc_digest; 278 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, 279 ctx->ctx_len, false, ctrlpriv->era); 280 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 281 desc_bytes(desc), ctx->dir); 282 #ifdef DEBUG 283 print_hex_dump(KERN_ERR, 284 "ahash digest shdesc@"__stringify(__LINE__)": ", 285 DUMP_PREFIX_ADDRESS, 16, 4, desc, 286 desc_bytes(desc), 1); 287 #endif 288 289 return 0; 290 } 291 292 static int axcbc_set_sh_desc(struct crypto_ahash *ahash) 293 { 294 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 295 int digestsize = crypto_ahash_digestsize(ahash); 296 struct device *jrdev = ctx->jrdev; 297 u32 *desc; 298 299 /* key is loaded from memory for UPDATE and FINALIZE states */ 300 ctx->adata.key_dma = ctx->key_dma; 301 302 /* shared descriptor for ahash_update */ 303 desc = ctx->sh_desc_update; 304 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, 305 ctx->ctx_len, ctx->ctx_len, 0); 306 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 307 desc_bytes(desc), ctx->dir); 308 print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", 309 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 310 1); 311 312 /* shared descriptor for ahash_{final,finup} */ 313 desc = ctx->sh_desc_fin; 314 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, 315 digestsize, ctx->ctx_len, 0); 316 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 317 desc_bytes(desc), ctx->dir); 318 print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", 319 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 320 1); 321 322 /* key is immediate data for INIT and INITFINAL states */ 323 ctx->adata.key_virt = ctx->key; 324 325 /* shared descriptor for first invocation of ahash_update */ 326 desc = ctx->sh_desc_update_first; 327 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 328 ctx->ctx_len, ctx->key_dma); 329 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 330 desc_bytes(desc), ctx->dir); 331 print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ", 332 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 333 1); 334 335 /* shared descriptor for ahash_digest */ 336 desc = ctx->sh_desc_digest; 337 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, 338 digestsize, ctx->ctx_len, 0); 339 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 340 desc_bytes(desc), ctx->dir); 341 print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", 342 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 343 1); 344 return 0; 345 } 346 347 static int acmac_set_sh_desc(struct crypto_ahash *ahash) 348 { 349 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 350 int digestsize = crypto_ahash_digestsize(ahash); 351 struct device *jrdev = ctx->jrdev; 352 u32 *desc; 353 354 /* shared descriptor for ahash_update */ 355 desc = ctx->sh_desc_update; 356 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, 357 ctx->ctx_len, ctx->ctx_len, 0); 358 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 359 desc_bytes(desc), ctx->dir); 360 print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", 361 DUMP_PREFIX_ADDRESS, 16, 4, desc, 362 desc_bytes(desc), 1); 363 364 /* shared 
descriptor for ahash_{final,finup} */ 365 desc = ctx->sh_desc_fin; 366 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, 367 digestsize, ctx->ctx_len, 0); 368 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 369 desc_bytes(desc), ctx->dir); 370 print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", 371 DUMP_PREFIX_ADDRESS, 16, 4, desc, 372 desc_bytes(desc), 1); 373 374 /* shared descriptor for first invocation of ahash_update */ 375 desc = ctx->sh_desc_update_first; 376 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, 377 ctx->ctx_len, 0); 378 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 379 desc_bytes(desc), ctx->dir); 380 print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ", 381 DUMP_PREFIX_ADDRESS, 16, 4, desc, 382 desc_bytes(desc), 1); 383 384 /* shared descriptor for ahash_digest */ 385 desc = ctx->sh_desc_digest; 386 cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, 387 digestsize, ctx->ctx_len, 0); 388 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 389 desc_bytes(desc), ctx->dir); 390 print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", 391 DUMP_PREFIX_ADDRESS, 16, 4, desc, 392 desc_bytes(desc), 1); 393 394 return 0; 395 } 396 397 /* Digest hash size if it is too large */ 398 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, 399 u32 digestsize) 400 { 401 struct device *jrdev = ctx->jrdev; 402 u32 *desc; 403 struct split_key_result result; 404 dma_addr_t key_dma; 405 int ret; 406 407 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 408 if (!desc) { 409 dev_err(jrdev, "unable to allocate key input memory\n"); 410 return -ENOMEM; 411 } 412 413 init_job_desc(desc, 0); 414 415 key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL); 416 if (dma_mapping_error(jrdev, key_dma)) { 417 dev_err(jrdev, "unable to map key memory\n"); 418 kfree(desc); 419 return -ENOMEM; 420 } 421 422 /* Job descriptor to perform unkeyed hash on key_in */ 423 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | 424 OP_ALG_AS_INITFINAL); 425 append_seq_in_ptr(desc, key_dma, *keylen, 0); 426 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | 427 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); 428 append_seq_out_ptr(desc, key_dma, digestsize, 0); 429 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | 430 LDST_SRCDST_BYTE_CONTEXT); 431 432 #ifdef DEBUG 433 print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", 434 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); 435 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 436 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 437 #endif 438 439 result.err = 0; 440 init_completion(&result.completion); 441 442 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); 443 if (!ret) { 444 /* in progress */ 445 wait_for_completion(&result.completion); 446 ret = result.err; 447 #ifdef DEBUG 448 print_hex_dump(KERN_ERR, 449 "digested key@"__stringify(__LINE__)": ", 450 DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); 451 #endif 452 } 453 dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); 454 455 *keylen = digestsize; 456 457 kfree(desc); 458 459 return ret; 460 } 461 462 static int ahash_setkey(struct crypto_ahash *ahash, 463 const u8 *key, unsigned int keylen) 464 { 465 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 466 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 467 int digestsize = crypto_ahash_digestsize(ahash); 468 struct 
caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 469 int ret; 470 u8 *hashed_key = NULL; 471 472 #ifdef DEBUG 473 printk(KERN_ERR "keylen %d\n", keylen); 474 #endif 475 476 if (keylen > blocksize) { 477 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); 478 if (!hashed_key) 479 return -ENOMEM; 480 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); 481 if (ret) 482 goto bad_free_key; 483 key = hashed_key; 484 } 485 486 /* 487 * If DKP is supported, use it in the shared descriptor to generate 488 * the split key. 489 */ 490 if (ctrlpriv->era >= 6) { 491 ctx->adata.key_inline = true; 492 ctx->adata.keylen = keylen; 493 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 494 OP_ALG_ALGSEL_MASK); 495 496 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) 497 goto bad_free_key; 498 499 memcpy(ctx->key, key, keylen); 500 } else { 501 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, 502 keylen, CAAM_MAX_HASH_KEY_SIZE); 503 if (ret) 504 goto bad_free_key; 505 } 506 507 kfree(hashed_key); 508 return ahash_set_sh_desc(ahash); 509 bad_free_key: 510 kfree(hashed_key); 511 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); 512 return -EINVAL; 513 } 514 515 static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, 516 unsigned int keylen) 517 { 518 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 519 struct device *jrdev = ctx->jrdev; 520 521 memcpy(ctx->key, key, keylen); 522 dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); 523 ctx->adata.keylen = keylen; 524 525 print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", 526 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1); 527 528 return axcbc_set_sh_desc(ahash); 529 } 530 531 static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, 532 unsigned int keylen) 533 { 534 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 535 536 /* key is immediate data for all cmac shared descriptors */ 537 ctx->adata.key_virt = key; 538 ctx->adata.keylen = keylen; 539 540 print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ", 541 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 542 543 return acmac_set_sh_desc(ahash); 544 } 545 546 /* 547 * ahash_edesc - s/w-extended ahash descriptor 548 * @sec4_sg_dma: physical mapped address of h/w link table 549 * @src_nents: number of segments in input scatterlist 550 * @sec4_sg_bytes: length of dma mapped sec4_sg space 551 * @hw_desc: the h/w job descriptor followed by any referenced link tables 552 * @sec4_sg: h/w link table 553 */ 554 struct ahash_edesc { 555 dma_addr_t sec4_sg_dma; 556 int src_nents; 557 int sec4_sg_bytes; 558 u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned; 559 struct sec4_sg_entry sec4_sg[0]; 560 }; 561 562 static inline void ahash_unmap(struct device *dev, 563 struct ahash_edesc *edesc, 564 struct ahash_request *req, int dst_len) 565 { 566 struct caam_hash_state *state = ahash_request_ctx(req); 567 568 if (edesc->src_nents) 569 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 570 571 if (edesc->sec4_sg_bytes) 572 dma_unmap_single(dev, edesc->sec4_sg_dma, 573 edesc->sec4_sg_bytes, DMA_TO_DEVICE); 574 575 if (state->buf_dma) { 576 dma_unmap_single(dev, state->buf_dma, *current_buflen(state), 577 DMA_TO_DEVICE); 578 state->buf_dma = 0; 579 } 580 } 581 582 static inline void ahash_unmap_ctx(struct device *dev, 583 struct ahash_edesc *edesc, 584 struct ahash_request *req, int dst_len, u32 flag) 585 { 586 struct caam_hash_state *state = ahash_request_ctx(req); 587 
588 if (state->ctx_dma) { 589 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); 590 state->ctx_dma = 0; 591 } 592 ahash_unmap(dev, edesc, req, dst_len); 593 } 594 595 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, 596 void *context) 597 { 598 struct ahash_request *req = context; 599 struct ahash_edesc *edesc; 600 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 601 int digestsize = crypto_ahash_digestsize(ahash); 602 struct caam_hash_state *state = ahash_request_ctx(req); 603 #ifdef DEBUG 604 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 605 606 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 607 #endif 608 609 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 610 if (err) 611 caam_jr_strstatus(jrdev, err); 612 613 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 614 memcpy(req->result, state->caam_ctx, digestsize); 615 kfree(edesc); 616 617 #ifdef DEBUG 618 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 619 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 620 ctx->ctx_len, 1); 621 #endif 622 623 req->base.complete(&req->base, err); 624 } 625 626 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, 627 void *context) 628 { 629 struct ahash_request *req = context; 630 struct ahash_edesc *edesc; 631 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 632 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 633 struct caam_hash_state *state = ahash_request_ctx(req); 634 #ifdef DEBUG 635 int digestsize = crypto_ahash_digestsize(ahash); 636 637 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 638 #endif 639 640 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 641 if (err) 642 caam_jr_strstatus(jrdev, err); 643 644 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 645 switch_buf(state); 646 kfree(edesc); 647 648 #ifdef DEBUG 649 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 650 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 651 ctx->ctx_len, 1); 652 if (req->result) 653 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 654 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 655 digestsize, 1); 656 #endif 657 658 req->base.complete(&req->base, err); 659 } 660 661 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, 662 void *context) 663 { 664 struct ahash_request *req = context; 665 struct ahash_edesc *edesc; 666 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 667 int digestsize = crypto_ahash_digestsize(ahash); 668 struct caam_hash_state *state = ahash_request_ctx(req); 669 #ifdef DEBUG 670 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 671 672 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 673 #endif 674 675 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 676 if (err) 677 caam_jr_strstatus(jrdev, err); 678 679 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 680 memcpy(req->result, state->caam_ctx, digestsize); 681 kfree(edesc); 682 683 #ifdef DEBUG 684 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 685 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 686 ctx->ctx_len, 1); 687 #endif 688 689 req->base.complete(&req->base, err); 690 } 691 692 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, 693 void *context) 694 { 695 struct ahash_request *req = context; 696 struct ahash_edesc *edesc; 697 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 698 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 699 struct caam_hash_state 
*state = ahash_request_ctx(req); 700 #ifdef DEBUG 701 int digestsize = crypto_ahash_digestsize(ahash); 702 703 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 704 #endif 705 706 edesc = container_of(desc, struct ahash_edesc, hw_desc[0]); 707 if (err) 708 caam_jr_strstatus(jrdev, err); 709 710 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); 711 switch_buf(state); 712 kfree(edesc); 713 714 #ifdef DEBUG 715 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", 716 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, 717 ctx->ctx_len, 1); 718 if (req->result) 719 print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", 720 DUMP_PREFIX_ADDRESS, 16, 4, req->result, 721 digestsize, 1); 722 #endif 723 724 req->base.complete(&req->base, err); 725 } 726 727 /* 728 * Allocate an enhanced descriptor, which contains the hardware descriptor 729 * and space for hardware scatter table containing sg_num entries. 730 */ 731 static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx, 732 int sg_num, u32 *sh_desc, 733 dma_addr_t sh_desc_dma, 734 gfp_t flags) 735 { 736 struct ahash_edesc *edesc; 737 unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry); 738 739 edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags); 740 if (!edesc) { 741 dev_err(ctx->jrdev, "could not allocate extended descriptor\n"); 742 return NULL; 743 } 744 745 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), 746 HDR_SHARE_DEFER | HDR_REVERSE); 747 748 return edesc; 749 } 750 751 static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, 752 struct ahash_edesc *edesc, 753 struct ahash_request *req, int nents, 754 unsigned int first_sg, 755 unsigned int first_bytes, size_t to_hash) 756 { 757 dma_addr_t src_dma; 758 u32 options; 759 760 if (nents > 1 || first_sg) { 761 struct sec4_sg_entry *sg = edesc->sec4_sg; 762 unsigned int sgsize = sizeof(*sg) * (first_sg + nents); 763 764 sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0); 765 766 src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); 767 if (dma_mapping_error(ctx->jrdev, src_dma)) { 768 dev_err(ctx->jrdev, "unable to map S/G table\n"); 769 return -ENOMEM; 770 } 771 772 edesc->sec4_sg_bytes = sgsize; 773 edesc->sec4_sg_dma = src_dma; 774 options = LDST_SGF; 775 } else { 776 src_dma = sg_dma_address(req->src); 777 options = 0; 778 } 779 780 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, 781 options); 782 783 return 0; 784 } 785 786 /* submit update job descriptor */ 787 static int ahash_update_ctx(struct ahash_request *req) 788 { 789 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 790 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 791 struct caam_hash_state *state = ahash_request_ctx(req); 792 struct device *jrdev = ctx->jrdev; 793 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
794 GFP_KERNEL : GFP_ATOMIC; 795 u8 *buf = current_buf(state); 796 int *buflen = current_buflen(state); 797 u8 *next_buf = alt_buf(state); 798 int blocksize = crypto_ahash_blocksize(ahash); 799 int *next_buflen = alt_buflen(state), last_buflen; 800 int in_len = *buflen + req->nbytes, to_hash; 801 u32 *desc; 802 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; 803 struct ahash_edesc *edesc; 804 int ret = 0; 805 806 last_buflen = *next_buflen; 807 *next_buflen = in_len & (blocksize - 1); 808 to_hash = in_len - *next_buflen; 809 810 /* 811 * For XCBC and CMAC, if to_hash is multiple of block size, 812 * keep last block in internal buffer 813 */ 814 if ((is_xcbc_aes(ctx->adata.algtype) || 815 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 816 (*next_buflen == 0)) { 817 *next_buflen = blocksize; 818 to_hash -= blocksize; 819 } 820 821 if (to_hash) { 822 src_nents = sg_nents_for_len(req->src, 823 req->nbytes - (*next_buflen)); 824 if (src_nents < 0) { 825 dev_err(jrdev, "Invalid number of src SG.\n"); 826 return src_nents; 827 } 828 829 if (src_nents) { 830 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 831 DMA_TO_DEVICE); 832 if (!mapped_nents) { 833 dev_err(jrdev, "unable to DMA map source\n"); 834 return -ENOMEM; 835 } 836 } else { 837 mapped_nents = 0; 838 } 839 840 sec4_sg_src_index = 1 + (*buflen ? 1 : 0); 841 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 842 sizeof(struct sec4_sg_entry); 843 844 /* 845 * allocate space for base edesc and hw desc commands, 846 * link tables 847 */ 848 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 849 ctx->sh_desc_update, 850 ctx->sh_desc_update_dma, flags); 851 if (!edesc) { 852 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 853 return -ENOMEM; 854 } 855 856 edesc->src_nents = src_nents; 857 edesc->sec4_sg_bytes = sec4_sg_bytes; 858 859 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 860 edesc->sec4_sg, DMA_BIDIRECTIONAL); 861 if (ret) 862 goto unmap_ctx; 863 864 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 865 if (ret) 866 goto unmap_ctx; 867 868 if (mapped_nents) { 869 sg_to_sec4_sg_last(req->src, mapped_nents, 870 edesc->sec4_sg + sec4_sg_src_index, 871 0); 872 if (*next_buflen) 873 scatterwalk_map_and_copy(next_buf, req->src, 874 to_hash - *buflen, 875 *next_buflen, 0); 876 } else { 877 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 878 1); 879 } 880 881 desc = edesc->hw_desc; 882 883 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 884 sec4_sg_bytes, 885 DMA_TO_DEVICE); 886 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 887 dev_err(jrdev, "unable to map S/G table\n"); 888 ret = -ENOMEM; 889 goto unmap_ctx; 890 } 891 892 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + 893 to_hash, LDST_SGF); 894 895 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); 896 897 #ifdef DEBUG 898 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 899 DUMP_PREFIX_ADDRESS, 16, 4, desc, 900 desc_bytes(desc), 1); 901 #endif 902 903 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); 904 if (ret) 905 goto unmap_ctx; 906 907 ret = -EINPROGRESS; 908 } else if (*next_buflen) { 909 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 910 req->nbytes, 0); 911 *buflen = *next_buflen; 912 *next_buflen = last_buflen; 913 } 914 #ifdef DEBUG 915 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 916 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 917 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 918 DUMP_PREFIX_ADDRESS, 
16, 4, next_buf, 919 *next_buflen, 1); 920 #endif 921 922 return ret; 923 unmap_ctx: 924 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); 925 kfree(edesc); 926 return ret; 927 } 928 929 static int ahash_final_ctx(struct ahash_request *req) 930 { 931 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 932 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 933 struct caam_hash_state *state = ahash_request_ctx(req); 934 struct device *jrdev = ctx->jrdev; 935 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 936 GFP_KERNEL : GFP_ATOMIC; 937 int buflen = *current_buflen(state); 938 u32 *desc; 939 int sec4_sg_bytes, sec4_sg_src_index; 940 int digestsize = crypto_ahash_digestsize(ahash); 941 struct ahash_edesc *edesc; 942 int ret; 943 944 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 945 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry); 946 947 /* allocate space for base edesc and hw desc commands, link tables */ 948 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index, 949 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 950 flags); 951 if (!edesc) 952 return -ENOMEM; 953 954 desc = edesc->hw_desc; 955 956 edesc->sec4_sg_bytes = sec4_sg_bytes; 957 958 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 959 edesc->sec4_sg, DMA_BIDIRECTIONAL); 960 if (ret) 961 goto unmap_ctx; 962 963 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 964 if (ret) 965 goto unmap_ctx; 966 967 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); 968 969 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 970 sec4_sg_bytes, DMA_TO_DEVICE); 971 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 972 dev_err(jrdev, "unable to map S/G table\n"); 973 ret = -ENOMEM; 974 goto unmap_ctx; 975 } 976 977 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, 978 LDST_SGF); 979 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 980 981 #ifdef DEBUG 982 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 983 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 984 #endif 985 986 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 987 if (ret) 988 goto unmap_ctx; 989 990 return -EINPROGRESS; 991 unmap_ctx: 992 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 993 kfree(edesc); 994 return ret; 995 } 996 997 static int ahash_finup_ctx(struct ahash_request *req) 998 { 999 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1000 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1001 struct caam_hash_state *state = ahash_request_ctx(req); 1002 struct device *jrdev = ctx->jrdev; 1003 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1004 GFP_KERNEL : GFP_ATOMIC; 1005 int buflen = *current_buflen(state); 1006 u32 *desc; 1007 int sec4_sg_src_index; 1008 int src_nents, mapped_nents; 1009 int digestsize = crypto_ahash_digestsize(ahash); 1010 struct ahash_edesc *edesc; 1011 int ret; 1012 1013 src_nents = sg_nents_for_len(req->src, req->nbytes); 1014 if (src_nents < 0) { 1015 dev_err(jrdev, "Invalid number of src SG.\n"); 1016 return src_nents; 1017 } 1018 1019 if (src_nents) { 1020 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1021 DMA_TO_DEVICE); 1022 if (!mapped_nents) { 1023 dev_err(jrdev, "unable to DMA map source\n"); 1024 return -ENOMEM; 1025 } 1026 } else { 1027 mapped_nents = 0; 1028 } 1029 1030 sec4_sg_src_index = 1 + (buflen ? 
1 : 0); 1031 1032 /* allocate space for base edesc and hw desc commands, link tables */ 1033 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 1034 ctx->sh_desc_fin, ctx->sh_desc_fin_dma, 1035 flags); 1036 if (!edesc) { 1037 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1038 return -ENOMEM; 1039 } 1040 1041 desc = edesc->hw_desc; 1042 1043 edesc->src_nents = src_nents; 1044 1045 ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 1046 edesc->sec4_sg, DMA_BIDIRECTIONAL); 1047 if (ret) 1048 goto unmap_ctx; 1049 1050 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); 1051 if (ret) 1052 goto unmap_ctx; 1053 1054 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1055 sec4_sg_src_index, ctx->ctx_len + buflen, 1056 req->nbytes); 1057 if (ret) 1058 goto unmap_ctx; 1059 1060 append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); 1061 1062 #ifdef DEBUG 1063 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1064 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1065 #endif 1066 1067 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); 1068 if (ret) 1069 goto unmap_ctx; 1070 1071 return -EINPROGRESS; 1072 unmap_ctx: 1073 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); 1074 kfree(edesc); 1075 return ret; 1076 } 1077 1078 static int ahash_digest(struct ahash_request *req) 1079 { 1080 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1081 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1082 struct caam_hash_state *state = ahash_request_ctx(req); 1083 struct device *jrdev = ctx->jrdev; 1084 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1085 GFP_KERNEL : GFP_ATOMIC; 1086 u32 *desc; 1087 int digestsize = crypto_ahash_digestsize(ahash); 1088 int src_nents, mapped_nents; 1089 struct ahash_edesc *edesc; 1090 int ret; 1091 1092 state->buf_dma = 0; 1093 1094 src_nents = sg_nents_for_len(req->src, req->nbytes); 1095 if (src_nents < 0) { 1096 dev_err(jrdev, "Invalid number of src SG.\n"); 1097 return src_nents; 1098 } 1099 1100 if (src_nents) { 1101 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1102 DMA_TO_DEVICE); 1103 if (!mapped_nents) { 1104 dev_err(jrdev, "unable to map source for DMA\n"); 1105 return -ENOMEM; 1106 } 1107 } else { 1108 mapped_nents = 0; 1109 } 1110 1111 /* allocate space for base edesc and hw desc commands, link tables */ 1112 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 
mapped_nents : 0, 1113 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1114 flags); 1115 if (!edesc) { 1116 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1117 return -ENOMEM; 1118 } 1119 1120 edesc->src_nents = src_nents; 1121 1122 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1123 req->nbytes); 1124 if (ret) { 1125 ahash_unmap(jrdev, edesc, req, digestsize); 1126 kfree(edesc); 1127 return ret; 1128 } 1129 1130 desc = edesc->hw_desc; 1131 1132 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1133 if (ret) { 1134 ahash_unmap(jrdev, edesc, req, digestsize); 1135 kfree(edesc); 1136 return -ENOMEM; 1137 } 1138 1139 #ifdef DEBUG 1140 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1141 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1142 #endif 1143 1144 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1145 if (!ret) { 1146 ret = -EINPROGRESS; 1147 } else { 1148 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1149 kfree(edesc); 1150 } 1151 1152 return ret; 1153 } 1154 1155 /* submit ahash final if it the first job descriptor */ 1156 static int ahash_final_no_ctx(struct ahash_request *req) 1157 { 1158 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1159 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1160 struct caam_hash_state *state = ahash_request_ctx(req); 1161 struct device *jrdev = ctx->jrdev; 1162 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1163 GFP_KERNEL : GFP_ATOMIC; 1164 u8 *buf = current_buf(state); 1165 int buflen = *current_buflen(state); 1166 u32 *desc; 1167 int digestsize = crypto_ahash_digestsize(ahash); 1168 struct ahash_edesc *edesc; 1169 int ret; 1170 1171 /* allocate space for base edesc and hw desc commands, link tables */ 1172 edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest, 1173 ctx->sh_desc_digest_dma, flags); 1174 if (!edesc) 1175 return -ENOMEM; 1176 1177 desc = edesc->hw_desc; 1178 1179 if (buflen) { 1180 state->buf_dma = dma_map_single(jrdev, buf, buflen, 1181 DMA_TO_DEVICE); 1182 if (dma_mapping_error(jrdev, state->buf_dma)) { 1183 dev_err(jrdev, "unable to map src\n"); 1184 goto unmap; 1185 } 1186 1187 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1188 } 1189 1190 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1191 if (ret) 1192 goto unmap; 1193 1194 #ifdef DEBUG 1195 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1196 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1197 #endif 1198 1199 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1200 if (!ret) { 1201 ret = -EINPROGRESS; 1202 } else { 1203 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1204 kfree(edesc); 1205 } 1206 1207 return ret; 1208 unmap: 1209 ahash_unmap(jrdev, edesc, req, digestsize); 1210 kfree(edesc); 1211 return -ENOMEM; 1212 1213 } 1214 1215 /* submit ahash update if it the first job descriptor after update */ 1216 static int ahash_update_no_ctx(struct ahash_request *req) 1217 { 1218 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1219 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1220 struct caam_hash_state *state = ahash_request_ctx(req); 1221 struct device *jrdev = ctx->jrdev; 1222 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
1223 GFP_KERNEL : GFP_ATOMIC; 1224 u8 *buf = current_buf(state); 1225 int *buflen = current_buflen(state); 1226 int blocksize = crypto_ahash_blocksize(ahash); 1227 u8 *next_buf = alt_buf(state); 1228 int *next_buflen = alt_buflen(state); 1229 int in_len = *buflen + req->nbytes, to_hash; 1230 int sec4_sg_bytes, src_nents, mapped_nents; 1231 struct ahash_edesc *edesc; 1232 u32 *desc; 1233 int ret = 0; 1234 1235 *next_buflen = in_len & (blocksize - 1); 1236 to_hash = in_len - *next_buflen; 1237 1238 /* 1239 * For XCBC and CMAC, if to_hash is multiple of block size, 1240 * keep last block in internal buffer 1241 */ 1242 if ((is_xcbc_aes(ctx->adata.algtype) || 1243 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 1244 (*next_buflen == 0)) { 1245 *next_buflen = blocksize; 1246 to_hash -= blocksize; 1247 } 1248 1249 if (to_hash) { 1250 src_nents = sg_nents_for_len(req->src, 1251 req->nbytes - *next_buflen); 1252 if (src_nents < 0) { 1253 dev_err(jrdev, "Invalid number of src SG.\n"); 1254 return src_nents; 1255 } 1256 1257 if (src_nents) { 1258 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1259 DMA_TO_DEVICE); 1260 if (!mapped_nents) { 1261 dev_err(jrdev, "unable to DMA map source\n"); 1262 return -ENOMEM; 1263 } 1264 } else { 1265 mapped_nents = 0; 1266 } 1267 1268 sec4_sg_bytes = (1 + mapped_nents) * 1269 sizeof(struct sec4_sg_entry); 1270 1271 /* 1272 * allocate space for base edesc and hw desc commands, 1273 * link tables 1274 */ 1275 edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents, 1276 ctx->sh_desc_update_first, 1277 ctx->sh_desc_update_first_dma, 1278 flags); 1279 if (!edesc) { 1280 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1281 return -ENOMEM; 1282 } 1283 1284 edesc->src_nents = src_nents; 1285 edesc->sec4_sg_bytes = sec4_sg_bytes; 1286 1287 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1288 if (ret) 1289 goto unmap_ctx; 1290 1291 sg_to_sec4_sg_last(req->src, mapped_nents, 1292 edesc->sec4_sg + 1, 0); 1293 1294 if (*next_buflen) { 1295 scatterwalk_map_and_copy(next_buf, req->src, 1296 to_hash - *buflen, 1297 *next_buflen, 0); 1298 } 1299 1300 desc = edesc->hw_desc; 1301 1302 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1303 sec4_sg_bytes, 1304 DMA_TO_DEVICE); 1305 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { 1306 dev_err(jrdev, "unable to map S/G table\n"); 1307 ret = -ENOMEM; 1308 goto unmap_ctx; 1309 } 1310 1311 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); 1312 1313 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1314 if (ret) 1315 goto unmap_ctx; 1316 1317 #ifdef DEBUG 1318 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1319 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1320 desc_bytes(desc), 1); 1321 #endif 1322 1323 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1324 if (ret) 1325 goto unmap_ctx; 1326 1327 ret = -EINPROGRESS; 1328 state->update = ahash_update_ctx; 1329 state->finup = ahash_finup_ctx; 1330 state->final = ahash_final_ctx; 1331 } else if (*next_buflen) { 1332 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, 1333 req->nbytes, 0); 1334 *buflen = *next_buflen; 1335 *next_buflen = 0; 1336 } 1337 #ifdef DEBUG 1338 print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ", 1339 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); 1340 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1341 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1342 *next_buflen, 1); 1343 #endif 1344 1345 return ret; 1346 unmap_ctx: 1347 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, 
DMA_TO_DEVICE); 1348 kfree(edesc); 1349 return ret; 1350 } 1351 1352 /* submit ahash finup if it the first job descriptor after update */ 1353 static int ahash_finup_no_ctx(struct ahash_request *req) 1354 { 1355 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1356 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1357 struct caam_hash_state *state = ahash_request_ctx(req); 1358 struct device *jrdev = ctx->jrdev; 1359 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1360 GFP_KERNEL : GFP_ATOMIC; 1361 int buflen = *current_buflen(state); 1362 u32 *desc; 1363 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; 1364 int digestsize = crypto_ahash_digestsize(ahash); 1365 struct ahash_edesc *edesc; 1366 int ret; 1367 1368 src_nents = sg_nents_for_len(req->src, req->nbytes); 1369 if (src_nents < 0) { 1370 dev_err(jrdev, "Invalid number of src SG.\n"); 1371 return src_nents; 1372 } 1373 1374 if (src_nents) { 1375 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1376 DMA_TO_DEVICE); 1377 if (!mapped_nents) { 1378 dev_err(jrdev, "unable to DMA map source\n"); 1379 return -ENOMEM; 1380 } 1381 } else { 1382 mapped_nents = 0; 1383 } 1384 1385 sec4_sg_src_index = 2; 1386 sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * 1387 sizeof(struct sec4_sg_entry); 1388 1389 /* allocate space for base edesc and hw desc commands, link tables */ 1390 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents, 1391 ctx->sh_desc_digest, ctx->sh_desc_digest_dma, 1392 flags); 1393 if (!edesc) { 1394 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1395 return -ENOMEM; 1396 } 1397 1398 desc = edesc->hw_desc; 1399 1400 edesc->src_nents = src_nents; 1401 edesc->sec4_sg_bytes = sec4_sg_bytes; 1402 1403 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1404 if (ret) 1405 goto unmap; 1406 1407 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, 1408 req->nbytes); 1409 if (ret) { 1410 dev_err(jrdev, "unable to map S/G table\n"); 1411 goto unmap; 1412 } 1413 1414 ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); 1415 if (ret) 1416 goto unmap; 1417 1418 #ifdef DEBUG 1419 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1420 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); 1421 #endif 1422 1423 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); 1424 if (!ret) { 1425 ret = -EINPROGRESS; 1426 } else { 1427 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); 1428 kfree(edesc); 1429 } 1430 1431 return ret; 1432 unmap: 1433 ahash_unmap(jrdev, edesc, req, digestsize); 1434 kfree(edesc); 1435 return -ENOMEM; 1436 1437 } 1438 1439 /* submit first update job descriptor after init */ 1440 static int ahash_update_first(struct ahash_request *req) 1441 { 1442 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1443 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1444 struct caam_hash_state *state = ahash_request_ctx(req); 1445 struct device *jrdev = ctx->jrdev; 1446 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
1447 GFP_KERNEL : GFP_ATOMIC; 1448 u8 *next_buf = alt_buf(state); 1449 int *next_buflen = alt_buflen(state); 1450 int to_hash; 1451 int blocksize = crypto_ahash_blocksize(ahash); 1452 u32 *desc; 1453 int src_nents, mapped_nents; 1454 struct ahash_edesc *edesc; 1455 int ret = 0; 1456 1457 *next_buflen = req->nbytes & (blocksize - 1); 1458 to_hash = req->nbytes - *next_buflen; 1459 1460 /* 1461 * For XCBC and CMAC, if to_hash is multiple of block size, 1462 * keep last block in internal buffer 1463 */ 1464 if ((is_xcbc_aes(ctx->adata.algtype) || 1465 is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && 1466 (*next_buflen == 0)) { 1467 *next_buflen = blocksize; 1468 to_hash -= blocksize; 1469 } 1470 1471 if (to_hash) { 1472 src_nents = sg_nents_for_len(req->src, 1473 req->nbytes - *next_buflen); 1474 if (src_nents < 0) { 1475 dev_err(jrdev, "Invalid number of src SG.\n"); 1476 return src_nents; 1477 } 1478 1479 if (src_nents) { 1480 mapped_nents = dma_map_sg(jrdev, req->src, src_nents, 1481 DMA_TO_DEVICE); 1482 if (!mapped_nents) { 1483 dev_err(jrdev, "unable to map source for DMA\n"); 1484 return -ENOMEM; 1485 } 1486 } else { 1487 mapped_nents = 0; 1488 } 1489 1490 /* 1491 * allocate space for base edesc and hw desc commands, 1492 * link tables 1493 */ 1494 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? 1495 mapped_nents : 0, 1496 ctx->sh_desc_update_first, 1497 ctx->sh_desc_update_first_dma, 1498 flags); 1499 if (!edesc) { 1500 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); 1501 return -ENOMEM; 1502 } 1503 1504 edesc->src_nents = src_nents; 1505 1506 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1507 to_hash); 1508 if (ret) 1509 goto unmap_ctx; 1510 1511 if (*next_buflen) 1512 scatterwalk_map_and_copy(next_buf, req->src, to_hash, 1513 *next_buflen, 0); 1514 1515 desc = edesc->hw_desc; 1516 1517 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); 1518 if (ret) 1519 goto unmap_ctx; 1520 1521 #ifdef DEBUG 1522 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", 1523 DUMP_PREFIX_ADDRESS, 16, 4, desc, 1524 desc_bytes(desc), 1); 1525 #endif 1526 1527 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); 1528 if (ret) 1529 goto unmap_ctx; 1530 1531 ret = -EINPROGRESS; 1532 state->update = ahash_update_ctx; 1533 state->finup = ahash_finup_ctx; 1534 state->final = ahash_final_ctx; 1535 } else if (*next_buflen) { 1536 state->update = ahash_update_no_ctx; 1537 state->finup = ahash_finup_no_ctx; 1538 state->final = ahash_final_no_ctx; 1539 scatterwalk_map_and_copy(next_buf, req->src, 0, 1540 req->nbytes, 0); 1541 switch_buf(state); 1542 } 1543 #ifdef DEBUG 1544 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", 1545 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, 1546 *next_buflen, 1); 1547 #endif 1548 1549 return ret; 1550 unmap_ctx: 1551 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); 1552 kfree(edesc); 1553 return ret; 1554 } 1555 1556 static int ahash_finup_first(struct ahash_request *req) 1557 { 1558 return ahash_digest(req); 1559 } 1560 1561 static int ahash_init(struct ahash_request *req) 1562 { 1563 struct caam_hash_state *state = ahash_request_ctx(req); 1564 1565 state->update = ahash_update_first; 1566 state->finup = ahash_finup_first; 1567 state->final = ahash_final_no_ctx; 1568 1569 state->ctx_dma = 0; 1570 state->ctx_dma_len = 0; 1571 state->current_buf = 0; 1572 state->buf_dma = 0; 1573 state->buflen_0 = 0; 1574 state->buflen_1 = 0; 1575 1576 return 0; 1577 } 1578 1579 static int ahash_update(struct 
ahash_request *req) 1580 { 1581 struct caam_hash_state *state = ahash_request_ctx(req); 1582 1583 return state->update(req); 1584 } 1585 1586 static int ahash_finup(struct ahash_request *req) 1587 { 1588 struct caam_hash_state *state = ahash_request_ctx(req); 1589 1590 return state->finup(req); 1591 } 1592 1593 static int ahash_final(struct ahash_request *req) 1594 { 1595 struct caam_hash_state *state = ahash_request_ctx(req); 1596 1597 return state->final(req); 1598 } 1599 1600 static int ahash_export(struct ahash_request *req, void *out) 1601 { 1602 struct caam_hash_state *state = ahash_request_ctx(req); 1603 struct caam_export_state *export = out; 1604 int len; 1605 u8 *buf; 1606 1607 if (state->current_buf) { 1608 buf = state->buf_1; 1609 len = state->buflen_1; 1610 } else { 1611 buf = state->buf_0; 1612 len = state->buflen_0; 1613 } 1614 1615 memcpy(export->buf, buf, len); 1616 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); 1617 export->buflen = len; 1618 export->update = state->update; 1619 export->final = state->final; 1620 export->finup = state->finup; 1621 1622 return 0; 1623 } 1624 1625 static int ahash_import(struct ahash_request *req, const void *in) 1626 { 1627 struct caam_hash_state *state = ahash_request_ctx(req); 1628 const struct caam_export_state *export = in; 1629 1630 memset(state, 0, sizeof(*state)); 1631 memcpy(state->buf_0, export->buf, export->buflen); 1632 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); 1633 state->buflen_0 = export->buflen; 1634 state->update = export->update; 1635 state->final = export->final; 1636 state->finup = export->finup; 1637 1638 return 0; 1639 } 1640 1641 struct caam_hash_template { 1642 char name[CRYPTO_MAX_ALG_NAME]; 1643 char driver_name[CRYPTO_MAX_ALG_NAME]; 1644 char hmac_name[CRYPTO_MAX_ALG_NAME]; 1645 char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1646 unsigned int blocksize; 1647 struct ahash_alg template_ahash; 1648 u32 alg_type; 1649 }; 1650 1651 /* ahash descriptors */ 1652 static struct caam_hash_template driver_hash[] = { 1653 { 1654 .name = "sha1", 1655 .driver_name = "sha1-caam", 1656 .hmac_name = "hmac(sha1)", 1657 .hmac_driver_name = "hmac-sha1-caam", 1658 .blocksize = SHA1_BLOCK_SIZE, 1659 .template_ahash = { 1660 .init = ahash_init, 1661 .update = ahash_update, 1662 .final = ahash_final, 1663 .finup = ahash_finup, 1664 .digest = ahash_digest, 1665 .export = ahash_export, 1666 .import = ahash_import, 1667 .setkey = ahash_setkey, 1668 .halg = { 1669 .digestsize = SHA1_DIGEST_SIZE, 1670 .statesize = sizeof(struct caam_export_state), 1671 }, 1672 }, 1673 .alg_type = OP_ALG_ALGSEL_SHA1, 1674 }, { 1675 .name = "sha224", 1676 .driver_name = "sha224-caam", 1677 .hmac_name = "hmac(sha224)", 1678 .hmac_driver_name = "hmac-sha224-caam", 1679 .blocksize = SHA224_BLOCK_SIZE, 1680 .template_ahash = { 1681 .init = ahash_init, 1682 .update = ahash_update, 1683 .final = ahash_final, 1684 .finup = ahash_finup, 1685 .digest = ahash_digest, 1686 .export = ahash_export, 1687 .import = ahash_import, 1688 .setkey = ahash_setkey, 1689 .halg = { 1690 .digestsize = SHA224_DIGEST_SIZE, 1691 .statesize = sizeof(struct caam_export_state), 1692 }, 1693 }, 1694 .alg_type = OP_ALG_ALGSEL_SHA224, 1695 }, { 1696 .name = "sha256", 1697 .driver_name = "sha256-caam", 1698 .hmac_name = "hmac(sha256)", 1699 .hmac_driver_name = "hmac-sha256-caam", 1700 .blocksize = SHA256_BLOCK_SIZE, 1701 .template_ahash = { 1702 .init = ahash_init, 1703 .update = ahash_update, 1704 .final = ahash_final, 1705 .finup = ahash_finup, 1706 
.digest = ahash_digest, 1707 .export = ahash_export, 1708 .import = ahash_import, 1709 .setkey = ahash_setkey, 1710 .halg = { 1711 .digestsize = SHA256_DIGEST_SIZE, 1712 .statesize = sizeof(struct caam_export_state), 1713 }, 1714 }, 1715 .alg_type = OP_ALG_ALGSEL_SHA256, 1716 }, { 1717 .name = "sha384", 1718 .driver_name = "sha384-caam", 1719 .hmac_name = "hmac(sha384)", 1720 .hmac_driver_name = "hmac-sha384-caam", 1721 .blocksize = SHA384_BLOCK_SIZE, 1722 .template_ahash = { 1723 .init = ahash_init, 1724 .update = ahash_update, 1725 .final = ahash_final, 1726 .finup = ahash_finup, 1727 .digest = ahash_digest, 1728 .export = ahash_export, 1729 .import = ahash_import, 1730 .setkey = ahash_setkey, 1731 .halg = { 1732 .digestsize = SHA384_DIGEST_SIZE, 1733 .statesize = sizeof(struct caam_export_state), 1734 }, 1735 }, 1736 .alg_type = OP_ALG_ALGSEL_SHA384, 1737 }, { 1738 .name = "sha512", 1739 .driver_name = "sha512-caam", 1740 .hmac_name = "hmac(sha512)", 1741 .hmac_driver_name = "hmac-sha512-caam", 1742 .blocksize = SHA512_BLOCK_SIZE, 1743 .template_ahash = { 1744 .init = ahash_init, 1745 .update = ahash_update, 1746 .final = ahash_final, 1747 .finup = ahash_finup, 1748 .digest = ahash_digest, 1749 .export = ahash_export, 1750 .import = ahash_import, 1751 .setkey = ahash_setkey, 1752 .halg = { 1753 .digestsize = SHA512_DIGEST_SIZE, 1754 .statesize = sizeof(struct caam_export_state), 1755 }, 1756 }, 1757 .alg_type = OP_ALG_ALGSEL_SHA512, 1758 }, { 1759 .name = "md5", 1760 .driver_name = "md5-caam", 1761 .hmac_name = "hmac(md5)", 1762 .hmac_driver_name = "hmac-md5-caam", 1763 .blocksize = MD5_BLOCK_WORDS * 4, 1764 .template_ahash = { 1765 .init = ahash_init, 1766 .update = ahash_update, 1767 .final = ahash_final, 1768 .finup = ahash_finup, 1769 .digest = ahash_digest, 1770 .export = ahash_export, 1771 .import = ahash_import, 1772 .setkey = ahash_setkey, 1773 .halg = { 1774 .digestsize = MD5_DIGEST_SIZE, 1775 .statesize = sizeof(struct caam_export_state), 1776 }, 1777 }, 1778 .alg_type = OP_ALG_ALGSEL_MD5, 1779 }, { 1780 .hmac_name = "xcbc(aes)", 1781 .hmac_driver_name = "xcbc-aes-caam", 1782 .blocksize = AES_BLOCK_SIZE, 1783 .template_ahash = { 1784 .init = ahash_init, 1785 .update = ahash_update, 1786 .final = ahash_final, 1787 .finup = ahash_finup, 1788 .digest = ahash_digest, 1789 .export = ahash_export, 1790 .import = ahash_import, 1791 .setkey = axcbc_setkey, 1792 .halg = { 1793 .digestsize = AES_BLOCK_SIZE, 1794 .statesize = sizeof(struct caam_export_state), 1795 }, 1796 }, 1797 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, 1798 }, { 1799 .hmac_name = "cmac(aes)", 1800 .hmac_driver_name = "cmac-aes-caam", 1801 .blocksize = AES_BLOCK_SIZE, 1802 .template_ahash = { 1803 .init = ahash_init, 1804 .update = ahash_update, 1805 .final = ahash_final, 1806 .finup = ahash_finup, 1807 .digest = ahash_digest, 1808 .export = ahash_export, 1809 .import = ahash_import, 1810 .setkey = acmac_setkey, 1811 .halg = { 1812 .digestsize = AES_BLOCK_SIZE, 1813 .statesize = sizeof(struct caam_export_state), 1814 }, 1815 }, 1816 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, 1817 }, 1818 }; 1819 1820 struct caam_hash_alg { 1821 struct list_head entry; 1822 int alg_type; 1823 struct ahash_alg ahash_alg; 1824 }; 1825 1826 static int caam_hash_cra_init(struct crypto_tfm *tfm) 1827 { 1828 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 1829 struct crypto_alg *base = tfm->__crt_alg; 1830 struct hash_alg_common *halg = 1831 container_of(base, struct hash_alg_common, base); 1832 struct ahash_alg *alg = 
1833 container_of(halg, struct ahash_alg, halg); 1834 struct caam_hash_alg *caam_hash = 1835 container_of(alg, struct caam_hash_alg, ahash_alg); 1836 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1837 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1838 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1839 HASH_MSG_LEN + SHA1_DIGEST_SIZE, 1840 HASH_MSG_LEN + 32, 1841 HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1842 HASH_MSG_LEN + 64, 1843 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1844 dma_addr_t dma_addr; 1845 struct caam_drv_private *priv; 1846 1847 /* 1848 * Get a Job ring from Job Ring driver to ensure in-order 1849 * crypto request processing per tfm 1850 */ 1851 ctx->jrdev = caam_jr_alloc(); 1852 if (IS_ERR(ctx->jrdev)) { 1853 pr_err("Job Ring Device allocation for transform failed\n"); 1854 return PTR_ERR(ctx->jrdev); 1855 } 1856 1857 priv = dev_get_drvdata(ctx->jrdev->parent); 1858 1859 if (is_xcbc_aes(caam_hash->alg_type)) { 1860 ctx->dir = DMA_TO_DEVICE; 1861 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 1862 ctx->ctx_len = 48; 1863 1864 ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, 1865 ARRAY_SIZE(ctx->key), 1866 DMA_BIDIRECTIONAL, 1867 DMA_ATTR_SKIP_CPU_SYNC); 1868 if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { 1869 dev_err(ctx->jrdev, "unable to map key\n"); 1870 caam_jr_free(ctx->jrdev); 1871 return -ENOMEM; 1872 } 1873 } else if (is_cmac_aes(caam_hash->alg_type)) { 1874 ctx->dir = DMA_TO_DEVICE; 1875 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; 1876 ctx->ctx_len = 32; 1877 } else { 1878 ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 1879 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1880 ctx->ctx_len = runninglen[(ctx->adata.algtype & 1881 OP_ALG_ALGSEL_SUBMASK) >> 1882 OP_ALG_ALGSEL_SHIFT]; 1883 } 1884 1885 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, 1886 offsetof(struct caam_hash_ctx, key), 1887 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1888 if (dma_mapping_error(ctx->jrdev, dma_addr)) { 1889 dev_err(ctx->jrdev, "unable to map shared descriptors\n"); 1890 1891 if (is_xcbc_aes(caam_hash->alg_type)) 1892 dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, 1893 ARRAY_SIZE(ctx->key), 1894 DMA_BIDIRECTIONAL, 1895 DMA_ATTR_SKIP_CPU_SYNC); 1896 1897 caam_jr_free(ctx->jrdev); 1898 return -ENOMEM; 1899 } 1900 1901 ctx->sh_desc_update_dma = dma_addr; 1902 ctx->sh_desc_update_first_dma = dma_addr + 1903 offsetof(struct caam_hash_ctx, 1904 sh_desc_update_first); 1905 ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, 1906 sh_desc_fin); 1907 ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, 1908 sh_desc_digest); 1909 1910 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1911 sizeof(struct caam_hash_state)); 1912 1913 /* 1914 * For keyed hash algorithms shared descriptors 1915 * will be created later in setkey() callback 1916 */ 1917 return alg->setkey ? 
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (is_xcbc_aes(ctx->adata.algtype))
		dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
				       ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	priv = dev_get_drvdata(&pdev->dev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv) {
		err = -ENODEV;
		goto out_put_dev;
	}

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst) {
		err = -ENODEV;
		goto out_put_dev;
	}

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

out_put_dev:
	put_device(&pdev->dev);
	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
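/*
 * Illustrative sketch (not part of this driver): once registered above, the
 * transforms are consumed through the generic crypto API.  A kernel user
 * would request e.g. "hmac(sha256)" (served by "hmac-sha256-caam" when this
 * driver wins on priority) roughly as follows; error handling is omitted and
 * key/keylen/msg/msglen are placeholder buffers.
 *
 *	#include <crypto/hash.h>
 *	#include <linux/scatterlist.h>
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	sg_init_one(&sg, msg, msglen);
 *	ahash_request_set_crypt(req, &sg, digest, msglen);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */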