// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |      |------>| (operation) |
 *       .              |      |       | (load ctx2) |
 *       .              |      |       ---------------
 * ---------------      |      |
 * | JobDesc #3  |------|      |
 * | *(packet 3) |             |
 * ---------------             |
 *       .                     |
 *       .                     |
 * ---------------             |
 * | JobDesc #4  |-------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)
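
/*
 * Worked example of the context sizing above (illustrative): for SHA-256
 * the running digest is SHA256_DIGEST_SIZE (32) bytes, so the CAAM context
 * is 32 + HASH_MSG_LEN (8) = 40 bytes; MAX_CTX_LEN covers the largest
 * case, SHA-512: 64 + 8 = 72 bytes.
 */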

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif


static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
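
/*
 * ahash_export()/ahash_import() below serialize in-flight state into a
 * struct caam_export_state. A minimal usage sketch through the generic
 * crypto API (illustrative; "req" is a caller-owned ahash_request and
 * error handling is omitted):
 *
 *	char st[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_export(req, st);
 *	...
 *	crypto_ahash_import(req, st);
 */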

static inline void switch_buf(struct caam_hash_state *state)
{
	state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
	return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}
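
/*
 * buf_0/buf_1 form a ping-pong pair: the current buffer may still be
 * mapped for DMA while a job is in flight, so the partial block for the
 * next request is staged in the alternate buffer, and the completion
 * callbacks call switch_buf() to flip the roles.
 */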

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = *current_buflen(state);

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash update first shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ahash digest shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* key is loaded from memory for UPDATE and FINALIZE states */
	ctx->adata.key_dma = ctx->key_dma;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len, ctx->key_dma);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len, 0);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the key if it is too large (longer than the hash block size) */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR,
			       "digested key@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}
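
/*
 * This mirrors the standard HMAC rule (RFC 2104): a key longer than the
 * hash block size is first reduced to its digest before being used as
 * the MAC key. ahash_setkey() below applies it when keylen > blocksize.
 */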

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
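
/*
 * Consumer-side sketch (illustrative; error handling omitted): keying one
 * of the hmac(*) transforms registered by this driver ends up in
 * ahash_setkey() above:
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);
 */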

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}
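
/*
 * Note the asymmetry between the two AES MAC setkey paths: xcbc copies the
 * key into ctx->key and syncs it for DMA because its UPDATE/FINALIZE
 * descriptors fetch the key from memory (adata.key_dma), while cmac passes
 * the key as immediate descriptor data (adata.key_virt) in every shared
 * descriptor, so no DMA sync is needed.
 */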

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[0];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
	switch_buf(state);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}
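
/*
 * The four completion callbacks above differ in what they copy out and in
 * the DMA direction used when unmapping state->caam_ctx:
 * ahash_done()/ahash_done_ctx_src() finish a digest and copy it to
 * req->result, while ahash_done_bi()/ahash_done_ctx_dst() leave the
 * running context in place for the next update and flip the ping-pong
 * buffer via switch_buf().
 */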

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma,
					     gfp_t flags)
{
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}
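
/*
 * Memory layout sketch for the allocation above: the job descriptor and
 * the S/G entries live in one contiguous buffer, so e.g. sg_num = 4 yields
 * kzalloc(sizeof(struct ahash_edesc) + 4 * sizeof(struct sec4_sg_entry)),
 * with edesc->sec4_sg starting right after hw_desc.
 */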

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	int *next_buflen = alt_buflen(state), last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;

		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - (*next_buflen));
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma, flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, mapped_nents,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}
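
/*
 * Worked example of the buffering arithmetic in ahash_update_ctx()
 * (illustrative): with blocksize = 64, a carried *buflen of 10 and
 * req->nbytes = 100, in_len = 110, *next_buflen = 110 & 63 = 46 and
 * to_hash = 64, so one full block is hashed now and the remaining 46
 * bytes are staged in the alternate buffer for the next call.
 */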

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (ret)
		goto unmap_ctx;

	return -EINPROGRESS;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma, flags);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	int blocksize = crypto_ahash_blocksize(ahash);
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;

		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, mapped_nents,
				   edesc->sec4_sg + 1, 0);

		if (*next_buflen) {
			scatterwalk_map_and_copy(next_buf, req->src,
						 to_hash - *buflen,
						 *next_buflen, 0);
		}

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
				  flags);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
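
/*
 * State-machine summary: ahash_init() points the handlers at the "first"
 * variants; once ahash_update_first()/ahash_update_no_ctx() actually
 * hashes data, it rewires state->update/finup/final to the ctx-based
 * variants, so subsequent requests chain through the running context
 * stored in state->caam_ctx.
 */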

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;

		ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
						    ARRAY_SIZE(ctx->key),
						    DMA_BIDIRECTIONAL,
						    DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (is_xcbc_aes(caam_hash->alg_type))
			dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_BIDIRECTIONAL,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (is_xcbc_aes(ctx->adata.algtype))
		dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
				       ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}
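
/*
 * End-to-end usage sketch (illustrative; error handling omitted): once
 * registration below succeeds, a one-shot digest through one of these
 * algorithms looks like
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256-caam", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, 0, my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, src_sg, result, nbytes);
 *	crypto_ahash_digest(req);	(may return -EINPROGRESS)
 *
 * where my_done_cb/my_ctx, src_sg, result and nbytes are caller-supplied
 * placeholders; completion arrives via the callback, backed by
 * ahash_done() above.
 */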

int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return -ENODEV;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}