// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior <sebastian at breakpoint dot cc>
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

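/*
 * Software padding helpers: the two functions below compute and emit the
 * standard MD5/SHA trailer (a 0x80 byte, zero padding up to 56 mod 64, then
 * the total message length in bits as a 64-bit little- or big-endian word).
 * For example, a message whose length is a multiple of the 64-byte block
 * size gets 56 bytes of padding plus the 8-byte length, i.e. one full
 * trailing block.
 */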
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	     CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = le32_to_cpu(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format,
			 * but SHA is in big endian format.
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}

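/*
 * Helper used when building a TDMA chain: each "fragment" operation below
 * processes one SRAM payload worth of data. Once the first fragment has been
 * queued, the template is switched from FIRST_FRAG to MID_FRAG so that any
 * later fragments built from the same template are tagged as continuations.
 */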
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						    CESA_SA_DATA_SRAM_OFFSET +
						    frag_len,
						    ahashdreq->padding_dma,
						    len, CESA_TDMA_DST_IN_SRAM,
						    flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends on whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		kfree_sensitive(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx =
		crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};