// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
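/*
 * Software padding helpers: MD5 and SHA pad the message with a 0x80 byte,
 * zeroes up to 56 bytes modulo 64, and finally the total message length in
 * bits as a 64-bit value (little endian for MD5, big endian for SHA). The
 * helpers below build that trailer whenever the driver has to append the
 * padding itself instead of letting the engine finalize the hash.
 */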
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	     CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = cpu_to_le32(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format, but
			 * SHA in big endian format
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}
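/*
 * TDMA helpers: a request is split into SRAM-sized fragments. Each fragment
 * gets its own operation descriptor plus a dummy "launch" descriptor, and
 * once the first fragment has been queued the template is switched from
 * FIRST_FRAG to MID_FRAG for the fragments that follow.
 */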
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
				      CESA_SA_DESC_CFG_NOT_FRAG :
				      CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						    CESA_SA_DATA_SRAM_OFFSET +
						    frag_len,
						    ahashdreq->padding_dma,
						    len, CESA_TDMA_DST_IN_SRAM,
						    flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}
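/*
 * Partial state export/import: the exported state is the number of bytes
 * hashed so far, the intermediate digest words, and the data still buffered
 * in the software cache (less than a full block).
 */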
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}
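/*
 * HMAC key preprocessing: a key longer than one block is first hashed, the
 * (possibly shortened) key is then zero-padded to a full block and XORed
 * with the ipad/opad constants. The intermediate digests of those two
 * blocks are what the per-algorithm setkey handlers program as IVs.
 */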
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		kzfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};