1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256. 4 * 5 * Author: Boris Brezillon <boris.brezillon@free-electrons.com> 6 * Author: Arnaud Ebalard <arno@natisbad.org> 7 * 8 * This work is based on an initial version written by 9 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > 10 */ 11 12 #include <crypto/hmac.h> 13 #include <crypto/md5.h> 14 #include <crypto/sha1.h> 15 #include <crypto/sha2.h> 16 #include <linux/device.h> 17 #include <linux/dma-mapping.h> 18 19 #include "cesa.h" 20 21 struct mv_cesa_ahash_dma_iter { 22 struct mv_cesa_dma_iter base; 23 struct mv_cesa_sg_dma_iter src; 24 }; 25 26 static inline void 27 mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter, 28 struct ahash_request *req) 29 { 30 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 31 unsigned int len = req->nbytes + creq->cache_ptr; 32 33 if (!creq->last_req) 34 len &= ~CESA_HASH_BLOCK_SIZE_MSK; 35 36 mv_cesa_req_dma_iter_init(&iter->base, len); 37 mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); 38 iter->src.op_offset = creq->cache_ptr; 39 } 40 41 static inline bool 42 mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter) 43 { 44 iter->src.op_offset = 0; 45 46 return mv_cesa_req_dma_iter_next_op(&iter->base); 47 } 48 49 static inline int 50 mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags) 51 { 52 req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags, 53 &req->cache_dma); 54 if (!req->cache) 55 return -ENOMEM; 56 57 return 0; 58 } 59 60 static inline void 61 mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req) 62 { 63 if (!req->cache) 64 return; 65 66 dma_pool_free(cesa_dev->dma->cache_pool, req->cache, 67 req->cache_dma); 68 } 69 70 static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req, 71 gfp_t flags) 72 { 73 if (req->padding) 74 return 0; 75 76 req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags, 77 &req->padding_dma); 78 if (!req->padding) 79 return -ENOMEM; 80 81 return 0; 82 } 83 84 static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req) 85 { 86 if (!req->padding) 87 return; 88 89 dma_pool_free(cesa_dev->dma->padding_pool, req->padding, 90 req->padding_dma); 91 req->padding = NULL; 92 } 93 94 static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req) 95 { 96 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 97 98 mv_cesa_ahash_dma_free_padding(&creq->req.dma); 99 } 100 101 static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req) 102 { 103 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 104 105 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); 106 mv_cesa_ahash_dma_free_cache(&creq->req.dma); 107 mv_cesa_dma_cleanup(&creq->base); 108 } 109 110 static inline void mv_cesa_ahash_cleanup(struct ahash_request *req) 111 { 112 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 113 114 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) 115 mv_cesa_ahash_dma_cleanup(req); 116 } 117 118 static void mv_cesa_ahash_last_cleanup(struct ahash_request *req) 119 { 120 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 121 122 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) 123 mv_cesa_ahash_dma_last_cleanup(req); 124 } 125 126 static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq) 127 { 128 unsigned int index, padlen; 129 130 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK; 131 padlen = (index < 56) ? (56 - index) : (64 + 56 - index); 132 133 return padlen; 134 } 135 136 static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf) 137 { 138 unsigned int padlen; 139 140 buf[0] = 0x80; 141 /* Pad out to 56 mod 64 */ 142 padlen = mv_cesa_ahash_pad_len(creq); 143 memset(buf + 1, 0, padlen - 1); 144 145 if (creq->algo_le) { 146 __le64 bits = cpu_to_le64(creq->len << 3); 147 148 memcpy(buf + padlen, &bits, sizeof(bits)); 149 } else { 150 __be64 bits = cpu_to_be64(creq->len << 3); 151 152 memcpy(buf + padlen, &bits, sizeof(bits)); 153 } 154 155 return padlen + 8; 156 } 157 158 static void mv_cesa_ahash_std_step(struct ahash_request *req) 159 { 160 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 161 struct mv_cesa_ahash_std_req *sreq = &creq->req.std; 162 struct mv_cesa_engine *engine = creq->base.engine; 163 struct mv_cesa_op_ctx *op; 164 unsigned int new_cache_ptr = 0; 165 u32 frag_mode; 166 size_t len; 167 unsigned int digsize; 168 int i; 169 170 mv_cesa_adjust_op(engine, &creq->op_tmpl); 171 memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl)); 172 173 if (!sreq->offset) { 174 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); 175 for (i = 0; i < digsize / 4; i++) 176 writel_relaxed(creq->state[i], 177 engine->regs + CESA_IVDIG(i)); 178 } 179 180 if (creq->cache_ptr) 181 memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET, 182 creq->cache, creq->cache_ptr); 183 184 len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset, 185 CESA_SA_SRAM_PAYLOAD_SIZE); 186 187 if (!creq->last_req) { 188 new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK; 189 len &= ~CESA_HASH_BLOCK_SIZE_MSK; 190 } 191 192 if (len - creq->cache_ptr) 193 sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents, 194 engine->sram + 195 CESA_SA_DATA_SRAM_OFFSET + 196 creq->cache_ptr, 197 len - creq->cache_ptr, 198 sreq->offset); 199 200 op = &creq->op_tmpl; 201 202 frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK; 203 204 if (creq->last_req && sreq->offset == req->nbytes && 205 creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { 206 if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG) 207 frag_mode = CESA_SA_DESC_CFG_NOT_FRAG; 208 else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG) 209 frag_mode = CESA_SA_DESC_CFG_LAST_FRAG; 210 } 211 212 if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG || 213 frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) { 214 if (len && 215 creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { 216 mv_cesa_set_mac_op_total_len(op, creq->len); 217 } else { 218 int trailerlen = mv_cesa_ahash_pad_len(creq) + 8; 219 220 if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) { 221 len &= CESA_HASH_BLOCK_SIZE_MSK; 222 new_cache_ptr = 64 - trailerlen; 223 memcpy_fromio(creq->cache, 224 engine->sram + 225 CESA_SA_DATA_SRAM_OFFSET + len, 226 new_cache_ptr); 227 } else { 228 i = mv_cesa_ahash_pad_req(creq, creq->cache); 229 len += i; 230 memcpy_toio(engine->sram + len + 231 CESA_SA_DATA_SRAM_OFFSET, 232 creq->cache, i); 233 } 234 235 if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) 236 frag_mode = CESA_SA_DESC_CFG_MID_FRAG; 237 else 238 frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG; 239 } 240 } 241 242 mv_cesa_set_mac_op_frag_len(op, len); 243 mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK); 244 245 /* FIXME: only update enc_len field */ 246 memcpy_toio(engine->sram, op, sizeof(*op)); 247 248 if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG) 249 mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG, 250 CESA_SA_DESC_CFG_FRAG_MSK); 251 252 creq->cache_ptr = new_cache_ptr; 253 254 mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); 255 writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); 256 WARN_ON(readl(engine->regs + CESA_SA_CMD) & 257 CESA_SA_CMD_EN_CESA_SA_ACCL0); 258 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); 259 } 260 261 static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status) 262 { 263 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 264 struct mv_cesa_ahash_std_req *sreq = &creq->req.std; 265 266 if (sreq->offset < (req->nbytes - creq->cache_ptr)) 267 return -EINPROGRESS; 268 269 return 0; 270 } 271 272 static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req) 273 { 274 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 275 struct mv_cesa_req *basereq = &creq->base; 276 277 mv_cesa_dma_prepare(basereq, basereq->engine); 278 } 279 280 static void mv_cesa_ahash_std_prepare(struct ahash_request *req) 281 { 282 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 283 struct mv_cesa_ahash_std_req *sreq = &creq->req.std; 284 285 sreq->offset = 0; 286 } 287 288 static void mv_cesa_ahash_dma_step(struct ahash_request *req) 289 { 290 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 291 struct mv_cesa_req *base = &creq->base; 292 293 /* We must explicitly set the digest state. */ 294 if (base->chain.first->flags & CESA_TDMA_SET_STATE) { 295 struct mv_cesa_engine *engine = base->engine; 296 int i; 297 298 /* Set the hash state in the IVDIG regs. */ 299 for (i = 0; i < ARRAY_SIZE(creq->state); i++) 300 writel_relaxed(creq->state[i], engine->regs + 301 CESA_IVDIG(i)); 302 } 303 304 mv_cesa_dma_step(base); 305 } 306 307 static void mv_cesa_ahash_step(struct crypto_async_request *req) 308 { 309 struct ahash_request *ahashreq = ahash_request_cast(req); 310 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); 311 312 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) 313 mv_cesa_ahash_dma_step(ahashreq); 314 else 315 mv_cesa_ahash_std_step(ahashreq); 316 } 317 318 static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) 319 { 320 struct ahash_request *ahashreq = ahash_request_cast(req); 321 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); 322 323 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) 324 return mv_cesa_dma_process(&creq->base, status); 325 326 return mv_cesa_ahash_std_process(ahashreq, status); 327 } 328 329 static void mv_cesa_ahash_complete(struct crypto_async_request *req) 330 { 331 struct ahash_request *ahashreq = ahash_request_cast(req); 332 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); 333 struct mv_cesa_engine *engine = creq->base.engine; 334 unsigned int digsize; 335 int i; 336 337 digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); 338 339 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ && 340 (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == 341 CESA_TDMA_RESULT) { 342 __le32 *data = NULL; 343 344 /* 345 * Result is already in the correct endianness when the SA is 346 * used 347 */ 348 data = creq->base.chain.last->op->ctx.hash.hash; 349 for (i = 0; i < digsize / 4; i++) 350 creq->state[i] = le32_to_cpu(data[i]); 351 352 memcpy(ahashreq->result, data, digsize); 353 } else { 354 for (i = 0; i < digsize / 4; i++) 355 creq->state[i] = readl_relaxed(engine->regs + 356 CESA_IVDIG(i)); 357 if (creq->last_req) { 358 /* 359 * Hardware's MD5 digest is in little endian format, but 360 * SHA in big endian format 361 */ 362 if (creq->algo_le) { 363 __le32 *result = (void *)ahashreq->result; 364 365 for (i = 0; i < digsize / 4; i++) 366 result[i] = cpu_to_le32(creq->state[i]); 367 } else { 368 __be32 *result = (void *)ahashreq->result; 369 370 for (i = 0; i < digsize / 4; i++) 371 result[i] = cpu_to_be32(creq->state[i]); 372 } 373 } 374 } 375 376 atomic_sub(ahashreq->nbytes, &engine->load); 377 } 378 379 static void mv_cesa_ahash_prepare(struct crypto_async_request *req, 380 struct mv_cesa_engine *engine) 381 { 382 struct ahash_request *ahashreq = ahash_request_cast(req); 383 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); 384 385 creq->base.engine = engine; 386 387 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) 388 mv_cesa_ahash_dma_prepare(ahashreq); 389 else 390 mv_cesa_ahash_std_prepare(ahashreq); 391 } 392 393 static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) 394 { 395 struct ahash_request *ahashreq = ahash_request_cast(req); 396 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); 397 398 if (creq->last_req) 399 mv_cesa_ahash_last_cleanup(ahashreq); 400 401 mv_cesa_ahash_cleanup(ahashreq); 402 403 if (creq->cache_ptr) 404 sg_pcopy_to_buffer(ahashreq->src, creq->src_nents, 405 creq->cache, 406 creq->cache_ptr, 407 ahashreq->nbytes - creq->cache_ptr); 408 } 409 410 static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = { 411 .step = mv_cesa_ahash_step, 412 .process = mv_cesa_ahash_process, 413 .cleanup = mv_cesa_ahash_req_cleanup, 414 .complete = mv_cesa_ahash_complete, 415 }; 416 417 static void mv_cesa_ahash_init(struct ahash_request *req, 418 struct mv_cesa_op_ctx *tmpl, bool algo_le) 419 { 420 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 421 422 memset(creq, 0, sizeof(*creq)); 423 mv_cesa_update_op_cfg(tmpl, 424 CESA_SA_DESC_CFG_OP_MAC_ONLY | 425 CESA_SA_DESC_CFG_FIRST_FRAG, 426 CESA_SA_DESC_CFG_OP_MSK | 427 CESA_SA_DESC_CFG_FRAG_MSK); 428 mv_cesa_set_mac_op_total_len(tmpl, 0); 429 mv_cesa_set_mac_op_frag_len(tmpl, 0); 430 creq->op_tmpl = *tmpl; 431 creq->len = 0; 432 creq->algo_le = algo_le; 433 } 434 435 static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm) 436 { 437 struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm); 438 439 ctx->base.ops = &mv_cesa_ahash_req_ops; 440 441 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 442 sizeof(struct mv_cesa_ahash_req)); 443 return 0; 444 } 445 446 static bool mv_cesa_ahash_cache_req(struct ahash_request *req) 447 { 448 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 449 bool cached = false; 450 451 if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && 452 !creq->last_req) { 453 cached = true; 454 455 if (!req->nbytes) 456 return cached; 457 458 sg_pcopy_to_buffer(req->src, creq->src_nents, 459 creq->cache + creq->cache_ptr, 460 req->nbytes, 0); 461 462 creq->cache_ptr += req->nbytes; 463 } 464 465 return cached; 466 } 467 468 static struct mv_cesa_op_ctx * 469 mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain, 470 struct mv_cesa_op_ctx *tmpl, unsigned int frag_len, 471 gfp_t flags) 472 { 473 struct mv_cesa_op_ctx *op; 474 int ret; 475 476 op = mv_cesa_dma_add_op(chain, tmpl, false, flags); 477 if (IS_ERR(op)) 478 return op; 479 480 /* Set the operation block fragment length. */ 481 mv_cesa_set_mac_op_frag_len(op, frag_len); 482 483 /* Append dummy desc to launch operation */ 484 ret = mv_cesa_dma_add_dummy_launch(chain, flags); 485 if (ret) 486 return ERR_PTR(ret); 487 488 if (mv_cesa_mac_op_is_first_frag(tmpl)) 489 mv_cesa_update_op_cfg(tmpl, 490 CESA_SA_DESC_CFG_MID_FRAG, 491 CESA_SA_DESC_CFG_FRAG_MSK); 492 493 return op; 494 } 495 496 static int 497 mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain, 498 struct mv_cesa_ahash_req *creq, 499 gfp_t flags) 500 { 501 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; 502 int ret; 503 504 if (!creq->cache_ptr) 505 return 0; 506 507 ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags); 508 if (ret) 509 return ret; 510 511 memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr); 512 513 return mv_cesa_dma_add_data_transfer(chain, 514 CESA_SA_DATA_SRAM_OFFSET, 515 ahashdreq->cache_dma, 516 creq->cache_ptr, 517 CESA_TDMA_DST_IN_SRAM, 518 flags); 519 } 520 521 static struct mv_cesa_op_ctx * 522 mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain, 523 struct mv_cesa_ahash_dma_iter *dma_iter, 524 struct mv_cesa_ahash_req *creq, 525 unsigned int frag_len, gfp_t flags) 526 { 527 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; 528 unsigned int len, trailerlen, padoff = 0; 529 struct mv_cesa_op_ctx *op; 530 int ret; 531 532 /* 533 * If the transfer is smaller than our maximum length, and we have 534 * some data outstanding, we can ask the engine to finish the hash. 535 */ 536 if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) { 537 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len, 538 flags); 539 if (IS_ERR(op)) 540 return op; 541 542 mv_cesa_set_mac_op_total_len(op, creq->len); 543 mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ? 544 CESA_SA_DESC_CFG_NOT_FRAG : 545 CESA_SA_DESC_CFG_LAST_FRAG, 546 CESA_SA_DESC_CFG_FRAG_MSK); 547 548 ret = mv_cesa_dma_add_result_op(chain, 549 CESA_SA_CFG_SRAM_OFFSET, 550 CESA_SA_DATA_SRAM_OFFSET, 551 CESA_TDMA_SRC_IN_SRAM, flags); 552 if (ret) 553 return ERR_PTR(-ENOMEM); 554 return op; 555 } 556 557 /* 558 * The request is longer than the engine can handle, or we have 559 * no data outstanding. Manually generate the padding, adding it 560 * as a "mid" fragment. 561 */ 562 ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags); 563 if (ret) 564 return ERR_PTR(ret); 565 566 trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding); 567 568 len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen); 569 if (len) { 570 ret = mv_cesa_dma_add_data_transfer(chain, 571 CESA_SA_DATA_SRAM_OFFSET + 572 frag_len, 573 ahashdreq->padding_dma, 574 len, CESA_TDMA_DST_IN_SRAM, 575 flags); 576 if (ret) 577 return ERR_PTR(ret); 578 579 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len, 580 flags); 581 if (IS_ERR(op)) 582 return op; 583 584 if (len == trailerlen) 585 return op; 586 587 padoff += len; 588 } 589 590 ret = mv_cesa_dma_add_data_transfer(chain, 591 CESA_SA_DATA_SRAM_OFFSET, 592 ahashdreq->padding_dma + 593 padoff, 594 trailerlen - padoff, 595 CESA_TDMA_DST_IN_SRAM, 596 flags); 597 if (ret) 598 return ERR_PTR(ret); 599 600 return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff, 601 flags); 602 } 603 604 static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) 605 { 606 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 607 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 608 GFP_KERNEL : GFP_ATOMIC; 609 struct mv_cesa_req *basereq = &creq->base; 610 struct mv_cesa_ahash_dma_iter iter; 611 struct mv_cesa_op_ctx *op = NULL; 612 unsigned int frag_len; 613 bool set_state = false; 614 int ret; 615 u32 type; 616 617 basereq->chain.first = NULL; 618 basereq->chain.last = NULL; 619 620 if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl)) 621 set_state = true; 622 623 if (creq->src_nents) { 624 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, 625 DMA_TO_DEVICE); 626 if (!ret) { 627 ret = -ENOMEM; 628 goto err; 629 } 630 } 631 632 mv_cesa_tdma_desc_iter_init(&basereq->chain); 633 mv_cesa_ahash_req_iter_init(&iter, req); 634 635 /* 636 * Add the cache (left-over data from a previous block) first. 637 * This will never overflow the SRAM size. 638 */ 639 ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags); 640 if (ret) 641 goto err_free_tdma; 642 643 if (iter.src.sg) { 644 /* 645 * Add all the new data, inserting an operation block and 646 * launch command between each full SRAM block-worth of 647 * data. We intentionally do not add the final op block. 648 */ 649 while (true) { 650 ret = mv_cesa_dma_add_op_transfers(&basereq->chain, 651 &iter.base, 652 &iter.src, flags); 653 if (ret) 654 goto err_free_tdma; 655 656 frag_len = iter.base.op_len; 657 658 if (!mv_cesa_ahash_req_iter_next_op(&iter)) 659 break; 660 661 op = mv_cesa_dma_add_frag(&basereq->chain, 662 &creq->op_tmpl, 663 frag_len, flags); 664 if (IS_ERR(op)) { 665 ret = PTR_ERR(op); 666 goto err_free_tdma; 667 } 668 } 669 } else { 670 /* Account for the data that was in the cache. */ 671 frag_len = iter.base.op_len; 672 } 673 674 /* 675 * At this point, frag_len indicates whether we have any data 676 * outstanding which needs an operation. Queue up the final 677 * operation, which depends whether this is the final request. 678 */ 679 if (creq->last_req) 680 op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq, 681 frag_len, flags); 682 else if (frag_len) 683 op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl, 684 frag_len, flags); 685 686 if (IS_ERR(op)) { 687 ret = PTR_ERR(op); 688 goto err_free_tdma; 689 } 690 691 /* 692 * If results are copied via DMA, this means that this 693 * request can be directly processed by the engine, 694 * without partial updates. So we can chain it at the 695 * DMA level with other requests. 696 */ 697 type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK; 698 699 if (op && type != CESA_TDMA_RESULT) { 700 /* Add dummy desc to wait for crypto operation end */ 701 ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags); 702 if (ret) 703 goto err_free_tdma; 704 } 705 706 if (!creq->last_req) 707 creq->cache_ptr = req->nbytes + creq->cache_ptr - 708 iter.base.len; 709 else 710 creq->cache_ptr = 0; 711 712 basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ; 713 714 if (type != CESA_TDMA_RESULT) 715 basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN; 716 717 if (set_state) { 718 /* 719 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to 720 * let the step logic know that the IVDIG registers should be 721 * explicitly set before launching a TDMA chain. 722 */ 723 basereq->chain.first->flags |= CESA_TDMA_SET_STATE; 724 } 725 726 return 0; 727 728 err_free_tdma: 729 mv_cesa_dma_cleanup(basereq); 730 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); 731 732 err: 733 mv_cesa_ahash_last_cleanup(req); 734 735 return ret; 736 } 737 738 static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) 739 { 740 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 741 742 creq->src_nents = sg_nents_for_len(req->src, req->nbytes); 743 if (creq->src_nents < 0) { 744 dev_err(cesa_dev->dev, "Invalid number of src SG"); 745 return creq->src_nents; 746 } 747 748 *cached = mv_cesa_ahash_cache_req(req); 749 750 if (*cached) 751 return 0; 752 753 if (cesa_dev->caps->has_tdma) 754 return mv_cesa_ahash_dma_req_init(req); 755 else 756 return 0; 757 } 758 759 static int mv_cesa_ahash_queue_req(struct ahash_request *req) 760 { 761 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 762 struct mv_cesa_engine *engine; 763 bool cached = false; 764 int ret; 765 766 ret = mv_cesa_ahash_req_init(req, &cached); 767 if (ret) 768 return ret; 769 770 if (cached) 771 return 0; 772 773 engine = mv_cesa_select_engine(req->nbytes); 774 mv_cesa_ahash_prepare(&req->base, engine); 775 776 ret = mv_cesa_queue_req(&req->base, &creq->base); 777 778 if (mv_cesa_req_needs_cleanup(&req->base, ret)) 779 mv_cesa_ahash_cleanup(req); 780 781 return ret; 782 } 783 784 static int mv_cesa_ahash_update(struct ahash_request *req) 785 { 786 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 787 788 creq->len += req->nbytes; 789 790 return mv_cesa_ahash_queue_req(req); 791 } 792 793 static int mv_cesa_ahash_final(struct ahash_request *req) 794 { 795 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 796 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; 797 798 mv_cesa_set_mac_op_total_len(tmpl, creq->len); 799 creq->last_req = true; 800 req->nbytes = 0; 801 802 return mv_cesa_ahash_queue_req(req); 803 } 804 805 static int mv_cesa_ahash_finup(struct ahash_request *req) 806 { 807 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 808 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; 809 810 creq->len += req->nbytes; 811 mv_cesa_set_mac_op_total_len(tmpl, creq->len); 812 creq->last_req = true; 813 814 return mv_cesa_ahash_queue_req(req); 815 } 816 817 static int mv_cesa_ahash_export(struct ahash_request *req, void *hash, 818 u64 *len, void *cache) 819 { 820 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 821 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 822 unsigned int digsize = crypto_ahash_digestsize(ahash); 823 unsigned int blocksize; 824 825 blocksize = crypto_ahash_blocksize(ahash); 826 827 *len = creq->len; 828 memcpy(hash, creq->state, digsize); 829 memset(cache, 0, blocksize); 830 memcpy(cache, creq->cache, creq->cache_ptr); 831 832 return 0; 833 } 834 835 static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash, 836 u64 len, const void *cache) 837 { 838 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 839 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 840 unsigned int digsize = crypto_ahash_digestsize(ahash); 841 unsigned int blocksize; 842 unsigned int cache_ptr; 843 int ret; 844 845 ret = crypto_ahash_init(req); 846 if (ret) 847 return ret; 848 849 blocksize = crypto_ahash_blocksize(ahash); 850 if (len >= blocksize) 851 mv_cesa_update_op_cfg(&creq->op_tmpl, 852 CESA_SA_DESC_CFG_MID_FRAG, 853 CESA_SA_DESC_CFG_FRAG_MSK); 854 855 creq->len = len; 856 memcpy(creq->state, hash, digsize); 857 creq->cache_ptr = 0; 858 859 cache_ptr = do_div(len, blocksize); 860 if (!cache_ptr) 861 return 0; 862 863 memcpy(creq->cache, cache, cache_ptr); 864 creq->cache_ptr = cache_ptr; 865 866 return 0; 867 } 868 869 static int mv_cesa_md5_init(struct ahash_request *req) 870 { 871 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 872 struct mv_cesa_op_ctx tmpl = { }; 873 874 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5); 875 876 mv_cesa_ahash_init(req, &tmpl, true); 877 878 creq->state[0] = MD5_H0; 879 creq->state[1] = MD5_H1; 880 creq->state[2] = MD5_H2; 881 creq->state[3] = MD5_H3; 882 883 return 0; 884 } 885 886 static int mv_cesa_md5_export(struct ahash_request *req, void *out) 887 { 888 struct md5_state *out_state = out; 889 890 return mv_cesa_ahash_export(req, out_state->hash, 891 &out_state->byte_count, out_state->block); 892 } 893 894 static int mv_cesa_md5_import(struct ahash_request *req, const void *in) 895 { 896 const struct md5_state *in_state = in; 897 898 return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count, 899 in_state->block); 900 } 901 902 static int mv_cesa_md5_digest(struct ahash_request *req) 903 { 904 int ret; 905 906 ret = mv_cesa_md5_init(req); 907 if (ret) 908 return ret; 909 910 return mv_cesa_ahash_finup(req); 911 } 912 913 struct ahash_alg mv_md5_alg = { 914 .init = mv_cesa_md5_init, 915 .update = mv_cesa_ahash_update, 916 .final = mv_cesa_ahash_final, 917 .finup = mv_cesa_ahash_finup, 918 .digest = mv_cesa_md5_digest, 919 .export = mv_cesa_md5_export, 920 .import = mv_cesa_md5_import, 921 .halg = { 922 .digestsize = MD5_DIGEST_SIZE, 923 .statesize = sizeof(struct md5_state), 924 .base = { 925 .cra_name = "md5", 926 .cra_driver_name = "mv-md5", 927 .cra_priority = 300, 928 .cra_flags = CRYPTO_ALG_ASYNC | 929 CRYPTO_ALG_ALLOCATES_MEMORY | 930 CRYPTO_ALG_KERN_DRIVER_ONLY, 931 .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 932 .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), 933 .cra_init = mv_cesa_ahash_cra_init, 934 .cra_module = THIS_MODULE, 935 } 936 } 937 }; 938 939 static int mv_cesa_sha1_init(struct ahash_request *req) 940 { 941 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 942 struct mv_cesa_op_ctx tmpl = { }; 943 944 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1); 945 946 mv_cesa_ahash_init(req, &tmpl, false); 947 948 creq->state[0] = SHA1_H0; 949 creq->state[1] = SHA1_H1; 950 creq->state[2] = SHA1_H2; 951 creq->state[3] = SHA1_H3; 952 creq->state[4] = SHA1_H4; 953 954 return 0; 955 } 956 957 static int mv_cesa_sha1_export(struct ahash_request *req, void *out) 958 { 959 struct sha1_state *out_state = out; 960 961 return mv_cesa_ahash_export(req, out_state->state, &out_state->count, 962 out_state->buffer); 963 } 964 965 static int mv_cesa_sha1_import(struct ahash_request *req, const void *in) 966 { 967 const struct sha1_state *in_state = in; 968 969 return mv_cesa_ahash_import(req, in_state->state, in_state->count, 970 in_state->buffer); 971 } 972 973 static int mv_cesa_sha1_digest(struct ahash_request *req) 974 { 975 int ret; 976 977 ret = mv_cesa_sha1_init(req); 978 if (ret) 979 return ret; 980 981 return mv_cesa_ahash_finup(req); 982 } 983 984 struct ahash_alg mv_sha1_alg = { 985 .init = mv_cesa_sha1_init, 986 .update = mv_cesa_ahash_update, 987 .final = mv_cesa_ahash_final, 988 .finup = mv_cesa_ahash_finup, 989 .digest = mv_cesa_sha1_digest, 990 .export = mv_cesa_sha1_export, 991 .import = mv_cesa_sha1_import, 992 .halg = { 993 .digestsize = SHA1_DIGEST_SIZE, 994 .statesize = sizeof(struct sha1_state), 995 .base = { 996 .cra_name = "sha1", 997 .cra_driver_name = "mv-sha1", 998 .cra_priority = 300, 999 .cra_flags = CRYPTO_ALG_ASYNC | 1000 CRYPTO_ALG_ALLOCATES_MEMORY | 1001 CRYPTO_ALG_KERN_DRIVER_ONLY, 1002 .cra_blocksize = SHA1_BLOCK_SIZE, 1003 .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), 1004 .cra_init = mv_cesa_ahash_cra_init, 1005 .cra_module = THIS_MODULE, 1006 } 1007 } 1008 }; 1009 1010 static int mv_cesa_sha256_init(struct ahash_request *req) 1011 { 1012 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 1013 struct mv_cesa_op_ctx tmpl = { }; 1014 1015 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256); 1016 1017 mv_cesa_ahash_init(req, &tmpl, false); 1018 1019 creq->state[0] = SHA256_H0; 1020 creq->state[1] = SHA256_H1; 1021 creq->state[2] = SHA256_H2; 1022 creq->state[3] = SHA256_H3; 1023 creq->state[4] = SHA256_H4; 1024 creq->state[5] = SHA256_H5; 1025 creq->state[6] = SHA256_H6; 1026 creq->state[7] = SHA256_H7; 1027 1028 return 0; 1029 } 1030 1031 static int mv_cesa_sha256_digest(struct ahash_request *req) 1032 { 1033 int ret; 1034 1035 ret = mv_cesa_sha256_init(req); 1036 if (ret) 1037 return ret; 1038 1039 return mv_cesa_ahash_finup(req); 1040 } 1041 1042 static int mv_cesa_sha256_export(struct ahash_request *req, void *out) 1043 { 1044 struct sha256_state *out_state = out; 1045 1046 return mv_cesa_ahash_export(req, out_state->state, &out_state->count, 1047 out_state->buf); 1048 } 1049 1050 static int mv_cesa_sha256_import(struct ahash_request *req, const void *in) 1051 { 1052 const struct sha256_state *in_state = in; 1053 1054 return mv_cesa_ahash_import(req, in_state->state, in_state->count, 1055 in_state->buf); 1056 } 1057 1058 struct ahash_alg mv_sha256_alg = { 1059 .init = mv_cesa_sha256_init, 1060 .update = mv_cesa_ahash_update, 1061 .final = mv_cesa_ahash_final, 1062 .finup = mv_cesa_ahash_finup, 1063 .digest = mv_cesa_sha256_digest, 1064 .export = mv_cesa_sha256_export, 1065 .import = mv_cesa_sha256_import, 1066 .halg = { 1067 .digestsize = SHA256_DIGEST_SIZE, 1068 .statesize = sizeof(struct sha256_state), 1069 .base = { 1070 .cra_name = "sha256", 1071 .cra_driver_name = "mv-sha256", 1072 .cra_priority = 300, 1073 .cra_flags = CRYPTO_ALG_ASYNC | 1074 CRYPTO_ALG_ALLOCATES_MEMORY | 1075 CRYPTO_ALG_KERN_DRIVER_ONLY, 1076 .cra_blocksize = SHA256_BLOCK_SIZE, 1077 .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), 1078 .cra_init = mv_cesa_ahash_cra_init, 1079 .cra_module = THIS_MODULE, 1080 } 1081 } 1082 }; 1083 1084 struct mv_cesa_ahash_result { 1085 struct completion completion; 1086 int error; 1087 }; 1088 1089 static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req, 1090 int error) 1091 { 1092 struct mv_cesa_ahash_result *result = req->data; 1093 1094 if (error == -EINPROGRESS) 1095 return; 1096 1097 result->error = error; 1098 complete(&result->completion); 1099 } 1100 1101 static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad, 1102 void *state, unsigned int blocksize) 1103 { 1104 struct mv_cesa_ahash_result result; 1105 struct scatterlist sg; 1106 int ret; 1107 1108 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 1109 mv_cesa_hmac_ahash_complete, &result); 1110 sg_init_one(&sg, pad, blocksize); 1111 ahash_request_set_crypt(req, &sg, pad, blocksize); 1112 init_completion(&result.completion); 1113 1114 ret = crypto_ahash_init(req); 1115 if (ret) 1116 return ret; 1117 1118 ret = crypto_ahash_update(req); 1119 if (ret && ret != -EINPROGRESS) 1120 return ret; 1121 1122 wait_for_completion_interruptible(&result.completion); 1123 if (result.error) 1124 return result.error; 1125 1126 ret = crypto_ahash_export(req, state); 1127 if (ret) 1128 return ret; 1129 1130 return 0; 1131 } 1132 1133 static int mv_cesa_ahmac_pad_init(struct ahash_request *req, 1134 const u8 *key, unsigned int keylen, 1135 u8 *ipad, u8 *opad, 1136 unsigned int blocksize) 1137 { 1138 struct mv_cesa_ahash_result result; 1139 struct scatterlist sg; 1140 int ret; 1141 int i; 1142 1143 if (keylen <= blocksize) { 1144 memcpy(ipad, key, keylen); 1145 } else { 1146 u8 *keydup = kmemdup(key, keylen, GFP_KERNEL); 1147 1148 if (!keydup) 1149 return -ENOMEM; 1150 1151 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 1152 mv_cesa_hmac_ahash_complete, 1153 &result); 1154 sg_init_one(&sg, keydup, keylen); 1155 ahash_request_set_crypt(req, &sg, ipad, keylen); 1156 init_completion(&result.completion); 1157 1158 ret = crypto_ahash_digest(req); 1159 if (ret == -EINPROGRESS) { 1160 wait_for_completion_interruptible(&result.completion); 1161 ret = result.error; 1162 } 1163 1164 /* Set the memory region to 0 to avoid any leak. */ 1165 kfree_sensitive(keydup); 1166 1167 if (ret) 1168 return ret; 1169 1170 keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); 1171 } 1172 1173 memset(ipad + keylen, 0, blocksize - keylen); 1174 memcpy(opad, ipad, blocksize); 1175 1176 for (i = 0; i < blocksize; i++) { 1177 ipad[i] ^= HMAC_IPAD_VALUE; 1178 opad[i] ^= HMAC_OPAD_VALUE; 1179 } 1180 1181 return 0; 1182 } 1183 1184 static int mv_cesa_ahmac_setkey(const char *hash_alg_name, 1185 const u8 *key, unsigned int keylen, 1186 void *istate, void *ostate) 1187 { 1188 struct ahash_request *req; 1189 struct crypto_ahash *tfm; 1190 unsigned int blocksize; 1191 u8 *ipad = NULL; 1192 u8 *opad; 1193 int ret; 1194 1195 tfm = crypto_alloc_ahash(hash_alg_name, 0, 0); 1196 if (IS_ERR(tfm)) 1197 return PTR_ERR(tfm); 1198 1199 req = ahash_request_alloc(tfm, GFP_KERNEL); 1200 if (!req) { 1201 ret = -ENOMEM; 1202 goto free_ahash; 1203 } 1204 1205 crypto_ahash_clear_flags(tfm, ~0); 1206 1207 blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 1208 1209 ipad = kcalloc(2, blocksize, GFP_KERNEL); 1210 if (!ipad) { 1211 ret = -ENOMEM; 1212 goto free_req; 1213 } 1214 1215 opad = ipad + blocksize; 1216 1217 ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize); 1218 if (ret) 1219 goto free_ipad; 1220 1221 ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize); 1222 if (ret) 1223 goto free_ipad; 1224 1225 ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize); 1226 1227 free_ipad: 1228 kfree(ipad); 1229 free_req: 1230 ahash_request_free(req); 1231 free_ahash: 1232 crypto_free_ahash(tfm); 1233 1234 return ret; 1235 } 1236 1237 static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm) 1238 { 1239 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm); 1240 1241 ctx->base.ops = &mv_cesa_ahash_req_ops; 1242 1243 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1244 sizeof(struct mv_cesa_ahash_req)); 1245 return 0; 1246 } 1247 1248 static int mv_cesa_ahmac_md5_init(struct ahash_request *req) 1249 { 1250 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 1251 struct mv_cesa_op_ctx tmpl = { }; 1252 1253 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5); 1254 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); 1255 1256 mv_cesa_ahash_init(req, &tmpl, true); 1257 1258 return 0; 1259 } 1260 1261 static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key, 1262 unsigned int keylen) 1263 { 1264 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 1265 struct md5_state istate, ostate; 1266 int ret, i; 1267 1268 ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate); 1269 if (ret) 1270 return ret; 1271 1272 for (i = 0; i < ARRAY_SIZE(istate.hash); i++) 1273 ctx->iv[i] = cpu_to_be32(istate.hash[i]); 1274 1275 for (i = 0; i < ARRAY_SIZE(ostate.hash); i++) 1276 ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]); 1277 1278 return 0; 1279 } 1280 1281 static int mv_cesa_ahmac_md5_digest(struct ahash_request *req) 1282 { 1283 int ret; 1284 1285 ret = mv_cesa_ahmac_md5_init(req); 1286 if (ret) 1287 return ret; 1288 1289 return mv_cesa_ahash_finup(req); 1290 } 1291 1292 struct ahash_alg mv_ahmac_md5_alg = { 1293 .init = mv_cesa_ahmac_md5_init, 1294 .update = mv_cesa_ahash_update, 1295 .final = mv_cesa_ahash_final, 1296 .finup = mv_cesa_ahash_finup, 1297 .digest = mv_cesa_ahmac_md5_digest, 1298 .setkey = mv_cesa_ahmac_md5_setkey, 1299 .export = mv_cesa_md5_export, 1300 .import = mv_cesa_md5_import, 1301 .halg = { 1302 .digestsize = MD5_DIGEST_SIZE, 1303 .statesize = sizeof(struct md5_state), 1304 .base = { 1305 .cra_name = "hmac(md5)", 1306 .cra_driver_name = "mv-hmac-md5", 1307 .cra_priority = 300, 1308 .cra_flags = CRYPTO_ALG_ASYNC | 1309 CRYPTO_ALG_ALLOCATES_MEMORY | 1310 CRYPTO_ALG_KERN_DRIVER_ONLY, 1311 .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 1312 .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), 1313 .cra_init = mv_cesa_ahmac_cra_init, 1314 .cra_module = THIS_MODULE, 1315 } 1316 } 1317 }; 1318 1319 static int mv_cesa_ahmac_sha1_init(struct ahash_request *req) 1320 { 1321 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 1322 struct mv_cesa_op_ctx tmpl = { }; 1323 1324 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1); 1325 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); 1326 1327 mv_cesa_ahash_init(req, &tmpl, false); 1328 1329 return 0; 1330 } 1331 1332 static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, 1333 unsigned int keylen) 1334 { 1335 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 1336 struct sha1_state istate, ostate; 1337 int ret, i; 1338 1339 ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate); 1340 if (ret) 1341 return ret; 1342 1343 for (i = 0; i < ARRAY_SIZE(istate.state); i++) 1344 ctx->iv[i] = cpu_to_be32(istate.state[i]); 1345 1346 for (i = 0; i < ARRAY_SIZE(ostate.state); i++) 1347 ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]); 1348 1349 return 0; 1350 } 1351 1352 static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req) 1353 { 1354 int ret; 1355 1356 ret = mv_cesa_ahmac_sha1_init(req); 1357 if (ret) 1358 return ret; 1359 1360 return mv_cesa_ahash_finup(req); 1361 } 1362 1363 struct ahash_alg mv_ahmac_sha1_alg = { 1364 .init = mv_cesa_ahmac_sha1_init, 1365 .update = mv_cesa_ahash_update, 1366 .final = mv_cesa_ahash_final, 1367 .finup = mv_cesa_ahash_finup, 1368 .digest = mv_cesa_ahmac_sha1_digest, 1369 .setkey = mv_cesa_ahmac_sha1_setkey, 1370 .export = mv_cesa_sha1_export, 1371 .import = mv_cesa_sha1_import, 1372 .halg = { 1373 .digestsize = SHA1_DIGEST_SIZE, 1374 .statesize = sizeof(struct sha1_state), 1375 .base = { 1376 .cra_name = "hmac(sha1)", 1377 .cra_driver_name = "mv-hmac-sha1", 1378 .cra_priority = 300, 1379 .cra_flags = CRYPTO_ALG_ASYNC | 1380 CRYPTO_ALG_ALLOCATES_MEMORY | 1381 CRYPTO_ALG_KERN_DRIVER_ONLY, 1382 .cra_blocksize = SHA1_BLOCK_SIZE, 1383 .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), 1384 .cra_init = mv_cesa_ahmac_cra_init, 1385 .cra_module = THIS_MODULE, 1386 } 1387 } 1388 }; 1389 1390 static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, 1391 unsigned int keylen) 1392 { 1393 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 1394 struct sha256_state istate, ostate; 1395 int ret, i; 1396 1397 ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate); 1398 if (ret) 1399 return ret; 1400 1401 for (i = 0; i < ARRAY_SIZE(istate.state); i++) 1402 ctx->iv[i] = cpu_to_be32(istate.state[i]); 1403 1404 for (i = 0; i < ARRAY_SIZE(ostate.state); i++) 1405 ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]); 1406 1407 return 0; 1408 } 1409 1410 static int mv_cesa_ahmac_sha256_init(struct ahash_request *req) 1411 { 1412 struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 1413 struct mv_cesa_op_ctx tmpl = { }; 1414 1415 mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256); 1416 memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); 1417 1418 mv_cesa_ahash_init(req, &tmpl, false); 1419 1420 return 0; 1421 } 1422 1423 static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req) 1424 { 1425 int ret; 1426 1427 ret = mv_cesa_ahmac_sha256_init(req); 1428 if (ret) 1429 return ret; 1430 1431 return mv_cesa_ahash_finup(req); 1432 } 1433 1434 struct ahash_alg mv_ahmac_sha256_alg = { 1435 .init = mv_cesa_ahmac_sha256_init, 1436 .update = mv_cesa_ahash_update, 1437 .final = mv_cesa_ahash_final, 1438 .finup = mv_cesa_ahash_finup, 1439 .digest = mv_cesa_ahmac_sha256_digest, 1440 .setkey = mv_cesa_ahmac_sha256_setkey, 1441 .export = mv_cesa_sha256_export, 1442 .import = mv_cesa_sha256_import, 1443 .halg = { 1444 .digestsize = SHA256_DIGEST_SIZE, 1445 .statesize = sizeof(struct sha256_state), 1446 .base = { 1447 .cra_name = "hmac(sha256)", 1448 .cra_driver_name = "mv-hmac-sha256", 1449 .cra_priority = 300, 1450 .cra_flags = CRYPTO_ALG_ASYNC | 1451 CRYPTO_ALG_ALLOCATES_MEMORY | 1452 CRYPTO_ALG_KERN_DRIVER_ONLY, 1453 .cra_blocksize = SHA256_BLOCK_SIZE, 1454 .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), 1455 .cra_init = mv_cesa_ahmac_cra_init, 1456 .cra_module = THIS_MODULE, 1457 } 1458 } 1459 }; 1460