// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define HASH_CR			0x00
#define HASH_DIN		0x04
#define HASH_STR		0x08
#define HASH_IMR		0x20
#define HASH_SR			0x24
#define HASH_CSR(x)		(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)		(0x310 + ((x) * 0x04))
#define HASH_HWCFGR		0x3F0
#define HASH_VER		0x3F4
#define HASH_ID			0x3F8

/* Control Register */
#define HASH_CR_INIT		BIT(2)
#define HASH_CR_DMAE		BIT(3)
#define HASH_CR_DATATYPE_POS	4
#define HASH_CR_MODE		BIT(6)
#define HASH_CR_MDMAT		BIT(13)
#define HASH_CR_DMAA		BIT(14)
#define HASH_CR_LKEY		BIT(16)

#define HASH_CR_ALGO_SHA1	0x0
#define HASH_CR_ALGO_MD5	0x80
#define HASH_CR_ALGO_SHA224	0x40000
#define HASH_CR_ALGO_SHA256	0x40080

/* Interrupt */
#define HASH_DINIE		BIT(0)
#define HASH_DCIE		BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	53

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
#define HASH_STR_DCAL			BIT(8)

#define HASH_FLAGS_INIT			BIT(0)
#define HASH_FLAGS_OUTPUT_READY		BIT(1)
#define HASH_FLAGS_CPU			BIT(2)
#define HASH_FLAGS_DMA_READY		BIT(3)
#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
#define HASH_FLAGS_HMAC_INIT		BIT(5)
#define HASH_FLAGS_HMAC_FINAL		BIT(6)
#define HASH_FLAGS_HMAC_KEY		BIT(7)

#define HASH_FLAGS_FINAL		BIT(15)
#define HASH_FLAGS_FINUP		BIT(16)
#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
#define HASH_FLAGS_MD5			BIT(18)
#define HASH_FLAGS_SHA1			BIT(19)
#define HASH_FLAGS_SHA224		BIT(20)
#define HASH_FLAGS_SHA256		BIT(21)
#define HASH_FLAGS_ERRORS		BIT(22)
#define HASH_FLAGS_HMAC			BIT(23)

#define HASH_OP_UPDATE			1
#define HASH_OP_FINAL			2

enum stm32_hash_data_format {
	HASH_DATA_32_BITS	= 0x0,
	HASH_DATA_16_BITS	= 0x1,
	HASH_DATA_8_BITS	= 0x2,
	HASH_DATA_1_BIT		= 0x3
};

#define HASH_BUFLEN			256
#define HASH_LONG_KEY			64
#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH		16
#define HASH_DMA_THRESHOLD		50

#define HASH_AUTOSUSPEND_DELAY		50

struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};

static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
					  !(status & HASH_SR_BUSY), 10, 10000);
}
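
/*
 * NBLW (HASH_STR[4:0]) holds the number of valid bits in the last 32-bit
 * word written to HASH_DIN before DCAL is set; 0 means all 32 bits are
 * valid. This driver only ever feeds whole bytes, hence
 * NBLW = 8 * (length % 4). For example a 5-byte message ends with a
 * single valid byte in its second word, so NBLW = 8. (Behavior as
 * described in the STM32 reference manuals for the HASH IP.)
 */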
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}

static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}
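
/*
 * Program HASH_CR for a new computation: algorithm selection, data type
 * (8-bit mode so byte streams are presented to the digest core in the
 * expected order), and HMAC mode with LKEY when the key exceeds the
 * 64-byte block size. This runs once per request, guarded by
 * HASH_FLAGS_INIT so an ongoing computation is not reset.
 */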
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}
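
/*
 * Copy request data from the scatterlist into the linear bounce buffer
 * until either the buffer is full or the request is exhausted, advancing
 * rctx->sg/offset/total as it goes. Small updates accumulate here so the
 * hardware is only fed in full-buffer chunks.
 */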
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		if (count <= 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = (rctx->flags & HASH_FLAGS_FINUP);

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
					  (rctx->flags & HASH_FLAGS_FINUP));
	}

	return err;
}
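
/*
 * Queue one scatterlist entry to the DMA engine and wait for completion.
 * With mdma set, HASH_CR_MDMAT keeps the transfer open (DCAL is not
 * raised automatically at the end of the DMA transfer) so further chunks
 * can follow; it is cleared for the final chunk. The 100 ms completion
 * timeout is a driver choice, not a hardware constraint.
 */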
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (mdma)
		reg |= HASH_CR_MDMAT;
	else
		reg &= ~HASH_CR_MDMAT;

	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_timeout(&hdev->dma_completion,
					 msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}

static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	struct dma_chan *chan;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	chan = dma_request_chan(hdev->dev, "in");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	hdev->dma_lch = chan;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}
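
/*
 * Feed the whole request through DMA, one scatterlist entry at a time.
 * When dma_mode == 1 the DMA transfer length must apparently be 16-byte
 * aligned (an IP limitation inferred from this code and HASH_HWCFGR),
 * so the residue of the last entry is copied out and written back by the
 * CPU with DMAA set, before the final DCAL.
 */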
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;
	u32 *buffer = (void *)rctx->buffer;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		/* Copy the entry first: sg[] is uninitialized on entry */
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			/* Zero-pad the residue up to the next 32-bit word */
			memset(buffer + ncp, 0,
			       DIV_ROUND_UP(ncp, sizeof(u32)) * sizeof(u32)
			       - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}
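
/*
 * Per-request initialization: the algorithm is inferred from the tfm's
 * digest size, data is fed in 8-bit (byte-swapped) mode, and the HMAC
 * flag is inherited from the tfm context set up at cra_init time.
 */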
static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int err;
	int buflen = rctx->bufcnt;

	rctx->bufcnt = 0;

	if (!(rctx->flags & HASH_FLAGS_CPU))
		err = stm32_hash_dma_send(hdev);
	else
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);

	return err;
}

static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)rctx->digest;
	unsigned int i, hashsize;

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++)
		hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
						      HASH_HREG(i)));
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	pm_runtime_get_sync(hdev->dev);

	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
		hdev->err = 0;
	}

	return 0;
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}
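
/*
 * crypto_engine callbacks: prepare_req powers the IP up and resets it if
 * no computation is in progress; one_request runs the queued update or
 * final step. A return of -EINPROGRESS means completion is deferred to
 * the DMA callback or the interrupt thread, which ends the request via
 * stm32_hash_finish_req().
 */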
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	if (err != -EINPROGRESS)
		/* done task will not finish it, so do it here */
		stm32_hash_finish_req(req, err);

	return 0;
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINUP;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int err1, err2;

	rctx->flags |= HASH_FLAGS_FINUP;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

	err1 = stm32_hash_update(req);

	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() must always be called to clean up resources, even if
	 * update() failed, except in the -EINPROGRESS/-EBUSY cases
	 * handled above.
	 */
	err2 = stm32_hash_final(req);

	return err1 ?: err2;
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}
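
/*
 * Export/import suspend and resume a computation by snapshotting the
 * IMR, STR and CR registers plus the HASH_CSR context-swap registers
 * (53 of them on this IP) and replaying them later with HASH_CR_INIT
 * set. The exported state blob is the request context itself;
 * hw_context holds the register snapshot.
 */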
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg;
	unsigned int i;

	pm_runtime_get_sync(hdev->dev);

	while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
		cpu_relax();

	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
					 sizeof(u32),
					 GFP_KERNEL);
	if (!rctx->hw_context) {
		pm_runtime_mark_last_busy(hdev->dev);
		pm_runtime_put_autosuspend(hdev->dev);
		return -ENOMEM;
	}

	preg = rctx->hw_context;

	*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = rctx->hw_context;

	pm_runtime_get_sync(hdev->dev);

	stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	kfree(rctx->hw_context);

	return 0;
}

static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen <= HASH_MAX_KEY_SIZE) {
		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	} else {
		return -ENOMEM;
	}

	return 0;
}

static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;

	return 0;
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}
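
/*
 * Interrupt handling: the hard IRQ acknowledges digest completion by
 * clearing HASH_SR_OUTPUT_READY and masking HASH_IMR, then wakes the
 * thread; the threaded handler completes the current request once
 * either the CPU or DMA path has finished.
 */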
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, 0);

	return IRQ_HANDLED;
}

static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable IT */
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}
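
/*
 * Algorithm templates. statesize must be sizeof(struct
 * stm32_hash_request_ctx) because export()/import() copy the whole
 * request context; cra_alignmask = 3 presumably reflects the 32-bit
 * HASH_DIN interface.
 */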
static struct ahash_alg algs_md5_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha224_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
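
/*
 * Register every ahash provided by this variant; on failure, unregister
 * in reverse order whatever was already registered.
 */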
static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;

err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	/* Unwind the partially registered list i, then all previous lists */
	while (j--)
		crypto_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	while (i--)
		for (j = hdev->pdata->algs_info[i].size; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dev->of_node, "dma-maxburst",
				 &hdev->dma_maxburst)) {
		dev_info(dev, "dma-maxburst not specified, using 0\n");
		hdev->dma_maxburst = 0;
	}

	return 0;
}
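
/*
 * Probe order: map registers, request the IRQ, enable the clock, enable
 * runtime PM while holding a reference, optional reset, optional DMA
 * channel (-ENOENT simply disables DMA mode), then crypto engine and
 * algorithm registration. The error paths unwind in reverse.
 */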
static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
					stm32_hash_irq_thread, IRQF_ONESHOT,
					dev_name(dev), hdev);
	if (ret) {
		dev_err(dev, "Cannot grab IRQ\n");
		return ret;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk)) {
		if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) {
			dev_err(dev, "failed to get clock for hash (%ld)\n",
				PTR_ERR(hdev->clk));
		}

		return PTR_ERR(hdev->clk);
	}

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
		dev_dbg(dev, "DMA mode not available\n");
		break;
	default:
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	int ret;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	ret = pm_runtime_get_sync(hdev->dev);
	if (ret < 0)
		return ret;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(hdev->dev);
	pm_runtime_put_noidle(hdev->dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

#ifdef CONFIG_PM
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int stm32_hash_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table = stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");