1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Cryptographic API. 4 * 5 * Support for ATMEL SHA1/SHA256 HW acceleration. 6 * 7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL 8 * Author: Nicolas Royer <nicolas@eukrea.com> 9 * 10 * Some ideas are from omap-sham.c drivers. 11 */ 12 13 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/slab.h> 17 #include <linux/err.h> 18 #include <linux/clk.h> 19 #include <linux/io.h> 20 #include <linux/hw_random.h> 21 #include <linux/platform_device.h> 22 23 #include <linux/device.h> 24 #include <linux/init.h> 25 #include <linux/errno.h> 26 #include <linux/interrupt.h> 27 #include <linux/irq.h> 28 #include <linux/scatterlist.h> 29 #include <linux/dma-mapping.h> 30 #include <linux/of_device.h> 31 #include <linux/delay.h> 32 #include <linux/crypto.h> 33 #include <linux/cryptohash.h> 34 #include <crypto/scatterwalk.h> 35 #include <crypto/algapi.h> 36 #include <crypto/sha.h> 37 #include <crypto/hash.h> 38 #include <crypto/internal/hash.h> 39 #include <linux/platform_data/crypto-atmel.h> 40 #include "atmel-sha-regs.h" 41 #include "atmel-authenc.h" 42 43 #define ATMEL_SHA_PRIORITY 300 44 45 /* SHA flags */ 46 #define SHA_FLAGS_BUSY BIT(0) 47 #define SHA_FLAGS_FINAL BIT(1) 48 #define SHA_FLAGS_DMA_ACTIVE BIT(2) 49 #define SHA_FLAGS_OUTPUT_READY BIT(3) 50 #define SHA_FLAGS_INIT BIT(4) 51 #define SHA_FLAGS_CPU BIT(5) 52 #define SHA_FLAGS_DMA_READY BIT(6) 53 #define SHA_FLAGS_DUMP_REG BIT(7) 54 55 /* bits[11:8] are reserved. */ 56 57 #define SHA_FLAGS_FINUP BIT(16) 58 #define SHA_FLAGS_SG BIT(17) 59 #define SHA_FLAGS_ERROR BIT(23) 60 #define SHA_FLAGS_PAD BIT(24) 61 #define SHA_FLAGS_RESTORE BIT(25) 62 #define SHA_FLAGS_IDATAR0 BIT(26) 63 #define SHA_FLAGS_WAIT_DATARDY BIT(27) 64 65 #define SHA_OP_INIT 0 66 #define SHA_OP_UPDATE 1 67 #define SHA_OP_FINAL 2 68 #define SHA_OP_DIGEST 3 69 70 #define SHA_BUFFER_LEN (PAGE_SIZE / 16) 71 72 #define ATMEL_SHA_DMA_THRESHOLD 56 73 74 struct atmel_sha_caps { 75 bool has_dma; 76 bool has_dualbuff; 77 bool has_sha224; 78 bool has_sha_384_512; 79 bool has_uihv; 80 bool has_hmac; 81 }; 82 83 struct atmel_sha_dev; 84 85 /* 86 * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as 87 * tested by the ahash_prepare_alg() function. 88 */ 89 struct atmel_sha_reqctx { 90 struct atmel_sha_dev *dd; 91 unsigned long flags; 92 unsigned long op; 93 94 u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32)); 95 u64 digcnt[2]; 96 size_t bufcnt; 97 size_t buflen; 98 dma_addr_t dma_addr; 99 100 /* walk state */ 101 struct scatterlist *sg; 102 unsigned int offset; /* offset in current sg */ 103 unsigned int total; /* total request */ 104 105 size_t block_size; 106 size_t hash_size; 107 108 u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); 109 }; 110 111 typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *); 112 113 struct atmel_sha_ctx { 114 struct atmel_sha_dev *dd; 115 atmel_sha_fn_t start; 116 117 unsigned long flags; 118 }; 119 120 #define ATMEL_SHA_QUEUE_LENGTH 50 121 122 struct atmel_sha_dma { 123 struct dma_chan *chan; 124 struct dma_slave_config dma_conf; 125 struct scatterlist *sg; 126 int nents; 127 unsigned int last_sg_length; 128 }; 129 130 struct atmel_sha_dev { 131 struct list_head list; 132 unsigned long phys_base; 133 struct device *dev; 134 struct clk *iclk; 135 int irq; 136 void __iomem *io_base; 137 138 spinlock_t lock; 139 struct tasklet_struct done_task; 140 struct tasklet_struct queue_task; 141 142 unsigned long flags; 143 struct crypto_queue queue; 144 struct ahash_request *req; 145 bool is_async; 146 bool force_complete; 147 atmel_sha_fn_t resume; 148 atmel_sha_fn_t cpu_transfer_complete; 149 150 struct atmel_sha_dma dma_lch_in; 151 152 struct atmel_sha_caps caps; 153 154 struct scatterlist tmp; 155 156 u32 hw_version; 157 }; 158 159 struct atmel_sha_drv { 160 struct list_head dev_list; 161 spinlock_t lock; 162 }; 163 164 static struct atmel_sha_drv atmel_sha = { 165 .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list), 166 .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock), 167 }; 168 169 #ifdef VERBOSE_DEBUG 170 static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr) 171 { 172 switch (offset) { 173 case SHA_CR: 174 return "CR"; 175 176 case SHA_MR: 177 return "MR"; 178 179 case SHA_IER: 180 return "IER"; 181 182 case SHA_IDR: 183 return "IDR"; 184 185 case SHA_IMR: 186 return "IMR"; 187 188 case SHA_ISR: 189 return "ISR"; 190 191 case SHA_MSR: 192 return "MSR"; 193 194 case SHA_BCR: 195 return "BCR"; 196 197 case SHA_REG_DIN(0): 198 case SHA_REG_DIN(1): 199 case SHA_REG_DIN(2): 200 case SHA_REG_DIN(3): 201 case SHA_REG_DIN(4): 202 case SHA_REG_DIN(5): 203 case SHA_REG_DIN(6): 204 case SHA_REG_DIN(7): 205 case SHA_REG_DIN(8): 206 case SHA_REG_DIN(9): 207 case SHA_REG_DIN(10): 208 case SHA_REG_DIN(11): 209 case SHA_REG_DIN(12): 210 case SHA_REG_DIN(13): 211 case SHA_REG_DIN(14): 212 case SHA_REG_DIN(15): 213 snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2); 214 break; 215 216 case SHA_REG_DIGEST(0): 217 case SHA_REG_DIGEST(1): 218 case SHA_REG_DIGEST(2): 219 case SHA_REG_DIGEST(3): 220 case SHA_REG_DIGEST(4): 221 case SHA_REG_DIGEST(5): 222 case SHA_REG_DIGEST(6): 223 case SHA_REG_DIGEST(7): 224 case SHA_REG_DIGEST(8): 225 case SHA_REG_DIGEST(9): 226 case SHA_REG_DIGEST(10): 227 case SHA_REG_DIGEST(11): 228 case SHA_REG_DIGEST(12): 229 case SHA_REG_DIGEST(13): 230 case SHA_REG_DIGEST(14): 231 case SHA_REG_DIGEST(15): 232 if (wr) 233 snprintf(tmp, sz, "IDATAR[%u]", 234 16u + ((offset - SHA_REG_DIGEST(0)) >> 2)); 235 else 236 snprintf(tmp, sz, "ODATAR[%u]", 237 (offset - SHA_REG_DIGEST(0)) >> 2); 238 break; 239 240 case SHA_HW_VERSION: 241 return "HWVER"; 242 243 default: 244 snprintf(tmp, sz, "0x%02x", offset); 245 break; 246 } 247 248 return tmp; 249 } 250 251 #endif /* VERBOSE_DEBUG */ 252 253 static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset) 254 { 255 u32 value = readl_relaxed(dd->io_base + offset); 256 257 #ifdef VERBOSE_DEBUG 258 if (dd->flags & SHA_FLAGS_DUMP_REG) { 259 char tmp[16]; 260 261 dev_vdbg(dd->dev, "read 0x%08x from %s\n", value, 262 atmel_sha_reg_name(offset, tmp, sizeof(tmp), false)); 263 } 264 #endif /* VERBOSE_DEBUG */ 265 266 return value; 267 } 268 269 static inline void atmel_sha_write(struct atmel_sha_dev *dd, 270 u32 offset, u32 value) 271 { 272 #ifdef VERBOSE_DEBUG 273 if (dd->flags & SHA_FLAGS_DUMP_REG) { 274 char tmp[16]; 275 276 dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, 277 atmel_sha_reg_name(offset, tmp, sizeof(tmp), true)); 278 } 279 #endif /* VERBOSE_DEBUG */ 280 281 writel_relaxed(value, dd->io_base + offset); 282 } 283 284 static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err) 285 { 286 struct ahash_request *req = dd->req; 287 288 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | 289 SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY | 290 SHA_FLAGS_DUMP_REG); 291 292 clk_disable(dd->iclk); 293 294 if ((dd->is_async || dd->force_complete) && req->base.complete) 295 req->base.complete(&req->base, err); 296 297 /* handle new request */ 298 tasklet_schedule(&dd->queue_task); 299 300 return err; 301 } 302 303 static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx) 304 { 305 size_t count; 306 307 while ((ctx->bufcnt < ctx->buflen) && ctx->total) { 308 count = min(ctx->sg->length - ctx->offset, ctx->total); 309 count = min(count, ctx->buflen - ctx->bufcnt); 310 311 if (count <= 0) { 312 /* 313 * Check if count <= 0 because the buffer is full or 314 * because the sg length is 0. In the latest case, 315 * check if there is another sg in the list, a 0 length 316 * sg doesn't necessarily mean the end of the sg list. 317 */ 318 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { 319 ctx->sg = sg_next(ctx->sg); 320 continue; 321 } else { 322 break; 323 } 324 } 325 326 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, 327 ctx->offset, count, 0); 328 329 ctx->bufcnt += count; 330 ctx->offset += count; 331 ctx->total -= count; 332 333 if (ctx->offset == ctx->sg->length) { 334 ctx->sg = sg_next(ctx->sg); 335 if (ctx->sg) 336 ctx->offset = 0; 337 else 338 ctx->total = 0; 339 } 340 } 341 342 return 0; 343 } 344 345 /* 346 * The purpose of this padding is to ensure that the padded message is a 347 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512). 348 * The bit "1" is appended at the end of the message followed by 349 * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or 350 * 128 bits block (SHA384/SHA512) equals to the message length in bits 351 * is appended. 352 * 353 * For SHA1/SHA224/SHA256, padlen is calculated as followed: 354 * - if message length < 56 bytes then padlen = 56 - message length 355 * - else padlen = 64 + 56 - message length 356 * 357 * For SHA384/SHA512, padlen is calculated as followed: 358 * - if message length < 112 bytes then padlen = 112 - message length 359 * - else padlen = 128 + 112 - message length 360 */ 361 static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length) 362 { 363 unsigned int index, padlen; 364 __be64 bits[2]; 365 u64 size[2]; 366 367 size[0] = ctx->digcnt[0]; 368 size[1] = ctx->digcnt[1]; 369 370 size[0] += ctx->bufcnt; 371 if (size[0] < ctx->bufcnt) 372 size[1]++; 373 374 size[0] += length; 375 if (size[0] < length) 376 size[1]++; 377 378 bits[1] = cpu_to_be64(size[0] << 3); 379 bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61); 380 381 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 382 case SHA_FLAGS_SHA384: 383 case SHA_FLAGS_SHA512: 384 index = ctx->bufcnt & 0x7f; 385 padlen = (index < 112) ? (112 - index) : ((128+112) - index); 386 *(ctx->buffer + ctx->bufcnt) = 0x80; 387 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); 388 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); 389 ctx->bufcnt += padlen + 16; 390 ctx->flags |= SHA_FLAGS_PAD; 391 break; 392 393 default: 394 index = ctx->bufcnt & 0x3f; 395 padlen = (index < 56) ? (56 - index) : ((64+56) - index); 396 *(ctx->buffer + ctx->bufcnt) = 0x80; 397 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); 398 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); 399 ctx->bufcnt += padlen + 8; 400 ctx->flags |= SHA_FLAGS_PAD; 401 break; 402 } 403 } 404 405 static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx) 406 { 407 struct atmel_sha_dev *dd = NULL; 408 struct atmel_sha_dev *tmp; 409 410 spin_lock_bh(&atmel_sha.lock); 411 if (!tctx->dd) { 412 list_for_each_entry(tmp, &atmel_sha.dev_list, list) { 413 dd = tmp; 414 break; 415 } 416 tctx->dd = dd; 417 } else { 418 dd = tctx->dd; 419 } 420 421 spin_unlock_bh(&atmel_sha.lock); 422 423 return dd; 424 } 425 426 static int atmel_sha_init(struct ahash_request *req) 427 { 428 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 429 struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm); 430 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 431 struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx); 432 433 ctx->dd = dd; 434 435 ctx->flags = 0; 436 437 dev_dbg(dd->dev, "init: digest size: %d\n", 438 crypto_ahash_digestsize(tfm)); 439 440 switch (crypto_ahash_digestsize(tfm)) { 441 case SHA1_DIGEST_SIZE: 442 ctx->flags |= SHA_FLAGS_SHA1; 443 ctx->block_size = SHA1_BLOCK_SIZE; 444 break; 445 case SHA224_DIGEST_SIZE: 446 ctx->flags |= SHA_FLAGS_SHA224; 447 ctx->block_size = SHA224_BLOCK_SIZE; 448 break; 449 case SHA256_DIGEST_SIZE: 450 ctx->flags |= SHA_FLAGS_SHA256; 451 ctx->block_size = SHA256_BLOCK_SIZE; 452 break; 453 case SHA384_DIGEST_SIZE: 454 ctx->flags |= SHA_FLAGS_SHA384; 455 ctx->block_size = SHA384_BLOCK_SIZE; 456 break; 457 case SHA512_DIGEST_SIZE: 458 ctx->flags |= SHA_FLAGS_SHA512; 459 ctx->block_size = SHA512_BLOCK_SIZE; 460 break; 461 default: 462 return -EINVAL; 463 break; 464 } 465 466 ctx->bufcnt = 0; 467 ctx->digcnt[0] = 0; 468 ctx->digcnt[1] = 0; 469 ctx->buflen = SHA_BUFFER_LEN; 470 471 return 0; 472 } 473 474 static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma) 475 { 476 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 477 u32 valmr = SHA_MR_MODE_AUTO; 478 unsigned int i, hashsize = 0; 479 480 if (likely(dma)) { 481 if (!dd->caps.has_dma) 482 atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE); 483 valmr = SHA_MR_MODE_PDC; 484 if (dd->caps.has_dualbuff) 485 valmr |= SHA_MR_DUALBUFF; 486 } else { 487 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); 488 } 489 490 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 491 case SHA_FLAGS_SHA1: 492 valmr |= SHA_MR_ALGO_SHA1; 493 hashsize = SHA1_DIGEST_SIZE; 494 break; 495 496 case SHA_FLAGS_SHA224: 497 valmr |= SHA_MR_ALGO_SHA224; 498 hashsize = SHA256_DIGEST_SIZE; 499 break; 500 501 case SHA_FLAGS_SHA256: 502 valmr |= SHA_MR_ALGO_SHA256; 503 hashsize = SHA256_DIGEST_SIZE; 504 break; 505 506 case SHA_FLAGS_SHA384: 507 valmr |= SHA_MR_ALGO_SHA384; 508 hashsize = SHA512_DIGEST_SIZE; 509 break; 510 511 case SHA_FLAGS_SHA512: 512 valmr |= SHA_MR_ALGO_SHA512; 513 hashsize = SHA512_DIGEST_SIZE; 514 break; 515 516 default: 517 break; 518 } 519 520 /* Setting CR_FIRST only for the first iteration */ 521 if (!(ctx->digcnt[0] || ctx->digcnt[1])) { 522 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 523 } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) { 524 const u32 *hash = (const u32 *)ctx->digest; 525 526 /* 527 * Restore the hardware context: update the User Initialize 528 * Hash Value (UIHV) with the value saved when the latest 529 * 'update' operation completed on this very same crypto 530 * request. 531 */ 532 ctx->flags &= ~SHA_FLAGS_RESTORE; 533 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); 534 for (i = 0; i < hashsize / sizeof(u32); ++i) 535 atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]); 536 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 537 valmr |= SHA_MR_UIHV; 538 } 539 /* 540 * WARNING: If the UIHV feature is not available, the hardware CANNOT 541 * process concurrent requests: the internal registers used to store 542 * the hash/digest are still set to the partial digest output values 543 * computed during the latest round. 544 */ 545 546 atmel_sha_write(dd, SHA_MR, valmr); 547 } 548 549 static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd, 550 atmel_sha_fn_t resume) 551 { 552 u32 isr = atmel_sha_read(dd, SHA_ISR); 553 554 if (unlikely(isr & SHA_INT_DATARDY)) 555 return resume(dd); 556 557 dd->resume = resume; 558 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); 559 return -EINPROGRESS; 560 } 561 562 static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf, 563 size_t length, int final) 564 { 565 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 566 int count, len32; 567 const u32 *buffer = (const u32 *)buf; 568 569 dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", 570 ctx->digcnt[1], ctx->digcnt[0], length, final); 571 572 atmel_sha_write_ctrl(dd, 0); 573 574 /* should be non-zero before next lines to disable clocks later */ 575 ctx->digcnt[0] += length; 576 if (ctx->digcnt[0] < length) 577 ctx->digcnt[1]++; 578 579 if (final) 580 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ 581 582 len32 = DIV_ROUND_UP(length, sizeof(u32)); 583 584 dd->flags |= SHA_FLAGS_CPU; 585 586 for (count = 0; count < len32; count++) 587 atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]); 588 589 return -EINPROGRESS; 590 } 591 592 static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, 593 size_t length1, dma_addr_t dma_addr2, size_t length2, int final) 594 { 595 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 596 int len32; 597 598 dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", 599 ctx->digcnt[1], ctx->digcnt[0], length1, final); 600 601 len32 = DIV_ROUND_UP(length1, sizeof(u32)); 602 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS); 603 atmel_sha_write(dd, SHA_TPR, dma_addr1); 604 atmel_sha_write(dd, SHA_TCR, len32); 605 606 len32 = DIV_ROUND_UP(length2, sizeof(u32)); 607 atmel_sha_write(dd, SHA_TNPR, dma_addr2); 608 atmel_sha_write(dd, SHA_TNCR, len32); 609 610 atmel_sha_write_ctrl(dd, 1); 611 612 /* should be non-zero before next lines to disable clocks later */ 613 ctx->digcnt[0] += length1; 614 if (ctx->digcnt[0] < length1) 615 ctx->digcnt[1]++; 616 617 if (final) 618 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ 619 620 dd->flags |= SHA_FLAGS_DMA_ACTIVE; 621 622 /* Start DMA transfer */ 623 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN); 624 625 return -EINPROGRESS; 626 } 627 628 static void atmel_sha_dma_callback(void *data) 629 { 630 struct atmel_sha_dev *dd = data; 631 632 dd->is_async = true; 633 634 /* dma_lch_in - completed - wait DATRDY */ 635 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); 636 } 637 638 static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, 639 size_t length1, dma_addr_t dma_addr2, size_t length2, int final) 640 { 641 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 642 struct dma_async_tx_descriptor *in_desc; 643 struct scatterlist sg[2]; 644 645 dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", 646 ctx->digcnt[1], ctx->digcnt[0], length1, final); 647 648 dd->dma_lch_in.dma_conf.src_maxburst = 16; 649 dd->dma_lch_in.dma_conf.dst_maxburst = 16; 650 651 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); 652 653 if (length2) { 654 sg_init_table(sg, 2); 655 sg_dma_address(&sg[0]) = dma_addr1; 656 sg_dma_len(&sg[0]) = length1; 657 sg_dma_address(&sg[1]) = dma_addr2; 658 sg_dma_len(&sg[1]) = length2; 659 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2, 660 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 661 } else { 662 sg_init_table(sg, 1); 663 sg_dma_address(&sg[0]) = dma_addr1; 664 sg_dma_len(&sg[0]) = length1; 665 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1, 666 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 667 } 668 if (!in_desc) 669 return atmel_sha_complete(dd, -EINVAL); 670 671 in_desc->callback = atmel_sha_dma_callback; 672 in_desc->callback_param = dd; 673 674 atmel_sha_write_ctrl(dd, 1); 675 676 /* should be non-zero before next lines to disable clocks later */ 677 ctx->digcnt[0] += length1; 678 if (ctx->digcnt[0] < length1) 679 ctx->digcnt[1]++; 680 681 if (final) 682 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ 683 684 dd->flags |= SHA_FLAGS_DMA_ACTIVE; 685 686 /* Start DMA transfer */ 687 dmaengine_submit(in_desc); 688 dma_async_issue_pending(dd->dma_lch_in.chan); 689 690 return -EINPROGRESS; 691 } 692 693 static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, 694 size_t length1, dma_addr_t dma_addr2, size_t length2, int final) 695 { 696 if (dd->caps.has_dma) 697 return atmel_sha_xmit_dma(dd, dma_addr1, length1, 698 dma_addr2, length2, final); 699 else 700 return atmel_sha_xmit_pdc(dd, dma_addr1, length1, 701 dma_addr2, length2, final); 702 } 703 704 static int atmel_sha_update_cpu(struct atmel_sha_dev *dd) 705 { 706 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 707 int bufcnt; 708 709 atmel_sha_append_sg(ctx); 710 atmel_sha_fill_padding(ctx, 0); 711 bufcnt = ctx->bufcnt; 712 ctx->bufcnt = 0; 713 714 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); 715 } 716 717 static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd, 718 struct atmel_sha_reqctx *ctx, 719 size_t length, int final) 720 { 721 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, 722 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); 723 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { 724 dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen + 725 ctx->block_size); 726 return atmel_sha_complete(dd, -EINVAL); 727 } 728 729 ctx->flags &= ~SHA_FLAGS_SG; 730 731 /* next call does not fail... so no unmap in the case of error */ 732 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final); 733 } 734 735 static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd) 736 { 737 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 738 unsigned int final; 739 size_t count; 740 741 atmel_sha_append_sg(ctx); 742 743 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; 744 745 dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n", 746 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final); 747 748 if (final) 749 atmel_sha_fill_padding(ctx, 0); 750 751 if (final || (ctx->bufcnt == ctx->buflen)) { 752 count = ctx->bufcnt; 753 ctx->bufcnt = 0; 754 return atmel_sha_xmit_dma_map(dd, ctx, count, final); 755 } 756 757 return 0; 758 } 759 760 static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) 761 { 762 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 763 unsigned int length, final, tail; 764 struct scatterlist *sg; 765 unsigned int count; 766 767 if (!ctx->total) 768 return 0; 769 770 if (ctx->bufcnt || ctx->offset) 771 return atmel_sha_update_dma_slow(dd); 772 773 dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n", 774 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total); 775 776 sg = ctx->sg; 777 778 if (!IS_ALIGNED(sg->offset, sizeof(u32))) 779 return atmel_sha_update_dma_slow(dd); 780 781 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) 782 /* size is not ctx->block_size aligned */ 783 return atmel_sha_update_dma_slow(dd); 784 785 length = min(ctx->total, sg->length); 786 787 if (sg_is_last(sg)) { 788 if (!(ctx->flags & SHA_FLAGS_FINUP)) { 789 /* not last sg must be ctx->block_size aligned */ 790 tail = length & (ctx->block_size - 1); 791 length -= tail; 792 } 793 } 794 795 ctx->total -= length; 796 ctx->offset = length; /* offset where to start slow */ 797 798 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; 799 800 /* Add padding */ 801 if (final) { 802 tail = length & (ctx->block_size - 1); 803 length -= tail; 804 ctx->total += tail; 805 ctx->offset = length; /* offset where to start slow */ 806 807 sg = ctx->sg; 808 atmel_sha_append_sg(ctx); 809 810 atmel_sha_fill_padding(ctx, length); 811 812 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, 813 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); 814 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { 815 dev_err(dd->dev, "dma %zu bytes error\n", 816 ctx->buflen + ctx->block_size); 817 return atmel_sha_complete(dd, -EINVAL); 818 } 819 820 if (length == 0) { 821 ctx->flags &= ~SHA_FLAGS_SG; 822 count = ctx->bufcnt; 823 ctx->bufcnt = 0; 824 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0, 825 0, final); 826 } else { 827 ctx->sg = sg; 828 if (!dma_map_sg(dd->dev, ctx->sg, 1, 829 DMA_TO_DEVICE)) { 830 dev_err(dd->dev, "dma_map_sg error\n"); 831 return atmel_sha_complete(dd, -EINVAL); 832 } 833 834 ctx->flags |= SHA_FLAGS_SG; 835 836 count = ctx->bufcnt; 837 ctx->bufcnt = 0; 838 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), 839 length, ctx->dma_addr, count, final); 840 } 841 } 842 843 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { 844 dev_err(dd->dev, "dma_map_sg error\n"); 845 return atmel_sha_complete(dd, -EINVAL); 846 } 847 848 ctx->flags |= SHA_FLAGS_SG; 849 850 /* next call does not fail... so no unmap in the case of error */ 851 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, 852 0, final); 853 } 854 855 static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd) 856 { 857 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 858 859 if (ctx->flags & SHA_FLAGS_SG) { 860 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); 861 if (ctx->sg->length == ctx->offset) { 862 ctx->sg = sg_next(ctx->sg); 863 if (ctx->sg) 864 ctx->offset = 0; 865 } 866 if (ctx->flags & SHA_FLAGS_PAD) { 867 dma_unmap_single(dd->dev, ctx->dma_addr, 868 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); 869 } 870 } else { 871 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + 872 ctx->block_size, DMA_TO_DEVICE); 873 } 874 } 875 876 static int atmel_sha_update_req(struct atmel_sha_dev *dd) 877 { 878 struct ahash_request *req = dd->req; 879 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 880 int err; 881 882 dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n", 883 ctx->total, ctx->digcnt[1], ctx->digcnt[0]); 884 885 if (ctx->flags & SHA_FLAGS_CPU) 886 err = atmel_sha_update_cpu(dd); 887 else 888 err = atmel_sha_update_dma_start(dd); 889 890 /* wait for dma completion before can take more data */ 891 dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0%llx\n", 892 err, ctx->digcnt[1], ctx->digcnt[0]); 893 894 return err; 895 } 896 897 static int atmel_sha_final_req(struct atmel_sha_dev *dd) 898 { 899 struct ahash_request *req = dd->req; 900 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 901 int err = 0; 902 int count; 903 904 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { 905 atmel_sha_fill_padding(ctx, 0); 906 count = ctx->bufcnt; 907 ctx->bufcnt = 0; 908 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1); 909 } 910 /* faster to handle last block with cpu */ 911 else { 912 atmel_sha_fill_padding(ctx, 0); 913 count = ctx->bufcnt; 914 ctx->bufcnt = 0; 915 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); 916 } 917 918 dev_dbg(dd->dev, "final_req: err: %d\n", err); 919 920 return err; 921 } 922 923 static void atmel_sha_copy_hash(struct ahash_request *req) 924 { 925 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 926 u32 *hash = (u32 *)ctx->digest; 927 unsigned int i, hashsize; 928 929 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 930 case SHA_FLAGS_SHA1: 931 hashsize = SHA1_DIGEST_SIZE; 932 break; 933 934 case SHA_FLAGS_SHA224: 935 case SHA_FLAGS_SHA256: 936 hashsize = SHA256_DIGEST_SIZE; 937 break; 938 939 case SHA_FLAGS_SHA384: 940 case SHA_FLAGS_SHA512: 941 hashsize = SHA512_DIGEST_SIZE; 942 break; 943 944 default: 945 /* Should not happen... */ 946 return; 947 } 948 949 for (i = 0; i < hashsize / sizeof(u32); ++i) 950 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); 951 ctx->flags |= SHA_FLAGS_RESTORE; 952 } 953 954 static void atmel_sha_copy_ready_hash(struct ahash_request *req) 955 { 956 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 957 958 if (!req->result) 959 return; 960 961 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 962 default: 963 case SHA_FLAGS_SHA1: 964 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); 965 break; 966 967 case SHA_FLAGS_SHA224: 968 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE); 969 break; 970 971 case SHA_FLAGS_SHA256: 972 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); 973 break; 974 975 case SHA_FLAGS_SHA384: 976 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE); 977 break; 978 979 case SHA_FLAGS_SHA512: 980 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE); 981 break; 982 } 983 } 984 985 static int atmel_sha_finish(struct ahash_request *req) 986 { 987 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 988 struct atmel_sha_dev *dd = ctx->dd; 989 990 if (ctx->digcnt[0] || ctx->digcnt[1]) 991 atmel_sha_copy_ready_hash(req); 992 993 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1], 994 ctx->digcnt[0], ctx->bufcnt); 995 996 return 0; 997 } 998 999 static void atmel_sha_finish_req(struct ahash_request *req, int err) 1000 { 1001 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1002 struct atmel_sha_dev *dd = ctx->dd; 1003 1004 if (!err) { 1005 atmel_sha_copy_hash(req); 1006 if (SHA_FLAGS_FINAL & dd->flags) 1007 err = atmel_sha_finish(req); 1008 } else { 1009 ctx->flags |= SHA_FLAGS_ERROR; 1010 } 1011 1012 /* atomic operation is not needed here */ 1013 (void)atmel_sha_complete(dd, err); 1014 } 1015 1016 static int atmel_sha_hw_init(struct atmel_sha_dev *dd) 1017 { 1018 int err; 1019 1020 err = clk_enable(dd->iclk); 1021 if (err) 1022 return err; 1023 1024 if (!(SHA_FLAGS_INIT & dd->flags)) { 1025 atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST); 1026 dd->flags |= SHA_FLAGS_INIT; 1027 } 1028 1029 return 0; 1030 } 1031 1032 static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd) 1033 { 1034 return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff; 1035 } 1036 1037 static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd) 1038 { 1039 int err; 1040 1041 err = atmel_sha_hw_init(dd); 1042 if (err) 1043 return err; 1044 1045 dd->hw_version = atmel_sha_get_version(dd); 1046 1047 dev_info(dd->dev, 1048 "version: 0x%x\n", dd->hw_version); 1049 1050 clk_disable(dd->iclk); 1051 1052 return 0; 1053 } 1054 1055 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, 1056 struct ahash_request *req) 1057 { 1058 struct crypto_async_request *async_req, *backlog; 1059 struct atmel_sha_ctx *ctx; 1060 unsigned long flags; 1061 bool start_async; 1062 int err = 0, ret = 0; 1063 1064 spin_lock_irqsave(&dd->lock, flags); 1065 if (req) 1066 ret = ahash_enqueue_request(&dd->queue, req); 1067 1068 if (SHA_FLAGS_BUSY & dd->flags) { 1069 spin_unlock_irqrestore(&dd->lock, flags); 1070 return ret; 1071 } 1072 1073 backlog = crypto_get_backlog(&dd->queue); 1074 async_req = crypto_dequeue_request(&dd->queue); 1075 if (async_req) 1076 dd->flags |= SHA_FLAGS_BUSY; 1077 1078 spin_unlock_irqrestore(&dd->lock, flags); 1079 1080 if (!async_req) 1081 return ret; 1082 1083 if (backlog) 1084 backlog->complete(backlog, -EINPROGRESS); 1085 1086 ctx = crypto_tfm_ctx(async_req->tfm); 1087 1088 dd->req = ahash_request_cast(async_req); 1089 start_async = (dd->req != req); 1090 dd->is_async = start_async; 1091 dd->force_complete = false; 1092 1093 /* WARNING: ctx->start() MAY change dd->is_async. */ 1094 err = ctx->start(dd); 1095 return (start_async) ? ret : err; 1096 } 1097 1098 static int atmel_sha_done(struct atmel_sha_dev *dd); 1099 1100 static int atmel_sha_start(struct atmel_sha_dev *dd) 1101 { 1102 struct ahash_request *req = dd->req; 1103 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1104 int err; 1105 1106 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", 1107 ctx->op, req->nbytes); 1108 1109 err = atmel_sha_hw_init(dd); 1110 if (err) 1111 return atmel_sha_complete(dd, err); 1112 1113 /* 1114 * atmel_sha_update_req() and atmel_sha_final_req() can return either: 1115 * -EINPROGRESS: the hardware is busy and the SHA driver will resume 1116 * its job later in the done_task. 1117 * This is the main path. 1118 * 1119 * 0: the SHA driver can continue its job then release the hardware 1120 * later, if needed, with atmel_sha_finish_req(). 1121 * This is the alternate path. 1122 * 1123 * < 0: an error has occurred so atmel_sha_complete(dd, err) has already 1124 * been called, hence the hardware has been released. 1125 * The SHA driver must stop its job without calling 1126 * atmel_sha_finish_req(), otherwise atmel_sha_complete() would be 1127 * called a second time. 1128 * 1129 * Please note that currently, atmel_sha_final_req() never returns 0. 1130 */ 1131 1132 dd->resume = atmel_sha_done; 1133 if (ctx->op == SHA_OP_UPDATE) { 1134 err = atmel_sha_update_req(dd); 1135 if (!err && (ctx->flags & SHA_FLAGS_FINUP)) 1136 /* no final() after finup() */ 1137 err = atmel_sha_final_req(dd); 1138 } else if (ctx->op == SHA_OP_FINAL) { 1139 err = atmel_sha_final_req(dd); 1140 } 1141 1142 if (!err) 1143 /* done_task will not finish it, so do it here */ 1144 atmel_sha_finish_req(req, err); 1145 1146 dev_dbg(dd->dev, "exit, err: %d\n", err); 1147 1148 return err; 1149 } 1150 1151 static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op) 1152 { 1153 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1154 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 1155 struct atmel_sha_dev *dd = tctx->dd; 1156 1157 ctx->op = op; 1158 1159 return atmel_sha_handle_queue(dd, req); 1160 } 1161 1162 static int atmel_sha_update(struct ahash_request *req) 1163 { 1164 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1165 1166 if (!req->nbytes) 1167 return 0; 1168 1169 ctx->total = req->nbytes; 1170 ctx->sg = req->src; 1171 ctx->offset = 0; 1172 1173 if (ctx->flags & SHA_FLAGS_FINUP) { 1174 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD) 1175 /* faster to use CPU for short transfers */ 1176 ctx->flags |= SHA_FLAGS_CPU; 1177 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { 1178 atmel_sha_append_sg(ctx); 1179 return 0; 1180 } 1181 return atmel_sha_enqueue(req, SHA_OP_UPDATE); 1182 } 1183 1184 static int atmel_sha_final(struct ahash_request *req) 1185 { 1186 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1187 1188 ctx->flags |= SHA_FLAGS_FINUP; 1189 1190 if (ctx->flags & SHA_FLAGS_ERROR) 1191 return 0; /* uncompleted hash is not needed */ 1192 1193 if (ctx->flags & SHA_FLAGS_PAD) 1194 /* copy ready hash (+ finalize hmac) */ 1195 return atmel_sha_finish(req); 1196 1197 return atmel_sha_enqueue(req, SHA_OP_FINAL); 1198 } 1199 1200 static int atmel_sha_finup(struct ahash_request *req) 1201 { 1202 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1203 int err1, err2; 1204 1205 ctx->flags |= SHA_FLAGS_FINUP; 1206 1207 err1 = atmel_sha_update(req); 1208 if (err1 == -EINPROGRESS || 1209 (err1 == -EBUSY && (ahash_request_flags(req) & 1210 CRYPTO_TFM_REQ_MAY_BACKLOG))) 1211 return err1; 1212 1213 /* 1214 * final() has to be always called to cleanup resources 1215 * even if udpate() failed, except EINPROGRESS 1216 */ 1217 err2 = atmel_sha_final(req); 1218 1219 return err1 ?: err2; 1220 } 1221 1222 static int atmel_sha_digest(struct ahash_request *req) 1223 { 1224 return atmel_sha_init(req) ?: atmel_sha_finup(req); 1225 } 1226 1227 1228 static int atmel_sha_export(struct ahash_request *req, void *out) 1229 { 1230 const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1231 1232 memcpy(out, ctx, sizeof(*ctx)); 1233 return 0; 1234 } 1235 1236 static int atmel_sha_import(struct ahash_request *req, const void *in) 1237 { 1238 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1239 1240 memcpy(ctx, in, sizeof(*ctx)); 1241 return 0; 1242 } 1243 1244 static int atmel_sha_cra_init(struct crypto_tfm *tfm) 1245 { 1246 struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm); 1247 1248 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1249 sizeof(struct atmel_sha_reqctx)); 1250 ctx->start = atmel_sha_start; 1251 1252 return 0; 1253 } 1254 1255 static struct ahash_alg sha_1_256_algs[] = { 1256 { 1257 .init = atmel_sha_init, 1258 .update = atmel_sha_update, 1259 .final = atmel_sha_final, 1260 .finup = atmel_sha_finup, 1261 .digest = atmel_sha_digest, 1262 .export = atmel_sha_export, 1263 .import = atmel_sha_import, 1264 .halg = { 1265 .digestsize = SHA1_DIGEST_SIZE, 1266 .statesize = sizeof(struct atmel_sha_reqctx), 1267 .base = { 1268 .cra_name = "sha1", 1269 .cra_driver_name = "atmel-sha1", 1270 .cra_priority = ATMEL_SHA_PRIORITY, 1271 .cra_flags = CRYPTO_ALG_ASYNC, 1272 .cra_blocksize = SHA1_BLOCK_SIZE, 1273 .cra_ctxsize = sizeof(struct atmel_sha_ctx), 1274 .cra_alignmask = 0, 1275 .cra_module = THIS_MODULE, 1276 .cra_init = atmel_sha_cra_init, 1277 } 1278 } 1279 }, 1280 { 1281 .init = atmel_sha_init, 1282 .update = atmel_sha_update, 1283 .final = atmel_sha_final, 1284 .finup = atmel_sha_finup, 1285 .digest = atmel_sha_digest, 1286 .export = atmel_sha_export, 1287 .import = atmel_sha_import, 1288 .halg = { 1289 .digestsize = SHA256_DIGEST_SIZE, 1290 .statesize = sizeof(struct atmel_sha_reqctx), 1291 .base = { 1292 .cra_name = "sha256", 1293 .cra_driver_name = "atmel-sha256", 1294 .cra_priority = ATMEL_SHA_PRIORITY, 1295 .cra_flags = CRYPTO_ALG_ASYNC, 1296 .cra_blocksize = SHA256_BLOCK_SIZE, 1297 .cra_ctxsize = sizeof(struct atmel_sha_ctx), 1298 .cra_alignmask = 0, 1299 .cra_module = THIS_MODULE, 1300 .cra_init = atmel_sha_cra_init, 1301 } 1302 } 1303 }, 1304 }; 1305 1306 static struct ahash_alg sha_224_alg = { 1307 .init = atmel_sha_init, 1308 .update = atmel_sha_update, 1309 .final = atmel_sha_final, 1310 .finup = atmel_sha_finup, 1311 .digest = atmel_sha_digest, 1312 .export = atmel_sha_export, 1313 .import = atmel_sha_import, 1314 .halg = { 1315 .digestsize = SHA224_DIGEST_SIZE, 1316 .statesize = sizeof(struct atmel_sha_reqctx), 1317 .base = { 1318 .cra_name = "sha224", 1319 .cra_driver_name = "atmel-sha224", 1320 .cra_priority = ATMEL_SHA_PRIORITY, 1321 .cra_flags = CRYPTO_ALG_ASYNC, 1322 .cra_blocksize = SHA224_BLOCK_SIZE, 1323 .cra_ctxsize = sizeof(struct atmel_sha_ctx), 1324 .cra_alignmask = 0, 1325 .cra_module = THIS_MODULE, 1326 .cra_init = atmel_sha_cra_init, 1327 } 1328 } 1329 }; 1330 1331 static struct ahash_alg sha_384_512_algs[] = { 1332 { 1333 .init = atmel_sha_init, 1334 .update = atmel_sha_update, 1335 .final = atmel_sha_final, 1336 .finup = atmel_sha_finup, 1337 .digest = atmel_sha_digest, 1338 .export = atmel_sha_export, 1339 .import = atmel_sha_import, 1340 .halg = { 1341 .digestsize = SHA384_DIGEST_SIZE, 1342 .statesize = sizeof(struct atmel_sha_reqctx), 1343 .base = { 1344 .cra_name = "sha384", 1345 .cra_driver_name = "atmel-sha384", 1346 .cra_priority = ATMEL_SHA_PRIORITY, 1347 .cra_flags = CRYPTO_ALG_ASYNC, 1348 .cra_blocksize = SHA384_BLOCK_SIZE, 1349 .cra_ctxsize = sizeof(struct atmel_sha_ctx), 1350 .cra_alignmask = 0x3, 1351 .cra_module = THIS_MODULE, 1352 .cra_init = atmel_sha_cra_init, 1353 } 1354 } 1355 }, 1356 { 1357 .init = atmel_sha_init, 1358 .update = atmel_sha_update, 1359 .final = atmel_sha_final, 1360 .finup = atmel_sha_finup, 1361 .digest = atmel_sha_digest, 1362 .export = atmel_sha_export, 1363 .import = atmel_sha_import, 1364 .halg = { 1365 .digestsize = SHA512_DIGEST_SIZE, 1366 .statesize = sizeof(struct atmel_sha_reqctx), 1367 .base = { 1368 .cra_name = "sha512", 1369 .cra_driver_name = "atmel-sha512", 1370 .cra_priority = ATMEL_SHA_PRIORITY, 1371 .cra_flags = CRYPTO_ALG_ASYNC, 1372 .cra_blocksize = SHA512_BLOCK_SIZE, 1373 .cra_ctxsize = sizeof(struct atmel_sha_ctx), 1374 .cra_alignmask = 0x3, 1375 .cra_module = THIS_MODULE, 1376 .cra_init = atmel_sha_cra_init, 1377 } 1378 } 1379 }, 1380 }; 1381 1382 static void atmel_sha_queue_task(unsigned long data) 1383 { 1384 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; 1385 1386 atmel_sha_handle_queue(dd, NULL); 1387 } 1388 1389 static int atmel_sha_done(struct atmel_sha_dev *dd) 1390 { 1391 int err = 0; 1392 1393 if (SHA_FLAGS_CPU & dd->flags) { 1394 if (SHA_FLAGS_OUTPUT_READY & dd->flags) { 1395 dd->flags &= ~SHA_FLAGS_OUTPUT_READY; 1396 goto finish; 1397 } 1398 } else if (SHA_FLAGS_DMA_READY & dd->flags) { 1399 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) { 1400 dd->flags &= ~SHA_FLAGS_DMA_ACTIVE; 1401 atmel_sha_update_dma_stop(dd); 1402 } 1403 if (SHA_FLAGS_OUTPUT_READY & dd->flags) { 1404 /* hash or semi-hash ready */ 1405 dd->flags &= ~(SHA_FLAGS_DMA_READY | 1406 SHA_FLAGS_OUTPUT_READY); 1407 err = atmel_sha_update_dma_start(dd); 1408 if (err != -EINPROGRESS) 1409 goto finish; 1410 } 1411 } 1412 return err; 1413 1414 finish: 1415 /* finish curent request */ 1416 atmel_sha_finish_req(dd->req, err); 1417 1418 return err; 1419 } 1420 1421 static void atmel_sha_done_task(unsigned long data) 1422 { 1423 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; 1424 1425 dd->is_async = true; 1426 (void)dd->resume(dd); 1427 } 1428 1429 static irqreturn_t atmel_sha_irq(int irq, void *dev_id) 1430 { 1431 struct atmel_sha_dev *sha_dd = dev_id; 1432 u32 reg; 1433 1434 reg = atmel_sha_read(sha_dd, SHA_ISR); 1435 if (reg & atmel_sha_read(sha_dd, SHA_IMR)) { 1436 atmel_sha_write(sha_dd, SHA_IDR, reg); 1437 if (SHA_FLAGS_BUSY & sha_dd->flags) { 1438 sha_dd->flags |= SHA_FLAGS_OUTPUT_READY; 1439 if (!(SHA_FLAGS_CPU & sha_dd->flags)) 1440 sha_dd->flags |= SHA_FLAGS_DMA_READY; 1441 tasklet_schedule(&sha_dd->done_task); 1442 } else { 1443 dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n"); 1444 } 1445 return IRQ_HANDLED; 1446 } 1447 1448 return IRQ_NONE; 1449 } 1450 1451 1452 /* DMA transfer functions */ 1453 1454 static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd, 1455 struct scatterlist *sg, 1456 size_t len) 1457 { 1458 struct atmel_sha_dma *dma = &dd->dma_lch_in; 1459 struct ahash_request *req = dd->req; 1460 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1461 size_t bs = ctx->block_size; 1462 int nents; 1463 1464 for (nents = 0; sg; sg = sg_next(sg), ++nents) { 1465 if (!IS_ALIGNED(sg->offset, sizeof(u32))) 1466 return false; 1467 1468 /* 1469 * This is the last sg, the only one that is allowed to 1470 * have an unaligned length. 1471 */ 1472 if (len <= sg->length) { 1473 dma->nents = nents + 1; 1474 dma->last_sg_length = sg->length; 1475 sg->length = ALIGN(len, sizeof(u32)); 1476 return true; 1477 } 1478 1479 /* All other sg lengths MUST be aligned to the block size. */ 1480 if (!IS_ALIGNED(sg->length, bs)) 1481 return false; 1482 1483 len -= sg->length; 1484 } 1485 1486 return false; 1487 } 1488 1489 static void atmel_sha_dma_callback2(void *data) 1490 { 1491 struct atmel_sha_dev *dd = data; 1492 struct atmel_sha_dma *dma = &dd->dma_lch_in; 1493 struct scatterlist *sg; 1494 int nents; 1495 1496 dmaengine_terminate_all(dma->chan); 1497 dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); 1498 1499 sg = dma->sg; 1500 for (nents = 0; nents < dma->nents - 1; ++nents) 1501 sg = sg_next(sg); 1502 sg->length = dma->last_sg_length; 1503 1504 dd->is_async = true; 1505 (void)atmel_sha_wait_for_data_ready(dd, dd->resume); 1506 } 1507 1508 static int atmel_sha_dma_start(struct atmel_sha_dev *dd, 1509 struct scatterlist *src, 1510 size_t len, 1511 atmel_sha_fn_t resume) 1512 { 1513 struct atmel_sha_dma *dma = &dd->dma_lch_in; 1514 struct dma_slave_config *config = &dma->dma_conf; 1515 struct dma_chan *chan = dma->chan; 1516 struct dma_async_tx_descriptor *desc; 1517 dma_cookie_t cookie; 1518 unsigned int sg_len; 1519 int err; 1520 1521 dd->resume = resume; 1522 1523 /* 1524 * dma->nents has already been initialized by 1525 * atmel_sha_dma_check_aligned(). 1526 */ 1527 dma->sg = src; 1528 sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); 1529 if (!sg_len) { 1530 err = -ENOMEM; 1531 goto exit; 1532 } 1533 1534 config->src_maxburst = 16; 1535 config->dst_maxburst = 16; 1536 err = dmaengine_slave_config(chan, config); 1537 if (err) 1538 goto unmap_sg; 1539 1540 desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV, 1541 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1542 if (!desc) { 1543 err = -ENOMEM; 1544 goto unmap_sg; 1545 } 1546 1547 desc->callback = atmel_sha_dma_callback2; 1548 desc->callback_param = dd; 1549 cookie = dmaengine_submit(desc); 1550 err = dma_submit_error(cookie); 1551 if (err) 1552 goto unmap_sg; 1553 1554 dma_async_issue_pending(chan); 1555 1556 return -EINPROGRESS; 1557 1558 unmap_sg: 1559 dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); 1560 exit: 1561 return atmel_sha_complete(dd, err); 1562 } 1563 1564 1565 /* CPU transfer functions */ 1566 1567 static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd) 1568 { 1569 struct ahash_request *req = dd->req; 1570 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1571 const u32 *words = (const u32 *)ctx->buffer; 1572 size_t i, num_words; 1573 u32 isr, din, din_inc; 1574 1575 din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1; 1576 for (;;) { 1577 /* Write data into the Input Data Registers. */ 1578 num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32)); 1579 for (i = 0, din = 0; i < num_words; ++i, din += din_inc) 1580 atmel_sha_write(dd, SHA_REG_DIN(din), words[i]); 1581 1582 ctx->offset += ctx->bufcnt; 1583 ctx->total -= ctx->bufcnt; 1584 1585 if (!ctx->total) 1586 break; 1587 1588 /* 1589 * Prepare next block: 1590 * Fill ctx->buffer now with the next data to be written into 1591 * IDATARx: it gives time for the SHA hardware to process 1592 * the current data so the SHA_INT_DATARDY flag might be set 1593 * in SHA_ISR when polling this register at the beginning of 1594 * the next loop. 1595 */ 1596 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); 1597 scatterwalk_map_and_copy(ctx->buffer, ctx->sg, 1598 ctx->offset, ctx->bufcnt, 0); 1599 1600 /* Wait for hardware to be ready again. */ 1601 isr = atmel_sha_read(dd, SHA_ISR); 1602 if (!(isr & SHA_INT_DATARDY)) { 1603 /* Not ready yet. */ 1604 dd->resume = atmel_sha_cpu_transfer; 1605 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); 1606 return -EINPROGRESS; 1607 } 1608 } 1609 1610 if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY))) 1611 return dd->cpu_transfer_complete(dd); 1612 1613 return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete); 1614 } 1615 1616 static int atmel_sha_cpu_start(struct atmel_sha_dev *dd, 1617 struct scatterlist *sg, 1618 unsigned int len, 1619 bool idatar0_only, 1620 bool wait_data_ready, 1621 atmel_sha_fn_t resume) 1622 { 1623 struct ahash_request *req = dd->req; 1624 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1625 1626 if (!len) 1627 return resume(dd); 1628 1629 ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY); 1630 1631 if (idatar0_only) 1632 ctx->flags |= SHA_FLAGS_IDATAR0; 1633 1634 if (wait_data_ready) 1635 ctx->flags |= SHA_FLAGS_WAIT_DATARDY; 1636 1637 ctx->sg = sg; 1638 ctx->total = len; 1639 ctx->offset = 0; 1640 1641 /* Prepare the first block to be written. */ 1642 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); 1643 scatterwalk_map_and_copy(ctx->buffer, ctx->sg, 1644 ctx->offset, ctx->bufcnt, 0); 1645 1646 dd->cpu_transfer_complete = resume; 1647 return atmel_sha_cpu_transfer(dd); 1648 } 1649 1650 static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd, 1651 const void *data, unsigned int datalen, 1652 bool auto_padding, 1653 atmel_sha_fn_t resume) 1654 { 1655 struct ahash_request *req = dd->req; 1656 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1657 u32 msglen = (auto_padding) ? datalen : 0; 1658 u32 mr = SHA_MR_MODE_AUTO; 1659 1660 if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding)) 1661 return atmel_sha_complete(dd, -EINVAL); 1662 1663 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); 1664 atmel_sha_write(dd, SHA_MR, mr); 1665 atmel_sha_write(dd, SHA_MSR, msglen); 1666 atmel_sha_write(dd, SHA_BCR, msglen); 1667 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 1668 1669 sg_init_one(&dd->tmp, data, datalen); 1670 return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume); 1671 } 1672 1673 1674 /* hmac functions */ 1675 1676 struct atmel_sha_hmac_key { 1677 bool valid; 1678 unsigned int keylen; 1679 u8 buffer[SHA512_BLOCK_SIZE]; 1680 u8 *keydup; 1681 }; 1682 1683 static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey) 1684 { 1685 memset(hkey, 0, sizeof(*hkey)); 1686 } 1687 1688 static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey) 1689 { 1690 kfree(hkey->keydup); 1691 memset(hkey, 0, sizeof(*hkey)); 1692 } 1693 1694 static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey, 1695 const u8 *key, 1696 unsigned int keylen) 1697 { 1698 atmel_sha_hmac_key_release(hkey); 1699 1700 if (keylen > sizeof(hkey->buffer)) { 1701 hkey->keydup = kmemdup(key, keylen, GFP_KERNEL); 1702 if (!hkey->keydup) 1703 return -ENOMEM; 1704 1705 } else { 1706 memcpy(hkey->buffer, key, keylen); 1707 } 1708 1709 hkey->valid = true; 1710 hkey->keylen = keylen; 1711 return 0; 1712 } 1713 1714 static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey, 1715 const u8 **key, 1716 unsigned int *keylen) 1717 { 1718 if (!hkey->valid) 1719 return false; 1720 1721 *keylen = hkey->keylen; 1722 *key = (hkey->keydup) ? hkey->keydup : hkey->buffer; 1723 return true; 1724 } 1725 1726 1727 struct atmel_sha_hmac_ctx { 1728 struct atmel_sha_ctx base; 1729 1730 struct atmel_sha_hmac_key hkey; 1731 u32 ipad[SHA512_BLOCK_SIZE / sizeof(u32)]; 1732 u32 opad[SHA512_BLOCK_SIZE / sizeof(u32)]; 1733 atmel_sha_fn_t resume; 1734 }; 1735 1736 static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd, 1737 atmel_sha_fn_t resume); 1738 static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd, 1739 const u8 *key, unsigned int keylen); 1740 static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd); 1741 static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd); 1742 static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd); 1743 static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd); 1744 1745 static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd); 1746 static int atmel_sha_hmac_final(struct atmel_sha_dev *dd); 1747 static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd); 1748 static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd); 1749 1750 static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd, 1751 atmel_sha_fn_t resume) 1752 { 1753 struct ahash_request *req = dd->req; 1754 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1755 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1756 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1757 unsigned int keylen; 1758 const u8 *key; 1759 size_t bs; 1760 1761 hmac->resume = resume; 1762 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 1763 case SHA_FLAGS_SHA1: 1764 ctx->block_size = SHA1_BLOCK_SIZE; 1765 ctx->hash_size = SHA1_DIGEST_SIZE; 1766 break; 1767 1768 case SHA_FLAGS_SHA224: 1769 ctx->block_size = SHA224_BLOCK_SIZE; 1770 ctx->hash_size = SHA256_DIGEST_SIZE; 1771 break; 1772 1773 case SHA_FLAGS_SHA256: 1774 ctx->block_size = SHA256_BLOCK_SIZE; 1775 ctx->hash_size = SHA256_DIGEST_SIZE; 1776 break; 1777 1778 case SHA_FLAGS_SHA384: 1779 ctx->block_size = SHA384_BLOCK_SIZE; 1780 ctx->hash_size = SHA512_DIGEST_SIZE; 1781 break; 1782 1783 case SHA_FLAGS_SHA512: 1784 ctx->block_size = SHA512_BLOCK_SIZE; 1785 ctx->hash_size = SHA512_DIGEST_SIZE; 1786 break; 1787 1788 default: 1789 return atmel_sha_complete(dd, -EINVAL); 1790 } 1791 bs = ctx->block_size; 1792 1793 if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen))) 1794 return resume(dd); 1795 1796 /* Compute K' from K. */ 1797 if (unlikely(keylen > bs)) 1798 return atmel_sha_hmac_prehash_key(dd, key, keylen); 1799 1800 /* Prepare ipad. */ 1801 memcpy((u8 *)hmac->ipad, key, keylen); 1802 memset((u8 *)hmac->ipad + keylen, 0, bs - keylen); 1803 return atmel_sha_hmac_compute_ipad_hash(dd); 1804 } 1805 1806 static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd, 1807 const u8 *key, unsigned int keylen) 1808 { 1809 return atmel_sha_cpu_hash(dd, key, keylen, true, 1810 atmel_sha_hmac_prehash_key_done); 1811 } 1812 1813 static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd) 1814 { 1815 struct ahash_request *req = dd->req; 1816 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1817 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1818 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1819 size_t ds = crypto_ahash_digestsize(tfm); 1820 size_t bs = ctx->block_size; 1821 size_t i, num_words = ds / sizeof(u32); 1822 1823 /* Prepare ipad. */ 1824 for (i = 0; i < num_words; ++i) 1825 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); 1826 memset((u8 *)hmac->ipad + ds, 0, bs - ds); 1827 return atmel_sha_hmac_compute_ipad_hash(dd); 1828 } 1829 1830 static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd) 1831 { 1832 struct ahash_request *req = dd->req; 1833 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1834 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1835 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1836 size_t bs = ctx->block_size; 1837 size_t i, num_words = bs / sizeof(u32); 1838 1839 memcpy(hmac->opad, hmac->ipad, bs); 1840 for (i = 0; i < num_words; ++i) { 1841 hmac->ipad[i] ^= 0x36363636; 1842 hmac->opad[i] ^= 0x5c5c5c5c; 1843 } 1844 1845 return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false, 1846 atmel_sha_hmac_compute_opad_hash); 1847 } 1848 1849 static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd) 1850 { 1851 struct ahash_request *req = dd->req; 1852 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1853 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1854 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1855 size_t bs = ctx->block_size; 1856 size_t hs = ctx->hash_size; 1857 size_t i, num_words = hs / sizeof(u32); 1858 1859 for (i = 0; i < num_words; ++i) 1860 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); 1861 return atmel_sha_cpu_hash(dd, hmac->opad, bs, false, 1862 atmel_sha_hmac_setup_done); 1863 } 1864 1865 static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd) 1866 { 1867 struct ahash_request *req = dd->req; 1868 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1869 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1870 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1871 size_t hs = ctx->hash_size; 1872 size_t i, num_words = hs / sizeof(u32); 1873 1874 for (i = 0; i < num_words; ++i) 1875 hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); 1876 atmel_sha_hmac_key_release(&hmac->hkey); 1877 return hmac->resume(dd); 1878 } 1879 1880 static int atmel_sha_hmac_start(struct atmel_sha_dev *dd) 1881 { 1882 struct ahash_request *req = dd->req; 1883 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1884 int err; 1885 1886 err = atmel_sha_hw_init(dd); 1887 if (err) 1888 return atmel_sha_complete(dd, err); 1889 1890 switch (ctx->op) { 1891 case SHA_OP_INIT: 1892 err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done); 1893 break; 1894 1895 case SHA_OP_UPDATE: 1896 dd->resume = atmel_sha_done; 1897 err = atmel_sha_update_req(dd); 1898 break; 1899 1900 case SHA_OP_FINAL: 1901 dd->resume = atmel_sha_hmac_final; 1902 err = atmel_sha_final_req(dd); 1903 break; 1904 1905 case SHA_OP_DIGEST: 1906 err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2); 1907 break; 1908 1909 default: 1910 return atmel_sha_complete(dd, -EINVAL); 1911 } 1912 1913 return err; 1914 } 1915 1916 static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, 1917 unsigned int keylen) 1918 { 1919 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1920 1921 if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) { 1922 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 1923 return -EINVAL; 1924 } 1925 1926 return 0; 1927 } 1928 1929 static int atmel_sha_hmac_init(struct ahash_request *req) 1930 { 1931 int err; 1932 1933 err = atmel_sha_init(req); 1934 if (err) 1935 return err; 1936 1937 return atmel_sha_enqueue(req, SHA_OP_INIT); 1938 } 1939 1940 static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd) 1941 { 1942 struct ahash_request *req = dd->req; 1943 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1944 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1945 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1946 size_t bs = ctx->block_size; 1947 size_t hs = ctx->hash_size; 1948 1949 ctx->bufcnt = 0; 1950 ctx->digcnt[0] = bs; 1951 ctx->digcnt[1] = 0; 1952 ctx->flags |= SHA_FLAGS_RESTORE; 1953 memcpy(ctx->digest, hmac->ipad, hs); 1954 return atmel_sha_complete(dd, 0); 1955 } 1956 1957 static int atmel_sha_hmac_final(struct atmel_sha_dev *dd) 1958 { 1959 struct ahash_request *req = dd->req; 1960 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1961 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1962 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1963 u32 *digest = (u32 *)ctx->digest; 1964 size_t ds = crypto_ahash_digestsize(tfm); 1965 size_t bs = ctx->block_size; 1966 size_t hs = ctx->hash_size; 1967 size_t i, num_words; 1968 u32 mr; 1969 1970 /* Save d = SHA((K' + ipad) | msg). */ 1971 num_words = ds / sizeof(u32); 1972 for (i = 0; i < num_words; ++i) 1973 digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); 1974 1975 /* Restore context to finish computing SHA((K' + opad) | d). */ 1976 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); 1977 num_words = hs / sizeof(u32); 1978 for (i = 0; i < num_words; ++i) 1979 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); 1980 1981 mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV; 1982 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); 1983 atmel_sha_write(dd, SHA_MR, mr); 1984 atmel_sha_write(dd, SHA_MSR, bs + ds); 1985 atmel_sha_write(dd, SHA_BCR, ds); 1986 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 1987 1988 sg_init_one(&dd->tmp, digest, ds); 1989 return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true, 1990 atmel_sha_hmac_final_done); 1991 } 1992 1993 static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd) 1994 { 1995 /* 1996 * req->result might not be sizeof(u32) aligned, so copy the 1997 * digest into ctx->digest[] before memcpy() the data into 1998 * req->result. 1999 */ 2000 atmel_sha_copy_hash(dd->req); 2001 atmel_sha_copy_ready_hash(dd->req); 2002 return atmel_sha_complete(dd, 0); 2003 } 2004 2005 static int atmel_sha_hmac_digest(struct ahash_request *req) 2006 { 2007 int err; 2008 2009 err = atmel_sha_init(req); 2010 if (err) 2011 return err; 2012 2013 return atmel_sha_enqueue(req, SHA_OP_DIGEST); 2014 } 2015 2016 static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd) 2017 { 2018 struct ahash_request *req = dd->req; 2019 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 2020 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2021 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 2022 size_t hs = ctx->hash_size; 2023 size_t i, num_words = hs / sizeof(u32); 2024 bool use_dma = false; 2025 u32 mr; 2026 2027 /* Special case for empty message. */ 2028 if (!req->nbytes) 2029 return atmel_sha_complete(dd, -EINVAL); // TODO: 2030 2031 /* Check DMA threshold and alignment. */ 2032 if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD && 2033 atmel_sha_dma_check_aligned(dd, req->src, req->nbytes)) 2034 use_dma = true; 2035 2036 /* Write both initial hash values to compute a HMAC. */ 2037 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); 2038 for (i = 0; i < num_words; ++i) 2039 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); 2040 2041 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV); 2042 for (i = 0; i < num_words; ++i) 2043 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); 2044 2045 /* Write the Mode, Message Size, Bytes Count then Control Registers. */ 2046 mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF); 2047 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; 2048 if (use_dma) 2049 mr |= SHA_MR_MODE_IDATAR0; 2050 else 2051 mr |= SHA_MR_MODE_AUTO; 2052 atmel_sha_write(dd, SHA_MR, mr); 2053 2054 atmel_sha_write(dd, SHA_MSR, req->nbytes); 2055 atmel_sha_write(dd, SHA_BCR, req->nbytes); 2056 2057 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 2058 2059 /* Process data. */ 2060 if (use_dma) 2061 return atmel_sha_dma_start(dd, req->src, req->nbytes, 2062 atmel_sha_hmac_final_done); 2063 2064 return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true, 2065 atmel_sha_hmac_final_done); 2066 } 2067 2068 static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm) 2069 { 2070 struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm); 2071 2072 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 2073 sizeof(struct atmel_sha_reqctx)); 2074 hmac->base.start = atmel_sha_hmac_start; 2075 atmel_sha_hmac_key_init(&hmac->hkey); 2076 2077 return 0; 2078 } 2079 2080 static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm) 2081 { 2082 struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm); 2083 2084 atmel_sha_hmac_key_release(&hmac->hkey); 2085 } 2086 2087 static struct ahash_alg sha_hmac_algs[] = { 2088 { 2089 .init = atmel_sha_hmac_init, 2090 .update = atmel_sha_update, 2091 .final = atmel_sha_final, 2092 .digest = atmel_sha_hmac_digest, 2093 .setkey = atmel_sha_hmac_setkey, 2094 .export = atmel_sha_export, 2095 .import = atmel_sha_import, 2096 .halg = { 2097 .digestsize = SHA1_DIGEST_SIZE, 2098 .statesize = sizeof(struct atmel_sha_reqctx), 2099 .base = { 2100 .cra_name = "hmac(sha1)", 2101 .cra_driver_name = "atmel-hmac-sha1", 2102 .cra_priority = ATMEL_SHA_PRIORITY, 2103 .cra_flags = CRYPTO_ALG_ASYNC, 2104 .cra_blocksize = SHA1_BLOCK_SIZE, 2105 .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), 2106 .cra_alignmask = 0, 2107 .cra_module = THIS_MODULE, 2108 .cra_init = atmel_sha_hmac_cra_init, 2109 .cra_exit = atmel_sha_hmac_cra_exit, 2110 } 2111 } 2112 }, 2113 { 2114 .init = atmel_sha_hmac_init, 2115 .update = atmel_sha_update, 2116 .final = atmel_sha_final, 2117 .digest = atmel_sha_hmac_digest, 2118 .setkey = atmel_sha_hmac_setkey, 2119 .export = atmel_sha_export, 2120 .import = atmel_sha_import, 2121 .halg = { 2122 .digestsize = SHA224_DIGEST_SIZE, 2123 .statesize = sizeof(struct atmel_sha_reqctx), 2124 .base = { 2125 .cra_name = "hmac(sha224)", 2126 .cra_driver_name = "atmel-hmac-sha224", 2127 .cra_priority = ATMEL_SHA_PRIORITY, 2128 .cra_flags = CRYPTO_ALG_ASYNC, 2129 .cra_blocksize = SHA224_BLOCK_SIZE, 2130 .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), 2131 .cra_alignmask = 0, 2132 .cra_module = THIS_MODULE, 2133 .cra_init = atmel_sha_hmac_cra_init, 2134 .cra_exit = atmel_sha_hmac_cra_exit, 2135 } 2136 } 2137 }, 2138 { 2139 .init = atmel_sha_hmac_init, 2140 .update = atmel_sha_update, 2141 .final = atmel_sha_final, 2142 .digest = atmel_sha_hmac_digest, 2143 .setkey = atmel_sha_hmac_setkey, 2144 .export = atmel_sha_export, 2145 .import = atmel_sha_import, 2146 .halg = { 2147 .digestsize = SHA256_DIGEST_SIZE, 2148 .statesize = sizeof(struct atmel_sha_reqctx), 2149 .base = { 2150 .cra_name = "hmac(sha256)", 2151 .cra_driver_name = "atmel-hmac-sha256", 2152 .cra_priority = ATMEL_SHA_PRIORITY, 2153 .cra_flags = CRYPTO_ALG_ASYNC, 2154 .cra_blocksize = SHA256_BLOCK_SIZE, 2155 .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), 2156 .cra_alignmask = 0, 2157 .cra_module = THIS_MODULE, 2158 .cra_init = atmel_sha_hmac_cra_init, 2159 .cra_exit = atmel_sha_hmac_cra_exit, 2160 } 2161 } 2162 }, 2163 { 2164 .init = atmel_sha_hmac_init, 2165 .update = atmel_sha_update, 2166 .final = atmel_sha_final, 2167 .digest = atmel_sha_hmac_digest, 2168 .setkey = atmel_sha_hmac_setkey, 2169 .export = atmel_sha_export, 2170 .import = atmel_sha_import, 2171 .halg = { 2172 .digestsize = SHA384_DIGEST_SIZE, 2173 .statesize = sizeof(struct atmel_sha_reqctx), 2174 .base = { 2175 .cra_name = "hmac(sha384)", 2176 .cra_driver_name = "atmel-hmac-sha384", 2177 .cra_priority = ATMEL_SHA_PRIORITY, 2178 .cra_flags = CRYPTO_ALG_ASYNC, 2179 .cra_blocksize = SHA384_BLOCK_SIZE, 2180 .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), 2181 .cra_alignmask = 0, 2182 .cra_module = THIS_MODULE, 2183 .cra_init = atmel_sha_hmac_cra_init, 2184 .cra_exit = atmel_sha_hmac_cra_exit, 2185 } 2186 } 2187 }, 2188 { 2189 .init = atmel_sha_hmac_init, 2190 .update = atmel_sha_update, 2191 .final = atmel_sha_final, 2192 .digest = atmel_sha_hmac_digest, 2193 .setkey = atmel_sha_hmac_setkey, 2194 .export = atmel_sha_export, 2195 .import = atmel_sha_import, 2196 .halg = { 2197 .digestsize = SHA512_DIGEST_SIZE, 2198 .statesize = sizeof(struct atmel_sha_reqctx), 2199 .base = { 2200 .cra_name = "hmac(sha512)", 2201 .cra_driver_name = "atmel-hmac-sha512", 2202 .cra_priority = ATMEL_SHA_PRIORITY, 2203 .cra_flags = CRYPTO_ALG_ASYNC, 2204 .cra_blocksize = SHA512_BLOCK_SIZE, 2205 .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), 2206 .cra_alignmask = 0, 2207 .cra_module = THIS_MODULE, 2208 .cra_init = atmel_sha_hmac_cra_init, 2209 .cra_exit = atmel_sha_hmac_cra_exit, 2210 } 2211 } 2212 }, 2213 }; 2214 2215 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 2216 /* authenc functions */ 2217 2218 static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd); 2219 static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd); 2220 static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd); 2221 2222 2223 struct atmel_sha_authenc_ctx { 2224 struct crypto_ahash *tfm; 2225 }; 2226 2227 struct atmel_sha_authenc_reqctx { 2228 struct atmel_sha_reqctx base; 2229 2230 atmel_aes_authenc_fn_t cb; 2231 struct atmel_aes_dev *aes_dev; 2232 2233 /* _init() parameters. */ 2234 struct scatterlist *assoc; 2235 u32 assoclen; 2236 u32 textlen; 2237 2238 /* _final() parameters. */ 2239 u32 *digest; 2240 unsigned int digestlen; 2241 }; 2242 2243 static void atmel_sha_authenc_complete(struct crypto_async_request *areq, 2244 int err) 2245 { 2246 struct ahash_request *req = areq->data; 2247 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2248 2249 authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async); 2250 } 2251 2252 static int atmel_sha_authenc_start(struct atmel_sha_dev *dd) 2253 { 2254 struct ahash_request *req = dd->req; 2255 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2256 int err; 2257 2258 /* 2259 * Force atmel_sha_complete() to call req->base.complete(), ie 2260 * atmel_sha_authenc_complete(), which in turn calls authctx->cb(). 2261 */ 2262 dd->force_complete = true; 2263 2264 err = atmel_sha_hw_init(dd); 2265 return authctx->cb(authctx->aes_dev, err, dd->is_async); 2266 } 2267 2268 bool atmel_sha_authenc_is_ready(void) 2269 { 2270 struct atmel_sha_ctx dummy; 2271 2272 dummy.dd = NULL; 2273 return (atmel_sha_find_dev(&dummy) != NULL); 2274 } 2275 EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready); 2276 2277 unsigned int atmel_sha_authenc_get_reqsize(void) 2278 { 2279 return sizeof(struct atmel_sha_authenc_reqctx); 2280 } 2281 EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize); 2282 2283 struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode) 2284 { 2285 struct atmel_sha_authenc_ctx *auth; 2286 struct crypto_ahash *tfm; 2287 struct atmel_sha_ctx *tctx; 2288 const char *name; 2289 int err = -EINVAL; 2290 2291 switch (mode & SHA_FLAGS_MODE_MASK) { 2292 case SHA_FLAGS_HMAC_SHA1: 2293 name = "atmel-hmac-sha1"; 2294 break; 2295 2296 case SHA_FLAGS_HMAC_SHA224: 2297 name = "atmel-hmac-sha224"; 2298 break; 2299 2300 case SHA_FLAGS_HMAC_SHA256: 2301 name = "atmel-hmac-sha256"; 2302 break; 2303 2304 case SHA_FLAGS_HMAC_SHA384: 2305 name = "atmel-hmac-sha384"; 2306 break; 2307 2308 case SHA_FLAGS_HMAC_SHA512: 2309 name = "atmel-hmac-sha512"; 2310 break; 2311 2312 default: 2313 goto error; 2314 } 2315 2316 tfm = crypto_alloc_ahash(name, 0, 0); 2317 if (IS_ERR(tfm)) { 2318 err = PTR_ERR(tfm); 2319 goto error; 2320 } 2321 tctx = crypto_ahash_ctx(tfm); 2322 tctx->start = atmel_sha_authenc_start; 2323 tctx->flags = mode; 2324 2325 auth = kzalloc(sizeof(*auth), GFP_KERNEL); 2326 if (!auth) { 2327 err = -ENOMEM; 2328 goto err_free_ahash; 2329 } 2330 auth->tfm = tfm; 2331 2332 return auth; 2333 2334 err_free_ahash: 2335 crypto_free_ahash(tfm); 2336 error: 2337 return ERR_PTR(err); 2338 } 2339 EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn); 2340 2341 void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth) 2342 { 2343 if (auth) 2344 crypto_free_ahash(auth->tfm); 2345 kfree(auth); 2346 } 2347 EXPORT_SYMBOL_GPL(atmel_sha_authenc_free); 2348 2349 int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth, 2350 const u8 *key, unsigned int keylen, 2351 u32 *flags) 2352 { 2353 struct crypto_ahash *tfm = auth->tfm; 2354 int err; 2355 2356 crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK); 2357 crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK); 2358 err = crypto_ahash_setkey(tfm, key, keylen); 2359 *flags = crypto_ahash_get_flags(tfm); 2360 2361 return err; 2362 } 2363 EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey); 2364 2365 int atmel_sha_authenc_schedule(struct ahash_request *req, 2366 struct atmel_sha_authenc_ctx *auth, 2367 atmel_aes_authenc_fn_t cb, 2368 struct atmel_aes_dev *aes_dev) 2369 { 2370 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2371 struct atmel_sha_reqctx *ctx = &authctx->base; 2372 struct crypto_ahash *tfm = auth->tfm; 2373 struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm); 2374 struct atmel_sha_dev *dd; 2375 2376 /* Reset request context (MUST be done first). */ 2377 memset(authctx, 0, sizeof(*authctx)); 2378 2379 /* Get SHA device. */ 2380 dd = atmel_sha_find_dev(tctx); 2381 if (!dd) 2382 return cb(aes_dev, -ENODEV, false); 2383 2384 /* Init request context. */ 2385 ctx->dd = dd; 2386 ctx->buflen = SHA_BUFFER_LEN; 2387 authctx->cb = cb; 2388 authctx->aes_dev = aes_dev; 2389 ahash_request_set_tfm(req, tfm); 2390 ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req); 2391 2392 return atmel_sha_handle_queue(dd, req); 2393 } 2394 EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule); 2395 2396 int atmel_sha_authenc_init(struct ahash_request *req, 2397 struct scatterlist *assoc, unsigned int assoclen, 2398 unsigned int textlen, 2399 atmel_aes_authenc_fn_t cb, 2400 struct atmel_aes_dev *aes_dev) 2401 { 2402 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2403 struct atmel_sha_reqctx *ctx = &authctx->base; 2404 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2405 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 2406 struct atmel_sha_dev *dd = ctx->dd; 2407 2408 if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32)))) 2409 return atmel_sha_complete(dd, -EINVAL); 2410 2411 authctx->cb = cb; 2412 authctx->aes_dev = aes_dev; 2413 authctx->assoc = assoc; 2414 authctx->assoclen = assoclen; 2415 authctx->textlen = textlen; 2416 2417 ctx->flags = hmac->base.flags; 2418 return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2); 2419 } 2420 EXPORT_SYMBOL_GPL(atmel_sha_authenc_init); 2421 2422 static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd) 2423 { 2424 struct ahash_request *req = dd->req; 2425 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2426 struct atmel_sha_reqctx *ctx = &authctx->base; 2427 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2428 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 2429 size_t hs = ctx->hash_size; 2430 size_t i, num_words = hs / sizeof(u32); 2431 u32 mr, msg_size; 2432 2433 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); 2434 for (i = 0; i < num_words; ++i) 2435 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); 2436 2437 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV); 2438 for (i = 0; i < num_words; ++i) 2439 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); 2440 2441 mr = (SHA_MR_MODE_IDATAR0 | 2442 SHA_MR_HMAC | 2443 SHA_MR_DUALBUFF); 2444 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; 2445 atmel_sha_write(dd, SHA_MR, mr); 2446 2447 msg_size = authctx->assoclen + authctx->textlen; 2448 atmel_sha_write(dd, SHA_MSR, msg_size); 2449 atmel_sha_write(dd, SHA_BCR, msg_size); 2450 2451 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 2452 2453 /* Process assoc data. */ 2454 return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen, 2455 true, false, 2456 atmel_sha_authenc_init_done); 2457 } 2458 2459 static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd) 2460 { 2461 struct ahash_request *req = dd->req; 2462 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2463 2464 return authctx->cb(authctx->aes_dev, 0, dd->is_async); 2465 } 2466 2467 int atmel_sha_authenc_final(struct ahash_request *req, 2468 u32 *digest, unsigned int digestlen, 2469 atmel_aes_authenc_fn_t cb, 2470 struct atmel_aes_dev *aes_dev) 2471 { 2472 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2473 struct atmel_sha_reqctx *ctx = &authctx->base; 2474 struct atmel_sha_dev *dd = ctx->dd; 2475 2476 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 2477 case SHA_FLAGS_SHA1: 2478 authctx->digestlen = SHA1_DIGEST_SIZE; 2479 break; 2480 2481 case SHA_FLAGS_SHA224: 2482 authctx->digestlen = SHA224_DIGEST_SIZE; 2483 break; 2484 2485 case SHA_FLAGS_SHA256: 2486 authctx->digestlen = SHA256_DIGEST_SIZE; 2487 break; 2488 2489 case SHA_FLAGS_SHA384: 2490 authctx->digestlen = SHA384_DIGEST_SIZE; 2491 break; 2492 2493 case SHA_FLAGS_SHA512: 2494 authctx->digestlen = SHA512_DIGEST_SIZE; 2495 break; 2496 2497 default: 2498 return atmel_sha_complete(dd, -EINVAL); 2499 } 2500 if (authctx->digestlen > digestlen) 2501 authctx->digestlen = digestlen; 2502 2503 authctx->cb = cb; 2504 authctx->aes_dev = aes_dev; 2505 authctx->digest = digest; 2506 return atmel_sha_wait_for_data_ready(dd, 2507 atmel_sha_authenc_final_done); 2508 } 2509 EXPORT_SYMBOL_GPL(atmel_sha_authenc_final); 2510 2511 static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd) 2512 { 2513 struct ahash_request *req = dd->req; 2514 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2515 size_t i, num_words = authctx->digestlen / sizeof(u32); 2516 2517 for (i = 0; i < num_words; ++i) 2518 authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); 2519 2520 return atmel_sha_complete(dd, 0); 2521 } 2522 2523 void atmel_sha_authenc_abort(struct ahash_request *req) 2524 { 2525 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2526 struct atmel_sha_reqctx *ctx = &authctx->base; 2527 struct atmel_sha_dev *dd = ctx->dd; 2528 2529 /* Prevent atmel_sha_complete() from calling req->base.complete(). */ 2530 dd->is_async = false; 2531 dd->force_complete = false; 2532 (void)atmel_sha_complete(dd, 0); 2533 } 2534 EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort); 2535 2536 #endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */ 2537 2538 2539 static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd) 2540 { 2541 int i; 2542 2543 if (dd->caps.has_hmac) 2544 for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) 2545 crypto_unregister_ahash(&sha_hmac_algs[i]); 2546 2547 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) 2548 crypto_unregister_ahash(&sha_1_256_algs[i]); 2549 2550 if (dd->caps.has_sha224) 2551 crypto_unregister_ahash(&sha_224_alg); 2552 2553 if (dd->caps.has_sha_384_512) { 2554 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) 2555 crypto_unregister_ahash(&sha_384_512_algs[i]); 2556 } 2557 } 2558 2559 static int atmel_sha_register_algs(struct atmel_sha_dev *dd) 2560 { 2561 int err, i, j; 2562 2563 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) { 2564 err = crypto_register_ahash(&sha_1_256_algs[i]); 2565 if (err) 2566 goto err_sha_1_256_algs; 2567 } 2568 2569 if (dd->caps.has_sha224) { 2570 err = crypto_register_ahash(&sha_224_alg); 2571 if (err) 2572 goto err_sha_224_algs; 2573 } 2574 2575 if (dd->caps.has_sha_384_512) { 2576 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) { 2577 err = crypto_register_ahash(&sha_384_512_algs[i]); 2578 if (err) 2579 goto err_sha_384_512_algs; 2580 } 2581 } 2582 2583 if (dd->caps.has_hmac) { 2584 for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) { 2585 err = crypto_register_ahash(&sha_hmac_algs[i]); 2586 if (err) 2587 goto err_sha_hmac_algs; 2588 } 2589 } 2590 2591 return 0; 2592 2593 /*i = ARRAY_SIZE(sha_hmac_algs);*/ 2594 err_sha_hmac_algs: 2595 for (j = 0; j < i; j++) 2596 crypto_unregister_ahash(&sha_hmac_algs[j]); 2597 i = ARRAY_SIZE(sha_384_512_algs); 2598 err_sha_384_512_algs: 2599 for (j = 0; j < i; j++) 2600 crypto_unregister_ahash(&sha_384_512_algs[j]); 2601 crypto_unregister_ahash(&sha_224_alg); 2602 err_sha_224_algs: 2603 i = ARRAY_SIZE(sha_1_256_algs); 2604 err_sha_1_256_algs: 2605 for (j = 0; j < i; j++) 2606 crypto_unregister_ahash(&sha_1_256_algs[j]); 2607 2608 return err; 2609 } 2610 2611 static int atmel_sha_dma_init(struct atmel_sha_dev *dd, 2612 struct crypto_platform_data *pdata) 2613 { 2614 dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx"); 2615 if (IS_ERR(dd->dma_lch_in.chan)) { 2616 int ret = PTR_ERR(dd->dma_lch_in.chan); 2617 2618 if (ret != -EPROBE_DEFER) 2619 dev_warn(dd->dev, "no DMA channel available\n"); 2620 return ret; 2621 } 2622 2623 dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; 2624 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + 2625 SHA_REG_DIN(0); 2626 dd->dma_lch_in.dma_conf.src_maxburst = 1; 2627 dd->dma_lch_in.dma_conf.src_addr_width = 2628 DMA_SLAVE_BUSWIDTH_4_BYTES; 2629 dd->dma_lch_in.dma_conf.dst_maxburst = 1; 2630 dd->dma_lch_in.dma_conf.dst_addr_width = 2631 DMA_SLAVE_BUSWIDTH_4_BYTES; 2632 dd->dma_lch_in.dma_conf.device_fc = false; 2633 2634 return 0; 2635 } 2636 2637 static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd) 2638 { 2639 dma_release_channel(dd->dma_lch_in.chan); 2640 } 2641 2642 static void atmel_sha_get_cap(struct atmel_sha_dev *dd) 2643 { 2644 2645 dd->caps.has_dma = 0; 2646 dd->caps.has_dualbuff = 0; 2647 dd->caps.has_sha224 = 0; 2648 dd->caps.has_sha_384_512 = 0; 2649 dd->caps.has_uihv = 0; 2650 dd->caps.has_hmac = 0; 2651 2652 /* keep only major version number */ 2653 switch (dd->hw_version & 0xff0) { 2654 case 0x510: 2655 dd->caps.has_dma = 1; 2656 dd->caps.has_dualbuff = 1; 2657 dd->caps.has_sha224 = 1; 2658 dd->caps.has_sha_384_512 = 1; 2659 dd->caps.has_uihv = 1; 2660 dd->caps.has_hmac = 1; 2661 break; 2662 case 0x420: 2663 dd->caps.has_dma = 1; 2664 dd->caps.has_dualbuff = 1; 2665 dd->caps.has_sha224 = 1; 2666 dd->caps.has_sha_384_512 = 1; 2667 dd->caps.has_uihv = 1; 2668 break; 2669 case 0x410: 2670 dd->caps.has_dma = 1; 2671 dd->caps.has_dualbuff = 1; 2672 dd->caps.has_sha224 = 1; 2673 dd->caps.has_sha_384_512 = 1; 2674 break; 2675 case 0x400: 2676 dd->caps.has_dma = 1; 2677 dd->caps.has_dualbuff = 1; 2678 dd->caps.has_sha224 = 1; 2679 break; 2680 case 0x320: 2681 break; 2682 default: 2683 dev_warn(dd->dev, 2684 "Unmanaged sha version, set minimum capabilities\n"); 2685 break; 2686 } 2687 } 2688 2689 #if defined(CONFIG_OF) 2690 static const struct of_device_id atmel_sha_dt_ids[] = { 2691 { .compatible = "atmel,at91sam9g46-sha" }, 2692 { /* sentinel */ } 2693 }; 2694 2695 MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids); 2696 2697 static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev) 2698 { 2699 struct device_node *np = pdev->dev.of_node; 2700 struct crypto_platform_data *pdata; 2701 2702 if (!np) { 2703 dev_err(&pdev->dev, "device node not found\n"); 2704 return ERR_PTR(-EINVAL); 2705 } 2706 2707 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 2708 if (!pdata) 2709 return ERR_PTR(-ENOMEM); 2710 2711 return pdata; 2712 } 2713 #else /* CONFIG_OF */ 2714 static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev) 2715 { 2716 return ERR_PTR(-EINVAL); 2717 } 2718 #endif 2719 2720 static int atmel_sha_probe(struct platform_device *pdev) 2721 { 2722 struct atmel_sha_dev *sha_dd; 2723 struct crypto_platform_data *pdata; 2724 struct device *dev = &pdev->dev; 2725 struct resource *sha_res; 2726 int err; 2727 2728 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL); 2729 if (!sha_dd) 2730 return -ENOMEM; 2731 2732 sha_dd->dev = dev; 2733 2734 platform_set_drvdata(pdev, sha_dd); 2735 2736 INIT_LIST_HEAD(&sha_dd->list); 2737 spin_lock_init(&sha_dd->lock); 2738 2739 tasklet_init(&sha_dd->done_task, atmel_sha_done_task, 2740 (unsigned long)sha_dd); 2741 tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task, 2742 (unsigned long)sha_dd); 2743 2744 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); 2745 2746 /* Get the base address */ 2747 sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2748 if (!sha_res) { 2749 dev_err(dev, "no MEM resource info\n"); 2750 err = -ENODEV; 2751 goto err_tasklet_kill; 2752 } 2753 sha_dd->phys_base = sha_res->start; 2754 2755 /* Get the IRQ */ 2756 sha_dd->irq = platform_get_irq(pdev, 0); 2757 if (sha_dd->irq < 0) { 2758 err = sha_dd->irq; 2759 goto err_tasklet_kill; 2760 } 2761 2762 err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq, 2763 IRQF_SHARED, "atmel-sha", sha_dd); 2764 if (err) { 2765 dev_err(dev, "unable to request sha irq.\n"); 2766 goto err_tasklet_kill; 2767 } 2768 2769 /* Initializing the clock */ 2770 sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk"); 2771 if (IS_ERR(sha_dd->iclk)) { 2772 dev_err(dev, "clock initialization failed.\n"); 2773 err = PTR_ERR(sha_dd->iclk); 2774 goto err_tasklet_kill; 2775 } 2776 2777 sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res); 2778 if (IS_ERR(sha_dd->io_base)) { 2779 dev_err(dev, "can't ioremap\n"); 2780 err = PTR_ERR(sha_dd->io_base); 2781 goto err_tasklet_kill; 2782 } 2783 2784 err = clk_prepare(sha_dd->iclk); 2785 if (err) 2786 goto err_tasklet_kill; 2787 2788 err = atmel_sha_hw_version_init(sha_dd); 2789 if (err) 2790 goto err_iclk_unprepare; 2791 2792 atmel_sha_get_cap(sha_dd); 2793 2794 if (sha_dd->caps.has_dma) { 2795 pdata = pdev->dev.platform_data; 2796 if (!pdata) { 2797 pdata = atmel_sha_of_init(pdev); 2798 if (IS_ERR(pdata)) { 2799 dev_err(&pdev->dev, "platform data not available\n"); 2800 err = PTR_ERR(pdata); 2801 goto err_iclk_unprepare; 2802 } 2803 } 2804 2805 err = atmel_sha_dma_init(sha_dd, pdata); 2806 if (err) 2807 goto err_iclk_unprepare; 2808 2809 dev_info(dev, "using %s for DMA transfers\n", 2810 dma_chan_name(sha_dd->dma_lch_in.chan)); 2811 } 2812 2813 spin_lock(&atmel_sha.lock); 2814 list_add_tail(&sha_dd->list, &atmel_sha.dev_list); 2815 spin_unlock(&atmel_sha.lock); 2816 2817 err = atmel_sha_register_algs(sha_dd); 2818 if (err) 2819 goto err_algs; 2820 2821 dev_info(dev, "Atmel SHA1/SHA256%s%s\n", 2822 sha_dd->caps.has_sha224 ? "/SHA224" : "", 2823 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : ""); 2824 2825 return 0; 2826 2827 err_algs: 2828 spin_lock(&atmel_sha.lock); 2829 list_del(&sha_dd->list); 2830 spin_unlock(&atmel_sha.lock); 2831 if (sha_dd->caps.has_dma) 2832 atmel_sha_dma_cleanup(sha_dd); 2833 err_iclk_unprepare: 2834 clk_unprepare(sha_dd->iclk); 2835 err_tasklet_kill: 2836 tasklet_kill(&sha_dd->queue_task); 2837 tasklet_kill(&sha_dd->done_task); 2838 2839 return err; 2840 } 2841 2842 static int atmel_sha_remove(struct platform_device *pdev) 2843 { 2844 struct atmel_sha_dev *sha_dd; 2845 2846 sha_dd = platform_get_drvdata(pdev); 2847 if (!sha_dd) 2848 return -ENODEV; 2849 spin_lock(&atmel_sha.lock); 2850 list_del(&sha_dd->list); 2851 spin_unlock(&atmel_sha.lock); 2852 2853 atmel_sha_unregister_algs(sha_dd); 2854 2855 tasklet_kill(&sha_dd->queue_task); 2856 tasklet_kill(&sha_dd->done_task); 2857 2858 if (sha_dd->caps.has_dma) 2859 atmel_sha_dma_cleanup(sha_dd); 2860 2861 clk_unprepare(sha_dd->iclk); 2862 2863 return 0; 2864 } 2865 2866 static struct platform_driver atmel_sha_driver = { 2867 .probe = atmel_sha_probe, 2868 .remove = atmel_sha_remove, 2869 .driver = { 2870 .name = "atmel_sha", 2871 .of_match_table = of_match_ptr(atmel_sha_dt_ids), 2872 }, 2873 }; 2874 2875 module_platform_driver(atmel_sha_driver); 2876 2877 MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support."); 2878 MODULE_LICENSE("GPL v2"); 2879 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); 2880