1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Cryptographic API. 4 * 5 * Support for ATMEL SHA1/SHA256 HW acceleration. 6 * 7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL 8 * Author: Nicolas Royer <nicolas@eukrea.com> 9 * 10 * Some ideas are from omap-sham.c drivers. 11 */ 12 13 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/slab.h> 17 #include <linux/err.h> 18 #include <linux/clk.h> 19 #include <linux/io.h> 20 #include <linux/hw_random.h> 21 #include <linux/platform_device.h> 22 23 #include <linux/device.h> 24 #include <linux/dmaengine.h> 25 #include <linux/init.h> 26 #include <linux/errno.h> 27 #include <linux/interrupt.h> 28 #include <linux/irq.h> 29 #include <linux/scatterlist.h> 30 #include <linux/dma-mapping.h> 31 #include <linux/mod_devicetable.h> 32 #include <linux/delay.h> 33 #include <linux/crypto.h> 34 #include <crypto/scatterwalk.h> 35 #include <crypto/algapi.h> 36 #include <crypto/sha1.h> 37 #include <crypto/sha2.h> 38 #include <crypto/hash.h> 39 #include <crypto/internal/hash.h> 40 #include "atmel-sha-regs.h" 41 #include "atmel-authenc.h" 42 43 #define ATMEL_SHA_PRIORITY 300 44 45 /* SHA flags */ 46 #define SHA_FLAGS_BUSY BIT(0) 47 #define SHA_FLAGS_FINAL BIT(1) 48 #define SHA_FLAGS_DMA_ACTIVE BIT(2) 49 #define SHA_FLAGS_OUTPUT_READY BIT(3) 50 #define SHA_FLAGS_INIT BIT(4) 51 #define SHA_FLAGS_CPU BIT(5) 52 #define SHA_FLAGS_DMA_READY BIT(6) 53 #define SHA_FLAGS_DUMP_REG BIT(7) 54 55 /* bits[11:8] are reserved. */ 56 57 #define SHA_FLAGS_FINUP BIT(16) 58 #define SHA_FLAGS_SG BIT(17) 59 #define SHA_FLAGS_ERROR BIT(23) 60 #define SHA_FLAGS_PAD BIT(24) 61 #define SHA_FLAGS_RESTORE BIT(25) 62 #define SHA_FLAGS_IDATAR0 BIT(26) 63 #define SHA_FLAGS_WAIT_DATARDY BIT(27) 64 65 #define SHA_OP_INIT 0 66 #define SHA_OP_UPDATE 1 67 #define SHA_OP_FINAL 2 68 #define SHA_OP_DIGEST 3 69 70 #define SHA_BUFFER_LEN (PAGE_SIZE / 16) 71 72 #define ATMEL_SHA_DMA_THRESHOLD 56 73 74 struct atmel_sha_caps { 75 bool has_dma; 76 bool has_dualbuff; 77 bool has_sha224; 78 bool has_sha_384_512; 79 bool has_uihv; 80 bool has_hmac; 81 }; 82 83 struct atmel_sha_dev; 84 85 /* 86 * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as 87 * tested by the ahash_prepare_alg() function. 88 */ 89 struct atmel_sha_reqctx { 90 struct atmel_sha_dev *dd; 91 unsigned long flags; 92 unsigned long op; 93 94 u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32)); 95 u64 digcnt[2]; 96 size_t bufcnt; 97 size_t buflen; 98 dma_addr_t dma_addr; 99 100 /* walk state */ 101 struct scatterlist *sg; 102 unsigned int offset; /* offset in current sg */ 103 unsigned int total; /* total request */ 104 105 size_t block_size; 106 size_t hash_size; 107 108 u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); 109 }; 110 111 typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *); 112 113 struct atmel_sha_ctx { 114 struct atmel_sha_dev *dd; 115 atmel_sha_fn_t start; 116 117 unsigned long flags; 118 }; 119 120 #define ATMEL_SHA_QUEUE_LENGTH 50 121 122 struct atmel_sha_dma { 123 struct dma_chan *chan; 124 struct dma_slave_config dma_conf; 125 struct scatterlist *sg; 126 int nents; 127 unsigned int last_sg_length; 128 }; 129 130 struct atmel_sha_dev { 131 struct list_head list; 132 unsigned long phys_base; 133 struct device *dev; 134 struct clk *iclk; 135 int irq; 136 void __iomem *io_base; 137 138 spinlock_t lock; 139 struct tasklet_struct done_task; 140 struct tasklet_struct queue_task; 141 142 unsigned long flags; 143 struct crypto_queue queue; 144 struct ahash_request *req; 145 bool is_async; 146 bool force_complete; 147 atmel_sha_fn_t resume; 148 atmel_sha_fn_t cpu_transfer_complete; 149 150 struct atmel_sha_dma dma_lch_in; 151 152 struct atmel_sha_caps caps; 153 154 struct scatterlist tmp; 155 156 u32 hw_version; 157 }; 158 159 struct atmel_sha_drv { 160 struct list_head dev_list; 161 spinlock_t lock; 162 }; 163 164 static struct atmel_sha_drv atmel_sha = { 165 .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list), 166 .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock), 167 }; 168 169 #ifdef VERBOSE_DEBUG 170 static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr) 171 { 172 switch (offset) { 173 case SHA_CR: 174 return "CR"; 175 176 case SHA_MR: 177 return "MR"; 178 179 case SHA_IER: 180 return "IER"; 181 182 case SHA_IDR: 183 return "IDR"; 184 185 case SHA_IMR: 186 return "IMR"; 187 188 case SHA_ISR: 189 return "ISR"; 190 191 case SHA_MSR: 192 return "MSR"; 193 194 case SHA_BCR: 195 return "BCR"; 196 197 case SHA_REG_DIN(0): 198 case SHA_REG_DIN(1): 199 case SHA_REG_DIN(2): 200 case SHA_REG_DIN(3): 201 case SHA_REG_DIN(4): 202 case SHA_REG_DIN(5): 203 case SHA_REG_DIN(6): 204 case SHA_REG_DIN(7): 205 case SHA_REG_DIN(8): 206 case SHA_REG_DIN(9): 207 case SHA_REG_DIN(10): 208 case SHA_REG_DIN(11): 209 case SHA_REG_DIN(12): 210 case SHA_REG_DIN(13): 211 case SHA_REG_DIN(14): 212 case SHA_REG_DIN(15): 213 snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2); 214 break; 215 216 case SHA_REG_DIGEST(0): 217 case SHA_REG_DIGEST(1): 218 case SHA_REG_DIGEST(2): 219 case SHA_REG_DIGEST(3): 220 case SHA_REG_DIGEST(4): 221 case SHA_REG_DIGEST(5): 222 case SHA_REG_DIGEST(6): 223 case SHA_REG_DIGEST(7): 224 case SHA_REG_DIGEST(8): 225 case SHA_REG_DIGEST(9): 226 case SHA_REG_DIGEST(10): 227 case SHA_REG_DIGEST(11): 228 case SHA_REG_DIGEST(12): 229 case SHA_REG_DIGEST(13): 230 case SHA_REG_DIGEST(14): 231 case SHA_REG_DIGEST(15): 232 if (wr) 233 snprintf(tmp, sz, "IDATAR[%u]", 234 16u + ((offset - SHA_REG_DIGEST(0)) >> 2)); 235 else 236 snprintf(tmp, sz, "ODATAR[%u]", 237 (offset - SHA_REG_DIGEST(0)) >> 2); 238 break; 239 240 case SHA_HW_VERSION: 241 return "HWVER"; 242 243 default: 244 snprintf(tmp, sz, "0x%02x", offset); 245 break; 246 } 247 248 return tmp; 249 } 250 251 #endif /* VERBOSE_DEBUG */ 252 253 static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset) 254 { 255 u32 value = readl_relaxed(dd->io_base + offset); 256 257 #ifdef VERBOSE_DEBUG 258 if (dd->flags & SHA_FLAGS_DUMP_REG) { 259 char tmp[16]; 260 261 dev_vdbg(dd->dev, "read 0x%08x from %s\n", value, 262 atmel_sha_reg_name(offset, tmp, sizeof(tmp), false)); 263 } 264 #endif /* VERBOSE_DEBUG */ 265 266 return value; 267 } 268 269 static inline void atmel_sha_write(struct atmel_sha_dev *dd, 270 u32 offset, u32 value) 271 { 272 #ifdef VERBOSE_DEBUG 273 if (dd->flags & SHA_FLAGS_DUMP_REG) { 274 char tmp[16]; 275 276 dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, 277 atmel_sha_reg_name(offset, tmp, sizeof(tmp), true)); 278 } 279 #endif /* VERBOSE_DEBUG */ 280 281 writel_relaxed(value, dd->io_base + offset); 282 } 283 284 static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err) 285 { 286 struct ahash_request *req = dd->req; 287 288 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | 289 SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY | 290 SHA_FLAGS_DUMP_REG); 291 292 clk_disable(dd->iclk); 293 294 if ((dd->is_async || dd->force_complete) && req->base.complete) 295 ahash_request_complete(req, err); 296 297 /* handle new request */ 298 tasklet_schedule(&dd->queue_task); 299 300 return err; 301 } 302 303 static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx) 304 { 305 size_t count; 306 307 while ((ctx->bufcnt < ctx->buflen) && ctx->total) { 308 count = min(ctx->sg->length - ctx->offset, ctx->total); 309 count = min(count, ctx->buflen - ctx->bufcnt); 310 311 if (count <= 0) { 312 /* 313 * Check if count <= 0 because the buffer is full or 314 * because the sg length is 0. In the latest case, 315 * check if there is another sg in the list, a 0 length 316 * sg doesn't necessarily mean the end of the sg list. 317 */ 318 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { 319 ctx->sg = sg_next(ctx->sg); 320 continue; 321 } else { 322 break; 323 } 324 } 325 326 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, 327 ctx->offset, count, 0); 328 329 ctx->bufcnt += count; 330 ctx->offset += count; 331 ctx->total -= count; 332 333 if (ctx->offset == ctx->sg->length) { 334 ctx->sg = sg_next(ctx->sg); 335 if (ctx->sg) 336 ctx->offset = 0; 337 else 338 ctx->total = 0; 339 } 340 } 341 342 return 0; 343 } 344 345 /* 346 * The purpose of this padding is to ensure that the padded message is a 347 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512). 348 * The bit "1" is appended at the end of the message followed by 349 * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or 350 * 128 bits block (SHA384/SHA512) equals to the message length in bits 351 * is appended. 352 * 353 * For SHA1/SHA224/SHA256, padlen is calculated as followed: 354 * - if message length < 56 bytes then padlen = 56 - message length 355 * - else padlen = 64 + 56 - message length 356 * 357 * For SHA384/SHA512, padlen is calculated as followed: 358 * - if message length < 112 bytes then padlen = 112 - message length 359 * - else padlen = 128 + 112 - message length 360 */ 361 static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length) 362 { 363 unsigned int index, padlen; 364 __be64 bits[2]; 365 u64 size[2]; 366 367 size[0] = ctx->digcnt[0]; 368 size[1] = ctx->digcnt[1]; 369 370 size[0] += ctx->bufcnt; 371 if (size[0] < ctx->bufcnt) 372 size[1]++; 373 374 size[0] += length; 375 if (size[0] < length) 376 size[1]++; 377 378 bits[1] = cpu_to_be64(size[0] << 3); 379 bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61); 380 381 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 382 case SHA_FLAGS_SHA384: 383 case SHA_FLAGS_SHA512: 384 index = ctx->bufcnt & 0x7f; 385 padlen = (index < 112) ? (112 - index) : ((128+112) - index); 386 *(ctx->buffer + ctx->bufcnt) = 0x80; 387 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); 388 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); 389 ctx->bufcnt += padlen + 16; 390 ctx->flags |= SHA_FLAGS_PAD; 391 break; 392 393 default: 394 index = ctx->bufcnt & 0x3f; 395 padlen = (index < 56) ? (56 - index) : ((64+56) - index); 396 *(ctx->buffer + ctx->bufcnt) = 0x80; 397 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); 398 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); 399 ctx->bufcnt += padlen + 8; 400 ctx->flags |= SHA_FLAGS_PAD; 401 break; 402 } 403 } 404 405 static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx) 406 { 407 struct atmel_sha_dev *dd = NULL; 408 struct atmel_sha_dev *tmp; 409 410 spin_lock_bh(&atmel_sha.lock); 411 if (!tctx->dd) { 412 list_for_each_entry(tmp, &atmel_sha.dev_list, list) { 413 dd = tmp; 414 break; 415 } 416 tctx->dd = dd; 417 } else { 418 dd = tctx->dd; 419 } 420 421 spin_unlock_bh(&atmel_sha.lock); 422 423 return dd; 424 } 425 426 static int atmel_sha_init(struct ahash_request *req) 427 { 428 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 429 struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm); 430 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 431 struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx); 432 433 ctx->dd = dd; 434 435 ctx->flags = 0; 436 437 dev_dbg(dd->dev, "init: digest size: %u\n", 438 crypto_ahash_digestsize(tfm)); 439 440 switch (crypto_ahash_digestsize(tfm)) { 441 case SHA1_DIGEST_SIZE: 442 ctx->flags |= SHA_FLAGS_SHA1; 443 ctx->block_size = SHA1_BLOCK_SIZE; 444 break; 445 case SHA224_DIGEST_SIZE: 446 ctx->flags |= SHA_FLAGS_SHA224; 447 ctx->block_size = SHA224_BLOCK_SIZE; 448 break; 449 case SHA256_DIGEST_SIZE: 450 ctx->flags |= SHA_FLAGS_SHA256; 451 ctx->block_size = SHA256_BLOCK_SIZE; 452 break; 453 case SHA384_DIGEST_SIZE: 454 ctx->flags |= SHA_FLAGS_SHA384; 455 ctx->block_size = SHA384_BLOCK_SIZE; 456 break; 457 case SHA512_DIGEST_SIZE: 458 ctx->flags |= SHA_FLAGS_SHA512; 459 ctx->block_size = SHA512_BLOCK_SIZE; 460 break; 461 default: 462 return -EINVAL; 463 } 464 465 ctx->bufcnt = 0; 466 ctx->digcnt[0] = 0; 467 ctx->digcnt[1] = 0; 468 ctx->buflen = SHA_BUFFER_LEN; 469 470 return 0; 471 } 472 473 static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma) 474 { 475 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 476 u32 valmr = SHA_MR_MODE_AUTO; 477 unsigned int i, hashsize = 0; 478 479 if (likely(dma)) { 480 if (!dd->caps.has_dma) 481 atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE); 482 valmr = SHA_MR_MODE_PDC; 483 if (dd->caps.has_dualbuff) 484 valmr |= SHA_MR_DUALBUFF; 485 } else { 486 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); 487 } 488 489 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 490 case SHA_FLAGS_SHA1: 491 valmr |= SHA_MR_ALGO_SHA1; 492 hashsize = SHA1_DIGEST_SIZE; 493 break; 494 495 case SHA_FLAGS_SHA224: 496 valmr |= SHA_MR_ALGO_SHA224; 497 hashsize = SHA256_DIGEST_SIZE; 498 break; 499 500 case SHA_FLAGS_SHA256: 501 valmr |= SHA_MR_ALGO_SHA256; 502 hashsize = SHA256_DIGEST_SIZE; 503 break; 504 505 case SHA_FLAGS_SHA384: 506 valmr |= SHA_MR_ALGO_SHA384; 507 hashsize = SHA512_DIGEST_SIZE; 508 break; 509 510 case SHA_FLAGS_SHA512: 511 valmr |= SHA_MR_ALGO_SHA512; 512 hashsize = SHA512_DIGEST_SIZE; 513 break; 514 515 default: 516 break; 517 } 518 519 /* Setting CR_FIRST only for the first iteration */ 520 if (!(ctx->digcnt[0] || ctx->digcnt[1])) { 521 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 522 } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) { 523 const u32 *hash = (const u32 *)ctx->digest; 524 525 /* 526 * Restore the hardware context: update the User Initialize 527 * Hash Value (UIHV) with the value saved when the latest 528 * 'update' operation completed on this very same crypto 529 * request. 530 */ 531 ctx->flags &= ~SHA_FLAGS_RESTORE; 532 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); 533 for (i = 0; i < hashsize / sizeof(u32); ++i) 534 atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]); 535 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 536 valmr |= SHA_MR_UIHV; 537 } 538 /* 539 * WARNING: If the UIHV feature is not available, the hardware CANNOT 540 * process concurrent requests: the internal registers used to store 541 * the hash/digest are still set to the partial digest output values 542 * computed during the latest round. 543 */ 544 545 atmel_sha_write(dd, SHA_MR, valmr); 546 } 547 548 static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd, 549 atmel_sha_fn_t resume) 550 { 551 u32 isr = atmel_sha_read(dd, SHA_ISR); 552 553 if (unlikely(isr & SHA_INT_DATARDY)) 554 return resume(dd); 555 556 dd->resume = resume; 557 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); 558 return -EINPROGRESS; 559 } 560 561 static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf, 562 size_t length, int final) 563 { 564 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 565 int count, len32; 566 const u32 *buffer = (const u32 *)buf; 567 568 dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", 569 ctx->digcnt[1], ctx->digcnt[0], length, final); 570 571 atmel_sha_write_ctrl(dd, 0); 572 573 /* should be non-zero before next lines to disable clocks later */ 574 ctx->digcnt[0] += length; 575 if (ctx->digcnt[0] < length) 576 ctx->digcnt[1]++; 577 578 if (final) 579 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ 580 581 len32 = DIV_ROUND_UP(length, sizeof(u32)); 582 583 dd->flags |= SHA_FLAGS_CPU; 584 585 for (count = 0; count < len32; count++) 586 atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]); 587 588 return -EINPROGRESS; 589 } 590 591 static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, 592 size_t length1, dma_addr_t dma_addr2, size_t length2, int final) 593 { 594 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 595 int len32; 596 597 dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", 598 ctx->digcnt[1], ctx->digcnt[0], length1, final); 599 600 len32 = DIV_ROUND_UP(length1, sizeof(u32)); 601 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS); 602 atmel_sha_write(dd, SHA_TPR, dma_addr1); 603 atmel_sha_write(dd, SHA_TCR, len32); 604 605 len32 = DIV_ROUND_UP(length2, sizeof(u32)); 606 atmel_sha_write(dd, SHA_TNPR, dma_addr2); 607 atmel_sha_write(dd, SHA_TNCR, len32); 608 609 atmel_sha_write_ctrl(dd, 1); 610 611 /* should be non-zero before next lines to disable clocks later */ 612 ctx->digcnt[0] += length1; 613 if (ctx->digcnt[0] < length1) 614 ctx->digcnt[1]++; 615 616 if (final) 617 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ 618 619 dd->flags |= SHA_FLAGS_DMA_ACTIVE; 620 621 /* Start DMA transfer */ 622 atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN); 623 624 return -EINPROGRESS; 625 } 626 627 static void atmel_sha_dma_callback(void *data) 628 { 629 struct atmel_sha_dev *dd = data; 630 631 dd->is_async = true; 632 633 /* dma_lch_in - completed - wait DATRDY */ 634 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); 635 } 636 637 static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, 638 size_t length1, dma_addr_t dma_addr2, size_t length2, int final) 639 { 640 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 641 struct dma_async_tx_descriptor *in_desc; 642 struct scatterlist sg[2]; 643 644 dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", 645 ctx->digcnt[1], ctx->digcnt[0], length1, final); 646 647 dd->dma_lch_in.dma_conf.src_maxburst = 16; 648 dd->dma_lch_in.dma_conf.dst_maxburst = 16; 649 650 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); 651 652 if (length2) { 653 sg_init_table(sg, 2); 654 sg_dma_address(&sg[0]) = dma_addr1; 655 sg_dma_len(&sg[0]) = length1; 656 sg_dma_address(&sg[1]) = dma_addr2; 657 sg_dma_len(&sg[1]) = length2; 658 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2, 659 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 660 } else { 661 sg_init_table(sg, 1); 662 sg_dma_address(&sg[0]) = dma_addr1; 663 sg_dma_len(&sg[0]) = length1; 664 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1, 665 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 666 } 667 if (!in_desc) 668 return atmel_sha_complete(dd, -EINVAL); 669 670 in_desc->callback = atmel_sha_dma_callback; 671 in_desc->callback_param = dd; 672 673 atmel_sha_write_ctrl(dd, 1); 674 675 /* should be non-zero before next lines to disable clocks later */ 676 ctx->digcnt[0] += length1; 677 if (ctx->digcnt[0] < length1) 678 ctx->digcnt[1]++; 679 680 if (final) 681 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ 682 683 dd->flags |= SHA_FLAGS_DMA_ACTIVE; 684 685 /* Start DMA transfer */ 686 dmaengine_submit(in_desc); 687 dma_async_issue_pending(dd->dma_lch_in.chan); 688 689 return -EINPROGRESS; 690 } 691 692 static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1, 693 size_t length1, dma_addr_t dma_addr2, size_t length2, int final) 694 { 695 if (dd->caps.has_dma) 696 return atmel_sha_xmit_dma(dd, dma_addr1, length1, 697 dma_addr2, length2, final); 698 else 699 return atmel_sha_xmit_pdc(dd, dma_addr1, length1, 700 dma_addr2, length2, final); 701 } 702 703 static int atmel_sha_update_cpu(struct atmel_sha_dev *dd) 704 { 705 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 706 int bufcnt; 707 708 atmel_sha_append_sg(ctx); 709 atmel_sha_fill_padding(ctx, 0); 710 bufcnt = ctx->bufcnt; 711 ctx->bufcnt = 0; 712 713 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); 714 } 715 716 static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd, 717 struct atmel_sha_reqctx *ctx, 718 size_t length, int final) 719 { 720 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, 721 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); 722 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { 723 dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen + 724 ctx->block_size); 725 return atmel_sha_complete(dd, -EINVAL); 726 } 727 728 ctx->flags &= ~SHA_FLAGS_SG; 729 730 /* next call does not fail... so no unmap in the case of error */ 731 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final); 732 } 733 734 static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd) 735 { 736 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 737 unsigned int final; 738 size_t count; 739 740 atmel_sha_append_sg(ctx); 741 742 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; 743 744 dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n", 745 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final); 746 747 if (final) 748 atmel_sha_fill_padding(ctx, 0); 749 750 if (final || (ctx->bufcnt == ctx->buflen)) { 751 count = ctx->bufcnt; 752 ctx->bufcnt = 0; 753 return atmel_sha_xmit_dma_map(dd, ctx, count, final); 754 } 755 756 return 0; 757 } 758 759 static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd) 760 { 761 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 762 unsigned int length, final, tail; 763 struct scatterlist *sg; 764 unsigned int count; 765 766 if (!ctx->total) 767 return 0; 768 769 if (ctx->bufcnt || ctx->offset) 770 return atmel_sha_update_dma_slow(dd); 771 772 dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n", 773 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total); 774 775 sg = ctx->sg; 776 777 if (!IS_ALIGNED(sg->offset, sizeof(u32))) 778 return atmel_sha_update_dma_slow(dd); 779 780 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) 781 /* size is not ctx->block_size aligned */ 782 return atmel_sha_update_dma_slow(dd); 783 784 length = min(ctx->total, sg->length); 785 786 if (sg_is_last(sg)) { 787 if (!(ctx->flags & SHA_FLAGS_FINUP)) { 788 /* not last sg must be ctx->block_size aligned */ 789 tail = length & (ctx->block_size - 1); 790 length -= tail; 791 } 792 } 793 794 ctx->total -= length; 795 ctx->offset = length; /* offset where to start slow */ 796 797 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; 798 799 /* Add padding */ 800 if (final) { 801 tail = length & (ctx->block_size - 1); 802 length -= tail; 803 ctx->total += tail; 804 ctx->offset = length; /* offset where to start slow */ 805 806 sg = ctx->sg; 807 atmel_sha_append_sg(ctx); 808 809 atmel_sha_fill_padding(ctx, length); 810 811 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, 812 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); 813 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { 814 dev_err(dd->dev, "dma %zu bytes error\n", 815 ctx->buflen + ctx->block_size); 816 return atmel_sha_complete(dd, -EINVAL); 817 } 818 819 if (length == 0) { 820 ctx->flags &= ~SHA_FLAGS_SG; 821 count = ctx->bufcnt; 822 ctx->bufcnt = 0; 823 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0, 824 0, final); 825 } else { 826 ctx->sg = sg; 827 if (!dma_map_sg(dd->dev, ctx->sg, 1, 828 DMA_TO_DEVICE)) { 829 dev_err(dd->dev, "dma_map_sg error\n"); 830 return atmel_sha_complete(dd, -EINVAL); 831 } 832 833 ctx->flags |= SHA_FLAGS_SG; 834 835 count = ctx->bufcnt; 836 ctx->bufcnt = 0; 837 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), 838 length, ctx->dma_addr, count, final); 839 } 840 } 841 842 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { 843 dev_err(dd->dev, "dma_map_sg error\n"); 844 return atmel_sha_complete(dd, -EINVAL); 845 } 846 847 ctx->flags |= SHA_FLAGS_SG; 848 849 /* next call does not fail... so no unmap in the case of error */ 850 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, 851 0, final); 852 } 853 854 static void atmel_sha_update_dma_stop(struct atmel_sha_dev *dd) 855 { 856 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); 857 858 if (ctx->flags & SHA_FLAGS_SG) { 859 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); 860 if (ctx->sg->length == ctx->offset) { 861 ctx->sg = sg_next(ctx->sg); 862 if (ctx->sg) 863 ctx->offset = 0; 864 } 865 if (ctx->flags & SHA_FLAGS_PAD) { 866 dma_unmap_single(dd->dev, ctx->dma_addr, 867 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); 868 } 869 } else { 870 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + 871 ctx->block_size, DMA_TO_DEVICE); 872 } 873 } 874 875 static int atmel_sha_update_req(struct atmel_sha_dev *dd) 876 { 877 struct ahash_request *req = dd->req; 878 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 879 int err; 880 881 dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n", 882 ctx->total, ctx->digcnt[1], ctx->digcnt[0]); 883 884 if (ctx->flags & SHA_FLAGS_CPU) 885 err = atmel_sha_update_cpu(dd); 886 else 887 err = atmel_sha_update_dma_start(dd); 888 889 /* wait for dma completion before can take more data */ 890 dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0%llx\n", 891 err, ctx->digcnt[1], ctx->digcnt[0]); 892 893 return err; 894 } 895 896 static int atmel_sha_final_req(struct atmel_sha_dev *dd) 897 { 898 struct ahash_request *req = dd->req; 899 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 900 int err = 0; 901 int count; 902 903 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { 904 atmel_sha_fill_padding(ctx, 0); 905 count = ctx->bufcnt; 906 ctx->bufcnt = 0; 907 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1); 908 } 909 /* faster to handle last block with cpu */ 910 else { 911 atmel_sha_fill_padding(ctx, 0); 912 count = ctx->bufcnt; 913 ctx->bufcnt = 0; 914 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); 915 } 916 917 dev_dbg(dd->dev, "final_req: err: %d\n", err); 918 919 return err; 920 } 921 922 static void atmel_sha_copy_hash(struct ahash_request *req) 923 { 924 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 925 u32 *hash = (u32 *)ctx->digest; 926 unsigned int i, hashsize; 927 928 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 929 case SHA_FLAGS_SHA1: 930 hashsize = SHA1_DIGEST_SIZE; 931 break; 932 933 case SHA_FLAGS_SHA224: 934 case SHA_FLAGS_SHA256: 935 hashsize = SHA256_DIGEST_SIZE; 936 break; 937 938 case SHA_FLAGS_SHA384: 939 case SHA_FLAGS_SHA512: 940 hashsize = SHA512_DIGEST_SIZE; 941 break; 942 943 default: 944 /* Should not happen... */ 945 return; 946 } 947 948 for (i = 0; i < hashsize / sizeof(u32); ++i) 949 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); 950 ctx->flags |= SHA_FLAGS_RESTORE; 951 } 952 953 static void atmel_sha_copy_ready_hash(struct ahash_request *req) 954 { 955 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 956 957 if (!req->result) 958 return; 959 960 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 961 default: 962 case SHA_FLAGS_SHA1: 963 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); 964 break; 965 966 case SHA_FLAGS_SHA224: 967 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE); 968 break; 969 970 case SHA_FLAGS_SHA256: 971 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); 972 break; 973 974 case SHA_FLAGS_SHA384: 975 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE); 976 break; 977 978 case SHA_FLAGS_SHA512: 979 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE); 980 break; 981 } 982 } 983 984 static int atmel_sha_finish(struct ahash_request *req) 985 { 986 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 987 struct atmel_sha_dev *dd = ctx->dd; 988 989 if (ctx->digcnt[0] || ctx->digcnt[1]) 990 atmel_sha_copy_ready_hash(req); 991 992 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1], 993 ctx->digcnt[0], ctx->bufcnt); 994 995 return 0; 996 } 997 998 static void atmel_sha_finish_req(struct ahash_request *req, int err) 999 { 1000 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1001 struct atmel_sha_dev *dd = ctx->dd; 1002 1003 if (!err) { 1004 atmel_sha_copy_hash(req); 1005 if (SHA_FLAGS_FINAL & dd->flags) 1006 err = atmel_sha_finish(req); 1007 } else { 1008 ctx->flags |= SHA_FLAGS_ERROR; 1009 } 1010 1011 /* atomic operation is not needed here */ 1012 (void)atmel_sha_complete(dd, err); 1013 } 1014 1015 static int atmel_sha_hw_init(struct atmel_sha_dev *dd) 1016 { 1017 int err; 1018 1019 err = clk_enable(dd->iclk); 1020 if (err) 1021 return err; 1022 1023 if (!(SHA_FLAGS_INIT & dd->flags)) { 1024 atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST); 1025 dd->flags |= SHA_FLAGS_INIT; 1026 } 1027 1028 return 0; 1029 } 1030 1031 static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd) 1032 { 1033 return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff; 1034 } 1035 1036 static int atmel_sha_hw_version_init(struct atmel_sha_dev *dd) 1037 { 1038 int err; 1039 1040 err = atmel_sha_hw_init(dd); 1041 if (err) 1042 return err; 1043 1044 dd->hw_version = atmel_sha_get_version(dd); 1045 1046 dev_info(dd->dev, 1047 "version: 0x%x\n", dd->hw_version); 1048 1049 clk_disable(dd->iclk); 1050 1051 return 0; 1052 } 1053 1054 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, 1055 struct ahash_request *req) 1056 { 1057 struct crypto_async_request *async_req, *backlog; 1058 struct atmel_sha_ctx *ctx; 1059 unsigned long flags; 1060 bool start_async; 1061 int err = 0, ret = 0; 1062 1063 spin_lock_irqsave(&dd->lock, flags); 1064 if (req) 1065 ret = ahash_enqueue_request(&dd->queue, req); 1066 1067 if (SHA_FLAGS_BUSY & dd->flags) { 1068 spin_unlock_irqrestore(&dd->lock, flags); 1069 return ret; 1070 } 1071 1072 backlog = crypto_get_backlog(&dd->queue); 1073 async_req = crypto_dequeue_request(&dd->queue); 1074 if (async_req) 1075 dd->flags |= SHA_FLAGS_BUSY; 1076 1077 spin_unlock_irqrestore(&dd->lock, flags); 1078 1079 if (!async_req) 1080 return ret; 1081 1082 if (backlog) 1083 crypto_request_complete(backlog, -EINPROGRESS); 1084 1085 ctx = crypto_tfm_ctx(async_req->tfm); 1086 1087 dd->req = ahash_request_cast(async_req); 1088 start_async = (dd->req != req); 1089 dd->is_async = start_async; 1090 dd->force_complete = false; 1091 1092 /* WARNING: ctx->start() MAY change dd->is_async. */ 1093 err = ctx->start(dd); 1094 return (start_async) ? ret : err; 1095 } 1096 1097 static int atmel_sha_done(struct atmel_sha_dev *dd); 1098 1099 static int atmel_sha_start(struct atmel_sha_dev *dd) 1100 { 1101 struct ahash_request *req = dd->req; 1102 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1103 int err; 1104 1105 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n", 1106 ctx->op, req->nbytes); 1107 1108 err = atmel_sha_hw_init(dd); 1109 if (err) 1110 return atmel_sha_complete(dd, err); 1111 1112 /* 1113 * atmel_sha_update_req() and atmel_sha_final_req() can return either: 1114 * -EINPROGRESS: the hardware is busy and the SHA driver will resume 1115 * its job later in the done_task. 1116 * This is the main path. 1117 * 1118 * 0: the SHA driver can continue its job then release the hardware 1119 * later, if needed, with atmel_sha_finish_req(). 1120 * This is the alternate path. 1121 * 1122 * < 0: an error has occurred so atmel_sha_complete(dd, err) has already 1123 * been called, hence the hardware has been released. 1124 * The SHA driver must stop its job without calling 1125 * atmel_sha_finish_req(), otherwise atmel_sha_complete() would be 1126 * called a second time. 1127 * 1128 * Please note that currently, atmel_sha_final_req() never returns 0. 1129 */ 1130 1131 dd->resume = atmel_sha_done; 1132 if (ctx->op == SHA_OP_UPDATE) { 1133 err = atmel_sha_update_req(dd); 1134 if (!err && (ctx->flags & SHA_FLAGS_FINUP)) 1135 /* no final() after finup() */ 1136 err = atmel_sha_final_req(dd); 1137 } else if (ctx->op == SHA_OP_FINAL) { 1138 err = atmel_sha_final_req(dd); 1139 } 1140 1141 if (!err) 1142 /* done_task will not finish it, so do it here */ 1143 atmel_sha_finish_req(req, err); 1144 1145 dev_dbg(dd->dev, "exit, err: %d\n", err); 1146 1147 return err; 1148 } 1149 1150 static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op) 1151 { 1152 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1153 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 1154 struct atmel_sha_dev *dd = tctx->dd; 1155 1156 ctx->op = op; 1157 1158 return atmel_sha_handle_queue(dd, req); 1159 } 1160 1161 static int atmel_sha_update(struct ahash_request *req) 1162 { 1163 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1164 1165 if (!req->nbytes) 1166 return 0; 1167 1168 ctx->total = req->nbytes; 1169 ctx->sg = req->src; 1170 ctx->offset = 0; 1171 1172 if (ctx->flags & SHA_FLAGS_FINUP) { 1173 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD) 1174 /* faster to use CPU for short transfers */ 1175 ctx->flags |= SHA_FLAGS_CPU; 1176 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { 1177 atmel_sha_append_sg(ctx); 1178 return 0; 1179 } 1180 return atmel_sha_enqueue(req, SHA_OP_UPDATE); 1181 } 1182 1183 static int atmel_sha_final(struct ahash_request *req) 1184 { 1185 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1186 1187 ctx->flags |= SHA_FLAGS_FINUP; 1188 1189 if (ctx->flags & SHA_FLAGS_ERROR) 1190 return 0; /* uncompleted hash is not needed */ 1191 1192 if (ctx->flags & SHA_FLAGS_PAD) 1193 /* copy ready hash (+ finalize hmac) */ 1194 return atmel_sha_finish(req); 1195 1196 return atmel_sha_enqueue(req, SHA_OP_FINAL); 1197 } 1198 1199 static int atmel_sha_finup(struct ahash_request *req) 1200 { 1201 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1202 int err1, err2; 1203 1204 ctx->flags |= SHA_FLAGS_FINUP; 1205 1206 err1 = atmel_sha_update(req); 1207 if (err1 == -EINPROGRESS || 1208 (err1 == -EBUSY && (ahash_request_flags(req) & 1209 CRYPTO_TFM_REQ_MAY_BACKLOG))) 1210 return err1; 1211 1212 /* 1213 * final() has to be always called to cleanup resources 1214 * even if udpate() failed, except EINPROGRESS 1215 */ 1216 err2 = atmel_sha_final(req); 1217 1218 return err1 ?: err2; 1219 } 1220 1221 static int atmel_sha_digest(struct ahash_request *req) 1222 { 1223 return atmel_sha_init(req) ?: atmel_sha_finup(req); 1224 } 1225 1226 1227 static int atmel_sha_export(struct ahash_request *req, void *out) 1228 { 1229 const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1230 1231 memcpy(out, ctx, sizeof(*ctx)); 1232 return 0; 1233 } 1234 1235 static int atmel_sha_import(struct ahash_request *req, const void *in) 1236 { 1237 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1238 1239 memcpy(ctx, in, sizeof(*ctx)); 1240 return 0; 1241 } 1242 1243 static int atmel_sha_cra_init(struct crypto_tfm *tfm) 1244 { 1245 struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm); 1246 1247 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1248 sizeof(struct atmel_sha_reqctx)); 1249 ctx->start = atmel_sha_start; 1250 1251 return 0; 1252 } 1253 1254 static void atmel_sha_alg_init(struct ahash_alg *alg) 1255 { 1256 alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; 1257 alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; 1258 alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx); 1259 alg->halg.base.cra_module = THIS_MODULE; 1260 alg->halg.base.cra_init = atmel_sha_cra_init; 1261 1262 alg->halg.statesize = sizeof(struct atmel_sha_reqctx); 1263 1264 alg->init = atmel_sha_init; 1265 alg->update = atmel_sha_update; 1266 alg->final = atmel_sha_final; 1267 alg->finup = atmel_sha_finup; 1268 alg->digest = atmel_sha_digest; 1269 alg->export = atmel_sha_export; 1270 alg->import = atmel_sha_import; 1271 } 1272 1273 static struct ahash_alg sha_1_256_algs[] = { 1274 { 1275 .halg.base.cra_name = "sha1", 1276 .halg.base.cra_driver_name = "atmel-sha1", 1277 .halg.base.cra_blocksize = SHA1_BLOCK_SIZE, 1278 1279 .halg.digestsize = SHA1_DIGEST_SIZE, 1280 }, 1281 { 1282 .halg.base.cra_name = "sha256", 1283 .halg.base.cra_driver_name = "atmel-sha256", 1284 .halg.base.cra_blocksize = SHA256_BLOCK_SIZE, 1285 1286 .halg.digestsize = SHA256_DIGEST_SIZE, 1287 }, 1288 }; 1289 1290 static struct ahash_alg sha_224_alg = { 1291 .halg.base.cra_name = "sha224", 1292 .halg.base.cra_driver_name = "atmel-sha224", 1293 .halg.base.cra_blocksize = SHA224_BLOCK_SIZE, 1294 1295 .halg.digestsize = SHA224_DIGEST_SIZE, 1296 }; 1297 1298 static struct ahash_alg sha_384_512_algs[] = { 1299 { 1300 .halg.base.cra_name = "sha384", 1301 .halg.base.cra_driver_name = "atmel-sha384", 1302 .halg.base.cra_blocksize = SHA384_BLOCK_SIZE, 1303 .halg.base.cra_alignmask = 0x3, 1304 1305 .halg.digestsize = SHA384_DIGEST_SIZE, 1306 }, 1307 { 1308 .halg.base.cra_name = "sha512", 1309 .halg.base.cra_driver_name = "atmel-sha512", 1310 .halg.base.cra_blocksize = SHA512_BLOCK_SIZE, 1311 .halg.base.cra_alignmask = 0x3, 1312 1313 .halg.digestsize = SHA512_DIGEST_SIZE, 1314 }, 1315 }; 1316 1317 static void atmel_sha_queue_task(unsigned long data) 1318 { 1319 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; 1320 1321 atmel_sha_handle_queue(dd, NULL); 1322 } 1323 1324 static int atmel_sha_done(struct atmel_sha_dev *dd) 1325 { 1326 int err = 0; 1327 1328 if (SHA_FLAGS_CPU & dd->flags) { 1329 if (SHA_FLAGS_OUTPUT_READY & dd->flags) { 1330 dd->flags &= ~SHA_FLAGS_OUTPUT_READY; 1331 goto finish; 1332 } 1333 } else if (SHA_FLAGS_DMA_READY & dd->flags) { 1334 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) { 1335 dd->flags &= ~SHA_FLAGS_DMA_ACTIVE; 1336 atmel_sha_update_dma_stop(dd); 1337 } 1338 if (SHA_FLAGS_OUTPUT_READY & dd->flags) { 1339 /* hash or semi-hash ready */ 1340 dd->flags &= ~(SHA_FLAGS_DMA_READY | 1341 SHA_FLAGS_OUTPUT_READY); 1342 err = atmel_sha_update_dma_start(dd); 1343 if (err != -EINPROGRESS) 1344 goto finish; 1345 } 1346 } 1347 return err; 1348 1349 finish: 1350 /* finish curent request */ 1351 atmel_sha_finish_req(dd->req, err); 1352 1353 return err; 1354 } 1355 1356 static void atmel_sha_done_task(unsigned long data) 1357 { 1358 struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data; 1359 1360 dd->is_async = true; 1361 (void)dd->resume(dd); 1362 } 1363 1364 static irqreturn_t atmel_sha_irq(int irq, void *dev_id) 1365 { 1366 struct atmel_sha_dev *sha_dd = dev_id; 1367 u32 reg; 1368 1369 reg = atmel_sha_read(sha_dd, SHA_ISR); 1370 if (reg & atmel_sha_read(sha_dd, SHA_IMR)) { 1371 atmel_sha_write(sha_dd, SHA_IDR, reg); 1372 if (SHA_FLAGS_BUSY & sha_dd->flags) { 1373 sha_dd->flags |= SHA_FLAGS_OUTPUT_READY; 1374 if (!(SHA_FLAGS_CPU & sha_dd->flags)) 1375 sha_dd->flags |= SHA_FLAGS_DMA_READY; 1376 tasklet_schedule(&sha_dd->done_task); 1377 } else { 1378 dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n"); 1379 } 1380 return IRQ_HANDLED; 1381 } 1382 1383 return IRQ_NONE; 1384 } 1385 1386 1387 /* DMA transfer functions */ 1388 1389 static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd, 1390 struct scatterlist *sg, 1391 size_t len) 1392 { 1393 struct atmel_sha_dma *dma = &dd->dma_lch_in; 1394 struct ahash_request *req = dd->req; 1395 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1396 size_t bs = ctx->block_size; 1397 int nents; 1398 1399 for (nents = 0; sg; sg = sg_next(sg), ++nents) { 1400 if (!IS_ALIGNED(sg->offset, sizeof(u32))) 1401 return false; 1402 1403 /* 1404 * This is the last sg, the only one that is allowed to 1405 * have an unaligned length. 1406 */ 1407 if (len <= sg->length) { 1408 dma->nents = nents + 1; 1409 dma->last_sg_length = sg->length; 1410 sg->length = ALIGN(len, sizeof(u32)); 1411 return true; 1412 } 1413 1414 /* All other sg lengths MUST be aligned to the block size. */ 1415 if (!IS_ALIGNED(sg->length, bs)) 1416 return false; 1417 1418 len -= sg->length; 1419 } 1420 1421 return false; 1422 } 1423 1424 static void atmel_sha_dma_callback2(void *data) 1425 { 1426 struct atmel_sha_dev *dd = data; 1427 struct atmel_sha_dma *dma = &dd->dma_lch_in; 1428 struct scatterlist *sg; 1429 int nents; 1430 1431 dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); 1432 1433 sg = dma->sg; 1434 for (nents = 0; nents < dma->nents - 1; ++nents) 1435 sg = sg_next(sg); 1436 sg->length = dma->last_sg_length; 1437 1438 dd->is_async = true; 1439 (void)atmel_sha_wait_for_data_ready(dd, dd->resume); 1440 } 1441 1442 static int atmel_sha_dma_start(struct atmel_sha_dev *dd, 1443 struct scatterlist *src, 1444 size_t len, 1445 atmel_sha_fn_t resume) 1446 { 1447 struct atmel_sha_dma *dma = &dd->dma_lch_in; 1448 struct dma_slave_config *config = &dma->dma_conf; 1449 struct dma_chan *chan = dma->chan; 1450 struct dma_async_tx_descriptor *desc; 1451 dma_cookie_t cookie; 1452 unsigned int sg_len; 1453 int err; 1454 1455 dd->resume = resume; 1456 1457 /* 1458 * dma->nents has already been initialized by 1459 * atmel_sha_dma_check_aligned(). 1460 */ 1461 dma->sg = src; 1462 sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); 1463 if (!sg_len) { 1464 err = -ENOMEM; 1465 goto exit; 1466 } 1467 1468 config->src_maxburst = 16; 1469 config->dst_maxburst = 16; 1470 err = dmaengine_slave_config(chan, config); 1471 if (err) 1472 goto unmap_sg; 1473 1474 desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV, 1475 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1476 if (!desc) { 1477 err = -ENOMEM; 1478 goto unmap_sg; 1479 } 1480 1481 desc->callback = atmel_sha_dma_callback2; 1482 desc->callback_param = dd; 1483 cookie = dmaengine_submit(desc); 1484 err = dma_submit_error(cookie); 1485 if (err) 1486 goto unmap_sg; 1487 1488 dma_async_issue_pending(chan); 1489 1490 return -EINPROGRESS; 1491 1492 unmap_sg: 1493 dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); 1494 exit: 1495 return atmel_sha_complete(dd, err); 1496 } 1497 1498 1499 /* CPU transfer functions */ 1500 1501 static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd) 1502 { 1503 struct ahash_request *req = dd->req; 1504 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1505 const u32 *words = (const u32 *)ctx->buffer; 1506 size_t i, num_words; 1507 u32 isr, din, din_inc; 1508 1509 din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1; 1510 for (;;) { 1511 /* Write data into the Input Data Registers. */ 1512 num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32)); 1513 for (i = 0, din = 0; i < num_words; ++i, din += din_inc) 1514 atmel_sha_write(dd, SHA_REG_DIN(din), words[i]); 1515 1516 ctx->offset += ctx->bufcnt; 1517 ctx->total -= ctx->bufcnt; 1518 1519 if (!ctx->total) 1520 break; 1521 1522 /* 1523 * Prepare next block: 1524 * Fill ctx->buffer now with the next data to be written into 1525 * IDATARx: it gives time for the SHA hardware to process 1526 * the current data so the SHA_INT_DATARDY flag might be set 1527 * in SHA_ISR when polling this register at the beginning of 1528 * the next loop. 1529 */ 1530 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); 1531 scatterwalk_map_and_copy(ctx->buffer, ctx->sg, 1532 ctx->offset, ctx->bufcnt, 0); 1533 1534 /* Wait for hardware to be ready again. */ 1535 isr = atmel_sha_read(dd, SHA_ISR); 1536 if (!(isr & SHA_INT_DATARDY)) { 1537 /* Not ready yet. */ 1538 dd->resume = atmel_sha_cpu_transfer; 1539 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); 1540 return -EINPROGRESS; 1541 } 1542 } 1543 1544 if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY))) 1545 return dd->cpu_transfer_complete(dd); 1546 1547 return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete); 1548 } 1549 1550 static int atmel_sha_cpu_start(struct atmel_sha_dev *dd, 1551 struct scatterlist *sg, 1552 unsigned int len, 1553 bool idatar0_only, 1554 bool wait_data_ready, 1555 atmel_sha_fn_t resume) 1556 { 1557 struct ahash_request *req = dd->req; 1558 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1559 1560 if (!len) 1561 return resume(dd); 1562 1563 ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY); 1564 1565 if (idatar0_only) 1566 ctx->flags |= SHA_FLAGS_IDATAR0; 1567 1568 if (wait_data_ready) 1569 ctx->flags |= SHA_FLAGS_WAIT_DATARDY; 1570 1571 ctx->sg = sg; 1572 ctx->total = len; 1573 ctx->offset = 0; 1574 1575 /* Prepare the first block to be written. */ 1576 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); 1577 scatterwalk_map_and_copy(ctx->buffer, ctx->sg, 1578 ctx->offset, ctx->bufcnt, 0); 1579 1580 dd->cpu_transfer_complete = resume; 1581 return atmel_sha_cpu_transfer(dd); 1582 } 1583 1584 static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd, 1585 const void *data, unsigned int datalen, 1586 bool auto_padding, 1587 atmel_sha_fn_t resume) 1588 { 1589 struct ahash_request *req = dd->req; 1590 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1591 u32 msglen = (auto_padding) ? datalen : 0; 1592 u32 mr = SHA_MR_MODE_AUTO; 1593 1594 if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding)) 1595 return atmel_sha_complete(dd, -EINVAL); 1596 1597 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); 1598 atmel_sha_write(dd, SHA_MR, mr); 1599 atmel_sha_write(dd, SHA_MSR, msglen); 1600 atmel_sha_write(dd, SHA_BCR, msglen); 1601 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 1602 1603 sg_init_one(&dd->tmp, data, datalen); 1604 return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume); 1605 } 1606 1607 1608 /* hmac functions */ 1609 1610 struct atmel_sha_hmac_key { 1611 bool valid; 1612 unsigned int keylen; 1613 u8 buffer[SHA512_BLOCK_SIZE]; 1614 u8 *keydup; 1615 }; 1616 1617 static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey) 1618 { 1619 memset(hkey, 0, sizeof(*hkey)); 1620 } 1621 1622 static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey) 1623 { 1624 kfree(hkey->keydup); 1625 memset(hkey, 0, sizeof(*hkey)); 1626 } 1627 1628 static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey, 1629 const u8 *key, 1630 unsigned int keylen) 1631 { 1632 atmel_sha_hmac_key_release(hkey); 1633 1634 if (keylen > sizeof(hkey->buffer)) { 1635 hkey->keydup = kmemdup(key, keylen, GFP_KERNEL); 1636 if (!hkey->keydup) 1637 return -ENOMEM; 1638 1639 } else { 1640 memcpy(hkey->buffer, key, keylen); 1641 } 1642 1643 hkey->valid = true; 1644 hkey->keylen = keylen; 1645 return 0; 1646 } 1647 1648 static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey, 1649 const u8 **key, 1650 unsigned int *keylen) 1651 { 1652 if (!hkey->valid) 1653 return false; 1654 1655 *keylen = hkey->keylen; 1656 *key = (hkey->keydup) ? hkey->keydup : hkey->buffer; 1657 return true; 1658 } 1659 1660 1661 struct atmel_sha_hmac_ctx { 1662 struct atmel_sha_ctx base; 1663 1664 struct atmel_sha_hmac_key hkey; 1665 u32 ipad[SHA512_BLOCK_SIZE / sizeof(u32)]; 1666 u32 opad[SHA512_BLOCK_SIZE / sizeof(u32)]; 1667 atmel_sha_fn_t resume; 1668 }; 1669 1670 static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd, 1671 atmel_sha_fn_t resume); 1672 static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd, 1673 const u8 *key, unsigned int keylen); 1674 static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd); 1675 static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd); 1676 static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd); 1677 static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd); 1678 1679 static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd); 1680 static int atmel_sha_hmac_final(struct atmel_sha_dev *dd); 1681 static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd); 1682 static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd); 1683 1684 static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd, 1685 atmel_sha_fn_t resume) 1686 { 1687 struct ahash_request *req = dd->req; 1688 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1689 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1690 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1691 unsigned int keylen; 1692 const u8 *key; 1693 size_t bs; 1694 1695 hmac->resume = resume; 1696 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 1697 case SHA_FLAGS_SHA1: 1698 ctx->block_size = SHA1_BLOCK_SIZE; 1699 ctx->hash_size = SHA1_DIGEST_SIZE; 1700 break; 1701 1702 case SHA_FLAGS_SHA224: 1703 ctx->block_size = SHA224_BLOCK_SIZE; 1704 ctx->hash_size = SHA256_DIGEST_SIZE; 1705 break; 1706 1707 case SHA_FLAGS_SHA256: 1708 ctx->block_size = SHA256_BLOCK_SIZE; 1709 ctx->hash_size = SHA256_DIGEST_SIZE; 1710 break; 1711 1712 case SHA_FLAGS_SHA384: 1713 ctx->block_size = SHA384_BLOCK_SIZE; 1714 ctx->hash_size = SHA512_DIGEST_SIZE; 1715 break; 1716 1717 case SHA_FLAGS_SHA512: 1718 ctx->block_size = SHA512_BLOCK_SIZE; 1719 ctx->hash_size = SHA512_DIGEST_SIZE; 1720 break; 1721 1722 default: 1723 return atmel_sha_complete(dd, -EINVAL); 1724 } 1725 bs = ctx->block_size; 1726 1727 if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen))) 1728 return resume(dd); 1729 1730 /* Compute K' from K. */ 1731 if (unlikely(keylen > bs)) 1732 return atmel_sha_hmac_prehash_key(dd, key, keylen); 1733 1734 /* Prepare ipad. */ 1735 memcpy((u8 *)hmac->ipad, key, keylen); 1736 memset((u8 *)hmac->ipad + keylen, 0, bs - keylen); 1737 return atmel_sha_hmac_compute_ipad_hash(dd); 1738 } 1739 1740 static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd, 1741 const u8 *key, unsigned int keylen) 1742 { 1743 return atmel_sha_cpu_hash(dd, key, keylen, true, 1744 atmel_sha_hmac_prehash_key_done); 1745 } 1746 1747 static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd) 1748 { 1749 struct ahash_request *req = dd->req; 1750 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1751 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1752 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1753 size_t ds = crypto_ahash_digestsize(tfm); 1754 size_t bs = ctx->block_size; 1755 size_t i, num_words = ds / sizeof(u32); 1756 1757 /* Prepare ipad. */ 1758 for (i = 0; i < num_words; ++i) 1759 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); 1760 memset((u8 *)hmac->ipad + ds, 0, bs - ds); 1761 return atmel_sha_hmac_compute_ipad_hash(dd); 1762 } 1763 1764 static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd) 1765 { 1766 struct ahash_request *req = dd->req; 1767 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1768 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1769 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1770 size_t bs = ctx->block_size; 1771 size_t i, num_words = bs / sizeof(u32); 1772 1773 unsafe_memcpy(hmac->opad, hmac->ipad, bs, 1774 "fortified memcpy causes -Wrestrict warning"); 1775 for (i = 0; i < num_words; ++i) { 1776 hmac->ipad[i] ^= 0x36363636; 1777 hmac->opad[i] ^= 0x5c5c5c5c; 1778 } 1779 1780 return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false, 1781 atmel_sha_hmac_compute_opad_hash); 1782 } 1783 1784 static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd) 1785 { 1786 struct ahash_request *req = dd->req; 1787 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1788 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1789 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1790 size_t bs = ctx->block_size; 1791 size_t hs = ctx->hash_size; 1792 size_t i, num_words = hs / sizeof(u32); 1793 1794 for (i = 0; i < num_words; ++i) 1795 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); 1796 return atmel_sha_cpu_hash(dd, hmac->opad, bs, false, 1797 atmel_sha_hmac_setup_done); 1798 } 1799 1800 static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd) 1801 { 1802 struct ahash_request *req = dd->req; 1803 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1804 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1805 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1806 size_t hs = ctx->hash_size; 1807 size_t i, num_words = hs / sizeof(u32); 1808 1809 for (i = 0; i < num_words; ++i) 1810 hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); 1811 atmel_sha_hmac_key_release(&hmac->hkey); 1812 return hmac->resume(dd); 1813 } 1814 1815 static int atmel_sha_hmac_start(struct atmel_sha_dev *dd) 1816 { 1817 struct ahash_request *req = dd->req; 1818 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1819 int err; 1820 1821 err = atmel_sha_hw_init(dd); 1822 if (err) 1823 return atmel_sha_complete(dd, err); 1824 1825 switch (ctx->op) { 1826 case SHA_OP_INIT: 1827 err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done); 1828 break; 1829 1830 case SHA_OP_UPDATE: 1831 dd->resume = atmel_sha_done; 1832 err = atmel_sha_update_req(dd); 1833 break; 1834 1835 case SHA_OP_FINAL: 1836 dd->resume = atmel_sha_hmac_final; 1837 err = atmel_sha_final_req(dd); 1838 break; 1839 1840 case SHA_OP_DIGEST: 1841 err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2); 1842 break; 1843 1844 default: 1845 return atmel_sha_complete(dd, -EINVAL); 1846 } 1847 1848 return err; 1849 } 1850 1851 static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, 1852 unsigned int keylen) 1853 { 1854 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1855 1856 return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen); 1857 } 1858 1859 static int atmel_sha_hmac_init(struct ahash_request *req) 1860 { 1861 int err; 1862 1863 err = atmel_sha_init(req); 1864 if (err) 1865 return err; 1866 1867 return atmel_sha_enqueue(req, SHA_OP_INIT); 1868 } 1869 1870 static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd) 1871 { 1872 struct ahash_request *req = dd->req; 1873 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1874 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1875 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1876 size_t bs = ctx->block_size; 1877 size_t hs = ctx->hash_size; 1878 1879 ctx->bufcnt = 0; 1880 ctx->digcnt[0] = bs; 1881 ctx->digcnt[1] = 0; 1882 ctx->flags |= SHA_FLAGS_RESTORE; 1883 memcpy(ctx->digest, hmac->ipad, hs); 1884 return atmel_sha_complete(dd, 0); 1885 } 1886 1887 static int atmel_sha_hmac_final(struct atmel_sha_dev *dd) 1888 { 1889 struct ahash_request *req = dd->req; 1890 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1891 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1892 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1893 u32 *digest = (u32 *)ctx->digest; 1894 size_t ds = crypto_ahash_digestsize(tfm); 1895 size_t bs = ctx->block_size; 1896 size_t hs = ctx->hash_size; 1897 size_t i, num_words; 1898 u32 mr; 1899 1900 /* Save d = SHA((K' + ipad) | msg). */ 1901 num_words = ds / sizeof(u32); 1902 for (i = 0; i < num_words; ++i) 1903 digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); 1904 1905 /* Restore context to finish computing SHA((K' + opad) | d). */ 1906 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); 1907 num_words = hs / sizeof(u32); 1908 for (i = 0; i < num_words; ++i) 1909 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); 1910 1911 mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV; 1912 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); 1913 atmel_sha_write(dd, SHA_MR, mr); 1914 atmel_sha_write(dd, SHA_MSR, bs + ds); 1915 atmel_sha_write(dd, SHA_BCR, ds); 1916 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 1917 1918 sg_init_one(&dd->tmp, digest, ds); 1919 return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true, 1920 atmel_sha_hmac_final_done); 1921 } 1922 1923 static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd) 1924 { 1925 /* 1926 * req->result might not be sizeof(u32) aligned, so copy the 1927 * digest into ctx->digest[] before memcpy() the data into 1928 * req->result. 1929 */ 1930 atmel_sha_copy_hash(dd->req); 1931 atmel_sha_copy_ready_hash(dd->req); 1932 return atmel_sha_complete(dd, 0); 1933 } 1934 1935 static int atmel_sha_hmac_digest(struct ahash_request *req) 1936 { 1937 int err; 1938 1939 err = atmel_sha_init(req); 1940 if (err) 1941 return err; 1942 1943 return atmel_sha_enqueue(req, SHA_OP_DIGEST); 1944 } 1945 1946 static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd) 1947 { 1948 struct ahash_request *req = dd->req; 1949 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); 1950 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1951 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 1952 struct scatterlist *sgbuf; 1953 size_t hs = ctx->hash_size; 1954 size_t i, num_words = hs / sizeof(u32); 1955 bool use_dma = false; 1956 u32 mr; 1957 1958 /* Special case for empty message. */ 1959 if (!req->nbytes) { 1960 req->nbytes = 0; 1961 ctx->bufcnt = 0; 1962 ctx->digcnt[0] = 0; 1963 ctx->digcnt[1] = 0; 1964 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 1965 case SHA_FLAGS_SHA1: 1966 case SHA_FLAGS_SHA224: 1967 case SHA_FLAGS_SHA256: 1968 atmel_sha_fill_padding(ctx, 64); 1969 break; 1970 1971 case SHA_FLAGS_SHA384: 1972 case SHA_FLAGS_SHA512: 1973 atmel_sha_fill_padding(ctx, 128); 1974 break; 1975 } 1976 sg_init_one(&dd->tmp, ctx->buffer, ctx->bufcnt); 1977 } 1978 1979 /* Check DMA threshold and alignment. */ 1980 if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD && 1981 atmel_sha_dma_check_aligned(dd, req->src, req->nbytes)) 1982 use_dma = true; 1983 1984 /* Write both initial hash values to compute a HMAC. */ 1985 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); 1986 for (i = 0; i < num_words; ++i) 1987 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); 1988 1989 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV); 1990 for (i = 0; i < num_words; ++i) 1991 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); 1992 1993 /* Write the Mode, Message Size, Bytes Count then Control Registers. */ 1994 mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF); 1995 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; 1996 if (use_dma) 1997 mr |= SHA_MR_MODE_IDATAR0; 1998 else 1999 mr |= SHA_MR_MODE_AUTO; 2000 atmel_sha_write(dd, SHA_MR, mr); 2001 2002 atmel_sha_write(dd, SHA_MSR, req->nbytes); 2003 atmel_sha_write(dd, SHA_BCR, req->nbytes); 2004 2005 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 2006 2007 /* Special case for empty message. */ 2008 if (!req->nbytes) { 2009 sgbuf = &dd->tmp; 2010 req->nbytes = ctx->bufcnt; 2011 } else { 2012 sgbuf = req->src; 2013 } 2014 2015 /* Process data. */ 2016 if (use_dma) 2017 return atmel_sha_dma_start(dd, sgbuf, req->nbytes, 2018 atmel_sha_hmac_final_done); 2019 2020 return atmel_sha_cpu_start(dd, sgbuf, req->nbytes, false, true, 2021 atmel_sha_hmac_final_done); 2022 } 2023 2024 static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm) 2025 { 2026 struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm); 2027 2028 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 2029 sizeof(struct atmel_sha_reqctx)); 2030 hmac->base.start = atmel_sha_hmac_start; 2031 atmel_sha_hmac_key_init(&hmac->hkey); 2032 2033 return 0; 2034 } 2035 2036 static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm) 2037 { 2038 struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm); 2039 2040 atmel_sha_hmac_key_release(&hmac->hkey); 2041 } 2042 2043 static void atmel_sha_hmac_alg_init(struct ahash_alg *alg) 2044 { 2045 alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; 2046 alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; 2047 alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx); 2048 alg->halg.base.cra_module = THIS_MODULE; 2049 alg->halg.base.cra_init = atmel_sha_hmac_cra_init; 2050 alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit; 2051 2052 alg->halg.statesize = sizeof(struct atmel_sha_reqctx); 2053 2054 alg->init = atmel_sha_hmac_init; 2055 alg->update = atmel_sha_update; 2056 alg->final = atmel_sha_final; 2057 alg->digest = atmel_sha_hmac_digest; 2058 alg->setkey = atmel_sha_hmac_setkey; 2059 alg->export = atmel_sha_export; 2060 alg->import = atmel_sha_import; 2061 } 2062 2063 static struct ahash_alg sha_hmac_algs[] = { 2064 { 2065 .halg.base.cra_name = "hmac(sha1)", 2066 .halg.base.cra_driver_name = "atmel-hmac-sha1", 2067 .halg.base.cra_blocksize = SHA1_BLOCK_SIZE, 2068 2069 .halg.digestsize = SHA1_DIGEST_SIZE, 2070 }, 2071 { 2072 .halg.base.cra_name = "hmac(sha224)", 2073 .halg.base.cra_driver_name = "atmel-hmac-sha224", 2074 .halg.base.cra_blocksize = SHA224_BLOCK_SIZE, 2075 2076 .halg.digestsize = SHA224_DIGEST_SIZE, 2077 }, 2078 { 2079 .halg.base.cra_name = "hmac(sha256)", 2080 .halg.base.cra_driver_name = "atmel-hmac-sha256", 2081 .halg.base.cra_blocksize = SHA256_BLOCK_SIZE, 2082 2083 .halg.digestsize = SHA256_DIGEST_SIZE, 2084 }, 2085 { 2086 .halg.base.cra_name = "hmac(sha384)", 2087 .halg.base.cra_driver_name = "atmel-hmac-sha384", 2088 .halg.base.cra_blocksize = SHA384_BLOCK_SIZE, 2089 2090 .halg.digestsize = SHA384_DIGEST_SIZE, 2091 }, 2092 { 2093 .halg.base.cra_name = "hmac(sha512)", 2094 .halg.base.cra_driver_name = "atmel-hmac-sha512", 2095 .halg.base.cra_blocksize = SHA512_BLOCK_SIZE, 2096 2097 .halg.digestsize = SHA512_DIGEST_SIZE, 2098 }, 2099 }; 2100 2101 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 2102 /* authenc functions */ 2103 2104 static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd); 2105 static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd); 2106 static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd); 2107 2108 2109 struct atmel_sha_authenc_ctx { 2110 struct crypto_ahash *tfm; 2111 }; 2112 2113 struct atmel_sha_authenc_reqctx { 2114 struct atmel_sha_reqctx base; 2115 2116 atmel_aes_authenc_fn_t cb; 2117 struct atmel_aes_dev *aes_dev; 2118 2119 /* _init() parameters. */ 2120 struct scatterlist *assoc; 2121 u32 assoclen; 2122 u32 textlen; 2123 2124 /* _final() parameters. */ 2125 u32 *digest; 2126 unsigned int digestlen; 2127 }; 2128 2129 static void atmel_sha_authenc_complete(void *data, int err) 2130 { 2131 struct ahash_request *req = data; 2132 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2133 2134 authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async); 2135 } 2136 2137 static int atmel_sha_authenc_start(struct atmel_sha_dev *dd) 2138 { 2139 struct ahash_request *req = dd->req; 2140 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2141 int err; 2142 2143 /* 2144 * Force atmel_sha_complete() to call req->base.complete(), ie 2145 * atmel_sha_authenc_complete(), which in turn calls authctx->cb(). 2146 */ 2147 dd->force_complete = true; 2148 2149 err = atmel_sha_hw_init(dd); 2150 return authctx->cb(authctx->aes_dev, err, dd->is_async); 2151 } 2152 2153 bool atmel_sha_authenc_is_ready(void) 2154 { 2155 struct atmel_sha_ctx dummy; 2156 2157 dummy.dd = NULL; 2158 return (atmel_sha_find_dev(&dummy) != NULL); 2159 } 2160 EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready); 2161 2162 unsigned int atmel_sha_authenc_get_reqsize(void) 2163 { 2164 return sizeof(struct atmel_sha_authenc_reqctx); 2165 } 2166 EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize); 2167 2168 struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode) 2169 { 2170 struct atmel_sha_authenc_ctx *auth; 2171 struct crypto_ahash *tfm; 2172 struct atmel_sha_ctx *tctx; 2173 const char *name; 2174 int err = -EINVAL; 2175 2176 switch (mode & SHA_FLAGS_MODE_MASK) { 2177 case SHA_FLAGS_HMAC_SHA1: 2178 name = "atmel-hmac-sha1"; 2179 break; 2180 2181 case SHA_FLAGS_HMAC_SHA224: 2182 name = "atmel-hmac-sha224"; 2183 break; 2184 2185 case SHA_FLAGS_HMAC_SHA256: 2186 name = "atmel-hmac-sha256"; 2187 break; 2188 2189 case SHA_FLAGS_HMAC_SHA384: 2190 name = "atmel-hmac-sha384"; 2191 break; 2192 2193 case SHA_FLAGS_HMAC_SHA512: 2194 name = "atmel-hmac-sha512"; 2195 break; 2196 2197 default: 2198 goto error; 2199 } 2200 2201 tfm = crypto_alloc_ahash(name, 0, 0); 2202 if (IS_ERR(tfm)) { 2203 err = PTR_ERR(tfm); 2204 goto error; 2205 } 2206 tctx = crypto_ahash_ctx(tfm); 2207 tctx->start = atmel_sha_authenc_start; 2208 tctx->flags = mode; 2209 2210 auth = kzalloc(sizeof(*auth), GFP_KERNEL); 2211 if (!auth) { 2212 err = -ENOMEM; 2213 goto err_free_ahash; 2214 } 2215 auth->tfm = tfm; 2216 2217 return auth; 2218 2219 err_free_ahash: 2220 crypto_free_ahash(tfm); 2221 error: 2222 return ERR_PTR(err); 2223 } 2224 EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn); 2225 2226 void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth) 2227 { 2228 if (auth) 2229 crypto_free_ahash(auth->tfm); 2230 kfree(auth); 2231 } 2232 EXPORT_SYMBOL_GPL(atmel_sha_authenc_free); 2233 2234 int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth, 2235 const u8 *key, unsigned int keylen, u32 flags) 2236 { 2237 struct crypto_ahash *tfm = auth->tfm; 2238 2239 crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK); 2240 crypto_ahash_set_flags(tfm, flags & CRYPTO_TFM_REQ_MASK); 2241 return crypto_ahash_setkey(tfm, key, keylen); 2242 } 2243 EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey); 2244 2245 int atmel_sha_authenc_schedule(struct ahash_request *req, 2246 struct atmel_sha_authenc_ctx *auth, 2247 atmel_aes_authenc_fn_t cb, 2248 struct atmel_aes_dev *aes_dev) 2249 { 2250 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2251 struct atmel_sha_reqctx *ctx = &authctx->base; 2252 struct crypto_ahash *tfm = auth->tfm; 2253 struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm); 2254 struct atmel_sha_dev *dd; 2255 2256 /* Reset request context (MUST be done first). */ 2257 memset(authctx, 0, sizeof(*authctx)); 2258 2259 /* Get SHA device. */ 2260 dd = atmel_sha_find_dev(tctx); 2261 if (!dd) 2262 return cb(aes_dev, -ENODEV, false); 2263 2264 /* Init request context. */ 2265 ctx->dd = dd; 2266 ctx->buflen = SHA_BUFFER_LEN; 2267 authctx->cb = cb; 2268 authctx->aes_dev = aes_dev; 2269 ahash_request_set_tfm(req, tfm); 2270 ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req); 2271 2272 return atmel_sha_handle_queue(dd, req); 2273 } 2274 EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule); 2275 2276 int atmel_sha_authenc_init(struct ahash_request *req, 2277 struct scatterlist *assoc, unsigned int assoclen, 2278 unsigned int textlen, 2279 atmel_aes_authenc_fn_t cb, 2280 struct atmel_aes_dev *aes_dev) 2281 { 2282 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2283 struct atmel_sha_reqctx *ctx = &authctx->base; 2284 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2285 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 2286 struct atmel_sha_dev *dd = ctx->dd; 2287 2288 if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32)))) 2289 return atmel_sha_complete(dd, -EINVAL); 2290 2291 authctx->cb = cb; 2292 authctx->aes_dev = aes_dev; 2293 authctx->assoc = assoc; 2294 authctx->assoclen = assoclen; 2295 authctx->textlen = textlen; 2296 2297 ctx->flags = hmac->base.flags; 2298 return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2); 2299 } 2300 EXPORT_SYMBOL_GPL(atmel_sha_authenc_init); 2301 2302 static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd) 2303 { 2304 struct ahash_request *req = dd->req; 2305 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2306 struct atmel_sha_reqctx *ctx = &authctx->base; 2307 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2308 struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); 2309 size_t hs = ctx->hash_size; 2310 size_t i, num_words = hs / sizeof(u32); 2311 u32 mr, msg_size; 2312 2313 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); 2314 for (i = 0; i < num_words; ++i) 2315 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); 2316 2317 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV); 2318 for (i = 0; i < num_words; ++i) 2319 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); 2320 2321 mr = (SHA_MR_MODE_IDATAR0 | 2322 SHA_MR_HMAC | 2323 SHA_MR_DUALBUFF); 2324 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; 2325 atmel_sha_write(dd, SHA_MR, mr); 2326 2327 msg_size = authctx->assoclen + authctx->textlen; 2328 atmel_sha_write(dd, SHA_MSR, msg_size); 2329 atmel_sha_write(dd, SHA_BCR, msg_size); 2330 2331 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); 2332 2333 /* Process assoc data. */ 2334 return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen, 2335 true, false, 2336 atmel_sha_authenc_init_done); 2337 } 2338 2339 static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd) 2340 { 2341 struct ahash_request *req = dd->req; 2342 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2343 2344 return authctx->cb(authctx->aes_dev, 0, dd->is_async); 2345 } 2346 2347 int atmel_sha_authenc_final(struct ahash_request *req, 2348 u32 *digest, unsigned int digestlen, 2349 atmel_aes_authenc_fn_t cb, 2350 struct atmel_aes_dev *aes_dev) 2351 { 2352 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2353 struct atmel_sha_reqctx *ctx = &authctx->base; 2354 struct atmel_sha_dev *dd = ctx->dd; 2355 2356 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { 2357 case SHA_FLAGS_SHA1: 2358 authctx->digestlen = SHA1_DIGEST_SIZE; 2359 break; 2360 2361 case SHA_FLAGS_SHA224: 2362 authctx->digestlen = SHA224_DIGEST_SIZE; 2363 break; 2364 2365 case SHA_FLAGS_SHA256: 2366 authctx->digestlen = SHA256_DIGEST_SIZE; 2367 break; 2368 2369 case SHA_FLAGS_SHA384: 2370 authctx->digestlen = SHA384_DIGEST_SIZE; 2371 break; 2372 2373 case SHA_FLAGS_SHA512: 2374 authctx->digestlen = SHA512_DIGEST_SIZE; 2375 break; 2376 2377 default: 2378 return atmel_sha_complete(dd, -EINVAL); 2379 } 2380 if (authctx->digestlen > digestlen) 2381 authctx->digestlen = digestlen; 2382 2383 authctx->cb = cb; 2384 authctx->aes_dev = aes_dev; 2385 authctx->digest = digest; 2386 return atmel_sha_wait_for_data_ready(dd, 2387 atmel_sha_authenc_final_done); 2388 } 2389 EXPORT_SYMBOL_GPL(atmel_sha_authenc_final); 2390 2391 static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd) 2392 { 2393 struct ahash_request *req = dd->req; 2394 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2395 size_t i, num_words = authctx->digestlen / sizeof(u32); 2396 2397 for (i = 0; i < num_words; ++i) 2398 authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); 2399 2400 return atmel_sha_complete(dd, 0); 2401 } 2402 2403 void atmel_sha_authenc_abort(struct ahash_request *req) 2404 { 2405 struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); 2406 struct atmel_sha_reqctx *ctx = &authctx->base; 2407 struct atmel_sha_dev *dd = ctx->dd; 2408 2409 /* Prevent atmel_sha_complete() from calling req->base.complete(). */ 2410 dd->is_async = false; 2411 dd->force_complete = false; 2412 (void)atmel_sha_complete(dd, 0); 2413 } 2414 EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort); 2415 2416 #endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */ 2417 2418 2419 static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd) 2420 { 2421 int i; 2422 2423 if (dd->caps.has_hmac) 2424 for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) 2425 crypto_unregister_ahash(&sha_hmac_algs[i]); 2426 2427 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) 2428 crypto_unregister_ahash(&sha_1_256_algs[i]); 2429 2430 if (dd->caps.has_sha224) 2431 crypto_unregister_ahash(&sha_224_alg); 2432 2433 if (dd->caps.has_sha_384_512) { 2434 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) 2435 crypto_unregister_ahash(&sha_384_512_algs[i]); 2436 } 2437 } 2438 2439 static int atmel_sha_register_algs(struct atmel_sha_dev *dd) 2440 { 2441 int err, i, j; 2442 2443 for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) { 2444 atmel_sha_alg_init(&sha_1_256_algs[i]); 2445 2446 err = crypto_register_ahash(&sha_1_256_algs[i]); 2447 if (err) 2448 goto err_sha_1_256_algs; 2449 } 2450 2451 if (dd->caps.has_sha224) { 2452 atmel_sha_alg_init(&sha_224_alg); 2453 2454 err = crypto_register_ahash(&sha_224_alg); 2455 if (err) 2456 goto err_sha_224_algs; 2457 } 2458 2459 if (dd->caps.has_sha_384_512) { 2460 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) { 2461 atmel_sha_alg_init(&sha_384_512_algs[i]); 2462 2463 err = crypto_register_ahash(&sha_384_512_algs[i]); 2464 if (err) 2465 goto err_sha_384_512_algs; 2466 } 2467 } 2468 2469 if (dd->caps.has_hmac) { 2470 for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) { 2471 atmel_sha_hmac_alg_init(&sha_hmac_algs[i]); 2472 2473 err = crypto_register_ahash(&sha_hmac_algs[i]); 2474 if (err) 2475 goto err_sha_hmac_algs; 2476 } 2477 } 2478 2479 return 0; 2480 2481 /*i = ARRAY_SIZE(sha_hmac_algs);*/ 2482 err_sha_hmac_algs: 2483 for (j = 0; j < i; j++) 2484 crypto_unregister_ahash(&sha_hmac_algs[j]); 2485 i = ARRAY_SIZE(sha_384_512_algs); 2486 err_sha_384_512_algs: 2487 for (j = 0; j < i; j++) 2488 crypto_unregister_ahash(&sha_384_512_algs[j]); 2489 crypto_unregister_ahash(&sha_224_alg); 2490 err_sha_224_algs: 2491 i = ARRAY_SIZE(sha_1_256_algs); 2492 err_sha_1_256_algs: 2493 for (j = 0; j < i; j++) 2494 crypto_unregister_ahash(&sha_1_256_algs[j]); 2495 2496 return err; 2497 } 2498 2499 static int atmel_sha_dma_init(struct atmel_sha_dev *dd) 2500 { 2501 dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx"); 2502 if (IS_ERR(dd->dma_lch_in.chan)) { 2503 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_lch_in.chan), 2504 "DMA channel is not available\n"); 2505 } 2506 2507 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + 2508 SHA_REG_DIN(0); 2509 dd->dma_lch_in.dma_conf.src_maxburst = 1; 2510 dd->dma_lch_in.dma_conf.src_addr_width = 2511 DMA_SLAVE_BUSWIDTH_4_BYTES; 2512 dd->dma_lch_in.dma_conf.dst_maxburst = 1; 2513 dd->dma_lch_in.dma_conf.dst_addr_width = 2514 DMA_SLAVE_BUSWIDTH_4_BYTES; 2515 dd->dma_lch_in.dma_conf.device_fc = false; 2516 2517 return 0; 2518 } 2519 2520 static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd) 2521 { 2522 dma_release_channel(dd->dma_lch_in.chan); 2523 } 2524 2525 static void atmel_sha_get_cap(struct atmel_sha_dev *dd) 2526 { 2527 2528 dd->caps.has_dma = 0; 2529 dd->caps.has_dualbuff = 0; 2530 dd->caps.has_sha224 = 0; 2531 dd->caps.has_sha_384_512 = 0; 2532 dd->caps.has_uihv = 0; 2533 dd->caps.has_hmac = 0; 2534 2535 /* keep only major version number */ 2536 switch (dd->hw_version & 0xff0) { 2537 case 0x700: 2538 case 0x600: 2539 case 0x510: 2540 dd->caps.has_dma = 1; 2541 dd->caps.has_dualbuff = 1; 2542 dd->caps.has_sha224 = 1; 2543 dd->caps.has_sha_384_512 = 1; 2544 dd->caps.has_uihv = 1; 2545 dd->caps.has_hmac = 1; 2546 break; 2547 case 0x420: 2548 dd->caps.has_dma = 1; 2549 dd->caps.has_dualbuff = 1; 2550 dd->caps.has_sha224 = 1; 2551 dd->caps.has_sha_384_512 = 1; 2552 dd->caps.has_uihv = 1; 2553 break; 2554 case 0x410: 2555 dd->caps.has_dma = 1; 2556 dd->caps.has_dualbuff = 1; 2557 dd->caps.has_sha224 = 1; 2558 dd->caps.has_sha_384_512 = 1; 2559 break; 2560 case 0x400: 2561 dd->caps.has_dma = 1; 2562 dd->caps.has_dualbuff = 1; 2563 dd->caps.has_sha224 = 1; 2564 break; 2565 case 0x320: 2566 break; 2567 default: 2568 dev_warn(dd->dev, 2569 "Unmanaged sha version, set minimum capabilities\n"); 2570 break; 2571 } 2572 } 2573 2574 static const struct of_device_id atmel_sha_dt_ids[] = { 2575 { .compatible = "atmel,at91sam9g46-sha" }, 2576 { /* sentinel */ } 2577 }; 2578 2579 MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids); 2580 2581 static int atmel_sha_probe(struct platform_device *pdev) 2582 { 2583 struct atmel_sha_dev *sha_dd; 2584 struct device *dev = &pdev->dev; 2585 struct resource *sha_res; 2586 int err; 2587 2588 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL); 2589 if (!sha_dd) 2590 return -ENOMEM; 2591 2592 sha_dd->dev = dev; 2593 2594 platform_set_drvdata(pdev, sha_dd); 2595 2596 INIT_LIST_HEAD(&sha_dd->list); 2597 spin_lock_init(&sha_dd->lock); 2598 2599 tasklet_init(&sha_dd->done_task, atmel_sha_done_task, 2600 (unsigned long)sha_dd); 2601 tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task, 2602 (unsigned long)sha_dd); 2603 2604 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); 2605 2606 sha_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &sha_res); 2607 if (IS_ERR(sha_dd->io_base)) { 2608 err = PTR_ERR(sha_dd->io_base); 2609 goto err_tasklet_kill; 2610 } 2611 sha_dd->phys_base = sha_res->start; 2612 2613 /* Get the IRQ */ 2614 sha_dd->irq = platform_get_irq(pdev, 0); 2615 if (sha_dd->irq < 0) { 2616 err = sha_dd->irq; 2617 goto err_tasklet_kill; 2618 } 2619 2620 err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq, 2621 IRQF_SHARED, "atmel-sha", sha_dd); 2622 if (err) { 2623 dev_err(dev, "unable to request sha irq.\n"); 2624 goto err_tasklet_kill; 2625 } 2626 2627 /* Initializing the clock */ 2628 sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk"); 2629 if (IS_ERR(sha_dd->iclk)) { 2630 dev_err(dev, "clock initialization failed.\n"); 2631 err = PTR_ERR(sha_dd->iclk); 2632 goto err_tasklet_kill; 2633 } 2634 2635 err = clk_prepare(sha_dd->iclk); 2636 if (err) 2637 goto err_tasklet_kill; 2638 2639 err = atmel_sha_hw_version_init(sha_dd); 2640 if (err) 2641 goto err_iclk_unprepare; 2642 2643 atmel_sha_get_cap(sha_dd); 2644 2645 if (sha_dd->caps.has_dma) { 2646 err = atmel_sha_dma_init(sha_dd); 2647 if (err) 2648 goto err_iclk_unprepare; 2649 2650 dev_info(dev, "using %s for DMA transfers\n", 2651 dma_chan_name(sha_dd->dma_lch_in.chan)); 2652 } 2653 2654 spin_lock(&atmel_sha.lock); 2655 list_add_tail(&sha_dd->list, &atmel_sha.dev_list); 2656 spin_unlock(&atmel_sha.lock); 2657 2658 err = atmel_sha_register_algs(sha_dd); 2659 if (err) 2660 goto err_algs; 2661 2662 dev_info(dev, "Atmel SHA1/SHA256%s%s\n", 2663 sha_dd->caps.has_sha224 ? "/SHA224" : "", 2664 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : ""); 2665 2666 return 0; 2667 2668 err_algs: 2669 spin_lock(&atmel_sha.lock); 2670 list_del(&sha_dd->list); 2671 spin_unlock(&atmel_sha.lock); 2672 if (sha_dd->caps.has_dma) 2673 atmel_sha_dma_cleanup(sha_dd); 2674 err_iclk_unprepare: 2675 clk_unprepare(sha_dd->iclk); 2676 err_tasklet_kill: 2677 tasklet_kill(&sha_dd->queue_task); 2678 tasklet_kill(&sha_dd->done_task); 2679 2680 return err; 2681 } 2682 2683 static int atmel_sha_remove(struct platform_device *pdev) 2684 { 2685 struct atmel_sha_dev *sha_dd = platform_get_drvdata(pdev); 2686 2687 spin_lock(&atmel_sha.lock); 2688 list_del(&sha_dd->list); 2689 spin_unlock(&atmel_sha.lock); 2690 2691 atmel_sha_unregister_algs(sha_dd); 2692 2693 tasklet_kill(&sha_dd->queue_task); 2694 tasklet_kill(&sha_dd->done_task); 2695 2696 if (sha_dd->caps.has_dma) 2697 atmel_sha_dma_cleanup(sha_dd); 2698 2699 clk_unprepare(sha_dd->iclk); 2700 2701 return 0; 2702 } 2703 2704 static struct platform_driver atmel_sha_driver = { 2705 .probe = atmel_sha_probe, 2706 .remove = atmel_sha_remove, 2707 .driver = { 2708 .name = "atmel_sha", 2709 .of_match_table = atmel_sha_dt_ids, 2710 }, 2711 }; 2712 2713 module_platform_driver(atmel_sha_driver); 2714 2715 MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support."); 2716 MODULE_LICENSE("GPL v2"); 2717 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); 2718