/*
 * Cryptographic API.
 *
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from the omap-sham.c driver.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"

/* SHA flags */
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_ALGO_MASK		GENMASK(22, 18)
#define SHA_FLAGS_SHA1			BIT(18)
#define SHA_FLAGS_SHA224		BIT(19)
#define SHA_FLAGS_SHA256		BIT(20)
#define SHA_FLAGS_SHA384		BIT(21)
#define SHA_FLAGS_SHA512		BIT(22)
#define SHA_FLAGS_ERROR			BIT(23)
#define SHA_FLAGS_PAD			BIT(24)
#define SHA_FLAGS_RESTORE		BIT(25)

#define SHA_OP_UPDATE	1
#define SHA_OP_FINAL	2

#define SHA_BUFFER_LEN		(PAGE_SIZE / 16)

#define ATMEL_SHA_DMA_THRESHOLD		56

struct atmel_sha_caps {
	bool	has_dma;
	bool	has_dualbuff;
	bool	has_sha224;
	bool	has_sha_384_512;
	bool	has_uihv;
};

struct atmel_sha_dev;

/*
 * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
 * tested by the ahash_prepare_alg() function.
 */
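/*
 * Sizing note: with the common 4 KiB PAGE_SIZE, SHA_BUFFER_LEN evaluates to
 * 256 bytes, so the buffer at the end of struct atmel_sha_reqctx below is
 * 256 + SHA512_BLOCK_SIZE = 384 bytes.  The extra block of space is what
 * lets atmel_sha_fill_padding() append padding beyond ->buflen.
 */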
struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long	flags;
	unsigned long	op;

	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	u64	digcnt[2];
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int	offset;	/* offset in current sg */
	unsigned int	total;	/* total request */

	size_t block_size;

	u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;

	unsigned long		flags;
};

#define ATMEL_SHA_QUEUE_LENGTH	50

struct atmel_sha_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
};

struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct atmel_sha_dma	dma_lch_in;

	struct atmel_sha_caps	caps;

	u32	hw_version;
};

struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};

static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list, a 0 length
			 * sg doesn't necessarily mean the end of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
 * 128-bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 *  - if message length < 56 bytes then padlen = 56 - message length
 *  - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 *  - if message length < 112 bytes then padlen = 112 - message length
 *  - else padlen = 128 + 112 - message length
 */
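/*
 * For instance, in the SHA1/SHA256 case a 20-byte message gives index = 20
 * and padlen = 56 - 20 = 36: the 0x80 byte, 35 zero bytes and the 8-byte
 * big-endian bit length (20 * 8 = 160) are appended, for a padded total of
 * 20 + 36 + 8 = 64 bytes, i.e. exactly one block.
 */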
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits[2];
	u64 size[2];

	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;

	size[0] += length;
	if (size[0] < length)
		size[1]++;

	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}

static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}

	spin_unlock_bh(&atmel_sha.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->block_size = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->block_size = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->block_size = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->block_size = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
		break;
	}

	ctx->bufcnt = 0;
	ctx->digcnt[0] = 0;
	ctx->digcnt[1] = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}

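/*
 * Program the Mode Register for the next operation: select auto or PDC
 * mode, enable TXBUFE (PDC transfers) or DATARDY (CPU transfers), pick the
 * algorithm matching the request flags and, for the first block of a
 * request, issue SHA_CR_FIRST, optionally reloading a previously saved
 * digest through the UIHV registers when the hardware supports it.
 */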
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valmr = SHA_MR_MODE_AUTO;
	unsigned int i, hashsize = 0;

	if (likely(dma)) {
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		valmr |= SHA_MR_ALGO_SHA1;
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		valmr |= SHA_MR_ALGO_SHA224;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		valmr |= SHA_MR_ALGO_SHA256;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		valmr |= SHA_MR_ALGO_SHA384;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		valmr |= SHA_MR_ALGO_SHA512;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		break;
	}

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
		const u32 *hash = (const u32 *)ctx->digest;

		/*
		 * Restore the hardware context: update the User Initialize
		 * Hash Value (UIHV) with the value saved when the latest
		 * 'update' operation completed on this very same crypto
		 * request.
		 */
		ctx->flags &= ~SHA_FLAGS_RESTORE;
		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
		for (i = 0; i < hashsize / sizeof(u32); ++i)
			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
		valmr |= SHA_MR_UIHV;
	}
	/*
	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
	 * process concurrent requests: the internal registers used to store
	 * the hash/digest are still set to the partial digest output values
	 * computed during the latest round.
	 */

	atmel_sha_write(dd, SHA_MR, valmr);
}

static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length;
	if (ctx->digcnt[0] < length)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}

static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}

static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *in_desc;
	struct scatterlist sg[2];

	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %d, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	dd->dma_lch_in.dma_conf.src_maxburst = 16;
	dd->dma_lch_in.dma_conf.dst_maxburst = 16;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);

	if (length2) {
		sg_init_table(sg, 2);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		sg_dma_address(&sg[1]) = dma_addr2;
		sg_dma_len(&sg[1]) = length2;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		sg_init_table(sg, 1);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
	if (!in_desc)
		return -EINVAL;

	in_desc->callback = atmel_sha_dma_callback;
	in_desc->callback_param = dd;

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return -EINPROGRESS;
}

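/*
 * Submit one or two DMA-mapped buffers to the IP: newer versions
 * (caps.has_dma) go through a dmaengine slave channel, older ones through
 * the built-in PDC.  Both paths return -EINPROGRESS and completion is
 * signalled by interrupt.
 */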
static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	if (dd->caps.has_dma)
		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
				dma_addr2, length2, final);
	else
		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
				dma_addr2, length2, final);
}

static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
					struct atmel_sha_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen +
				ctx->block_size);
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}

static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	atmel_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: 0x%llx 0x%llx, final: %d\n",
		ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);

	if (final)
		atmel_sha_fill_padding(ctx, 0);

	if (final || (ctx->bufcnt == ctx->buflen)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

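/*
 * Fast path for DMA updates: feed the scatterlist to the engine directly
 * when the current entry is word aligned and block-size aligned, otherwise
 * fall back to atmel_sha_update_dma_slow(), which bounces data through
 * ctx->buffer.  On the final update the padding is built in the bounce
 * buffer and, when data remains in the scatterlist, chained as a second
 * DMA buffer.
 */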
static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %u, total: %u\n",
		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
		/* size is not ctx->block_size aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->block_size aligned */
			tail = length & (ctx->block_size - 1);
			length -= tail;
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		tail = length & (ctx->block_size - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %u bytes error\n",
				ctx->buflen + ctx->block_size);
			return -EINVAL;
		}

		if (length == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
				DMA_TO_DEVICE)) {
				dev_err(dd->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
		0, final);
}

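/*
 * Undo the mappings set up by the _dma_start paths: unmap the scatterlist
 * entry when SHA_FLAGS_SG is set (plus the bounce buffer if padding was
 * appended), otherwise unmap the bounce buffer alone.
 */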
static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}

static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);

	if (ctx->flags & SHA_FLAGS_CPU)
		err = atmel_sha_update_cpu(dd);
	else
		err = atmel_sha_update_dma_start(dd);

	/* wait for dma completion before we can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
			err, ctx->digcnt[1], ctx->digcnt[0]);

	return err;
}

static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;
	int count;

	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
	}
	/* faster to handle last block with cpu */
	else {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
	}

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

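/*
 * Read the (partial) digest back from the DIGEST registers into the request
 * context and flag it with SHA_FLAGS_RESTORE, so that the next update on
 * the same request can reload it through the UIHV registers in
 * atmel_sha_write_ctrl().
 */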
static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i, hashsize;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
	case SHA_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		/* Should not happen... */
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); ++i)
		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	ctx->flags |= SHA_FLAGS_RESTORE;
}

static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	if (ctx->flags & SHA_FLAGS_SHA1)
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA224)
		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA256)
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
	else if (ctx->flags & SHA_FLAGS_SHA384)
		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
	else
		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
}

static int atmel_sha_finish(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (ctx->digcnt[0] || ctx->digcnt[1])
		atmel_sha_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1],
		ctx->digcnt[0], ctx->bufcnt);

	return 0;
}

static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable_unprepare(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->queue_task);
}

static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	int err;

	err = clk_prepare_enable(dd->iclk);
	if (err)
		return err;

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		dd->flags |= SHA_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}

static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
	atmel_sha_hw_init(dd);

	dd->hw_version = atmel_sha_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable_unprepare(dd->iclk);
}

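/*
 * Queue handling: enqueue the new request (if any), bail out while the
 * device is busy, otherwise dequeue the next request, signal a backlogged
 * one with -EINPROGRESS and start processing it.  Requests that complete
 * synchronously (error or no asynchronous work) are finished here directly.
 */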
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);

	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}

static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;

	return atmel_sha_handle_queue(dd, req);
}

static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}

static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->flags & SHA_FLAGS_PAD)
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);

	return atmel_sha_enqueue(req, SHA_OP_FINAL);
}

static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}

static int atmel_sha_digest(struct ahash_request *req)
{
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
}


static int atmel_sha_export(struct ahash_request *req, void *out)
{
	const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int atmel_sha_import(struct ahash_request *req, const void *in)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}

static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx));

	return 0;
}

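/*
 * The transformations below are reached through the generic ahash API.  A
 * minimal sketch of a kernel client (the data/digest buffers, done_cb
 * callback and error handling are placeholders) might look like:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, priv);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_ahash_digest(req);	 (may return -EINPROGRESS)
 *
 * The callback runs once the request has been processed by this driver, or
 * by whichever "sha256" provider the crypto core selects.
 */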
static struct ahash_alg sha_1_256_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static struct ahash_alg sha_224_alg = {
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "atmel-sha224",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
};

static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static void atmel_sha_queue_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

	atmel_sha_handle_queue(dd, NULL);
}

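/*
 * Bottom half of the interrupt handler: on the CPU path an OUTPUT_READY
 * flag means the digest is done; on the DMA path the buffers are first
 * unmapped, then either the next chunk of data is started or the request
 * is finished.
 */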
static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
	int err = 0;

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);
}

static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
		crypto_unregister_ahash(&sha_1_256_algs[i]);

	if (dd->caps.has_sha224)
		crypto_unregister_ahash(&sha_224_alg);

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
			crypto_unregister_ahash(&sha_384_512_algs[i]);
	}
}

static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
		err = crypto_register_ahash(&sha_1_256_algs[i]);
		if (err)
			goto err_sha_1_256_algs;
	}

	if (dd->caps.has_sha224) {
		err = crypto_register_ahash(&sha_224_alg);
		if (err)
			goto err_sha_224_algs;
	}

	if (dd->caps.has_sha_384_512) {
		for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
			err = crypto_register_ahash(&sha_384_512_algs[i]);
			if (err)
				goto err_sha_384_512_algs;
		}
	}

	return 0;

err_sha_384_512_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_384_512_algs[j]);
	crypto_unregister_ahash(&sha_224_alg);
err_sha_224_algs:
	i = ARRAY_SIZE(sha_1_256_algs);
err_sha_1_256_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_1_256_algs[j]);

	return err;
}

static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave	*sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

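/*
 * Request the slave DMA channel (through device tree or the legacy platform
 * data filter above) and preset the slave configuration: memory-to-device
 * transfers of 32-bit words into the SHA_REG_DIN(0) FIFO.
 */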
static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
				struct crypto_platform_data *pdata)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask_in;

	/* Try to grab DMA channel */
	dma_cap_zero(mask_in);
	dma_cap_set(DMA_SLAVE, mask_in);

	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
			atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
	if (!dd->dma_lch_in.chan) {
		dev_warn(dd->dev, "no DMA channel available\n");
		return err;
	}

	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
		SHA_REG_DIN(0);
	dd->dma_lch_in.dma_conf.src_maxburst = 1;
	dd->dma_lch_in.dma_conf.src_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
	dd->dma_lch_in.dma_conf.dst_addr_width =
		DMA_SLAVE_BUSWIDTH_4_BYTES;
	dd->dma_lch_in.dma_conf.device_fc = false;

	return 0;
}

static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
}

static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
{

	dd->caps.has_dma = 0;
	dd->caps.has_dualbuff = 0;
	dd->caps.has_sha224 = 0;
	dd->caps.has_sha_384_512 = 0;
	dd->caps.has_uihv = 0;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x510:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		break;
	case 0x420:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		dd->caps.has_uihv = 1;
		break;
	case 0x410:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		dd->caps.has_sha_384_512 = 1;
		break;
	case 0x400:
		dd->caps.has_dma = 1;
		dd->caps.has_dualbuff = 1;
		dd->caps.has_sha224 = 1;
		break;
	case 0x320:
		break;
	default:
		dev_warn(dd->dev,
				"Unmanaged sha version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-sha" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);

static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else /* CONFIG_OF */
static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
{
	return ERR_PTR(-EINVAL);
}
#endif

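/*
 * Probe: map the registers, grab the IRQ and the peripheral clock, read the
 * IP version to fill in the capability flags, set up the DMA channel when
 * the IP supports it, then register the ahash algorithms and add the device
 * to the global list used by atmel_sha_init().
 */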
static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	int err;

	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto sha_dd_err;
	}

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);
	spin_lock_init(&sha_dd->lock);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
					(unsigned long)sha_dd);
	tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
					(unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	sha_dd->irq = -1;

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	sha_dd->phys_base = sha_res->start;

	/* Get the IRQ */
	sha_dd->irq = platform_get_irq(pdev, 0);
	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = sha_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
			       IRQF_SHARED, "atmel-sha", sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto res_err;
	}

	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
	if (IS_ERR(sha_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(sha_dd->io_base);
		goto res_err;
	}

	atmel_sha_hw_version_init(sha_dd);

	atmel_sha_get_cap(sha_dd);

	if (sha_dd->caps.has_dma) {
		pdata = pdev->dev.platform_data;
		if (!pdata) {
			pdata = atmel_sha_of_init(pdev);
			if (IS_ERR(pdata)) {
				dev_err(&pdev->dev, "platform data not available\n");
				err = PTR_ERR(pdata);
				goto res_err;
			}
		}
		if (!pdata->dma_slave) {
			err = -ENXIO;
			goto res_err;
		}
		err = atmel_sha_dma_init(sha_dd, pdata);
		if (err)
			goto err_sha_dma;

		dev_info(dev, "using %s for DMA transfers\n",
				dma_chan_name(sha_dd->dma_lch_in.chan));
	}

	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
			sha_dd->caps.has_sha224 ? "/SHA224" : "",
			sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	if (sha_dd->caps.has_dma)
		atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
res_err:
	tasklet_kill(&sha_dd->queue_task);
	tasklet_kill(&sha_dd->done_task);
sha_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

"/SHA384/SHA512" : ""); 1532 1533 return 0; 1534 1535 err_algs: 1536 spin_lock(&atmel_sha.lock); 1537 list_del(&sha_dd->list); 1538 spin_unlock(&atmel_sha.lock); 1539 if (sha_dd->caps.has_dma) 1540 atmel_sha_dma_cleanup(sha_dd); 1541 err_sha_dma: 1542 res_err: 1543 tasklet_kill(&sha_dd->queue_task); 1544 tasklet_kill(&sha_dd->done_task); 1545 sha_dd_err: 1546 dev_err(dev, "initialization failed.\n"); 1547 1548 return err; 1549 } 1550 1551 static int atmel_sha_remove(struct platform_device *pdev) 1552 { 1553 static struct atmel_sha_dev *sha_dd; 1554 1555 sha_dd = platform_get_drvdata(pdev); 1556 if (!sha_dd) 1557 return -ENODEV; 1558 spin_lock(&atmel_sha.lock); 1559 list_del(&sha_dd->list); 1560 spin_unlock(&atmel_sha.lock); 1561 1562 atmel_sha_unregister_algs(sha_dd); 1563 1564 tasklet_kill(&sha_dd->queue_task); 1565 tasklet_kill(&sha_dd->done_task); 1566 1567 if (sha_dd->caps.has_dma) 1568 atmel_sha_dma_cleanup(sha_dd); 1569 1570 iounmap(sha_dd->io_base); 1571 1572 clk_put(sha_dd->iclk); 1573 1574 if (sha_dd->irq >= 0) 1575 free_irq(sha_dd->irq, sha_dd); 1576 1577 return 0; 1578 } 1579 1580 static struct platform_driver atmel_sha_driver = { 1581 .probe = atmel_sha_probe, 1582 .remove = atmel_sha_remove, 1583 .driver = { 1584 .name = "atmel_sha", 1585 .of_match_table = of_match_ptr(atmel_sha_dt_ids), 1586 }, 1587 }; 1588 1589 module_platform_driver(atmel_sha_driver); 1590 1591 MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support."); 1592 MODULE_LICENSE("GPL v2"); 1593 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); 1594