1 /* 2 * Cryptographic API. 3 * 4 * Support for OMAP SHA1/MD5 HW acceleration. 5 * 6 * Copyright (c) 2010 Nokia Corporation 7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> 8 * Copyright (c) 2011 Texas Instruments Incorporated 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License version 2 as published 12 * by the Free Software Foundation. 13 * 14 * Some ideas are from old omap-sha1-md5.c driver. 15 */ 16 17 #define pr_fmt(fmt) "%s: " fmt, __func__ 18 19 #include <linux/err.h> 20 #include <linux/device.h> 21 #include <linux/module.h> 22 #include <linux/init.h> 23 #include <linux/errno.h> 24 #include <linux/interrupt.h> 25 #include <linux/kernel.h> 26 #include <linux/irq.h> 27 #include <linux/io.h> 28 #include <linux/platform_device.h> 29 #include <linux/scatterlist.h> 30 #include <linux/dma-mapping.h> 31 #include <linux/dmaengine.h> 32 #include <linux/pm_runtime.h> 33 #include <linux/of.h> 34 #include <linux/of_device.h> 35 #include <linux/of_address.h> 36 #include <linux/of_irq.h> 37 #include <linux/delay.h> 38 #include <linux/crypto.h> 39 #include <linux/cryptohash.h> 40 #include <crypto/scatterwalk.h> 41 #include <crypto/algapi.h> 42 #include <crypto/sha.h> 43 #include <crypto/hash.h> 44 #include <crypto/hmac.h> 45 #include <crypto/internal/hash.h> 46 47 #define MD5_DIGEST_SIZE 16 48 49 #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04)) 50 #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04)) 51 #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs) 52 53 #define SHA_REG_ODIGEST(dd, x) ((dd)->pdata->odigest_ofs + (x * 0x04)) 54 55 #define SHA_REG_CTRL 0x18 56 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) 57 #define SHA_REG_CTRL_CLOSE_HASH (1 << 4) 58 #define SHA_REG_CTRL_ALGO_CONST (1 << 3) 59 #define SHA_REG_CTRL_ALGO (1 << 2) 60 #define SHA_REG_CTRL_INPUT_READY (1 << 1) 61 #define SHA_REG_CTRL_OUTPUT_READY (1 << 0) 62 63 #define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs) 64 65 #define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs) 66 #define SHA_REG_MASK_DMA_EN (1 << 3) 67 #define SHA_REG_MASK_IT_EN (1 << 2) 68 #define SHA_REG_MASK_SOFTRESET (1 << 1) 69 #define SHA_REG_AUTOIDLE (1 << 0) 70 71 #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs) 72 #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) 73 74 #define SHA_REG_MODE(dd) ((dd)->pdata->mode_ofs) 75 #define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7) 76 #define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5) 77 #define SHA_REG_MODE_CLOSE_HASH (1 << 4) 78 #define SHA_REG_MODE_ALGO_CONSTANT (1 << 3) 79 80 #define SHA_REG_MODE_ALGO_MASK (7 << 0) 81 #define SHA_REG_MODE_ALGO_MD5_128 (0 << 1) 82 #define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1) 83 #define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1) 84 #define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1) 85 #define SHA_REG_MODE_ALGO_SHA2_384 (1 << 0) 86 #define SHA_REG_MODE_ALGO_SHA2_512 (3 << 0) 87 88 #define SHA_REG_LENGTH(dd) ((dd)->pdata->length_ofs) 89 90 #define SHA_REG_IRQSTATUS 0x118 91 #define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3) 92 #define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2) 93 #define SHA_REG_IRQSTATUS_INPUT_RDY (1 << 1) 94 #define SHA_REG_IRQSTATUS_OUTPUT_RDY (1 << 0) 95 96 #define SHA_REG_IRQENA 0x11C 97 #define SHA_REG_IRQENA_CTX_RDY (1 << 3) 98 #define SHA_REG_IRQENA_PARTHASH_RDY (1 << 2) 99 #define SHA_REG_IRQENA_INPUT_RDY (1 << 1) 100 #define SHA_REG_IRQENA_OUTPUT_RDY (1 << 0) 101 102 #define DEFAULT_TIMEOUT_INTERVAL HZ 103 104 #define DEFAULT_AUTOSUSPEND_DELAY 1000 105 106 /* mostly device flags */ 107 #define FLAGS_BUSY 0 108 #define FLAGS_FINAL 1 109 #define FLAGS_DMA_ACTIVE 2 110 #define FLAGS_OUTPUT_READY 3 111 #define FLAGS_INIT 4 112 #define FLAGS_CPU 5 113 #define FLAGS_DMA_READY 6 114 #define FLAGS_AUTO_XOR 7 115 #define FLAGS_BE32_SHA1 8 116 #define FLAGS_SGS_COPIED 9 117 #define FLAGS_SGS_ALLOCED 10 118 /* context flags */ 119 #define FLAGS_FINUP 16 120 121 #define FLAGS_MODE_SHIFT 18 122 #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT) 123 #define FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT) 124 #define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT) 125 #define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT) 126 #define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT) 127 #define FLAGS_MODE_SHA384 (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT) 128 #define FLAGS_MODE_SHA512 (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT) 129 130 #define FLAGS_HMAC 21 131 #define FLAGS_ERROR 22 132 133 #define OP_UPDATE 1 134 #define OP_FINAL 2 135 136 #define OMAP_ALIGN_MASK (sizeof(u32)-1) 137 #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) 138 139 #define BUFLEN SHA512_BLOCK_SIZE 140 #define OMAP_SHA_DMA_THRESHOLD 256 141 142 struct omap_sham_dev; 143 144 struct omap_sham_reqctx { 145 struct omap_sham_dev *dd; 146 unsigned long flags; 147 unsigned long op; 148 149 u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED; 150 size_t digcnt; 151 size_t bufcnt; 152 size_t buflen; 153 154 /* walk state */ 155 struct scatterlist *sg; 156 struct scatterlist sgl[2]; 157 int offset; /* offset in current sg */ 158 int sg_len; 159 unsigned int total; /* total request */ 160 161 u8 buffer[0] OMAP_ALIGNED; 162 }; 163 164 struct omap_sham_hmac_ctx { 165 struct crypto_shash *shash; 166 u8 ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED; 167 u8 opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED; 168 }; 169 170 struct omap_sham_ctx { 171 struct omap_sham_dev *dd; 172 173 unsigned long flags; 174 175 /* fallback stuff */ 176 struct crypto_shash *fallback; 177 178 struct omap_sham_hmac_ctx base[0]; 179 }; 180 181 #define OMAP_SHAM_QUEUE_LENGTH 10 182 183 struct omap_sham_algs_info { 184 struct ahash_alg *algs_list; 185 unsigned int size; 186 unsigned int registered; 187 }; 188 189 struct omap_sham_pdata { 190 struct omap_sham_algs_info *algs_info; 191 unsigned int algs_info_size; 192 unsigned long flags; 193 int digest_size; 194 195 void (*copy_hash)(struct ahash_request *req, int out); 196 void (*write_ctrl)(struct omap_sham_dev *dd, size_t length, 197 int final, int dma); 198 void (*trigger)(struct omap_sham_dev *dd, size_t length); 199 int (*poll_irq)(struct omap_sham_dev *dd); 200 irqreturn_t (*intr_hdlr)(int irq, void *dev_id); 201 202 u32 odigest_ofs; 203 u32 idigest_ofs; 204 u32 din_ofs; 205 u32 digcnt_ofs; 206 u32 rev_ofs; 207 u32 mask_ofs; 208 u32 sysstatus_ofs; 209 u32 mode_ofs; 210 u32 length_ofs; 211 212 u32 major_mask; 213 u32 major_shift; 214 u32 minor_mask; 215 u32 minor_shift; 216 }; 217 218 struct omap_sham_dev { 219 struct list_head list; 220 unsigned long phys_base; 221 struct device *dev; 222 void __iomem *io_base; 223 int irq; 224 spinlock_t lock; 225 int err; 226 struct dma_chan *dma_lch; 227 struct tasklet_struct done_task; 228 u8 polling_mode; 229 u8 xmit_buf[BUFLEN] OMAP_ALIGNED; 230 231 unsigned long flags; 232 struct crypto_queue queue; 233 struct ahash_request *req; 234 235 const struct omap_sham_pdata *pdata; 236 }; 237 238 struct omap_sham_drv { 239 struct list_head dev_list; 240 spinlock_t lock; 241 unsigned long flags; 242 }; 243 244 static struct omap_sham_drv sham = { 245 .dev_list = LIST_HEAD_INIT(sham.dev_list), 246 .lock = __SPIN_LOCK_UNLOCKED(sham.lock), 247 }; 248 249 static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset) 250 { 251 return __raw_readl(dd->io_base + offset); 252 } 253 254 static inline void omap_sham_write(struct omap_sham_dev *dd, 255 u32 offset, u32 value) 256 { 257 __raw_writel(value, dd->io_base + offset); 258 } 259 260 static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address, 261 u32 value, u32 mask) 262 { 263 u32 val; 264 265 val = omap_sham_read(dd, address); 266 val &= ~mask; 267 val |= value; 268 omap_sham_write(dd, address, val); 269 } 270 271 static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) 272 { 273 unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL; 274 275 while (!(omap_sham_read(dd, offset) & bit)) { 276 if (time_is_before_jiffies(timeout)) 277 return -ETIMEDOUT; 278 } 279 280 return 0; 281 } 282 283 static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out) 284 { 285 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 286 struct omap_sham_dev *dd = ctx->dd; 287 u32 *hash = (u32 *)ctx->digest; 288 int i; 289 290 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { 291 if (out) 292 hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i)); 293 else 294 omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]); 295 } 296 } 297 298 static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out) 299 { 300 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 301 struct omap_sham_dev *dd = ctx->dd; 302 int i; 303 304 if (ctx->flags & BIT(FLAGS_HMAC)) { 305 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); 306 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); 307 struct omap_sham_hmac_ctx *bctx = tctx->base; 308 u32 *opad = (u32 *)bctx->opad; 309 310 for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { 311 if (out) 312 opad[i] = omap_sham_read(dd, 313 SHA_REG_ODIGEST(dd, i)); 314 else 315 omap_sham_write(dd, SHA_REG_ODIGEST(dd, i), 316 opad[i]); 317 } 318 } 319 320 omap_sham_copy_hash_omap2(req, out); 321 } 322 323 static void omap_sham_copy_ready_hash(struct ahash_request *req) 324 { 325 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 326 u32 *in = (u32 *)ctx->digest; 327 u32 *hash = (u32 *)req->result; 328 int i, d, big_endian = 0; 329 330 if (!hash) 331 return; 332 333 switch (ctx->flags & FLAGS_MODE_MASK) { 334 case FLAGS_MODE_MD5: 335 d = MD5_DIGEST_SIZE / sizeof(u32); 336 break; 337 case FLAGS_MODE_SHA1: 338 /* OMAP2 SHA1 is big endian */ 339 if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags)) 340 big_endian = 1; 341 d = SHA1_DIGEST_SIZE / sizeof(u32); 342 break; 343 case FLAGS_MODE_SHA224: 344 d = SHA224_DIGEST_SIZE / sizeof(u32); 345 break; 346 case FLAGS_MODE_SHA256: 347 d = SHA256_DIGEST_SIZE / sizeof(u32); 348 break; 349 case FLAGS_MODE_SHA384: 350 d = SHA384_DIGEST_SIZE / sizeof(u32); 351 break; 352 case FLAGS_MODE_SHA512: 353 d = SHA512_DIGEST_SIZE / sizeof(u32); 354 break; 355 default: 356 d = 0; 357 } 358 359 if (big_endian) 360 for (i = 0; i < d; i++) 361 hash[i] = be32_to_cpu(in[i]); 362 else 363 for (i = 0; i < d; i++) 364 hash[i] = le32_to_cpu(in[i]); 365 } 366 367 static int omap_sham_hw_init(struct omap_sham_dev *dd) 368 { 369 int err; 370 371 err = pm_runtime_get_sync(dd->dev); 372 if (err < 0) { 373 dev_err(dd->dev, "failed to get sync: %d\n", err); 374 return err; 375 } 376 377 if (!test_bit(FLAGS_INIT, &dd->flags)) { 378 set_bit(FLAGS_INIT, &dd->flags); 379 dd->err = 0; 380 } 381 382 return 0; 383 } 384 385 static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length, 386 int final, int dma) 387 { 388 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 389 u32 val = length << 5, mask; 390 391 if (likely(ctx->digcnt)) 392 omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); 393 394 omap_sham_write_mask(dd, SHA_REG_MASK(dd), 395 SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), 396 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); 397 /* 398 * Setting ALGO_CONST only for the first iteration 399 * and CLOSE_HASH only for the last one. 400 */ 401 if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1) 402 val |= SHA_REG_CTRL_ALGO; 403 if (!ctx->digcnt) 404 val |= SHA_REG_CTRL_ALGO_CONST; 405 if (final) 406 val |= SHA_REG_CTRL_CLOSE_HASH; 407 408 mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH | 409 SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; 410 411 omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); 412 } 413 414 static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length) 415 { 416 } 417 418 static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd) 419 { 420 return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY); 421 } 422 423 static int get_block_size(struct omap_sham_reqctx *ctx) 424 { 425 int d; 426 427 switch (ctx->flags & FLAGS_MODE_MASK) { 428 case FLAGS_MODE_MD5: 429 case FLAGS_MODE_SHA1: 430 d = SHA1_BLOCK_SIZE; 431 break; 432 case FLAGS_MODE_SHA224: 433 case FLAGS_MODE_SHA256: 434 d = SHA256_BLOCK_SIZE; 435 break; 436 case FLAGS_MODE_SHA384: 437 case FLAGS_MODE_SHA512: 438 d = SHA512_BLOCK_SIZE; 439 break; 440 default: 441 d = 0; 442 } 443 444 return d; 445 } 446 447 static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset, 448 u32 *value, int count) 449 { 450 for (; count--; value++, offset += 4) 451 omap_sham_write(dd, offset, *value); 452 } 453 454 static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, 455 int final, int dma) 456 { 457 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 458 u32 val, mask; 459 460 /* 461 * Setting ALGO_CONST only for the first iteration and 462 * CLOSE_HASH only for the last one. Note that flags mode bits 463 * correspond to algorithm encoding in mode register. 464 */ 465 val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT); 466 if (!ctx->digcnt) { 467 struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); 468 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); 469 struct omap_sham_hmac_ctx *bctx = tctx->base; 470 int bs, nr_dr; 471 472 val |= SHA_REG_MODE_ALGO_CONSTANT; 473 474 if (ctx->flags & BIT(FLAGS_HMAC)) { 475 bs = get_block_size(ctx); 476 nr_dr = bs / (2 * sizeof(u32)); 477 val |= SHA_REG_MODE_HMAC_KEY_PROC; 478 omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0), 479 (u32 *)bctx->ipad, nr_dr); 480 omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0), 481 (u32 *)bctx->ipad + nr_dr, nr_dr); 482 ctx->digcnt += bs; 483 } 484 } 485 486 if (final) { 487 val |= SHA_REG_MODE_CLOSE_HASH; 488 489 if (ctx->flags & BIT(FLAGS_HMAC)) 490 val |= SHA_REG_MODE_HMAC_OUTER_HASH; 491 } 492 493 mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH | 494 SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH | 495 SHA_REG_MODE_HMAC_KEY_PROC; 496 497 dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); 498 omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask); 499 omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY); 500 omap_sham_write_mask(dd, SHA_REG_MASK(dd), 501 SHA_REG_MASK_IT_EN | 502 (dma ? SHA_REG_MASK_DMA_EN : 0), 503 SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); 504 } 505 506 static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length) 507 { 508 omap_sham_write(dd, SHA_REG_LENGTH(dd), length); 509 } 510 511 static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) 512 { 513 return omap_sham_wait(dd, SHA_REG_IRQSTATUS, 514 SHA_REG_IRQSTATUS_INPUT_RDY); 515 } 516 517 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length, 518 int final) 519 { 520 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 521 int count, len32, bs32, offset = 0; 522 const u32 *buffer; 523 int mlen; 524 struct sg_mapping_iter mi; 525 526 dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", 527 ctx->digcnt, length, final); 528 529 dd->pdata->write_ctrl(dd, length, final, 0); 530 dd->pdata->trigger(dd, length); 531 532 /* should be non-zero before next lines to disable clocks later */ 533 ctx->digcnt += length; 534 ctx->total -= length; 535 536 if (final) 537 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ 538 539 set_bit(FLAGS_CPU, &dd->flags); 540 541 len32 = DIV_ROUND_UP(length, sizeof(u32)); 542 bs32 = get_block_size(ctx) / sizeof(u32); 543 544 sg_miter_start(&mi, ctx->sg, ctx->sg_len, 545 SG_MITER_FROM_SG | SG_MITER_ATOMIC); 546 547 mlen = 0; 548 549 while (len32) { 550 if (dd->pdata->poll_irq(dd)) 551 return -ETIMEDOUT; 552 553 for (count = 0; count < min(len32, bs32); count++, offset++) { 554 if (!mlen) { 555 sg_miter_next(&mi); 556 mlen = mi.length; 557 if (!mlen) { 558 pr_err("sg miter failure.\n"); 559 return -EINVAL; 560 } 561 offset = 0; 562 buffer = mi.addr; 563 } 564 omap_sham_write(dd, SHA_REG_DIN(dd, count), 565 buffer[offset]); 566 mlen -= 4; 567 } 568 len32 -= min(len32, bs32); 569 } 570 571 sg_miter_stop(&mi); 572 573 return -EINPROGRESS; 574 } 575 576 static void omap_sham_dma_callback(void *param) 577 { 578 struct omap_sham_dev *dd = param; 579 580 set_bit(FLAGS_DMA_READY, &dd->flags); 581 tasklet_schedule(&dd->done_task); 582 } 583 584 static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length, 585 int final) 586 { 587 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 588 struct dma_async_tx_descriptor *tx; 589 struct dma_slave_config cfg; 590 int ret; 591 592 dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", 593 ctx->digcnt, length, final); 594 595 if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) { 596 dev_err(dd->dev, "dma_map_sg error\n"); 597 return -EINVAL; 598 } 599 600 memset(&cfg, 0, sizeof(cfg)); 601 602 cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); 603 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 604 cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES; 605 606 ret = dmaengine_slave_config(dd->dma_lch, &cfg); 607 if (ret) { 608 pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret); 609 return ret; 610 } 611 612 tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len, 613 DMA_MEM_TO_DEV, 614 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 615 616 if (!tx) { 617 dev_err(dd->dev, "prep_slave_sg failed\n"); 618 return -EINVAL; 619 } 620 621 tx->callback = omap_sham_dma_callback; 622 tx->callback_param = dd; 623 624 dd->pdata->write_ctrl(dd, length, final, 1); 625 626 ctx->digcnt += length; 627 ctx->total -= length; 628 629 if (final) 630 set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ 631 632 set_bit(FLAGS_DMA_ACTIVE, &dd->flags); 633 634 dmaengine_submit(tx); 635 dma_async_issue_pending(dd->dma_lch); 636 637 dd->pdata->trigger(dd, length); 638 639 return -EINPROGRESS; 640 } 641 642 static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx, 643 struct scatterlist *sg, int bs, int new_len) 644 { 645 int n = sg_nents(sg); 646 struct scatterlist *tmp; 647 int offset = ctx->offset; 648 649 if (ctx->bufcnt) 650 n++; 651 652 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); 653 if (!ctx->sg) 654 return -ENOMEM; 655 656 sg_init_table(ctx->sg, n); 657 658 tmp = ctx->sg; 659 660 ctx->sg_len = 0; 661 662 if (ctx->bufcnt) { 663 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt); 664 tmp = sg_next(tmp); 665 ctx->sg_len++; 666 } 667 668 while (sg && new_len) { 669 int len = sg->length - offset; 670 671 if (offset) { 672 offset -= sg->length; 673 if (offset < 0) 674 offset = 0; 675 } 676 677 if (new_len < len) 678 len = new_len; 679 680 if (len > 0) { 681 new_len -= len; 682 sg_set_page(tmp, sg_page(sg), len, sg->offset); 683 if (new_len <= 0) 684 sg_mark_end(tmp); 685 tmp = sg_next(tmp); 686 ctx->sg_len++; 687 } 688 689 sg = sg_next(sg); 690 } 691 692 set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags); 693 694 ctx->bufcnt = 0; 695 696 return 0; 697 } 698 699 static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx, 700 struct scatterlist *sg, int bs, int new_len) 701 { 702 int pages; 703 void *buf; 704 int len; 705 706 len = new_len + ctx->bufcnt; 707 708 pages = get_order(ctx->total); 709 710 buf = (void *)__get_free_pages(GFP_ATOMIC, pages); 711 if (!buf) { 712 pr_err("Couldn't allocate pages for unaligned cases.\n"); 713 return -ENOMEM; 714 } 715 716 if (ctx->bufcnt) 717 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); 718 719 scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset, 720 ctx->total - ctx->bufcnt, 0); 721 sg_init_table(ctx->sgl, 1); 722 sg_set_buf(ctx->sgl, buf, len); 723 ctx->sg = ctx->sgl; 724 set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags); 725 ctx->sg_len = 1; 726 ctx->bufcnt = 0; 727 ctx->offset = 0; 728 729 return 0; 730 } 731 732 static int omap_sham_align_sgs(struct scatterlist *sg, 733 int nbytes, int bs, bool final, 734 struct omap_sham_reqctx *rctx) 735 { 736 int n = 0; 737 bool aligned = true; 738 bool list_ok = true; 739 struct scatterlist *sg_tmp = sg; 740 int new_len; 741 int offset = rctx->offset; 742 743 if (!sg || !sg->length || !nbytes) 744 return 0; 745 746 new_len = nbytes; 747 748 if (offset) 749 list_ok = false; 750 751 if (final) 752 new_len = DIV_ROUND_UP(new_len, bs) * bs; 753 else 754 new_len = (new_len - 1) / bs * bs; 755 756 if (nbytes != new_len) 757 list_ok = false; 758 759 while (nbytes > 0 && sg_tmp) { 760 n++; 761 762 if (offset < sg_tmp->length) { 763 if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) { 764 aligned = false; 765 break; 766 } 767 768 if (!IS_ALIGNED(sg_tmp->length - offset, bs)) { 769 aligned = false; 770 break; 771 } 772 } 773 774 if (offset) { 775 offset -= sg_tmp->length; 776 if (offset < 0) { 777 nbytes += offset; 778 offset = 0; 779 } 780 } else { 781 nbytes -= sg_tmp->length; 782 } 783 784 sg_tmp = sg_next(sg_tmp); 785 786 if (nbytes < 0) { 787 list_ok = false; 788 break; 789 } 790 } 791 792 if (!aligned) 793 return omap_sham_copy_sgs(rctx, sg, bs, new_len); 794 else if (!list_ok) 795 return omap_sham_copy_sg_lists(rctx, sg, bs, new_len); 796 797 rctx->sg_len = n; 798 rctx->sg = sg; 799 800 return 0; 801 } 802 803 static int omap_sham_prepare_request(struct ahash_request *req, bool update) 804 { 805 struct omap_sham_reqctx *rctx = ahash_request_ctx(req); 806 int bs; 807 int ret; 808 int nbytes; 809 bool final = rctx->flags & BIT(FLAGS_FINUP); 810 int xmit_len, hash_later; 811 812 if (!req) 813 return 0; 814 815 bs = get_block_size(rctx); 816 817 if (update) 818 nbytes = req->nbytes; 819 else 820 nbytes = 0; 821 822 rctx->total = nbytes + rctx->bufcnt; 823 824 if (!rctx->total) 825 return 0; 826 827 if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) { 828 int len = bs - rctx->bufcnt % bs; 829 830 if (len > nbytes) 831 len = nbytes; 832 scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src, 833 0, len, 0); 834 rctx->bufcnt += len; 835 nbytes -= len; 836 rctx->offset = len; 837 } 838 839 if (rctx->bufcnt) 840 memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt); 841 842 ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx); 843 if (ret) 844 return ret; 845 846 xmit_len = rctx->total; 847 848 if (!IS_ALIGNED(xmit_len, bs)) { 849 if (final) 850 xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs; 851 else 852 xmit_len = xmit_len / bs * bs; 853 } else if (!final) { 854 xmit_len -= bs; 855 } 856 857 hash_later = rctx->total - xmit_len; 858 if (hash_later < 0) 859 hash_later = 0; 860 861 if (rctx->bufcnt && nbytes) { 862 /* have data from previous operation and current */ 863 sg_init_table(rctx->sgl, 2); 864 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt); 865 866 sg_chain(rctx->sgl, 2, req->src); 867 868 rctx->sg = rctx->sgl; 869 870 rctx->sg_len++; 871 } else if (rctx->bufcnt) { 872 /* have buffered data only */ 873 sg_init_table(rctx->sgl, 1); 874 sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len); 875 876 rctx->sg = rctx->sgl; 877 878 rctx->sg_len = 1; 879 } 880 881 if (hash_later) { 882 int offset = 0; 883 884 if (hash_later > req->nbytes) { 885 memcpy(rctx->buffer, rctx->buffer + xmit_len, 886 hash_later - req->nbytes); 887 offset = hash_later - req->nbytes; 888 } 889 890 if (req->nbytes) { 891 scatterwalk_map_and_copy(rctx->buffer + offset, 892 req->src, 893 offset + req->nbytes - 894 hash_later, hash_later, 0); 895 } 896 897 rctx->bufcnt = hash_later; 898 } else { 899 rctx->bufcnt = 0; 900 } 901 902 if (!final) 903 rctx->total = xmit_len; 904 905 return 0; 906 } 907 908 static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) 909 { 910 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); 911 912 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); 913 914 clear_bit(FLAGS_DMA_ACTIVE, &dd->flags); 915 916 return 0; 917 } 918 919 static int omap_sham_init(struct ahash_request *req) 920 { 921 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 922 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); 923 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 924 struct omap_sham_dev *dd = NULL, *tmp; 925 int bs = 0; 926 927 spin_lock_bh(&sham.lock); 928 if (!tctx->dd) { 929 list_for_each_entry(tmp, &sham.dev_list, list) { 930 dd = tmp; 931 break; 932 } 933 tctx->dd = dd; 934 } else { 935 dd = tctx->dd; 936 } 937 spin_unlock_bh(&sham.lock); 938 939 ctx->dd = dd; 940 941 ctx->flags = 0; 942 943 dev_dbg(dd->dev, "init: digest size: %d\n", 944 crypto_ahash_digestsize(tfm)); 945 946 switch (crypto_ahash_digestsize(tfm)) { 947 case MD5_DIGEST_SIZE: 948 ctx->flags |= FLAGS_MODE_MD5; 949 bs = SHA1_BLOCK_SIZE; 950 break; 951 case SHA1_DIGEST_SIZE: 952 ctx->flags |= FLAGS_MODE_SHA1; 953 bs = SHA1_BLOCK_SIZE; 954 break; 955 case SHA224_DIGEST_SIZE: 956 ctx->flags |= FLAGS_MODE_SHA224; 957 bs = SHA224_BLOCK_SIZE; 958 break; 959 case SHA256_DIGEST_SIZE: 960 ctx->flags |= FLAGS_MODE_SHA256; 961 bs = SHA256_BLOCK_SIZE; 962 break; 963 case SHA384_DIGEST_SIZE: 964 ctx->flags |= FLAGS_MODE_SHA384; 965 bs = SHA384_BLOCK_SIZE; 966 break; 967 case SHA512_DIGEST_SIZE: 968 ctx->flags |= FLAGS_MODE_SHA512; 969 bs = SHA512_BLOCK_SIZE; 970 break; 971 } 972 973 ctx->bufcnt = 0; 974 ctx->digcnt = 0; 975 ctx->total = 0; 976 ctx->offset = 0; 977 ctx->buflen = BUFLEN; 978 979 if (tctx->flags & BIT(FLAGS_HMAC)) { 980 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { 981 struct omap_sham_hmac_ctx *bctx = tctx->base; 982 983 memcpy(ctx->buffer, bctx->ipad, bs); 984 ctx->bufcnt = bs; 985 } 986 987 ctx->flags |= BIT(FLAGS_HMAC); 988 } 989 990 return 0; 991 992 } 993 994 static int omap_sham_update_req(struct omap_sham_dev *dd) 995 { 996 struct ahash_request *req = dd->req; 997 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 998 int err; 999 bool final = ctx->flags & BIT(FLAGS_FINUP); 1000 1001 dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", 1002 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); 1003 1004 if (ctx->total < get_block_size(ctx) || 1005 ctx->total < OMAP_SHA_DMA_THRESHOLD) 1006 ctx->flags |= BIT(FLAGS_CPU); 1007 1008 if (ctx->flags & BIT(FLAGS_CPU)) 1009 err = omap_sham_xmit_cpu(dd, ctx->total, final); 1010 else 1011 err = omap_sham_xmit_dma(dd, ctx->total, final); 1012 1013 /* wait for dma completion before can take more data */ 1014 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); 1015 1016 return err; 1017 } 1018 1019 static int omap_sham_final_req(struct omap_sham_dev *dd) 1020 { 1021 struct ahash_request *req = dd->req; 1022 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1023 int err = 0, use_dma = 1; 1024 1025 if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode) 1026 /* 1027 * faster to handle last block with cpu or 1028 * use cpu when dma is not present. 1029 */ 1030 use_dma = 0; 1031 1032 if (use_dma) 1033 err = omap_sham_xmit_dma(dd, ctx->total, 1); 1034 else 1035 err = omap_sham_xmit_cpu(dd, ctx->total, 1); 1036 1037 ctx->bufcnt = 0; 1038 1039 dev_dbg(dd->dev, "final_req: err: %d\n", err); 1040 1041 return err; 1042 } 1043 1044 static int omap_sham_finish_hmac(struct ahash_request *req) 1045 { 1046 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 1047 struct omap_sham_hmac_ctx *bctx = tctx->base; 1048 int bs = crypto_shash_blocksize(bctx->shash); 1049 int ds = crypto_shash_digestsize(bctx->shash); 1050 SHASH_DESC_ON_STACK(shash, bctx->shash); 1051 1052 shash->tfm = bctx->shash; 1053 shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */ 1054 1055 return crypto_shash_init(shash) ?: 1056 crypto_shash_update(shash, bctx->opad, bs) ?: 1057 crypto_shash_finup(shash, req->result, ds, req->result); 1058 } 1059 1060 static int omap_sham_finish(struct ahash_request *req) 1061 { 1062 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1063 struct omap_sham_dev *dd = ctx->dd; 1064 int err = 0; 1065 1066 if (ctx->digcnt) { 1067 omap_sham_copy_ready_hash(req); 1068 if ((ctx->flags & BIT(FLAGS_HMAC)) && 1069 !test_bit(FLAGS_AUTO_XOR, &dd->flags)) 1070 err = omap_sham_finish_hmac(req); 1071 } 1072 1073 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); 1074 1075 return err; 1076 } 1077 1078 static void omap_sham_finish_req(struct ahash_request *req, int err) 1079 { 1080 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1081 struct omap_sham_dev *dd = ctx->dd; 1082 1083 if (test_bit(FLAGS_SGS_COPIED, &dd->flags)) 1084 free_pages((unsigned long)sg_virt(ctx->sg), 1085 get_order(ctx->sg->length)); 1086 1087 if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags)) 1088 kfree(ctx->sg); 1089 1090 ctx->sg = NULL; 1091 1092 dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED)); 1093 1094 if (!err) { 1095 dd->pdata->copy_hash(req, 1); 1096 if (test_bit(FLAGS_FINAL, &dd->flags)) 1097 err = omap_sham_finish(req); 1098 } else { 1099 ctx->flags |= BIT(FLAGS_ERROR); 1100 } 1101 1102 /* atomic operation is not needed here */ 1103 dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | 1104 BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); 1105 1106 pm_runtime_mark_last_busy(dd->dev); 1107 pm_runtime_put_autosuspend(dd->dev); 1108 1109 if (req->base.complete) 1110 req->base.complete(&req->base, err); 1111 } 1112 1113 static int omap_sham_handle_queue(struct omap_sham_dev *dd, 1114 struct ahash_request *req) 1115 { 1116 struct crypto_async_request *async_req, *backlog; 1117 struct omap_sham_reqctx *ctx; 1118 unsigned long flags; 1119 int err = 0, ret = 0; 1120 1121 retry: 1122 spin_lock_irqsave(&dd->lock, flags); 1123 if (req) 1124 ret = ahash_enqueue_request(&dd->queue, req); 1125 if (test_bit(FLAGS_BUSY, &dd->flags)) { 1126 spin_unlock_irqrestore(&dd->lock, flags); 1127 return ret; 1128 } 1129 backlog = crypto_get_backlog(&dd->queue); 1130 async_req = crypto_dequeue_request(&dd->queue); 1131 if (async_req) 1132 set_bit(FLAGS_BUSY, &dd->flags); 1133 spin_unlock_irqrestore(&dd->lock, flags); 1134 1135 if (!async_req) 1136 return ret; 1137 1138 if (backlog) 1139 backlog->complete(backlog, -EINPROGRESS); 1140 1141 req = ahash_request_cast(async_req); 1142 dd->req = req; 1143 ctx = ahash_request_ctx(req); 1144 1145 err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE); 1146 if (err || !ctx->total) 1147 goto err1; 1148 1149 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", 1150 ctx->op, req->nbytes); 1151 1152 err = omap_sham_hw_init(dd); 1153 if (err) 1154 goto err1; 1155 1156 if (ctx->digcnt) 1157 /* request has changed - restore hash */ 1158 dd->pdata->copy_hash(req, 0); 1159 1160 if (ctx->op == OP_UPDATE) { 1161 err = omap_sham_update_req(dd); 1162 if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP))) 1163 /* no final() after finup() */ 1164 err = omap_sham_final_req(dd); 1165 } else if (ctx->op == OP_FINAL) { 1166 err = omap_sham_final_req(dd); 1167 } 1168 err1: 1169 dev_dbg(dd->dev, "exit, err: %d\n", err); 1170 1171 if (err != -EINPROGRESS) { 1172 /* done_task will not finish it, so do it here */ 1173 omap_sham_finish_req(req, err); 1174 req = NULL; 1175 1176 /* 1177 * Execute next request immediately if there is anything 1178 * in queue. 1179 */ 1180 goto retry; 1181 } 1182 1183 return ret; 1184 } 1185 1186 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) 1187 { 1188 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1189 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 1190 struct omap_sham_dev *dd = tctx->dd; 1191 1192 ctx->op = op; 1193 1194 return omap_sham_handle_queue(dd, req); 1195 } 1196 1197 static int omap_sham_update(struct ahash_request *req) 1198 { 1199 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1200 struct omap_sham_dev *dd = ctx->dd; 1201 1202 if (!req->nbytes) 1203 return 0; 1204 1205 if (ctx->bufcnt + req->nbytes <= ctx->buflen) { 1206 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, 1207 0, req->nbytes, 0); 1208 ctx->bufcnt += req->nbytes; 1209 return 0; 1210 } 1211 1212 if (dd->polling_mode) 1213 ctx->flags |= BIT(FLAGS_CPU); 1214 1215 return omap_sham_enqueue(req, OP_UPDATE); 1216 } 1217 1218 static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags, 1219 const u8 *data, unsigned int len, u8 *out) 1220 { 1221 SHASH_DESC_ON_STACK(shash, tfm); 1222 1223 shash->tfm = tfm; 1224 shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP; 1225 1226 return crypto_shash_digest(shash, data, len, out); 1227 } 1228 1229 static int omap_sham_final_shash(struct ahash_request *req) 1230 { 1231 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 1232 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1233 int offset = 0; 1234 1235 /* 1236 * If we are running HMAC on limited hardware support, skip 1237 * the ipad in the beginning of the buffer if we are going for 1238 * software fallback algorithm. 1239 */ 1240 if (test_bit(FLAGS_HMAC, &ctx->flags) && 1241 !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags)) 1242 offset = get_block_size(ctx); 1243 1244 return omap_sham_shash_digest(tctx->fallback, req->base.flags, 1245 ctx->buffer + offset, 1246 ctx->bufcnt - offset, req->result); 1247 } 1248 1249 static int omap_sham_final(struct ahash_request *req) 1250 { 1251 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1252 1253 ctx->flags |= BIT(FLAGS_FINUP); 1254 1255 if (ctx->flags & BIT(FLAGS_ERROR)) 1256 return 0; /* uncompleted hash is not needed */ 1257 1258 /* 1259 * OMAP HW accel works only with buffers >= 9. 1260 * HMAC is always >= 9 because ipad == block size. 1261 * If buffersize is less than DMA_THRESHOLD, we use fallback 1262 * SW encoding, as using DMA + HW in this case doesn't provide 1263 * any benefit. 1264 */ 1265 if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD) 1266 return omap_sham_final_shash(req); 1267 else if (ctx->bufcnt) 1268 return omap_sham_enqueue(req, OP_FINAL); 1269 1270 /* copy ready hash (+ finalize hmac) */ 1271 return omap_sham_finish(req); 1272 } 1273 1274 static int omap_sham_finup(struct ahash_request *req) 1275 { 1276 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); 1277 int err1, err2; 1278 1279 ctx->flags |= BIT(FLAGS_FINUP); 1280 1281 err1 = omap_sham_update(req); 1282 if (err1 == -EINPROGRESS || err1 == -EBUSY) 1283 return err1; 1284 /* 1285 * final() has to be always called to cleanup resources 1286 * even if udpate() failed, except EINPROGRESS 1287 */ 1288 err2 = omap_sham_final(req); 1289 1290 return err1 ?: err2; 1291 } 1292 1293 static int omap_sham_digest(struct ahash_request *req) 1294 { 1295 return omap_sham_init(req) ?: omap_sham_finup(req); 1296 } 1297 1298 static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, 1299 unsigned int keylen) 1300 { 1301 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); 1302 struct omap_sham_hmac_ctx *bctx = tctx->base; 1303 int bs = crypto_shash_blocksize(bctx->shash); 1304 int ds = crypto_shash_digestsize(bctx->shash); 1305 struct omap_sham_dev *dd = NULL, *tmp; 1306 int err, i; 1307 1308 spin_lock_bh(&sham.lock); 1309 if (!tctx->dd) { 1310 list_for_each_entry(tmp, &sham.dev_list, list) { 1311 dd = tmp; 1312 break; 1313 } 1314 tctx->dd = dd; 1315 } else { 1316 dd = tctx->dd; 1317 } 1318 spin_unlock_bh(&sham.lock); 1319 1320 err = crypto_shash_setkey(tctx->fallback, key, keylen); 1321 if (err) 1322 return err; 1323 1324 if (keylen > bs) { 1325 err = omap_sham_shash_digest(bctx->shash, 1326 crypto_shash_get_flags(bctx->shash), 1327 key, keylen, bctx->ipad); 1328 if (err) 1329 return err; 1330 keylen = ds; 1331 } else { 1332 memcpy(bctx->ipad, key, keylen); 1333 } 1334 1335 memset(bctx->ipad + keylen, 0, bs - keylen); 1336 1337 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { 1338 memcpy(bctx->opad, bctx->ipad, bs); 1339 1340 for (i = 0; i < bs; i++) { 1341 bctx->ipad[i] ^= HMAC_IPAD_VALUE; 1342 bctx->opad[i] ^= HMAC_OPAD_VALUE; 1343 } 1344 } 1345 1346 return err; 1347 } 1348 1349 static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) 1350 { 1351 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); 1352 const char *alg_name = crypto_tfm_alg_name(tfm); 1353 1354 /* Allocate a fallback and abort if it failed. */ 1355 tctx->fallback = crypto_alloc_shash(alg_name, 0, 1356 CRYPTO_ALG_NEED_FALLBACK); 1357 if (IS_ERR(tctx->fallback)) { 1358 pr_err("omap-sham: fallback driver '%s' " 1359 "could not be loaded.\n", alg_name); 1360 return PTR_ERR(tctx->fallback); 1361 } 1362 1363 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1364 sizeof(struct omap_sham_reqctx) + BUFLEN); 1365 1366 if (alg_base) { 1367 struct omap_sham_hmac_ctx *bctx = tctx->base; 1368 tctx->flags |= BIT(FLAGS_HMAC); 1369 bctx->shash = crypto_alloc_shash(alg_base, 0, 1370 CRYPTO_ALG_NEED_FALLBACK); 1371 if (IS_ERR(bctx->shash)) { 1372 pr_err("omap-sham: base driver '%s' " 1373 "could not be loaded.\n", alg_base); 1374 crypto_free_shash(tctx->fallback); 1375 return PTR_ERR(bctx->shash); 1376 } 1377 1378 } 1379 1380 return 0; 1381 } 1382 1383 static int omap_sham_cra_init(struct crypto_tfm *tfm) 1384 { 1385 return omap_sham_cra_init_alg(tfm, NULL); 1386 } 1387 1388 static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm) 1389 { 1390 return omap_sham_cra_init_alg(tfm, "sha1"); 1391 } 1392 1393 static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm) 1394 { 1395 return omap_sham_cra_init_alg(tfm, "sha224"); 1396 } 1397 1398 static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm) 1399 { 1400 return omap_sham_cra_init_alg(tfm, "sha256"); 1401 } 1402 1403 static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) 1404 { 1405 return omap_sham_cra_init_alg(tfm, "md5"); 1406 } 1407 1408 static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm) 1409 { 1410 return omap_sham_cra_init_alg(tfm, "sha384"); 1411 } 1412 1413 static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm) 1414 { 1415 return omap_sham_cra_init_alg(tfm, "sha512"); 1416 } 1417 1418 static void omap_sham_cra_exit(struct crypto_tfm *tfm) 1419 { 1420 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); 1421 1422 crypto_free_shash(tctx->fallback); 1423 tctx->fallback = NULL; 1424 1425 if (tctx->flags & BIT(FLAGS_HMAC)) { 1426 struct omap_sham_hmac_ctx *bctx = tctx->base; 1427 crypto_free_shash(bctx->shash); 1428 } 1429 } 1430 1431 static int omap_sham_export(struct ahash_request *req, void *out) 1432 { 1433 struct omap_sham_reqctx *rctx = ahash_request_ctx(req); 1434 1435 memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt); 1436 1437 return 0; 1438 } 1439 1440 static int omap_sham_import(struct ahash_request *req, const void *in) 1441 { 1442 struct omap_sham_reqctx *rctx = ahash_request_ctx(req); 1443 const struct omap_sham_reqctx *ctx_in = in; 1444 1445 memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt); 1446 1447 return 0; 1448 } 1449 1450 static struct ahash_alg algs_sha1_md5[] = { 1451 { 1452 .init = omap_sham_init, 1453 .update = omap_sham_update, 1454 .final = omap_sham_final, 1455 .finup = omap_sham_finup, 1456 .digest = omap_sham_digest, 1457 .halg.digestsize = SHA1_DIGEST_SIZE, 1458 .halg.base = { 1459 .cra_name = "sha1", 1460 .cra_driver_name = "omap-sha1", 1461 .cra_priority = 400, 1462 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1463 CRYPTO_ALG_KERN_DRIVER_ONLY | 1464 CRYPTO_ALG_ASYNC | 1465 CRYPTO_ALG_NEED_FALLBACK, 1466 .cra_blocksize = SHA1_BLOCK_SIZE, 1467 .cra_ctxsize = sizeof(struct omap_sham_ctx), 1468 .cra_alignmask = OMAP_ALIGN_MASK, 1469 .cra_module = THIS_MODULE, 1470 .cra_init = omap_sham_cra_init, 1471 .cra_exit = omap_sham_cra_exit, 1472 } 1473 }, 1474 { 1475 .init = omap_sham_init, 1476 .update = omap_sham_update, 1477 .final = omap_sham_final, 1478 .finup = omap_sham_finup, 1479 .digest = omap_sham_digest, 1480 .halg.digestsize = MD5_DIGEST_SIZE, 1481 .halg.base = { 1482 .cra_name = "md5", 1483 .cra_driver_name = "omap-md5", 1484 .cra_priority = 400, 1485 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1486 CRYPTO_ALG_KERN_DRIVER_ONLY | 1487 CRYPTO_ALG_ASYNC | 1488 CRYPTO_ALG_NEED_FALLBACK, 1489 .cra_blocksize = SHA1_BLOCK_SIZE, 1490 .cra_ctxsize = sizeof(struct omap_sham_ctx), 1491 .cra_alignmask = OMAP_ALIGN_MASK, 1492 .cra_module = THIS_MODULE, 1493 .cra_init = omap_sham_cra_init, 1494 .cra_exit = omap_sham_cra_exit, 1495 } 1496 }, 1497 { 1498 .init = omap_sham_init, 1499 .update = omap_sham_update, 1500 .final = omap_sham_final, 1501 .finup = omap_sham_finup, 1502 .digest = omap_sham_digest, 1503 .setkey = omap_sham_setkey, 1504 .halg.digestsize = SHA1_DIGEST_SIZE, 1505 .halg.base = { 1506 .cra_name = "hmac(sha1)", 1507 .cra_driver_name = "omap-hmac-sha1", 1508 .cra_priority = 400, 1509 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1510 CRYPTO_ALG_KERN_DRIVER_ONLY | 1511 CRYPTO_ALG_ASYNC | 1512 CRYPTO_ALG_NEED_FALLBACK, 1513 .cra_blocksize = SHA1_BLOCK_SIZE, 1514 .cra_ctxsize = sizeof(struct omap_sham_ctx) + 1515 sizeof(struct omap_sham_hmac_ctx), 1516 .cra_alignmask = OMAP_ALIGN_MASK, 1517 .cra_module = THIS_MODULE, 1518 .cra_init = omap_sham_cra_sha1_init, 1519 .cra_exit = omap_sham_cra_exit, 1520 } 1521 }, 1522 { 1523 .init = omap_sham_init, 1524 .update = omap_sham_update, 1525 .final = omap_sham_final, 1526 .finup = omap_sham_finup, 1527 .digest = omap_sham_digest, 1528 .setkey = omap_sham_setkey, 1529 .halg.digestsize = MD5_DIGEST_SIZE, 1530 .halg.base = { 1531 .cra_name = "hmac(md5)", 1532 .cra_driver_name = "omap-hmac-md5", 1533 .cra_priority = 400, 1534 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1535 CRYPTO_ALG_KERN_DRIVER_ONLY | 1536 CRYPTO_ALG_ASYNC | 1537 CRYPTO_ALG_NEED_FALLBACK, 1538 .cra_blocksize = SHA1_BLOCK_SIZE, 1539 .cra_ctxsize = sizeof(struct omap_sham_ctx) + 1540 sizeof(struct omap_sham_hmac_ctx), 1541 .cra_alignmask = OMAP_ALIGN_MASK, 1542 .cra_module = THIS_MODULE, 1543 .cra_init = omap_sham_cra_md5_init, 1544 .cra_exit = omap_sham_cra_exit, 1545 } 1546 } 1547 }; 1548 1549 /* OMAP4 has some algs in addition to what OMAP2 has */ 1550 static struct ahash_alg algs_sha224_sha256[] = { 1551 { 1552 .init = omap_sham_init, 1553 .update = omap_sham_update, 1554 .final = omap_sham_final, 1555 .finup = omap_sham_finup, 1556 .digest = omap_sham_digest, 1557 .halg.digestsize = SHA224_DIGEST_SIZE, 1558 .halg.base = { 1559 .cra_name = "sha224", 1560 .cra_driver_name = "omap-sha224", 1561 .cra_priority = 400, 1562 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1563 CRYPTO_ALG_ASYNC | 1564 CRYPTO_ALG_NEED_FALLBACK, 1565 .cra_blocksize = SHA224_BLOCK_SIZE, 1566 .cra_ctxsize = sizeof(struct omap_sham_ctx), 1567 .cra_alignmask = OMAP_ALIGN_MASK, 1568 .cra_module = THIS_MODULE, 1569 .cra_init = omap_sham_cra_init, 1570 .cra_exit = omap_sham_cra_exit, 1571 } 1572 }, 1573 { 1574 .init = omap_sham_init, 1575 .update = omap_sham_update, 1576 .final = omap_sham_final, 1577 .finup = omap_sham_finup, 1578 .digest = omap_sham_digest, 1579 .halg.digestsize = SHA256_DIGEST_SIZE, 1580 .halg.base = { 1581 .cra_name = "sha256", 1582 .cra_driver_name = "omap-sha256", 1583 .cra_priority = 400, 1584 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1585 CRYPTO_ALG_ASYNC | 1586 CRYPTO_ALG_NEED_FALLBACK, 1587 .cra_blocksize = SHA256_BLOCK_SIZE, 1588 .cra_ctxsize = sizeof(struct omap_sham_ctx), 1589 .cra_alignmask = OMAP_ALIGN_MASK, 1590 .cra_module = THIS_MODULE, 1591 .cra_init = omap_sham_cra_init, 1592 .cra_exit = omap_sham_cra_exit, 1593 } 1594 }, 1595 { 1596 .init = omap_sham_init, 1597 .update = omap_sham_update, 1598 .final = omap_sham_final, 1599 .finup = omap_sham_finup, 1600 .digest = omap_sham_digest, 1601 .setkey = omap_sham_setkey, 1602 .halg.digestsize = SHA224_DIGEST_SIZE, 1603 .halg.base = { 1604 .cra_name = "hmac(sha224)", 1605 .cra_driver_name = "omap-hmac-sha224", 1606 .cra_priority = 400, 1607 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1608 CRYPTO_ALG_ASYNC | 1609 CRYPTO_ALG_NEED_FALLBACK, 1610 .cra_blocksize = SHA224_BLOCK_SIZE, 1611 .cra_ctxsize = sizeof(struct omap_sham_ctx) + 1612 sizeof(struct omap_sham_hmac_ctx), 1613 .cra_alignmask = OMAP_ALIGN_MASK, 1614 .cra_module = THIS_MODULE, 1615 .cra_init = omap_sham_cra_sha224_init, 1616 .cra_exit = omap_sham_cra_exit, 1617 } 1618 }, 1619 { 1620 .init = omap_sham_init, 1621 .update = omap_sham_update, 1622 .final = omap_sham_final, 1623 .finup = omap_sham_finup, 1624 .digest = omap_sham_digest, 1625 .setkey = omap_sham_setkey, 1626 .halg.digestsize = SHA256_DIGEST_SIZE, 1627 .halg.base = { 1628 .cra_name = "hmac(sha256)", 1629 .cra_driver_name = "omap-hmac-sha256", 1630 .cra_priority = 400, 1631 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1632 CRYPTO_ALG_ASYNC | 1633 CRYPTO_ALG_NEED_FALLBACK, 1634 .cra_blocksize = SHA256_BLOCK_SIZE, 1635 .cra_ctxsize = sizeof(struct omap_sham_ctx) + 1636 sizeof(struct omap_sham_hmac_ctx), 1637 .cra_alignmask = OMAP_ALIGN_MASK, 1638 .cra_module = THIS_MODULE, 1639 .cra_init = omap_sham_cra_sha256_init, 1640 .cra_exit = omap_sham_cra_exit, 1641 } 1642 }, 1643 }; 1644 1645 static struct ahash_alg algs_sha384_sha512[] = { 1646 { 1647 .init = omap_sham_init, 1648 .update = omap_sham_update, 1649 .final = omap_sham_final, 1650 .finup = omap_sham_finup, 1651 .digest = omap_sham_digest, 1652 .halg.digestsize = SHA384_DIGEST_SIZE, 1653 .halg.base = { 1654 .cra_name = "sha384", 1655 .cra_driver_name = "omap-sha384", 1656 .cra_priority = 400, 1657 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1658 CRYPTO_ALG_ASYNC | 1659 CRYPTO_ALG_NEED_FALLBACK, 1660 .cra_blocksize = SHA384_BLOCK_SIZE, 1661 .cra_ctxsize = sizeof(struct omap_sham_ctx), 1662 .cra_alignmask = OMAP_ALIGN_MASK, 1663 .cra_module = THIS_MODULE, 1664 .cra_init = omap_sham_cra_init, 1665 .cra_exit = omap_sham_cra_exit, 1666 } 1667 }, 1668 { 1669 .init = omap_sham_init, 1670 .update = omap_sham_update, 1671 .final = omap_sham_final, 1672 .finup = omap_sham_finup, 1673 .digest = omap_sham_digest, 1674 .halg.digestsize = SHA512_DIGEST_SIZE, 1675 .halg.base = { 1676 .cra_name = "sha512", 1677 .cra_driver_name = "omap-sha512", 1678 .cra_priority = 400, 1679 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1680 CRYPTO_ALG_ASYNC | 1681 CRYPTO_ALG_NEED_FALLBACK, 1682 .cra_blocksize = SHA512_BLOCK_SIZE, 1683 .cra_ctxsize = sizeof(struct omap_sham_ctx), 1684 .cra_alignmask = OMAP_ALIGN_MASK, 1685 .cra_module = THIS_MODULE, 1686 .cra_init = omap_sham_cra_init, 1687 .cra_exit = omap_sham_cra_exit, 1688 } 1689 }, 1690 { 1691 .init = omap_sham_init, 1692 .update = omap_sham_update, 1693 .final = omap_sham_final, 1694 .finup = omap_sham_finup, 1695 .digest = omap_sham_digest, 1696 .setkey = omap_sham_setkey, 1697 .halg.digestsize = SHA384_DIGEST_SIZE, 1698 .halg.base = { 1699 .cra_name = "hmac(sha384)", 1700 .cra_driver_name = "omap-hmac-sha384", 1701 .cra_priority = 400, 1702 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1703 CRYPTO_ALG_ASYNC | 1704 CRYPTO_ALG_NEED_FALLBACK, 1705 .cra_blocksize = SHA384_BLOCK_SIZE, 1706 .cra_ctxsize = sizeof(struct omap_sham_ctx) + 1707 sizeof(struct omap_sham_hmac_ctx), 1708 .cra_alignmask = OMAP_ALIGN_MASK, 1709 .cra_module = THIS_MODULE, 1710 .cra_init = omap_sham_cra_sha384_init, 1711 .cra_exit = omap_sham_cra_exit, 1712 } 1713 }, 1714 { 1715 .init = omap_sham_init, 1716 .update = omap_sham_update, 1717 .final = omap_sham_final, 1718 .finup = omap_sham_finup, 1719 .digest = omap_sham_digest, 1720 .setkey = omap_sham_setkey, 1721 .halg.digestsize = SHA512_DIGEST_SIZE, 1722 .halg.base = { 1723 .cra_name = "hmac(sha512)", 1724 .cra_driver_name = "omap-hmac-sha512", 1725 .cra_priority = 400, 1726 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1727 CRYPTO_ALG_ASYNC | 1728 CRYPTO_ALG_NEED_FALLBACK, 1729 .cra_blocksize = SHA512_BLOCK_SIZE, 1730 .cra_ctxsize = sizeof(struct omap_sham_ctx) + 1731 sizeof(struct omap_sham_hmac_ctx), 1732 .cra_alignmask = OMAP_ALIGN_MASK, 1733 .cra_module = THIS_MODULE, 1734 .cra_init = omap_sham_cra_sha512_init, 1735 .cra_exit = omap_sham_cra_exit, 1736 } 1737 }, 1738 }; 1739 1740 static void omap_sham_done_task(unsigned long data) 1741 { 1742 struct omap_sham_dev *dd = (struct omap_sham_dev *)data; 1743 int err = 0; 1744 1745 if (!test_bit(FLAGS_BUSY, &dd->flags)) { 1746 omap_sham_handle_queue(dd, NULL); 1747 return; 1748 } 1749 1750 if (test_bit(FLAGS_CPU, &dd->flags)) { 1751 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) 1752 goto finish; 1753 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { 1754 if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { 1755 omap_sham_update_dma_stop(dd); 1756 if (dd->err) { 1757 err = dd->err; 1758 goto finish; 1759 } 1760 } 1761 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { 1762 /* hash or semi-hash ready */ 1763 clear_bit(FLAGS_DMA_READY, &dd->flags); 1764 goto finish; 1765 } 1766 } 1767 1768 return; 1769 1770 finish: 1771 dev_dbg(dd->dev, "update done: err: %d\n", err); 1772 /* finish curent request */ 1773 omap_sham_finish_req(dd->req, err); 1774 1775 /* If we are not busy, process next req */ 1776 if (!test_bit(FLAGS_BUSY, &dd->flags)) 1777 omap_sham_handle_queue(dd, NULL); 1778 } 1779 1780 static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd) 1781 { 1782 if (!test_bit(FLAGS_BUSY, &dd->flags)) { 1783 dev_warn(dd->dev, "Interrupt when no active requests.\n"); 1784 } else { 1785 set_bit(FLAGS_OUTPUT_READY, &dd->flags); 1786 tasklet_schedule(&dd->done_task); 1787 } 1788 1789 return IRQ_HANDLED; 1790 } 1791 1792 static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id) 1793 { 1794 struct omap_sham_dev *dd = dev_id; 1795 1796 if (unlikely(test_bit(FLAGS_FINAL, &dd->flags))) 1797 /* final -> allow device to go to power-saving mode */ 1798 omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); 1799 1800 omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY, 1801 SHA_REG_CTRL_OUTPUT_READY); 1802 omap_sham_read(dd, SHA_REG_CTRL); 1803 1804 return omap_sham_irq_common(dd); 1805 } 1806 1807 static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id) 1808 { 1809 struct omap_sham_dev *dd = dev_id; 1810 1811 omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN); 1812 1813 return omap_sham_irq_common(dd); 1814 } 1815 1816 static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = { 1817 { 1818 .algs_list = algs_sha1_md5, 1819 .size = ARRAY_SIZE(algs_sha1_md5), 1820 }, 1821 }; 1822 1823 static const struct omap_sham_pdata omap_sham_pdata_omap2 = { 1824 .algs_info = omap_sham_algs_info_omap2, 1825 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2), 1826 .flags = BIT(FLAGS_BE32_SHA1), 1827 .digest_size = SHA1_DIGEST_SIZE, 1828 .copy_hash = omap_sham_copy_hash_omap2, 1829 .write_ctrl = omap_sham_write_ctrl_omap2, 1830 .trigger = omap_sham_trigger_omap2, 1831 .poll_irq = omap_sham_poll_irq_omap2, 1832 .intr_hdlr = omap_sham_irq_omap2, 1833 .idigest_ofs = 0x00, 1834 .din_ofs = 0x1c, 1835 .digcnt_ofs = 0x14, 1836 .rev_ofs = 0x5c, 1837 .mask_ofs = 0x60, 1838 .sysstatus_ofs = 0x64, 1839 .major_mask = 0xf0, 1840 .major_shift = 4, 1841 .minor_mask = 0x0f, 1842 .minor_shift = 0, 1843 }; 1844 1845 #ifdef CONFIG_OF 1846 static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = { 1847 { 1848 .algs_list = algs_sha1_md5, 1849 .size = ARRAY_SIZE(algs_sha1_md5), 1850 }, 1851 { 1852 .algs_list = algs_sha224_sha256, 1853 .size = ARRAY_SIZE(algs_sha224_sha256), 1854 }, 1855 }; 1856 1857 static const struct omap_sham_pdata omap_sham_pdata_omap4 = { 1858 .algs_info = omap_sham_algs_info_omap4, 1859 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4), 1860 .flags = BIT(FLAGS_AUTO_XOR), 1861 .digest_size = SHA256_DIGEST_SIZE, 1862 .copy_hash = omap_sham_copy_hash_omap4, 1863 .write_ctrl = omap_sham_write_ctrl_omap4, 1864 .trigger = omap_sham_trigger_omap4, 1865 .poll_irq = omap_sham_poll_irq_omap4, 1866 .intr_hdlr = omap_sham_irq_omap4, 1867 .idigest_ofs = 0x020, 1868 .odigest_ofs = 0x0, 1869 .din_ofs = 0x080, 1870 .digcnt_ofs = 0x040, 1871 .rev_ofs = 0x100, 1872 .mask_ofs = 0x110, 1873 .sysstatus_ofs = 0x114, 1874 .mode_ofs = 0x44, 1875 .length_ofs = 0x48, 1876 .major_mask = 0x0700, 1877 .major_shift = 8, 1878 .minor_mask = 0x003f, 1879 .minor_shift = 0, 1880 }; 1881 1882 static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = { 1883 { 1884 .algs_list = algs_sha1_md5, 1885 .size = ARRAY_SIZE(algs_sha1_md5), 1886 }, 1887 { 1888 .algs_list = algs_sha224_sha256, 1889 .size = ARRAY_SIZE(algs_sha224_sha256), 1890 }, 1891 { 1892 .algs_list = algs_sha384_sha512, 1893 .size = ARRAY_SIZE(algs_sha384_sha512), 1894 }, 1895 }; 1896 1897 static const struct omap_sham_pdata omap_sham_pdata_omap5 = { 1898 .algs_info = omap_sham_algs_info_omap5, 1899 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5), 1900 .flags = BIT(FLAGS_AUTO_XOR), 1901 .digest_size = SHA512_DIGEST_SIZE, 1902 .copy_hash = omap_sham_copy_hash_omap4, 1903 .write_ctrl = omap_sham_write_ctrl_omap4, 1904 .trigger = omap_sham_trigger_omap4, 1905 .poll_irq = omap_sham_poll_irq_omap4, 1906 .intr_hdlr = omap_sham_irq_omap4, 1907 .idigest_ofs = 0x240, 1908 .odigest_ofs = 0x200, 1909 .din_ofs = 0x080, 1910 .digcnt_ofs = 0x280, 1911 .rev_ofs = 0x100, 1912 .mask_ofs = 0x110, 1913 .sysstatus_ofs = 0x114, 1914 .mode_ofs = 0x284, 1915 .length_ofs = 0x288, 1916 .major_mask = 0x0700, 1917 .major_shift = 8, 1918 .minor_mask = 0x003f, 1919 .minor_shift = 0, 1920 }; 1921 1922 static const struct of_device_id omap_sham_of_match[] = { 1923 { 1924 .compatible = "ti,omap2-sham", 1925 .data = &omap_sham_pdata_omap2, 1926 }, 1927 { 1928 .compatible = "ti,omap3-sham", 1929 .data = &omap_sham_pdata_omap2, 1930 }, 1931 { 1932 .compatible = "ti,omap4-sham", 1933 .data = &omap_sham_pdata_omap4, 1934 }, 1935 { 1936 .compatible = "ti,omap5-sham", 1937 .data = &omap_sham_pdata_omap5, 1938 }, 1939 {}, 1940 }; 1941 MODULE_DEVICE_TABLE(of, omap_sham_of_match); 1942 1943 static int omap_sham_get_res_of(struct omap_sham_dev *dd, 1944 struct device *dev, struct resource *res) 1945 { 1946 struct device_node *node = dev->of_node; 1947 int err = 0; 1948 1949 dd->pdata = of_device_get_match_data(dev); 1950 if (!dd->pdata) { 1951 dev_err(dev, "no compatible OF match\n"); 1952 err = -EINVAL; 1953 goto err; 1954 } 1955 1956 err = of_address_to_resource(node, 0, res); 1957 if (err < 0) { 1958 dev_err(dev, "can't translate OF node address\n"); 1959 err = -EINVAL; 1960 goto err; 1961 } 1962 1963 dd->irq = irq_of_parse_and_map(node, 0); 1964 if (!dd->irq) { 1965 dev_err(dev, "can't translate OF irq value\n"); 1966 err = -EINVAL; 1967 goto err; 1968 } 1969 1970 err: 1971 return err; 1972 } 1973 #else 1974 static const struct of_device_id omap_sham_of_match[] = { 1975 {}, 1976 }; 1977 1978 static int omap_sham_get_res_of(struct omap_sham_dev *dd, 1979 struct device *dev, struct resource *res) 1980 { 1981 return -EINVAL; 1982 } 1983 #endif 1984 1985 static int omap_sham_get_res_pdev(struct omap_sham_dev *dd, 1986 struct platform_device *pdev, struct resource *res) 1987 { 1988 struct device *dev = &pdev->dev; 1989 struct resource *r; 1990 int err = 0; 1991 1992 /* Get the base address */ 1993 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1994 if (!r) { 1995 dev_err(dev, "no MEM resource info\n"); 1996 err = -ENODEV; 1997 goto err; 1998 } 1999 memcpy(res, r, sizeof(*res)); 2000 2001 /* Get the IRQ */ 2002 dd->irq = platform_get_irq(pdev, 0); 2003 if (dd->irq < 0) { 2004 dev_err(dev, "no IRQ resource info\n"); 2005 err = dd->irq; 2006 goto err; 2007 } 2008 2009 /* Only OMAP2/3 can be non-DT */ 2010 dd->pdata = &omap_sham_pdata_omap2; 2011 2012 err: 2013 return err; 2014 } 2015 2016 static int omap_sham_probe(struct platform_device *pdev) 2017 { 2018 struct omap_sham_dev *dd; 2019 struct device *dev = &pdev->dev; 2020 struct resource res; 2021 dma_cap_mask_t mask; 2022 int err, i, j; 2023 u32 rev; 2024 2025 dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL); 2026 if (dd == NULL) { 2027 dev_err(dev, "unable to alloc data struct.\n"); 2028 err = -ENOMEM; 2029 goto data_err; 2030 } 2031 dd->dev = dev; 2032 platform_set_drvdata(pdev, dd); 2033 2034 INIT_LIST_HEAD(&dd->list); 2035 spin_lock_init(&dd->lock); 2036 tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); 2037 crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); 2038 2039 err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) : 2040 omap_sham_get_res_pdev(dd, pdev, &res); 2041 if (err) 2042 goto data_err; 2043 2044 dd->io_base = devm_ioremap_resource(dev, &res); 2045 if (IS_ERR(dd->io_base)) { 2046 err = PTR_ERR(dd->io_base); 2047 goto data_err; 2048 } 2049 dd->phys_base = res.start; 2050 2051 err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr, 2052 IRQF_TRIGGER_NONE, dev_name(dev), dd); 2053 if (err) { 2054 dev_err(dev, "unable to request irq %d, err = %d\n", 2055 dd->irq, err); 2056 goto data_err; 2057 } 2058 2059 dma_cap_zero(mask); 2060 dma_cap_set(DMA_SLAVE, mask); 2061 2062 dd->dma_lch = dma_request_chan(dev, "rx"); 2063 if (IS_ERR(dd->dma_lch)) { 2064 err = PTR_ERR(dd->dma_lch); 2065 if (err == -EPROBE_DEFER) 2066 goto data_err; 2067 2068 dd->polling_mode = 1; 2069 dev_dbg(dev, "using polling mode instead of dma\n"); 2070 } 2071 2072 dd->flags |= dd->pdata->flags; 2073 2074 pm_runtime_use_autosuspend(dev); 2075 pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); 2076 2077 pm_runtime_enable(dev); 2078 pm_runtime_irq_safe(dev); 2079 2080 err = pm_runtime_get_sync(dev); 2081 if (err < 0) { 2082 dev_err(dev, "failed to get sync: %d\n", err); 2083 goto err_pm; 2084 } 2085 2086 rev = omap_sham_read(dd, SHA_REG_REV(dd)); 2087 pm_runtime_put_sync(&pdev->dev); 2088 2089 dev_info(dev, "hw accel on OMAP rev %u.%u\n", 2090 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift, 2091 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift); 2092 2093 spin_lock(&sham.lock); 2094 list_add_tail(&dd->list, &sham.dev_list); 2095 spin_unlock(&sham.lock); 2096 2097 for (i = 0; i < dd->pdata->algs_info_size; i++) { 2098 for (j = 0; j < dd->pdata->algs_info[i].size; j++) { 2099 struct ahash_alg *alg; 2100 2101 alg = &dd->pdata->algs_info[i].algs_list[j]; 2102 alg->export = omap_sham_export; 2103 alg->import = omap_sham_import; 2104 alg->halg.statesize = sizeof(struct omap_sham_reqctx) + 2105 BUFLEN; 2106 err = crypto_register_ahash(alg); 2107 if (err) 2108 goto err_algs; 2109 2110 dd->pdata->algs_info[i].registered++; 2111 } 2112 } 2113 2114 return 0; 2115 2116 err_algs: 2117 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) 2118 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) 2119 crypto_unregister_ahash( 2120 &dd->pdata->algs_info[i].algs_list[j]); 2121 err_pm: 2122 pm_runtime_disable(dev); 2123 if (!dd->polling_mode) 2124 dma_release_channel(dd->dma_lch); 2125 data_err: 2126 dev_err(dev, "initialization failed.\n"); 2127 2128 return err; 2129 } 2130 2131 static int omap_sham_remove(struct platform_device *pdev) 2132 { 2133 struct omap_sham_dev *dd; 2134 int i, j; 2135 2136 dd = platform_get_drvdata(pdev); 2137 if (!dd) 2138 return -ENODEV; 2139 spin_lock(&sham.lock); 2140 list_del(&dd->list); 2141 spin_unlock(&sham.lock); 2142 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) 2143 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) 2144 crypto_unregister_ahash( 2145 &dd->pdata->algs_info[i].algs_list[j]); 2146 tasklet_kill(&dd->done_task); 2147 pm_runtime_disable(&pdev->dev); 2148 2149 if (!dd->polling_mode) 2150 dma_release_channel(dd->dma_lch); 2151 2152 return 0; 2153 } 2154 2155 #ifdef CONFIG_PM_SLEEP 2156 static int omap_sham_suspend(struct device *dev) 2157 { 2158 pm_runtime_put_sync(dev); 2159 return 0; 2160 } 2161 2162 static int omap_sham_resume(struct device *dev) 2163 { 2164 int err = pm_runtime_get_sync(dev); 2165 if (err < 0) { 2166 dev_err(dev, "failed to get sync: %d\n", err); 2167 return err; 2168 } 2169 return 0; 2170 } 2171 #endif 2172 2173 static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume); 2174 2175 static struct platform_driver omap_sham_driver = { 2176 .probe = omap_sham_probe, 2177 .remove = omap_sham_remove, 2178 .driver = { 2179 .name = "omap-sham", 2180 .pm = &omap_sham_pm_ops, 2181 .of_match_table = omap_sham_of_match, 2182 }, 2183 }; 2184 2185 module_platform_driver(omap_sham_driver); 2186 2187 MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); 2188 MODULE_LICENSE("GPL v2"); 2189 MODULE_AUTHOR("Dmitry Kasatkin"); 2190 MODULE_ALIAS("platform:omap-sham"); 2191