/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>

#define DST_MAXBURST			4
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

#define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
						((x ^ 0x01) * 0x04))
#define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))

#define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
#define AES_REG_CTRL_CTR_WIDTH_MASK	GENMASK(8, 7)
#define AES_REG_CTRL_CTR_WIDTH_32	0
#define AES_REG_CTRL_CTR_WIDTH_64	BIT(7)
#define AES_REG_CTRL_CTR_WIDTH_96	BIT(8)
#define AES_REG_CTRL_CTR_WIDTH_128	GENMASK(8, 7)
#define AES_REG_CTRL_CTR		BIT(6)
#define AES_REG_CTRL_CBC		BIT(5)
#define AES_REG_CTRL_KEY_SIZE		GENMASK(4, 3)
#define AES_REG_CTRL_DIRECTION		BIT(2)
#define AES_REG_CTRL_INPUT_READY	BIT(1)
#define AES_REG_CTRL_OUTPUT_READY	BIT(0)
#define AES_REG_CTRL_MASK		GENMASK(24, 2)

#define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))

#define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE		BIT(6)
#define AES_REG_MASK_START		BIT(5)
#define AES_REG_MASK_DMA_OUT_EN		BIT(3)
#define AES_REG_MASK_DMA_IN_EN		BIT(2)
#define AES_REG_MASK_SOFTRESET		BIT(1)
#define AES_REG_AUTOIDLE		BIT(0)

#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))

#define AES_REG_IRQ_STATUS(dd)		((dd)->pdata->irq_status_ofs)
#define AES_REG_IRQ_ENABLE(dd)		((dd)->pdata->irq_enable_ofs)
#define AES_REG_IRQ_DATA_IN		BIT(1)
#define AES_REG_IRQ_DATA_OUT		BIT(2)
#define DEFAULT_TIMEOUT			(5 * HZ)

#define FLAGS_MODE_MASK			0x000f
#define FLAGS_ENCRYPT			BIT(0)
#define FLAGS_CBC			BIT(1)
#define FLAGS_GIV			BIT(2)
#define FLAGS_CTR			BIT(3)

#define FLAGS_INIT			BIT(4)
#define FLAGS_FAST			BIT(5)
#define FLAGS_BUSY			BIT(6)

#define AES_BLOCK_WORDS			(AES_BLOCK_SIZE >> 2)

struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};

struct omap_aes_reqctx {
	unsigned long mode;
};
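/*
 * Worked example (illustrative note, added for clarity): FLD_MASK(4, 3)
 * evaluates to ((1 << 2) - 1) << 3 = 0x18, i.e. AES_REG_CTRL_KEY_SIZE.
 * For a key of keylen bytes, FLD_VAL((keylen >> 3) - 1, 4, 3) in
 * omap_aes_write_ctrl() below therefore encodes 128/192/256-bit keys
 * as 0x08/0x10/0x18 in the control register.
 */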
#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0

struct omap_aes_algs_info {
	struct crypto_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};

struct omap_aes_pdata {
	struct omap_aes_algs_info	*algs_info;
	unsigned int	algs_info_size;

	void		(*trigger)(struct omap_aes_dev *dd, int length);

	u32		key_ofs;
	u32		iv_ofs;
	u32		ctrl_ofs;
	u32		data_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		irq_enable_ofs;
	u32		irq_status_ofs;

	u32		dma_enable_in;
	u32		dma_enable_out;
	u32		dma_start;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	struct tasklet_struct	done_task;

	struct ablkcipher_request	*req;
	struct crypto_engine		*engine;

	/*
	 * total is consumed by the PIO path for bookkeeping, so keep the
	 * original request size in total_save; it is needed to compute
	 * the page order when freeing the bounce buffers.
	 */
	size_t			total;
	size_t			total_save;

	struct scatterlist	*in_sg;
	struct scatterlist	*out_sg;

	/* Buffers for copying for unaligned cases */
	struct scatterlist	in_sgl;
	struct scatterlist	out_sgl;
	struct scatterlist	*orig_out;
	int			sgs_copied;

	struct scatter_walk	in_walk;
	struct scatter_walk	out_walk;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	int			in_sg_len;
	int			out_sg_len;
	int			pio_only;
	const struct omap_aes_pdata	*pdata;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

#ifdef DEBUG
#define omap_aes_read(dd, offset)				\
({								\
	int _read_ret;						\
	_read_ret = __raw_readl(dd->io_base + offset);		\
	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
		 offset, _read_ret);				\
	_read_ret;						\
})
#else
static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}
#endif

#ifdef DEBUG
#define omap_aes_write(dd, offset, value)				\
	do {								\
		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
			 offset, value);				\
		__raw_writel(value, dd->io_base + offset);		\
	} while (0)
#else
static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}
#endif

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
				       u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}
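/*
 * Note on the key register layout (illustrative, added for clarity):
 * AES_REG_KEY(dd, x) XORs the word index with 1, so consecutive key
 * words land in swapped pairs. For example, with the OMAP4 layout
 * defined below (key_ofs = 0x3c), word 0 is written at 0x38 and
 * word 1 at 0x3c.
 */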
static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			       __le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_CTR)
		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;

	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);

	return 0;
}

static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;
	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}

static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);

	omap_aes_dma_trigger_omap2(dd, length);
}

static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
							  omap_dma_filter_fn,
							  &dd->dma_in,
							  dd->dev, "rx");
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
							   omap_dma_filter_fn,
							   &dd->dma_out,
							   dd->dev, "tx");
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
}
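/*
 * Descriptive note (added): sg_copy_buf() below is the bounce-buffer
 * helper for the unaligned path. @out selects the direction passed to
 * scatterwalk_copychunks(): 0 copies sg -> buf, nonzero copies buf -> sg.
 */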
static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg,
		int in_sg_len, int out_sg_len)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	int ret;

	if (dd->pio_only) {
		scatterwalk_start(&dd->in_walk, dd->in_sg);
		scatterwalk_start(&dd->out_walk, dd->out_sg);

		/* Enable DATAIN interrupt and let it take
		   care of the rest */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
		return 0;
	}

	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, dd->total);

	return 0;
}

static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err;

	pr_debug("total: %zu\n", dd->total);

	if (!dd->pio_only) {
		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
				 DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
				 DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}
	}

	err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
				 dd->out_sg_len);
	if (err && !dd->pio_only) {
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
	}

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	crypto_finalize_request(dd->engine, req, err);
}
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	pr_debug("total: %zu\n", dd->total);

	omap_aes_dma_stop(dd);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	return 0;
}

static int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
	int len = 0;

	if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
		return -EINVAL;

	while (sg) {
		if (!IS_ALIGNED(sg->offset, 4))
			return -EINVAL;
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return -EINVAL;

		len += sg->length;
		sg = sg_next(sg);
	}

	if (len != total)
		return -EINVAL;

	return 0;
}

static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
{
	void *buf_in, *buf_out;
	int pages, total;

	total = ALIGN(dd->total, AES_BLOCK_SIZE);
	pages = get_order(total);

	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);

	if (!buf_in || !buf_out) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		/* free_pages() ignores a zero address, so a partial
		 * allocation is released safely here */
		free_pages((unsigned long)buf_in, pages);
		free_pages((unsigned long)buf_out, pages);
		return -ENOMEM;
	}

	dd->orig_out = dd->out_sg;

	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);

	sg_init_table(&dd->in_sgl, 1);
	sg_set_buf(&dd->in_sgl, buf_in, total);
	dd->in_sg = &dd->in_sgl;

	sg_init_table(&dd->out_sgl, 1);
	sg_set_buf(&dd->out_sgl, buf_out, total);
	dd->out_sg = &dd->out_sgl;

	return 0;
}

static int omap_aes_handle_queue(struct omap_aes_dev *dd,
				 struct ablkcipher_request *req)
{
	if (req)
		return crypto_transfer_request_to_engine(dd->engine, req);

	return 0;
}

static int omap_aes_prepare_req(struct crypto_engine *engine,
				struct ablkcipher_request *req)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
	struct omap_aes_reqctx *rctx;
	int len;

	if (!dd)
		return -ENODEV;

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->total_save = req->nbytes;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;

	if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
	    omap_aes_check_aligned(dd->out_sg, dd->total)) {
		if (omap_aes_copy_sgs(dd))
			pr_err("Failed to copy SGs for unaligned cases\n");
		dd->sgs_copied = 1;
	} else {
		dd->sgs_copied = 0;
	}

	len = ALIGN(dd->total, AES_BLOCK_SIZE);
	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
	BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	return omap_aes_write_ctrl(dd);
}

static int omap_aes_crypt_req(struct crypto_engine *engine,
			      struct ablkcipher_request *req)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_dev *dd = omap_aes_find_dev(ctx);

	if (!dd)
		return -ENODEV;

	return omap_aes_crypt_dma_start(dd);
}
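/*
 * Completion flow (descriptive note, added): the OUT DMA callback (or
 * the final DATA_OUT interrupt in PIO mode) schedules this tasklet,
 * which unmaps the scatterlists, copies bounce buffers back for the
 * unaligned case, and finalizes the request on the crypto engine.
 */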
static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	void *buf_in, *buf_out;
	int pages, len;

	pr_debug("enter done_task\n");

	if (!dd->pio_only) {
		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
				       DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
			     DMA_FROM_DEVICE);
		omap_aes_crypt_dma_stop(dd);
	}

	if (dd->sgs_copied) {
		buf_in = sg_virt(&dd->in_sgl);
		buf_out = sg_virt(&dd->out_sgl);

		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);

		len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
		pages = get_order(len);
		free_pages((unsigned long)buf_in, pages);
		free_pages((unsigned long)buf_out, pages);
	}

	omap_aes_finish_req(dd, 0);

	pr_debug("exit\n");
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		 !!(mode & FLAGS_ENCRYPT),
		 !!(mode & FLAGS_CBC));

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CTR);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;
	int err;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		return err;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);

	pm_runtime_put_sync(dd->dev);
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs_ecb_cbc[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
}
};

static struct crypto_alg algs_ctr[] = {
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-omap",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.geniv		= "eseqiv",
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ctr_encrypt,
		.decrypt	= omap_aes_ctr_decrypt,
	}
},
};

static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};
#ifdef CONFIG_OF
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
	{
		.algs_list	= algs_ctr,
		.size		= ARRAY_SIZE(algs_ctr),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap4,
	.key_ofs	= 0x3c,
	.iv_ofs		= 0x40,
	.ctrl_ofs	= 0x50,
	.data_ofs	= 0x60,
	.rev_ofs	= 0x80,
	.mask_ofs	= 0x84,
	.irq_status_ofs	= 0x8c,
	.irq_enable_ofs	= 0x90,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
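/*
 * Worked example of the per-SoC register maps (added for clarity): the
 * same AES_REG_*() accessors resolve differently through pdata, e.g.
 * AES_REG_CTRL() is 0x30 on OMAP2/3 but 0x50 on OMAP4, and only the
 * OMAP4 trigger programs the AES_REG_LENGTH_N() registers.
 *
 * PIO flow (descriptive note, added): omap_aes_crypt_dma() enables the
 * DATA_IN interrupt (0x2); the handler below then feeds one block
 * (AES_BLOCK_WORDS = 4 words) into the data registers, switches to the
 * DATA_OUT interrupt (0x4), drains the result, and re-arms DATA_IN
 * until dd->total reaches zero.
 */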
static irqreturn_t omap_aes_irq(int irq, void *dev_id)
{
	struct omap_aes_dev *dd = dev_id;
	u32 status, i;
	u32 *src, *dst;

	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
	if (status & AES_REG_IRQ_DATA_IN) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->in_sg);

		BUG_ON(_calc_walked(in) > dd->in_sg->length);

		src = sg_virt(dd->in_sg) + _calc_walked(in);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);

			scatterwalk_advance(&dd->in_walk, 4);
			if (dd->in_sg->length == _calc_walked(in)) {
				dd->in_sg = sg_next(dd->in_sg);
				if (dd->in_sg) {
					scatterwalk_start(&dd->in_walk,
							  dd->in_sg);
					src = sg_virt(dd->in_sg) +
					      _calc_walked(in);
				}
			} else {
				src++;
			}
		}

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_IN;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		/* Enable DATA_OUT interrupt */
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);

	} else if (status & AES_REG_IRQ_DATA_OUT) {
		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

		BUG_ON(!dd->out_sg);

		BUG_ON(_calc_walked(out) > dd->out_sg->length);

		dst = sg_virt(dd->out_sg) + _calc_walked(out);

		for (i = 0; i < AES_BLOCK_WORDS; i++) {
			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
			scatterwalk_advance(&dd->out_walk, 4);
			if (dd->out_sg->length == _calc_walked(out)) {
				dd->out_sg = sg_next(dd->out_sg);
				if (dd->out_sg) {
					scatterwalk_start(&dd->out_walk,
							  dd->out_sg);
					dst = sg_virt(dd->out_sg) +
					      _calc_walked(out);
				}
			} else {
				dst++;
			}
		}

		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);

		/* Clear IRQ status */
		status &= ~AES_REG_IRQ_DATA_OUT;
		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

		if (!dd->total)
			/* All bytes read! */
			tasklet_schedule(&dd->done_task);
		else
			/* Enable DATA_IN interrupt for next block */
			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
	}

	return IRQ_HANDLED;
}

static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-aes",
		.data		= &omap_aes_pdata_omap3,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma_out = -1; /* Dummy value that's unused */
	dd->dma_in = -1; /* Dummy value that's unused */

	dd->pdata = match->data;

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the DMA out channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA out resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_out = r->start;

	/* Get the DMA in channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r) {
		dev_err(dev, "no DMA in resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_in = r->start;

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

err:
	return err;
}
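/*
 * Descriptive note (added): of the two resource paths above, the DT one
 * leaves dma_in/dma_out at -1 because dma_request_slave_channel_compat()
 * resolves channels by the "rx"/"tx" names, while the legacy path pulls
 * DMA channel numbers from IORESOURCE_DMA entries and always assumes the
 * OMAP2/3 register layout.
 */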
static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct crypto_alg *algp;
	struct resource res;
	int err = -ENOMEM, i, j, irq = -1;
	u32 reg;

	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "%s: failed to get_sync(%d)\n",
			__func__, err);
		goto err_res;
	}

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
		dd->pio_only = 1;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			dev_err(dev, "can't get IRQ resource\n");
			goto err_irq;
		}

		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
				       dev_name(dev), dd);
		if (err) {
			dev_err(dev, "Unable to grab omap-aes IRQ\n");
			goto err_irq;
		}
	}

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			algp = &dd->pdata->algs_info[i].algs_list[j];

			pr_debug("reg alg: %s\n", algp->cra_name);
			INIT_LIST_HEAD(&algp->cra_list);

			err = crypto_register_alg(algp);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	/* Initialize crypto engine */
	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine) {
		err = -ENOMEM;
		goto err_algs;
	}

	dd->engine->prepare_request = omap_aes_prepare_req;
	dd->engine->crypt_one_request = omap_aes_crypt_req;
	err = crypto_engine_start(dd->engine);
	if (err)
		goto err_engine;

	return 0;
err_engine:
	crypto_engine_exit(dd->engine);
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);
	if (!dd->pio_only)
		omap_aes_dma_cleanup(dd);
err_irq:
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(dev);
err_res:
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i, j;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);

	crypto_engine_exit(dd->engine);
	tasklet_kill(&dd->done_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	dd = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}
static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

module_platform_driver(omap_aes_driver);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
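/*
 * Illustrative sketch (added, not part of the original driver): once the
 * algorithms above are registered, other kernel code selects this
 * accelerator through the generic crypto API by name alone; priority 300
 * normally wins over the software "cbc(aes)" implementation. The function
 * name below is hypothetical.
 */
static int __maybe_unused omap_aes_usage_sketch(void)
{
	struct crypto_ablkcipher *tfm;

	/* Resolves to the highest-priority "cbc(aes)", e.g. "cbc-aes-omap" */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... crypto_ablkcipher_setkey() and ablkcipher requests here ... */

	crypto_free_ablkcipher(tfm);
	return 0;
}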