/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#define DST_MAXBURST			4
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

#define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
						((x ^ 0x01) * 0x04))
#define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))

#define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
#define AES_REG_CTRL_CTR_WIDTH_MASK	(3 << 7)
#define AES_REG_CTRL_CTR_WIDTH_32	(0 << 7)
#define AES_REG_CTRL_CTR_WIDTH_64	(1 << 7)
#define AES_REG_CTRL_CTR_WIDTH_96	(2 << 7)
#define AES_REG_CTRL_CTR_WIDTH_128	(3 << 7)
#define AES_REG_CTRL_CTR		(1 << 6)
#define AES_REG_CTRL_CBC		(1 << 5)
#define AES_REG_CTRL_KEY_SIZE		(3 << 3)
#define AES_REG_CTRL_DIRECTION		(1 << 2)
#define AES_REG_CTRL_INPUT_READY	(1 << 1)
#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)

#define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))

#define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE		(1 << 6)
#define AES_REG_MASK_START		(1 << 5)
#define AES_REG_MASK_DMA_OUT_EN		(1 << 3)
#define AES_REG_MASK_DMA_IN_EN		(1 << 2)
#define AES_REG_MASK_SOFTRESET		(1 << 1)
#define AES_REG_AUTOIDLE		(1 << 0)

#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))

#define DEFAULT_TIMEOUT		(5*HZ)

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_CTR		BIT(3)

#define FLAGS_INIT		BIT(4)
#define FLAGS_FAST		BIT(5)
#define FLAGS_BUSY		BIT(6)

struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};

struct omap_aes_reqctx {
	unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0

struct omap_aes_algs_info {
	struct crypto_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};

struct omap_aes_pdata {
	struct omap_aes_algs_info	*algs_info;
	unsigned int	algs_info_size;

	void		(*trigger)(struct omap_aes_dev *dd, int length);

	u32		key_ofs;
	u32		iv_ofs;
	u32		ctrl_ofs;
	u32		data_ofs;
	u32		rev_ofs;
	u32		mask_ofs;

	u32		dma_enable_in;
	u32		dma_enable_out;
	u32		dma_start;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t				total;
	struct scatterlist		*in_sg;
	struct scatterlist		in_sgl;
	size_t				in_offset;
	struct scatterlist		*out_sg;
	struct scatterlist		out_sgl;
	size_t				out_offset;

	size_t			buflen;
	void			*buf_in;
	size_t			dma_size;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	dma_addr_t		dma_addr_in;
	void			*buf_out;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	dma_addr_t		dma_addr_out;

	const struct omap_aes_pdata	*pdata;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
				u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val, mask = 0;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			       __le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_CTR) {
		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
		mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
	}
	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
		AES_REG_CTRL_KEY_SIZE;

	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);

	return 0;
}

static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;
	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd,
			    AES_REG_MASK(dd), val, mask);

}

static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);

	omap_aes_dma_trigger_omap2(dd, length);
}

static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
							  omap_dma_filter_fn,
							  &dd->dma_in,
							  dd->dev, "rx");
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
							   omap_dma_filter_fn,
							   &dd->dma_out,
							   dd->dev, "tx");
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev,
			 dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		/*
		 * buflen and total are AES_BLOCK_SIZE size aligned,
		 * so count should be also aligned
		 */

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	dma_addr_t dma_addr_in = sg_dma_address(in_sg);
	int ret, length = sg_dma_len(in_sg);

	pr_debug("len: %d\n", length);

	dd->dma_size = length;

	if (!(dd->flags & FLAGS_FAST))
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, length);

	return 0;
}

static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;
	int len32;

	pr_debug("total: %zu\n", dd->total);

	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

		fast = in && out;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		if (count != dd->total) {
			pr_err("request length != buffer length\n");
			return -EINVAL;
		}

		pr_debug("fast\n");

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		in_sg = dd->in_sg;
		out_sg = dd->out_sg;

		dd->flags |= FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
				dd->buflen, dd->total, 0);

		len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;

		/*
		 * The data going into the AES module has been copied
		 * to a local buffer and the data coming out will go
		 * into a local buffer so set up local SG entries for
		 * both.
		 */
		sg_init_table(&dd->in_sgl, 1);
		dd->in_sgl.offset = dd->in_offset;
		sg_dma_len(&dd->in_sgl) = len32;
		sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;

		sg_init_table(&dd->out_sgl, 1);
		dd->out_sgl.offset = dd->out_offset;
		sg_dma_len(&dd->out_sgl) = len32;
		sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;

		in_sg = &dd->in_sgl;
		out_sg = &dd->out_sgl;

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~FLAGS_FAST;

	}

	dd->total -= count;

	err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
	if (err) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;
	size_t count;

	pr_debug("total: %zu\n", dd->total);

	omap_aes_dma_stop(dd);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	if (dd->flags & FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
				dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %zu\n", count);
		}
	}

	return err;
}

static int omap_aes_handle_queue(struct omap_aes_dev *dd,
				 struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);
	if (err) {
		/* aes_task will not finish it, so do it here */
		omap_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret; /* return ret, which is enqueue return value */
}

static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	int err;

	pr_debug("enter\n");

	err = omap_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		err = omap_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	omap_aes_finish_req(dd, err);
	omap_aes_handle_queue(dd, NULL);

	pr_debug("exit\n");
}

static void omap_aes_queue_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	omap_aes_handle_queue(dd, NULL);
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		 !!(mode & FLAGS_ENCRYPT),
		 !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		pr_err("request size is not a multiple of the AES block size\n");
		return -EINVAL;
	}

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CTR);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);

	pm_runtime_get_sync(dd->dev);
	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_aes_dev *dd = NULL;

	/* Find AES device, currently picks the first device */
	spin_lock_bh(&list_lock);
	list_for_each_entry(dd, &dev_list, list) {
		break;
	}
	spin_unlock_bh(&list_lock);

	pm_runtime_put_sync(dd->dev);
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs_ecb_cbc[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
}
};

static struct crypto_alg algs_ctr[] = {
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.geniv		= "eseqiv",
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ctr_encrypt,
		.decrypt	= omap_aes_ctr_decrypt,
	}
},
};

static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

#ifdef CONFIG_OF
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
	{
		.algs_list	= algs_ctr,
		.size		= ARRAY_SIZE(algs_ctr),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap4,
	.key_ofs	= 0x3c,
	.iv_ofs		= 0x40,
	.ctrl_ofs	= 0x50,
	.data_ofs	= 0x60,
	.rev_ofs	= 0x80,
	.mask_ofs	= 0x84,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-aes",
		.data		= &omap_aes_pdata_omap3,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma_out = -1; /* Dummy value that's unused */
	dd->dma_in = -1;  /* Dummy value that's unused */

	dd->pdata = match->data;

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the DMA out channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA out resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_out = r->start;

	/* Get the DMA in channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r) {
		dev_err(dev, "no DMA in resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_in = r->start;

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

err:
	return err;
}

static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct crypto_alg *algp;
	struct resource res;
	int err = -ENOMEM, i, j;
	u32 reg;

	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ?
			omap_aes_get_res_of(dd, dev, &res) :
			omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err)
		goto err_dma;

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			algp = &dd->pdata->algs_info[i].algs_list[j];

			pr_debug("reg alg: %s\n", algp->cra_name);
			INIT_LIST_HEAD(&algp->cra_list);

			err = crypto_register_alg(algp);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	return 0;
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);
	omap_aes_dma_cleanup(dd);
err_dma:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	pm_runtime_disable(dev);
err_res:
	kfree(dd);
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i, j;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);

	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	kfree(dd);
	dd = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static const struct dev_pm_ops omap_aes_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
};

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

module_platform_driver(omap_aes_driver);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");