/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#define DST_MAXBURST			4
#define DMA_MIN				(DST_MAXBURST * sizeof(u32))

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
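
/*
 * For example, FLD_MASK(7, 0) expands to ((1 << 8) - 1) << 0 == 0xff,
 * and for the two-bit key-size field at 4:3 used below, FLD_MASK(4, 3)
 * == 0x18 while FLD_VAL(2, 4, 3) == (2 << 3) & 0x18 == 0x10.
 */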

#define AES_REG_KEY(dd, x)		((dd)->pdata->key_ofs - \
						((x ^ 0x01) * 0x04))
#define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
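
/*
 * The (x ^ 0x01) above swaps each pair of key words: with the OMAP2
 * key_ofs of 0x1c the offsets come out as x=0 -> 0x18, x=1 -> 0x1c,
 * x=2 -> 0x10, x=3 -> 0x14, and so on downwards, i.e. the key words are
 * laid out in descending pairs below key_ofs.
 */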

#define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
#define AES_REG_CTRL_CTR_WIDTH_MASK	(3 << 7)
#define AES_REG_CTRL_CTR_WIDTH_32	(0 << 7)
#define AES_REG_CTRL_CTR_WIDTH_64	(1 << 7)
#define AES_REG_CTRL_CTR_WIDTH_96	(2 << 7)
#define AES_REG_CTRL_CTR_WIDTH_128	(3 << 7)
#define AES_REG_CTRL_CTR		(1 << 6)
#define AES_REG_CTRL_CBC		(1 << 5)
#define AES_REG_CTRL_KEY_SIZE		(3 << 3)
#define AES_REG_CTRL_DIRECTION		(1 << 2)
#define AES_REG_CTRL_INPUT_READY	(1 << 1)
#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)

#define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))

#define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define AES_REG_MASK_SIDLE		(1 << 6)
#define AES_REG_MASK_START		(1 << 5)
#define AES_REG_MASK_DMA_OUT_EN		(1 << 3)
#define AES_REG_MASK_DMA_IN_EN		(1 << 2)
#define AES_REG_MASK_SOFTRESET		(1 << 1)
#define AES_REG_AUTOIDLE		(1 << 0)

#define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))

#define DEFAULT_TIMEOUT		(5*HZ)

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_CTR		BIT(3)

#define FLAGS_INIT		BIT(4)
#define FLAGS_FAST		BIT(5)
#define FLAGS_BUSY		BIT(6)
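
/*
 * The low nibble of dd->flags (FLAGS_MODE_MASK) mirrors the per-request
 * mode bits above, while FLAGS_INIT/FLAGS_FAST/FLAGS_BUSY track device
 * state.  omap_aes_handle_queue() below installs the mode bits for each
 * new request with:
 *
 *	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 */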

struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};

struct omap_aes_reqctx {
	unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0

struct omap_aes_algs_info {
	struct crypto_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};

struct omap_aes_pdata {
	struct omap_aes_algs_info	*algs_info;
	unsigned int	algs_info_size;

	void		(*trigger)(struct omap_aes_dev *dd, int length);

	u32		key_ofs;
	u32		iv_ofs;
	u32		ctrl_ofs;
	u32		data_ofs;
	u32		rev_ofs;
	u32		mask_ofs;

	u32		dma_enable_in;
	u32		dma_enable_out;
	u32		dma_start;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;
	int			err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t				total;
	struct scatterlist		*in_sg;
	struct scatterlist		in_sgl;
	size_t				in_offset;
	struct scatterlist		*out_sg;
	struct scatterlist		out_sgl;
	size_t				out_offset;

	size_t			buflen;
	void			*buf_in;
	size_t			dma_size;
	int			dma_in;
	struct dma_chan		*dma_lch_in;
	dma_addr_t		dma_addr_in;
	void			*buf_out;
	int			dma_out;
	struct dma_chan		*dma_lch_out;
	dma_addr_t		dma_addr_out;

	const struct omap_aes_pdata	*pdata;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}
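
/*
 * omap_aes_write_mask() is a read-modify-write helper: only the bits set
 * in @mask are replaced by @value.  For instance, clearing just the DMA
 * enable and start bits while preserving the rest of the mask register:
 *
 *	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0,
 *			    dd->pdata->dma_enable_in |
 *			    dd->pdata->dma_enable_out |
 *			    dd->pdata->dma_start);
 *
 * which is exactly what omap_aes_dma_stop() below does.
 */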

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	/*
	 * Clocks are enabled when a request starts and disabled when it
	 * finishes.  There may be long delays between requests, during
	 * which the device might go to OFF mode to save power.
	 */
	pm_runtime_get_sync(dd->dev);

	if (!(dd->flags & FLAGS_INIT)) {
		dd->flags |= FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i, err;
	u32 val, mask = 0;

	err = omap_aes_hw_init(dd);
	if (err)
		return err;

	key32 = dd->ctx->keylen / sizeof(u32);

	/* it seems a key should always be set even if it has not changed */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(dd, i),
			__le32_to_cpu(dd->ctx->key[i]));
	}

	if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
		omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_CTR) {
		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
		mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
	}
	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
			AES_REG_CTRL_KEY_SIZE;

	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);

	return 0;
}
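
/*
 * Worked example of the CTRL value built above: a CBC encrypt request
 * with a 128-bit key has (keylen >> 3) - 1 == 1, so
 *
 *	val = FLD_VAL(1, 4, 3) | AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION
 *	    = 0x08 | 0x20 | 0x04 = 0x2c
 *
 * and only the key-size, CBC and direction bits of AES_REG_CTRL are
 * touched, thanks to the mask passed to omap_aes_write_mask().
 */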

static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
	u32 mask, val;

	val = dd->pdata->dma_start;

	if (dd->dma_lch_out != NULL)
		val |= dd->pdata->dma_enable_out;
	if (dd->dma_lch_in != NULL)
		val |= dd->pdata->dma_enable_in;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}

static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
	omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
	omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);

	omap_aes_dma_trigger_omap2(dd, length);
}

static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
	u32 mask;

	mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
	       dd->pdata->dma_start;

	omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take first available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;
	dma_cap_mask_t mask;

	dd->dma_lch_out = NULL;
	dd->dma_lch_in = NULL;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch_in = dma_request_slave_channel_compat(mask,
							  omap_dma_filter_fn,
							  &dd->dma_in,
							  dd->dev, "rx");
	if (!dd->dma_lch_in) {
		dev_err(dd->dev, "Unable to request in DMA channel\n");
		goto err_dma_in;
	}

	dd->dma_lch_out = dma_request_slave_channel_compat(mask,
							   omap_dma_filter_fn,
							   &dd->dma_out,
							   dd->dev, "tx");
	if (!dd->dma_lch_out) {
		dev_err(dd->dev, "Unable to request out DMA channel\n");
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->dma_lch_in);
err_dma_in:
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_out);
	dma_release_channel(dd->dma_lch_in);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		/*
		 * buflen and total are AES_BLOCK_SIZE aligned, so count
		 * should also be aligned
		 */

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}
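
/*
 * sg_copy() consumes up to @buflen bytes (bounded by @total) from the
 * scatterlist, advancing *sg and *offset across entries, and returns the
 * number of bytes actually copied.  A caller draining a request into a
 * bounce buffer would loop roughly like this (a sketch only; the real
 * callers below keep more state):
 *
 *	while (total) {
 *		count = sg_copy(&sg, &off, buf, buflen, total, 0);
 *		total -= count;
 *		... hand buf/count to the hardware ...
 *	}
 */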

static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
		struct scatterlist *in_sg, struct scatterlist *out_sg)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	struct dma_async_tx_descriptor *tx_in, *tx_out;
	struct dma_slave_config cfg;
	dma_addr_t dma_addr_in = sg_dma_address(in_sg);
	int ret, length = sg_dma_len(in_sg);

	pr_debug("len: %d\n", length);

	dd->dma_size = length;

	if (!(dd->flags & FLAGS_FAST))
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);

	memset(&cfg, 0, sizeof(cfg));

	cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = DST_MAXBURST;
	cfg.dst_maxburst = DST_MAXBURST;

	/* IN */
	ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_in) {
		dev_err(dd->dev, "IN prep_slave_sg() failed\n");
		return -EINVAL;
	}

	/* No callback necessary */
	tx_in->callback_param = dd;

	/* OUT */
	ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		return ret;
	}

	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx_out) {
		dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
		return -EINVAL;
	}

	tx_out->callback = omap_aes_dma_out_callback;
	tx_out->callback_param = dd;

	dmaengine_submit(tx_in);
	dmaengine_submit(tx_out);

	dma_async_issue_pending(dd->dma_lch_in);
	dma_async_issue_pending(dd->dma_lch_out);

	/* start DMA */
	dd->pdata->trigger(dd, length);

	return 0;
}
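
/*
 * Note that cfg.src_addr and cfg.dst_addr above are identical: the IN
 * channel feeds plaintext into AES_REG_DATA_N(dd, 0) while the OUT
 * channel drains the result from the same data register window,
 * DST_MAXBURST words per burst, so the two transfers proceed in lockstep
 * under hardware flow control.  Only the OUT descriptor gets a
 * completion callback, since the output side finishing implies the
 * input side is done as well.
 */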

static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct scatterlist *in_sg, *out_sg;
	int len32;

	pr_debug("total: %zu\n", dd->total);

	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

		fast = in && out;
	}

	if (fast) {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		if (count != dd->total) {
			pr_err("request length != buffer length\n");
			return -EINVAL;
		}

		pr_debug("fast\n");

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		in_sg = dd->in_sg;
		out_sg = dd->out_sg;

		dd->flags |= FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
				dd->buflen, dd->total, 0);

		len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;

		/*
		 * The data going into the AES module has been copied
		 * to a local buffer and the data coming out will go
		 * into a local buffer so set up local SG entries for
		 * both.
		 */
		sg_init_table(&dd->in_sgl, 1);
		dd->in_sgl.offset = dd->in_offset;
		sg_dma_len(&dd->in_sgl) = len32;
		sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;

		sg_init_table(&dd->out_sgl, 1);
		dd->out_sgl.offset = dd->out_offset;
		sg_dma_len(&dd->out_sgl) = len32;
		sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;

		in_sg = &dd->in_sgl;
		out_sg = &dd->out_sgl;

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~FLAGS_FAST;
	}

	dd->total -= count;

	err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
	if (err) {
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
	}

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	pr_debug("err: %d\n", err);

	pm_runtime_put(dd->dev);
	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;
	size_t count;

	pr_debug("total: %zu\n", dd->total);

	omap_aes_dma_stop(dd);

	dmaengine_terminate_all(dd->dma_lch_in);
	dmaengine_terminate_all(dd->dma_lch_out);

	if (dd->flags & FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
				dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %zu\n", count);
		}
	}

	return err;
}

static int omap_aes_handle_queue(struct omap_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->ctx = ctx;
	ctx->dd = dd;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);
	if (err) {
		/* aes_task will not finish it, so do it here */
		omap_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret; /* return ret, which is enqueue return value */
}
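
/*
 * omap_aes_handle_queue() is called both from omap_aes_crypt() with a
 * fresh request and from the tasklets with req == NULL to pump the
 * queue.  Its return value is whatever ablkcipher_enqueue_request()
 * reported (typically -EINPROGRESS, or -EBUSY for a backlogged request),
 * not the outcome of the crypto operation itself, which is delivered
 * asynchronously through req->base.complete() in omap_aes_finish_req().
 */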

static void omap_aes_done_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	int err;

	pr_debug("enter\n");

	err = omap_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		err = omap_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

	omap_aes_finish_req(dd, err);
	omap_aes_handle_queue(dd, NULL);

	pr_debug("exit\n");
}

static void omap_aes_queue_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

	omap_aes_handle_queue(dd, NULL);
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		  !!(mode & FLAGS_ENCRYPT),
		  !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		pr_err("request size is not a multiple of the AES block size\n");
		return -EINVAL;
	}

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return omap_aes_handle_queue(dd, req);
}
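
/*
 * A minimal sketch of how a caller reaches this entry point through the
 * ablkcipher API of this era (error handling and the completion callback
 * trimmed; key, src_sg, dst_sg, nbytes and iv are the caller's own):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);    (lands in omap_aes_cbc_encrypt())
 *
 * nbytes must be a multiple of AES_BLOCK_SIZE, per the check above.
 */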

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CTR);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs_ecb_cbc[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
}
};

static struct crypto_alg algs_ctr[] = {
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.geniv		= "eseqiv",
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ctr_encrypt,
		.decrypt	= omap_aes_ctr_decrypt,
	}
},
};

static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

#ifdef CONFIG_OF
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
	{
		.algs_list	= algs_ecb_cbc,
		.size		= ARRAY_SIZE(algs_ecb_cbc),
	},
	{
		.algs_list	= algs_ctr,
		.size		= ARRAY_SIZE(algs_ctr),
	},
};

static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap2,
	.key_ofs	= 0x1c,
	.iv_ofs		= 0x20,
	.ctrl_ofs	= 0x30,
	.data_ofs	= 0x34,
	.rev_ofs	= 0x44,
	.mask_ofs	= 0x48,
	.dma_enable_in	= BIT(2),
	.dma_enable_out	= BIT(3),
	.dma_start	= BIT(5),
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
	.algs_info	= omap_aes_algs_info_ecb_cbc_ctr,
	.algs_info_size	= ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
	.trigger	= omap_aes_dma_trigger_omap4,
	.key_ofs	= 0x3c,
	.iv_ofs		= 0x40,
	.ctrl_ofs	= 0x50,
	.data_ofs	= 0x60,
	.rev_ofs	= 0x80,
	.mask_ofs	= 0x84,
	.dma_enable_in	= BIT(5),
	.dma_enable_out	= BIT(6),
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

static const struct of_device_id omap_aes_of_match[] = {
	{
		.compatible	= "ti,omap2-aes",
		.data		= &omap_aes_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-aes",
		.data		= &omap_aes_pdata_omap3,
	},
	{
		.compatible	= "ti,omap4-aes",
		.data		= &omap_aes_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma_out = -1; /* Dummy value that's unused */
	dd->dma_in = -1; /* Dummy value that's unused */

	dd->pdata = match->data;

err:
	return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
	{},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the DMA out channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA out resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_out = r->start;

	/* Get the DMA in channel */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r) {
		dev_err(dev, "no DMA in resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma_in = r->start;

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_aes_pdata_omap2;

err:
	return err;
}

static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct crypto_alg *algp;
	struct resource res;
	int err = -ENOMEM, i, j;
	u32 reg;

	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
			       omap_aes_get_res_pdev(dd, pdev, &res);
	if (err)
		goto err_res;

	dd->io_base = devm_request_and_ioremap(dev, &res);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto err_res;
	}
	dd->phys_base = res.start;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	omap_aes_dma_stop(dd);

	reg = omap_aes_read(dd, AES_REG_REV(dd));

	pm_runtime_put_sync(dev);

	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err)
		goto err_dma;

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			algp = &dd->pdata->algs_info[i].algs_list[j];

			pr_debug("reg alg: %s\n", algp->cra_name);
			INIT_LIST_HEAD(&algp->cra_list);

			err = crypto_register_alg(algp);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	return 0;
err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);
	omap_aes_dma_cleanup(dd);
err_dma:
	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	pm_runtime_disable(dev);
err_res:
	kfree(dd);
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i, j;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_alg(
					&dd->pdata->algs_info[i].algs_list[j]);

	tasklet_kill(&dd->done_task);
	tasklet_kill(&dd->queue_task);
	omap_aes_dma_cleanup(dd);
	pm_runtime_disable(dd->dev);
	kfree(dd);
	dd = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_aes_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif

static const struct dev_pm_ops omap_aes_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_aes_suspend, omap_aes_resume)
};

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
		.pm	= &omap_aes_pm_ops,
		.of_match_table	= omap_aes_of_match,
	},
};

module_platform_driver(omap_aes_driver);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
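
/*
 * For DT-based boots, a node matching the table above might look like the
 * sketch below (the unit address, reg size and DMA specifiers are
 * illustrative placeholders, but the "tx"/"rx" dma-names must match the
 * channel names requested in omap_aes_dma_init()):
 *
 *	aes: aes@0 {
 *		compatible = "ti,omap4-aes";
 *		reg = <0x0 0xa0>;
 *		dmas = <&sdma 111>, <&sdma 110>;
 *		dma-names = "tx", "rx";
 *	};
 */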