// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 */

#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static int aes_fallback_sz = 200;

#ifdef DEBUG
#define omap_aes_read(dd, offset)                               \
({                                                              \
        int _read_ret;                                          \
        _read_ret = __raw_readl(dd->io_base + offset);          \
        pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",       \
                 offset, _read_ret);                            \
        _read_ret;                                              \
})
#else
inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
        return __raw_readl(dd->io_base + offset);
}
#endif

#ifdef DEBUG
#define omap_aes_write(dd, offset, value)                               \
        do {                                                            \
                pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n", \
                         offset, value);                                \
                __raw_writel(value, dd->io_base + offset);              \
        } while (0)
#else
inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
                           u32 value)
{
        __raw_writel(value, dd->io_base + offset);
}
#endif

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
                                       u32 value, u32 mask)
{
        u32 val;

        val = omap_aes_read(dd, offset);
        val &= ~mask;
        val |= value;
        omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
                             u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                omap_aes_write(dd, offset, *value);
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
        int err;

        if (!(dd->flags & FLAGS_INIT)) {
                dd->flags |= FLAGS_INIT;
                dd->err = 0;
        }

        err = pm_runtime_get_sync(dd->dev);
        if (err < 0) {
                dev_err(dd->dev, "failed to get sync: %d\n", err);
                return err;
        }

        return 0;
}

void omap_aes_clear_copy_flags(struct omap_aes_dev *dd)
{
        dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_IN_DATA_ST_SHIFT);
        dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_OUT_DATA_ST_SHIFT);
        dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_ASSOC_DATA_ST_SHIFT);
}
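
/*
 * Program the hardware for the current request: load the round key,
 * load the IV for the chained modes (CBC/CTR, or the GCM counter from
 * the request context), and build the CTRL word from the key size,
 * mode bits and direction. Called once per request from the prepare
 * step.
 */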
int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
        struct omap_aes_reqctx *rctx;
        unsigned int key32;
        int i, err;
        u32 val;

        err = omap_aes_hw_init(dd);
        if (err)
                return err;

        key32 = dd->ctx->keylen / sizeof(u32);

        /* Reset the key as previous HASH keys should not get affected */
        if (dd->flags & FLAGS_GCM)
                for (i = 0; i < 0x40; i += 4)
                        omap_aes_write(dd, i, 0x0);

        for (i = 0; i < key32; i++) {
                omap_aes_write(dd, AES_REG_KEY(dd, i),
                               __le32_to_cpu(dd->ctx->key[i]));
        }

        if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->iv)
                omap_aes_write_n(dd, AES_REG_IV(dd, 0), (void *)dd->req->iv, 4);

        if ((dd->flags & FLAGS_GCM) && dd->aead_req->iv) {
                rctx = aead_request_ctx(dd->aead_req);
                omap_aes_write_n(dd, AES_REG_IV(dd, 0), (u32 *)rctx->iv, 4);
        }

        val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
        if (dd->flags & FLAGS_CBC)
                val |= AES_REG_CTRL_CBC;

        if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
                val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;

        if (dd->flags & FLAGS_GCM)
                val |= AES_REG_CTRL_GCM;

        if (dd->flags & FLAGS_ENCRYPT)
                val |= AES_REG_CTRL_DIRECTION;

        omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);

        return 0;
}

static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
        u32 mask, val;

        val = dd->pdata->dma_start;

        if (dd->dma_lch_out != NULL)
                val |= dd->pdata->dma_enable_out;
        if (dd->dma_lch_in != NULL)
                val |= dd->pdata->dma_enable_in;

        mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
               dd->pdata->dma_start;

        omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}

static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
        omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
        omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
        if (dd->flags & FLAGS_GCM)
                omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);

        omap_aes_dma_trigger_omap2(dd, length);
}

static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
        u32 mask;

        mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
               dd->pdata->dma_start;

        omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}

struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx)
{
        struct omap_aes_dev *dd;

        spin_lock_bh(&list_lock);
        dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
        list_move_tail(&dd->list, &dev_list);
        rctx->dd = dd;
        spin_unlock_bh(&list_lock);

        return dd;
}

static void omap_aes_dma_out_callback(void *data)
{
        struct omap_aes_dev *dd = data;

        /* dma_lch_out - completed */
        tasklet_schedule(&dd->done_task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
        int err;

        dd->dma_lch_out = NULL;
        dd->dma_lch_in = NULL;

        dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
        if (IS_ERR(dd->dma_lch_in)) {
                dev_err(dd->dev, "Unable to request in DMA channel\n");
                return PTR_ERR(dd->dma_lch_in);
        }

        dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
        if (IS_ERR(dd->dma_lch_out)) {
                dev_err(dd->dev, "Unable to request out DMA channel\n");
                err = PTR_ERR(dd->dma_lch_out);
                goto err_dma_out;
        }

        return 0;

err_dma_out:
        dma_release_channel(dd->dma_lch_in);

        return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
        if (dd->pio_only)
                return;

        dma_release_channel(dd->dma_lch_out);
        dma_release_channel(dd->dma_lch_in);
}
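
/*
 * Set up the two DMA channels for one transfer: dma_lch_in ("rx")
 * copies the input scatterlist into the AES DATA registers
 * (DMA_MEM_TO_DEV) and dma_lch_out ("tx") drains the results back to
 * memory (DMA_DEV_TO_MEM). Completion is signalled through the OUT
 * channel callback only. In PIO mode there is nothing to set up; the
 * DATA_IN interrupt is enabled and omap_aes_irq() does the copying
 * instead.
 */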
static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
                              struct scatterlist *in_sg,
                              struct scatterlist *out_sg,
                              int in_sg_len, int out_sg_len)
{
        struct dma_async_tx_descriptor *tx_in, *tx_out;
        struct dma_slave_config cfg;
        int ret;

        if (dd->pio_only) {
                scatterwalk_start(&dd->in_walk, dd->in_sg);
                scatterwalk_start(&dd->out_walk, dd->out_sg);

                /*
                 * Enable the DATA_IN interrupt and let it take care
                 * of the rest
                 */
                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
                return 0;
        }

        dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

        memset(&cfg, 0, sizeof(cfg));

        cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
        cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.src_maxburst = DST_MAXBURST;
        cfg.dst_maxburst = DST_MAXBURST;

        /* IN */
        ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
                        ret);
                return ret;
        }

        tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
                                        DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_in) {
                dev_err(dd->dev, "IN prep_slave_sg() failed\n");
                return -EINVAL;
        }

        /* No callback necessary */
        tx_in->callback_param = dd;

        /* OUT */
        ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
                        ret);
                return ret;
        }

        tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_out) {
                dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
                return -EINVAL;
        }

        if (dd->flags & FLAGS_GCM)
                tx_out->callback = omap_aes_gcm_dma_out_callback;
        else
                tx_out->callback = omap_aes_dma_out_callback;
        tx_out->callback_param = dd;

        dmaengine_submit(tx_in);
        dmaengine_submit(tx_out);

        dma_async_issue_pending(dd->dma_lch_in);
        dma_async_issue_pending(dd->dma_lch_out);

        /* start DMA */
        dd->pdata->trigger(dd, dd->total);

        return 0;
}

int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
        int err;

        pr_debug("total: %zu\n", dd->total);

        if (!dd->pio_only) {
                err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
                                 DMA_TO_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }

                err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
                                 DMA_FROM_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }
        }

        err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
                                 dd->out_sg_len);
        if (err && !dd->pio_only) {
                dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
                dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
                             DMA_FROM_DEVICE);
        }

        return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
        struct skcipher_request *req = dd->req;

        pr_debug("err: %d\n", err);

        crypto_finalize_skcipher_request(dd->engine, req, err);

        pm_runtime_mark_last_busy(dd->dev);
        pm_runtime_put_autosuspend(dd->dev);
}

int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
        pr_debug("total: %zu\n", dd->total);

        omap_aes_dma_stop(dd);

        return 0;
}

static int omap_aes_handle_queue(struct omap_aes_dev *dd,
                                 struct skcipher_request *req)
{
        if (req)
                return crypto_transfer_skcipher_request_to_engine(dd->engine,
                                                                  req);

        return 0;
}
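
/*
 * crypto_engine prepare_request callback: claim the device for this
 * request, align the source/destination scatterlists to the AES block
 * size (forcing a bounce copy for in-place requests), and program the
 * hardware via omap_aes_write_ctrl().
 */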
static int omap_aes_prepare_req(struct crypto_engine *engine,
                                void *areq)
{
        struct skcipher_request *req = container_of(areq,
                        struct skcipher_request, base);
        struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
                        crypto_skcipher_reqtfm(req));
        struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
        struct omap_aes_dev *dd = rctx->dd;
        int ret;
        u16 flags;

        if (!dd)
                return -ENODEV;

        /* assign new request to device */
        dd->req = req;
        dd->total = req->cryptlen;
        dd->total_save = req->cryptlen;
        dd->in_sg = req->src;
        dd->out_sg = req->dst;
        dd->orig_out = req->dst;

        flags = OMAP_CRYPTO_COPY_DATA;
        if (req->src == req->dst)
                flags |= OMAP_CRYPTO_FORCE_COPY;

        ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
                                   dd->in_sgl, flags,
                                   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
        if (ret)
                return ret;

        ret = omap_crypto_align_sg(&dd->out_sg, dd->total, AES_BLOCK_SIZE,
                                   &dd->out_sgl, 0,
                                   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
        if (ret)
                return ret;

        dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
        if (dd->in_sg_len < 0)
                return dd->in_sg_len;

        dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
        if (dd->out_sg_len < 0)
                return dd->out_sg_len;

        rctx->mode &= FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        dd->ctx = ctx;
        rctx->dd = dd;

        return omap_aes_write_ctrl(dd);
}

static int omap_aes_crypt_req(struct crypto_engine *engine,
                              void *areq)
{
        struct skcipher_request *req = container_of(areq,
                        struct skcipher_request, base);
        struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
        struct omap_aes_dev *dd = rctx->dd;

        if (!dd)
                return -ENODEV;

        return omap_aes_crypt_dma_start(dd);
}

static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
{
        int i;

        for (i = 0; i < 4; i++)
                ((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
}

static void omap_aes_done_task(unsigned long data)
{
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

        pr_debug("enter done_task\n");

        if (!dd->pio_only) {
                dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
                                       DMA_FROM_DEVICE);
                dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
                dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
                             DMA_FROM_DEVICE);
                omap_aes_crypt_dma_stop(dd);
        }

        omap_crypto_cleanup(dd->in_sg, NULL, 0, dd->total_save,
                            FLAGS_IN_DATA_ST_SHIFT, dd->flags);

        omap_crypto_cleanup(dd->out_sg, dd->orig_out, 0, dd->total_save,
                            FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

        /* Update IV output */
        if (dd->flags & (FLAGS_CBC | FLAGS_CTR))
                omap_aes_copy_ivout(dd, dd->req->iv);

        omap_aes_finish_req(dd, 0);

        pr_debug("exit\n");
}
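
/*
 * Common entry point for all skcipher modes. Requests shorter than
 * aes_fallback_sz (tunable through the "fallback" sysfs attribute) are
 * handed to the synchronous software fallback, the idea being that for
 * small payloads the DMA and engine-queue setup costs more than the
 * encryption itself.
 */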
static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
        struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
                        crypto_skcipher_reqtfm(req));
        struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
        struct omap_aes_dev *dd;
        int ret;

        if ((req->cryptlen % AES_BLOCK_SIZE) && !(mode & FLAGS_CTR))
                return -EINVAL;

        pr_debug("nbytes: %u, enc: %d, cbc: %d\n", req->cryptlen,
                 !!(mode & FLAGS_ENCRYPT),
                 !!(mode & FLAGS_CBC));

        if (req->cryptlen < aes_fallback_sz) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags, NULL,
                                              NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->cryptlen, req->iv);

                if (mode & FLAGS_ENCRYPT)
                        ret = crypto_skcipher_encrypt(subreq);
                else
                        ret = crypto_skcipher_decrypt(subreq);

                skcipher_request_zero(subreq);
                return ret;
        }

        dd = omap_aes_find_dev(rctx);
        if (!dd)
                return -ENODEV;

        rctx->mode = mode;

        return omap_aes_handle_queue(dd, req);
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        int ret;

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256)
                return -EINVAL;

        pr_debug("enter, keylen: %u\n", keylen);

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
                                                      CRYPTO_TFM_REQ_MASK);

        /* Propagate a fallback setkey failure instead of swallowing it */
        ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
        if (ret)
                return ret;

        return 0;
}

static int omap_aes_ecb_encrypt(struct skcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct skcipher_request *req)
{
        return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct skcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct skcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct skcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct skcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_CTR);
}

static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_sync_skcipher *blk;

        blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(blk))
                return PTR_ERR(blk);

        ctx->fallback = blk;

        crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));

        ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
        ctx->enginectx.op.unprepare_request = NULL;
        ctx->enginectx.op.do_one_request = omap_aes_crypt_req;

        return 0;
}
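
/*
 * AEAD init: picks the first registered device and allocates the
 * "ecb(aes)" helper transform that the GCM code uses for encrypting
 * the IV (see omap-aes-gcm.c).
 */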
static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
{
        struct omap_aes_dev *dd = NULL;
        struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
        int err;

        /* Find AES device, currently picks the first device */
        spin_lock_bh(&list_lock);
        list_for_each_entry(dd, &dev_list, list) {
                break;
        }
        spin_unlock_bh(&list_lock);

        err = pm_runtime_get_sync(dd->dev);
        if (err < 0) {
                dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
                        __func__, err);
                return err;
        }

        tfm->reqsize = sizeof(struct omap_aes_reqctx);
        ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
        if (IS_ERR(ctx->ctr)) {
                pr_warn("could not load aes driver for encrypting IV\n");
                return PTR_ERR(ctx->ctr);
        }

        return 0;
}

static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
{
        struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

        if (ctx->fallback)
                crypto_free_sync_skcipher(ctx->fallback);

        ctx->fallback = NULL;
}

static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
{
        struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

        if (ctx->fallback)
                crypto_free_sync_skcipher(ctx->fallback);

        ctx->fallback = NULL;

        if (ctx->ctr)
                crypto_free_skcipher(ctx->ctr);
}

/* ********************** ALGS ************************************ */

static struct skcipher_alg algs_ecb_cbc[] = {
{
        .base.cra_name          = "ecb(aes)",
        .base.cra_driver_name   = "ecb-aes-omap",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct omap_aes_ctx),
        .base.cra_module        = THIS_MODULE,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .setkey                 = omap_aes_setkey,
        .encrypt                = omap_aes_ecb_encrypt,
        .decrypt                = omap_aes_ecb_decrypt,
        .init                   = omap_aes_init_tfm,
        .exit                   = omap_aes_exit_tfm,
},
{
        .base.cra_name          = "cbc(aes)",
        .base.cra_driver_name   = "cbc-aes-omap",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct omap_aes_ctx),
        .base.cra_module        = THIS_MODULE,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = omap_aes_setkey,
        .encrypt                = omap_aes_cbc_encrypt,
        .decrypt                = omap_aes_cbc_decrypt,
        .init                   = omap_aes_init_tfm,
        .exit                   = omap_aes_exit_tfm,
}
};

static struct skcipher_alg algs_ctr[] = {
{
        .base.cra_name          = "ctr(aes)",
        .base.cra_driver_name   = "ctr-aes-omap",
        .base.cra_priority      = 300,
        .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct omap_aes_ctx),
        .base.cra_module        = THIS_MODULE,

        .min_keysize            = AES_MIN_KEY_SIZE,
        .max_keysize            = AES_MAX_KEY_SIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = omap_aes_setkey,
        .encrypt                = omap_aes_ctr_encrypt,
        .decrypt                = omap_aes_ctr_decrypt,
        .init                   = omap_aes_init_tfm,
        .exit                   = omap_aes_exit_tfm,
}
};

static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
        {
                .algs_list      = algs_ecb_cbc,
                .size           = ARRAY_SIZE(algs_ecb_cbc),
        },
};
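
/*
 * The AEAD table below is only advertised on hardware whose pdata sets
 * aead_algs_info (OMAP4-class IP, see omap_aes_pdata_omap4). Users
 * reach all of these algorithms through the regular crypto API; a
 * minimal usage sketch (not part of this driver):
 *
 *      struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *      if (!IS_ERR(tfm)) {
 *              crypto_aead_setkey(tfm, key, 16);
 *              ...
 *              crypto_free_aead(tfm);
 *      }
 */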
static struct aead_alg algs_aead_gcm[] = {
{
        .base = {
                .cra_name               = "gcm(aes)",
                .cra_driver_name        = "gcm-aes-omap",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_KERN_DRIVER_ONLY,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct omap_aes_ctx),
                .cra_alignmask          = 0xf,
                .cra_module             = THIS_MODULE,
        },
        .init           = omap_aes_gcm_cra_init,
        .exit           = omap_aes_gcm_cra_exit,
        .ivsize         = GCM_AES_IV_SIZE,
        .maxauthsize    = AES_BLOCK_SIZE,
        .setkey         = omap_aes_gcm_setkey,
        .setauthsize    = omap_aes_gcm_setauthsize,
        .encrypt        = omap_aes_gcm_encrypt,
        .decrypt        = omap_aes_gcm_decrypt,
},
{
        .base = {
                .cra_name               = "rfc4106(gcm(aes))",
                .cra_driver_name        = "rfc4106-gcm-aes-omap",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_KERN_DRIVER_ONLY,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct omap_aes_ctx),
                .cra_alignmask          = 0xf,
                .cra_module             = THIS_MODULE,
        },
        .init           = omap_aes_gcm_cra_init,
        .exit           = omap_aes_gcm_cra_exit,
        .maxauthsize    = AES_BLOCK_SIZE,
        .ivsize         = GCM_RFC4106_IV_SIZE,
        .setkey         = omap_aes_4106gcm_setkey,
        .setauthsize    = omap_aes_4106gcm_setauthsize,
        .encrypt        = omap_aes_4106gcm_encrypt,
        .decrypt        = omap_aes_4106gcm_decrypt,
},
};

static struct omap_aes_aead_algs omap_aes_aead_info = {
        .algs_list      = algs_aead_gcm,
        .size           = ARRAY_SIZE(algs_aead_gcm),
};

static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
        .algs_info      = omap_aes_algs_info_ecb_cbc,
        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
        .trigger        = omap_aes_dma_trigger_omap2,
        .key_ofs        = 0x1c,
        .iv_ofs         = 0x20,
        .ctrl_ofs       = 0x30,
        .data_ofs       = 0x34,
        .rev_ofs        = 0x44,
        .mask_ofs       = 0x48,
        .dma_enable_in  = BIT(2),
        .dma_enable_out = BIT(3),
        .dma_start      = BIT(5),
        .major_mask     = 0xf0,
        .major_shift    = 4,
        .minor_mask     = 0x0f,
        .minor_shift    = 0,
};

#ifdef CONFIG_OF
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
        {
                .algs_list      = algs_ecb_cbc,
                .size           = ARRAY_SIZE(algs_ecb_cbc),
        },
        {
                .algs_list      = algs_ctr,
                .size           = ARRAY_SIZE(algs_ctr),
        },
};

static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
        .algs_info      = omap_aes_algs_info_ecb_cbc_ctr,
        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
        .trigger        = omap_aes_dma_trigger_omap2,
        .key_ofs        = 0x1c,
        .iv_ofs         = 0x20,
        .ctrl_ofs       = 0x30,
        .data_ofs       = 0x34,
        .rev_ofs        = 0x44,
        .mask_ofs       = 0x48,
        .dma_enable_in  = BIT(2),
        .dma_enable_out = BIT(3),
        .dma_start      = BIT(5),
        .major_mask     = 0xf0,
        .major_shift    = 4,
        .minor_mask     = 0x0f,
        .minor_shift    = 0,
};

static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
        .algs_info      = omap_aes_algs_info_ecb_cbc_ctr,
        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
        .aead_algs_info = &omap_aes_aead_info,
        .trigger        = omap_aes_dma_trigger_omap4,
        .key_ofs        = 0x3c,
        .iv_ofs         = 0x40,
        .ctrl_ofs       = 0x50,
        .data_ofs       = 0x60,
        .rev_ofs        = 0x80,
        .mask_ofs       = 0x84,
        .irq_status_ofs = 0x8c,
        .irq_enable_ofs = 0x90,
        .dma_enable_in  = BIT(5),
        .dma_enable_out = BIT(6),
        .major_mask     = 0x0700,
        .major_shift    = 8,
        .minor_mask     = 0x003f,
        .minor_shift    = 0,
};
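
/*
 * PIO-mode interrupt handler: ping-pongs between the DATA_IN and
 * DATA_OUT interrupts, writing one 16-byte block into the DATA
 * registers and then reading the result back, until dd->total is
 * exhausted and the done tasklet can run.
 */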
static irqreturn_t omap_aes_irq(int irq, void *dev_id)
{
        struct omap_aes_dev *dd = dev_id;
        u32 status, i;
        u32 *src, *dst;

        status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
        if (status & AES_REG_IRQ_DATA_IN) {
                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

                BUG_ON(!dd->in_sg);

                BUG_ON(_calc_walked(in) > dd->in_sg->length);

                src = sg_virt(dd->in_sg) + _calc_walked(in);

                for (i = 0; i < AES_BLOCK_WORDS; i++) {
                        omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);

                        scatterwalk_advance(&dd->in_walk, 4);
                        if (dd->in_sg->length == _calc_walked(in)) {
                                dd->in_sg = sg_next(dd->in_sg);
                                if (dd->in_sg) {
                                        scatterwalk_start(&dd->in_walk,
                                                          dd->in_sg);
                                        src = sg_virt(dd->in_sg) +
                                              _calc_walked(in);
                                }
                        } else {
                                src++;
                        }
                }

                /* Clear IRQ status */
                status &= ~AES_REG_IRQ_DATA_IN;
                omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

                /* Enable DATA_OUT interrupt */
                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);

        } else if (status & AES_REG_IRQ_DATA_OUT) {
                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

                BUG_ON(!dd->out_sg);

                BUG_ON(_calc_walked(out) > dd->out_sg->length);

                dst = sg_virt(dd->out_sg) + _calc_walked(out);

                for (i = 0; i < AES_BLOCK_WORDS; i++) {
                        *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
                        scatterwalk_advance(&dd->out_walk, 4);
                        if (dd->out_sg->length == _calc_walked(out)) {
                                dd->out_sg = sg_next(dd->out_sg);
                                if (dd->out_sg) {
                                        scatterwalk_start(&dd->out_walk,
                                                          dd->out_sg);
                                        dst = sg_virt(dd->out_sg) +
                                              _calc_walked(out);
                                }
                        } else {
                                dst++;
                        }
                }

                dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);

                /* Clear IRQ status */
                status &= ~AES_REG_IRQ_DATA_OUT;
                omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

                if (!dd->total)
                        /* All bytes read! */
                        tasklet_schedule(&dd->done_task);
                else
                        /* Enable DATA_IN interrupt for next block */
                        omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
        }

        return IRQ_HANDLED;
}

static const struct of_device_id omap_aes_of_match[] = {
        {
                .compatible     = "ti,omap2-aes",
                .data           = &omap_aes_pdata_omap2,
        },
        {
                .compatible     = "ti,omap3-aes",
                .data           = &omap_aes_pdata_omap3,
        },
        {
                .compatible     = "ti,omap4-aes",
                .data           = &omap_aes_pdata_omap4,
        },
        {},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
                               struct device *dev, struct resource *res)
{
        struct device_node *node = dev->of_node;
        int err = 0;

        dd->pdata = of_device_get_match_data(dev);
        if (!dd->pdata) {
                dev_err(dev, "no compatible OF match\n");
                err = -EINVAL;
                goto err;
        }

        err = of_address_to_resource(node, 0, res);
        if (err < 0) {
                dev_err(dev, "can't translate OF node address\n");
                err = -EINVAL;
                goto err;
        }

err:
        return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
        {},
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
                               struct device *dev, struct resource *res)
{
        return -EINVAL;
}
#endif

static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
                                 struct platform_device *pdev,
                                 struct resource *res)
{
        struct device *dev = &pdev->dev;
        struct resource *r;
        int err = 0;

        /* Get the base address */
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(dev, "no MEM resource info\n");
                err = -ENODEV;
                goto err;
        }
        memcpy(res, r, sizeof(*res));

        /* Only OMAP2/3 can be non-DT */
        dd->pdata = &omap_aes_pdata_omap2;

err:
        return err;
}
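
/*
 * sysfs knobs: "fallback" sets the size (in bytes) below which requests
 * are routed to the software implementation, and "queue_len" resizes
 * the engine queues of all registered devices at runtime.
 */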
static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        return sprintf(buf, "%d\n", aes_fallback_sz);
}

static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t size)
{
        ssize_t status;
        long value;

        status = kstrtol(buf, 0, &value);
        if (status)
                return status;

        /* HW accelerator only works with buffers >= 9 */
        if (value < 9) {
                dev_err(dev, "minimum fallback size 9\n");
                return -EINVAL;
        }

        aes_fallback_sz = value;

        return size;
}

static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct omap_aes_dev *dd = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
}

static ssize_t queue_len_store(struct device *dev,
                               struct device_attribute *attr, const char *buf,
                               size_t size)
{
        struct omap_aes_dev *dd;
        ssize_t status;
        long value;
        unsigned long flags;

        status = kstrtol(buf, 0, &value);
        if (status)
                return status;

        if (value < 1)
                return -EINVAL;

        /*
         * Changing the queue size on the fly is safe: if the new size is
         * smaller than the current size, the queue simply stops accepting
         * new entries until it has shrunk enough.
         */
        spin_lock_bh(&list_lock);
        list_for_each_entry(dd, &dev_list, list) {
                spin_lock_irqsave(&dd->lock, flags);
                dd->engine->queue.max_qlen = value;
                dd->aead_queue.base.max_qlen = value;
                spin_unlock_irqrestore(&dd->lock, flags);
        }
        spin_unlock_bh(&list_lock);

        return size;
}

static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_aes_attrs[] = {
        &dev_attr_queue_len.attr,
        &dev_attr_fallback.attr,
        NULL,
};

static struct attribute_group omap_aes_attr_group = {
        .attrs = omap_aes_attrs,
};
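
/*
 * Probe: map the MMIO region, enable runtime PM and read the revision
 * register, then try to set up DMA; if no channels are available (and
 * the IP revision has IRQ registers) fall back to PIO mode. Finally
 * start a crypto engine and register the algorithms this IP revision
 * supports.
 */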
static int omap_aes_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct omap_aes_dev *dd;
        struct skcipher_alg *algp;
        struct aead_alg *aalg;
        struct resource res;
        int err = -ENOMEM, i, j, irq = -1;
        u32 reg;

        dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
        if (dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                goto err_data;
        }
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);

        aead_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);

        err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
                               omap_aes_get_res_pdev(dd, pdev, &res);
        if (err)
                goto err_res;

        dd->io_base = devm_ioremap_resource(dev, &res);
        if (IS_ERR(dd->io_base)) {
                err = PTR_ERR(dd->io_base);
                goto err_res;
        }
        dd->phys_base = res.start;

        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

        pm_runtime_enable(dev);
        err = pm_runtime_get_sync(dev);
        if (err < 0) {
                dev_err(dev, "%s: failed to get_sync(%d)\n",
                        __func__, err);
                /* runtime PM is already enabled here, so disable it on exit */
                goto err_pm_disable;
        }

        omap_aes_dma_stop(dd);

        reg = omap_aes_read(dd, AES_REG_REV(dd));

        pm_runtime_put_sync(dev);

        dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
                 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
                 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

        tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);

        err = omap_aes_dma_init(dd);
        if (err == -EPROBE_DEFER) {
                goto err_irq;
        } else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
                dd->pio_only = 1;

                irq = platform_get_irq(pdev, 0);
                if (irq < 0) {
                        err = irq;
                        goto err_irq;
                }

                err = devm_request_irq(dev, irq, omap_aes_irq, 0,
                                       dev_name(dev), dd);
                if (err) {
                        dev_err(dev, "Unable to grab omap-aes IRQ\n");
                        goto err_irq;
                }
        }

        spin_lock_init(&dd->lock);

        INIT_LIST_HEAD(&dd->list);
        spin_lock(&list_lock);
        list_add_tail(&dd->list, &dev_list);
        spin_unlock(&list_lock);

        /* Initialize crypto engine */
        dd->engine = crypto_engine_alloc_init(dev, 1);
        if (!dd->engine) {
                err = -ENOMEM;
                goto err_engine;
        }

        err = crypto_engine_start(dd->engine);
        if (err)
                goto err_engine;

        for (i = 0; i < dd->pdata->algs_info_size; i++) {
                if (!dd->pdata->algs_info[i].registered) {
                        for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
                                algp = &dd->pdata->algs_info[i].algs_list[j];

                                pr_debug("reg alg: %s\n", algp->base.cra_name);

                                err = crypto_register_skcipher(algp);
                                if (err)
                                        goto err_algs;

                                dd->pdata->algs_info[i].registered++;
                        }
                }
        }

        if (dd->pdata->aead_algs_info &&
            !dd->pdata->aead_algs_info->registered) {
                for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
                        aalg = &dd->pdata->aead_algs_info->algs_list[i];

                        pr_debug("reg alg: %s\n", aalg->base.cra_name);

                        err = crypto_register_aead(aalg);
                        if (err)
                                goto err_aead_algs;

                        dd->pdata->aead_algs_info->registered++;
                }
        }

        err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
        if (err) {
                dev_err(dev, "could not create sysfs device attrs\n");
                goto err_aead_algs;
        }

        return 0;
err_aead_algs:
        /* aead_algs_info is NULL on OMAP2/3-class pdata; guard the unwind */
        if (dd->pdata->aead_algs_info)
                for (i = dd->pdata->aead_algs_info->registered - 1;
                     i >= 0; i--) {
                        aalg = &dd->pdata->aead_algs_info->algs_list[i];
                        crypto_unregister_aead(aalg);
                }
err_algs:
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
                        crypto_unregister_skcipher(
                                        &dd->pdata->algs_info[i].algs_list[j]);

err_engine:
        if (dd->engine)
                crypto_engine_exit(dd->engine);

        omap_aes_dma_cleanup(dd);
err_irq:
        tasklet_kill(&dd->done_task);
err_pm_disable:
        pm_runtime_disable(dev);
err_res:
        dd = NULL;
err_data:
        dev_err(dev, "initialization failed.\n");
        return err;
}
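
/* Tear everything down in roughly the reverse order of probe. */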
static int omap_aes_remove(struct platform_device *pdev)
{
        struct omap_aes_dev *dd = platform_get_drvdata(pdev);
        struct aead_alg *aalg;
        int i, j;

        if (!dd)
                return -ENODEV;

        spin_lock(&list_lock);
        list_del(&dd->list);
        spin_unlock(&list_lock);

        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
                        crypto_unregister_skcipher(
                                        &dd->pdata->algs_info[i].algs_list[j]);

        /* aead_algs_info is NULL on OMAP2/3-class pdata; guard against it */
        if (dd->pdata->aead_algs_info)
                for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
                        aalg = &dd->pdata->aead_algs_info->algs_list[i];
                        crypto_unregister_aead(aalg);
                }

        crypto_engine_exit(dd->engine);

        tasklet_kill(&dd->done_task);
        omap_aes_dma_cleanup(dd);
        pm_runtime_disable(dd->dev);

        sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
        pm_runtime_put_sync(dev);
        return 0;
}

static int omap_aes_resume(struct device *dev)
{
        pm_runtime_get_sync(dev);
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);

static struct platform_driver omap_aes_driver = {
        .probe  = omap_aes_probe,
        .remove = omap_aes_remove,
        .driver = {
                .name           = "omap-aes",
                .pm             = &omap_aes_pm_ops,
                .of_match_table = omap_aes_of_match,
        },
};

module_platform_driver(omap_aes_driver);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");