/*
 * Cryptographic API.
 *
 * Support for Samsung S5PV210 HW acceleration.
 *
 * Copyright (C) 2011 NetUP Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#define _SBF(s, v)                      ((v) << (s))

/* Feed control registers */
#define SSS_REG_FCINTSTAT               0x0000
#define SSS_FCINTSTAT_BRDMAINT          BIT(3)
#define SSS_FCINTSTAT_BTDMAINT          BIT(2)
#define SSS_FCINTSTAT_HRDMAINT          BIT(1)
#define SSS_FCINTSTAT_PKDMAINT          BIT(0)

#define SSS_REG_FCINTENSET              0x0004
#define SSS_FCINTENSET_BRDMAINTENSET    BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET    BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET    BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET    BIT(0)

#define SSS_REG_FCINTENCLR              0x0008
#define SSS_FCINTENCLR_BRDMAINTENCLR    BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR    BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR    BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR    BIT(0)

#define SSS_REG_FCINTPEND               0x000C
#define SSS_FCINTPEND_BRDMAINTP         BIT(3)
#define SSS_FCINTPEND_BTDMAINTP         BIT(2)
#define SSS_FCINTPEND_HRDMAINTP         BIT(1)
#define SSS_FCINTPEND_PKDMAINTP         BIT(0)

#define SSS_REG_FCFIFOSTAT              0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL        BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP        BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL        BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP        BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL        BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP        BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL        BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP        BIT(0)

#define SSS_REG_FCFIFOCTRL              0x0014
#define SSS_FCFIFOCTRL_DESSEL           BIT(2)
#define SSS_HASHIN_INDEPENDENT          _SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT         _SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT        _SBF(0, 0x02)

#define SSS_REG_FCBRDMAS                0x0020
#define SSS_REG_FCBRDMAL                0x0024
#define SSS_REG_FCBRDMAC                0x0028
#define SSS_FCBRDMAC_BYTESWAP           BIT(1)
#define SSS_FCBRDMAC_FLUSH              BIT(0)

#define SSS_REG_FCBTDMAS                0x0030
#define SSS_REG_FCBTDMAL                0x0034
#define SSS_REG_FCBTDMAC                0x0038
#define SSS_FCBTDMAC_BYTESWAP           BIT(1)
#define SSS_FCBTDMAC_FLUSH              BIT(0)

#define SSS_REG_FCHRDMAS                0x0040
#define SSS_REG_FCHRDMAL                0x0044
#define SSS_REG_FCHRDMAC                0x0048
#define SSS_FCHRDMAC_BYTESWAP           BIT(1)
#define SSS_FCHRDMAC_FLUSH              BIT(0)

#define SSS_REG_FCPKDMAS                0x0050
#define SSS_REG_FCPKDMAL                0x0054
#define SSS_REG_FCPKDMAC                0x0058
#define SSS_FCPKDMAC_BYTESWAP           BIT(3)
#define SSS_FCPKDMAC_DESCEND            BIT(2)
#define SSS_FCPKDMAC_TRANSMIT           BIT(1)
#define SSS_FCPKDMAC_FLUSH              BIT(0)

#define SSS_REG_FCPKDMAO                0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL             0x00
#define SSS_AES_BYTESWAP_DI             BIT(11)
#define SSS_AES_BYTESWAP_DO             BIT(10)
#define SSS_AES_BYTESWAP_IV             BIT(9)
#define SSS_AES_BYTESWAP_CNT            BIT(8)
#define SSS_AES_BYTESWAP_KEY            BIT(7)
#define SSS_AES_KEY_CHANGE_MODE         BIT(6)
#define SSS_AES_KEY_SIZE_128            _SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192            _SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256            _SBF(4, 0x02)
#define SSS_AES_FIFO_MODE               BIT(3)
#define SSS_AES_CHAIN_MODE_ECB          _SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC          _SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR          _SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT            BIT(0)

#define SSS_REG_AES_STATUS              0x04
#define SSS_AES_BUSY                    BIT(2)
#define SSS_AES_INPUT_READY             BIT(1)
#define SSS_AES_OUTPUT_READY            BIT(0)

#define SSS_REG_AES_IN_DATA(s)          (0x10 + ((s) << 2))
#define SSS_REG_AES_OUT_DATA(s)         (0x20 + ((s) << 2))
#define SSS_REG_AES_IV_DATA(s)          (0x30 + ((s) << 2))
#define SSS_REG_AES_CNT_DATA(s)         (0x40 + ((s) << 2))
#define SSS_REG_AES_KEY_DATA(s)         (0x80 + ((s) << 2))

#define SSS_REG(dev, reg)               ((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg)              __raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val)        __raw_writel((val), SSS_REG(dev, reg))

#define SSS_AES_REG(dev, reg)           ((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val)    __raw_writel((val), \
                                                SSS_AES_REG(dev, reg))

/* HW engine modes */
#define FLAGS_AES_DECRYPT               BIT(0)
#define FLAGS_AES_MODE_MASK             _SBF(1, 0x03)
#define FLAGS_AES_CBC                   _SBF(1, 0x01)
#define FLAGS_AES_CTR                   _SBF(1, 0x02)

#define AES_KEY_LEN                     16
#define CRYPTO_QUEUE_LEN                1
/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from SSS module's base.
 *
 * Specifies platform specific configuration of SSS module.
 * Note: A separate structure is used for driver specific platform data
 * so that it can be extended in the future.
 */
struct samsung_aes_variant {
	unsigned int aes_offset;
};

struct s5p_aes_reqctx {
	unsigned long mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev *dev;

	uint8_t aes_key[AES_MAX_KEY_SIZE];
	uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
	int keylen;
};

/**
 * struct s5p_aes_dev - Crypto device state container
 * @dev:	Associated device
 * @clk:	Clock for accessing hardware
 * @ioaddr:	Mapped IO memory region
 * @aes_ioaddr:	Mapped IO memory of the AES block (ioaddr plus the
 *		per-variant offset)
 * @irq_fc:	Feed control interrupt line
 * @req:	Crypto request currently handled by the device
 * @ctx:	Configuration for currently handled crypto request
 * @sg_src:	Scatter list with source data for currently handled block
 *		in device. This is DMA-mapped into device.
 * @sg_dst:	Scatter list with destination data for currently handled block
 *		in device. This is DMA-mapped into device.
 * @sg_src_cpy:	In case of unaligned access, copied scatter list
 *		with source data.
 * @sg_dst_cpy:	In case of unaligned access, copied scatter list
 *		with destination data.
 * @tasklet:	New request scheduling job
 * @queue:	Crypto queue
 * @busy:	Indicates whether the device is currently handling a request
 *		and thus owns the request-related fields of this state (req,
 *		ctx, sg_src/dst and their copies). This essentially protects
 *		those fields against concurrent access.
 * @lock:	Lock for protecting both access to device hardware registers
 *		and fields related to current request (including the busy
 *		field).
 */
struct s5p_aes_dev {
	struct device *dev;
	struct clk *clk;
	void __iomem *ioaddr;
	void __iomem *aes_ioaddr;
	int irq_fc;

	struct ablkcipher_request *req;
	struct s5p_aes_ctx *ctx;
	struct scatterlist *sg_src;
	struct scatterlist *sg_dst;

	struct scatterlist *sg_src_cpy;
	struct scatterlist *sg_dst_cpy;

	struct tasklet_struct tasklet;
	struct crypto_queue queue;
	bool busy;
	spinlock_t lock;
};

static struct s5p_aes_dev *s5p_dev;

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset = 0x4000,
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset = 0x200,
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);

static inline struct samsung_aes_variant *
find_s5p_sss_version(struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
		const struct of_device_id *match;

		match = of_match_node(s5p_sss_dt_match,
					pdev->dev.of_node);
		return (struct samsung_aes_variant *)match->data;
	}
	return (struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}
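/*
 * Program the base address and length of the next receive/transmit DMA
 * block. Note that, as explained in s5p_aes_interrupt(), writing the
 * length register starts the transfer immediately.
 */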
static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{
	int len;

	if (!*sg)
		return;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	free_pages((unsigned long)sg_virt(*sg), get_order(len));

	kfree(*sg);
	*sg = NULL;
}

/* @out: 1 to copy from @buf into @sg, 0 to copy from @sg into @buf */
static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static void s5p_sg_done(struct s5p_aes_dev *dev)
{
	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->nbytes);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->nbytes, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
}

/* Calls the completion. Must not be called with dev->lock held. */
static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
{
	dev->req->base.complete(&dev->req->base, err);
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}
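/*
 * Allocate a contiguous bounce buffer (rounded up to a whole number of
 * AES blocks), copy the contents of @src into it and wrap it in a
 * single-entry scatterlist at *dst. Used when the caller's scatterlist
 * is not block-aligned; released again via s5p_free_sg_cpy().
 */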
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			   struct scatterlist **dst)
{
	void *pages;
	int len;

	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
	if (!*dst)
		return -ENOMEM;

	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	return 0;
}

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!sg->length) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_dst = sg;
	err = 0;

exit:
	return err;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	int err;

	if (!sg->length) {
		err = -EINVAL;
		goto exit;
	}

	err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
	if (!err) {
		err = -ENOMEM;
		goto exit;
	}

	dev->sg_src = sg;
	err = 0;

exit:
	return err;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *    have to be written to the device (by calling s5p_set_dma_outdata()).
 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *    have to be written to the device (by calling s5p_set_dma_indata()).
 */
static int s5p_aes_rx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (!ret)
			ret = 1;
	}

	return ret;
}

static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	int err_dma_tx = 0;
	int err_dma_rx = 0;
	bool tx_end = false;
	unsigned long flags;
	uint32_t status;
	int err;

	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Handle an rx or tx interrupt. If there is still data in the
	 * scatterlist (its end has not been reached), map the next entry;
	 * if that mapping fails, complete the request with the error via
	 * s5p_aes_complete().
	 *
	 * Once there is no more data in the tx scatterlist, call
	 * s5p_aes_complete() and schedule the tasklet to pick up a new
	 * request.
	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		err_dma_rx = s5p_aes_rx(dev);

	if (status & SSS_FCINTSTAT_BTDMAINT) {
		if (sg_is_last(dev->sg_dst))
			tx_end = true;
		err_dma_tx = s5p_aes_tx(dev);
	}

	SSS_WRITE(dev, FCINTPEND, status);

	if (err_dma_rx < 0) {
		err = err_dma_rx;
		goto error;
	}
	if (err_dma_tx < 0) {
		err = err_dma_tx;
		goto error;
	}

	if (tx_end) {
		s5p_sg_done(dev);

		spin_unlock_irqrestore(&dev->lock, flags);

		s5p_aes_complete(dev, 0);
		/* Device is still busy */
		tasklet_schedule(&dev->tasklet);
	} else {
		/*
		 * Writing the length of a DMA block (either receiving or
		 * transmitting) will start the operation immediately, so
		 * this should be done at the end (even after clearing the
		 * pending interrupts, so that the interrupt is not missed).
		 */
		if (err_dma_tx == 1)
			s5p_set_dma_outdata(dev, dev->sg_dst);
		if (err_dma_rx == 1)
			s5p_set_dma_indata(dev, dev->sg_src);

		spin_unlock_irqrestore(&dev->lock, flags);
	}

	return IRQ_HANDLED;

error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(dev, err);

	return IRQ_HANDLED;
}
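/*
 * Load the IV and key into the AES block. The key is written so that it
 * ends at the top of the 8-word key register file: a 256-bit key fills
 * words 0..7, a 192-bit key words 2..7 and a 128-bit key words 4..7.
 */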
static void s5p_set_aes(struct s5p_aes_dev *dev,
			uint8_t *key, uint8_t *iv, unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
			    AES_BLOCK_SIZE);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}

static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}

static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}

static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct ablkcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}
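/*
 * Set up and start a transfer for the current request: build the AES
 * control word from the requested mode and key length, map the first
 * source and destination scatterlist entries (bouncing through a copy
 * when unaligned), load the key and IV, program the DMA engine and
 * enable the feed control interrupts. On a mapping error the request
 * is completed with the error code and the device is marked idle again.
 */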
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct ablkcipher_request *req = dev->req;
	uint32_t aes_control;
	unsigned long flags;
	int err;

	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
	else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
		aes_control |= SSS_AES_CHAIN_MODE_CTR;

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* Alternatively, byte swapping could be done on the DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);

	s5p_set_dma_indata(dev, dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(dev, err);
}

static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = ablkcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx = ablkcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct ablkcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		goto exit;
	}
	dev->busy = true;

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

exit:
	return err;
}

static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->dev, "request size is not a multiple of the AES block size\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
			  const uint8_t *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = s5p_dev;
	tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);

	return 0;
}
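/*
 * The transforms below are reachable through the generic crypto API by
 * algorithm name ("ecb(aes)"/"cbc(aes)") or driver name. As a rough
 * sketch (not part of this driver; error handling omitted, and the key,
 * scatterlists, length and IV are caller-provided placeholders), a
 * kernel user would drive them through the ablkcipher interface
 * roughly like this:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);	/- completes asynchronously -/
 */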
static struct crypto_alg algs[] = {
	{
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_ecb_encrypt,
			.decrypt	= s5p_aes_ecb_decrypt,
		}
	},
	{
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-s5p",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct s5p_aes_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= s5p_aes_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= s5p_aes_setkey,
			.encrypt	= s5p_aes_cbc_encrypt,
			.decrypt	= s5p_aes_cbc_decrypt,
		}
	},
};
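/*
 * Probe: map the SSS registers, look up the per-variant AES offset,
 * enable the "secss" clock, request the feed control interrupt and
 * register the AES algorithms. Only a single device instance is
 * supported, tracked through the global s5p_dev pointer.
 */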
static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err = -ENODEV;
	struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr))
		return PTR_ERR(pdata->ioaddr);

	variant = find_s5p_sss_version(pdev);

	pdata->clk = devm_clk_get(dev, "secss");
	if (IS_ERR(pdata->clk)) {
		dev_err(dev, "failed to find secss clock source\n");
		return -ENOENT;
	}

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling SSS clk failed, err %d\n", err);
		return err;
	}

	spin_lock_init(&pdata->lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_algs:
	dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);

	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return err;
}

static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&pdata->tasklet);

	clk_disable_unprepare(pdata->clk);

	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe	= s5p_aes_probe,
	.remove	= s5p_aes_remove,
	.driver	= {
		.name	= "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");