// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-core.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoCs
 *
 * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com>
 *
 * Core file which registers the crypto algorithms supported by the
 * CryptoEngine.
 *
 * A link to the datasheet can be found in Documentation/arm/sunxi.rst
 */
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/internal/skcipher.h>

#include "sun8i-ce.h"

/*
 * The mod clock is set lower on the H3 than on the other SoCs because DMA
 * timeouts occur at higher rates.
 * When tuning the mod clock, loading the driver and passing the selftests is
 * not sufficient: also test with a real workload such as LUKS (mount a
 * volume and write to it).
 */
static const struct ce_variant ce_h3_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 50000000, 0 },
	}
};

static const struct ce_variant ce_h5_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
	}
};

static const struct ce_variant ce_h6_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.has_t_dlen_in_bytes = true,
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
		{ "ram", 0, 400000000 },
	}
};

static const struct ce_variant ce_a64_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
	}
};

static const struct ce_variant ce_r40_variant = {
	.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
	},
	.op_mode = { CE_OP_ECB, CE_OP_CBC
	},
	.ce_clks = {
		{ "bus", 0, 200000000 },
		{ "mod", 300000000, 0 },
	}
};

/*
 * sun8i_ce_get_engine_number() - get the next channel slot
 * This is a simple round-robin way of getting the next channel.
 */
int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce)
{
	return atomic_inc_return(&ce->flow) % MAXFLOW;
}
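
/*
 * sun8i_ce_run_task() - run one task descriptor on the given flow.
 * The flow interrupt is enabled and the task descriptor queued and started,
 * then the function waits for the IRQ handler to signal completion and
 * decodes any error reported in the CE_ESR register.
 */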
int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
{
	u32 v;
	int err = 0;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	ce->chanlist[flow].stat_req++;
#endif

	mutex_lock(&ce->mlock);

	v = readl(ce->base + CE_ICR);
	v |= 1 << flow;
	writel(v, ce->base + CE_ICR);

	reinit_completion(&ce->chanlist[flow].complete);
	writel(ce->chanlist[flow].t_phy, ce->base + CE_TDQ);

	ce->chanlist[flow].status = 0;
	/* Be sure all data is written before enabling the task */
	wmb();

	v = 1 | (ce->chanlist[flow].tl->t_common_ctl & 0x7F) << 8;
	writel(v, ce->base + CE_TLR);
	mutex_unlock(&ce->mlock);

	wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete,
						  msecs_to_jiffies(ce->chanlist[flow].timeout));

	if (ce->chanlist[flow].status == 0) {
		dev_err(ce->dev, "DMA timeout for %s\n", name);
		err = -EFAULT;
	}
	/* No need to lock for this read, the channel is locked so
	 * nothing could modify the error value for this channel
	 */
	v = readl(ce->base + CE_ESR);
	if (v) {
		v >>= (flow * 4);
		v &= 0xFF;
		if (v) {
			dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
			err = -EFAULT;
		}
		if (v & CE_ERR_ALGO_NOTSUP)
			dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
		if (v & CE_ERR_DATALEN)
			dev_err(ce->dev, "CE ERROR: data length error\n");
		if (v & CE_ERR_KEYSRAM)
			dev_err(ce->dev, "CE ERROR: keysram access error for AES\n");
		if (v & CE_ERR_ADDR_INVALID)
			dev_err(ce->dev, "CE ERROR: address invalid\n");
	}

	return err;
}
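
/*
 * ce_irq_handler() - Non-secure interrupt handler.
 * Acknowledge each flow flagged as done in CE_ISR and complete the
 * corresponding waiter in sun8i_ce_run_task().
 */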
static irqreturn_t ce_irq_handler(int irq, void *data)
{
	struct sun8i_ce_dev *ce = (struct sun8i_ce_dev *)data;
	int flow = 0;
	u32 p;

	p = readl(ce->base + CE_ISR);
	for (flow = 0; flow < MAXFLOW; flow++) {
		if (p & (BIT(flow))) {
			writel(BIT(flow), ce->base + CE_ISR);
			ce->chanlist[flow].status = 1;
			complete(&ce->chanlist[flow].complete);
		}
	}

	return IRQ_HANDLED;
}

static struct sun8i_ce_alg_template ce_algs[] = {
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.ce_algo_id = CE_ID_CIPHER_AES,
	.ce_blockmode = CE_ID_OP_CBC,
	.alg.skcipher = {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-sun8i-ce",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sun8i_ce_cipher_init,
			.cra_exit = sun8i_ce_cipher_exit,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = sun8i_ce_aes_setkey,
		.encrypt = sun8i_ce_skencrypt,
		.decrypt = sun8i_ce_skdecrypt,
	}
},
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.ce_algo_id = CE_ID_CIPHER_AES,
	.ce_blockmode = CE_ID_OP_ECB,
	.alg.skcipher = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-sun8i-ce",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sun8i_ce_cipher_init,
			.cra_exit = sun8i_ce_cipher_exit,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = sun8i_ce_aes_setkey,
		.encrypt = sun8i_ce_skencrypt,
		.decrypt = sun8i_ce_skdecrypt,
	}
},
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.ce_algo_id = CE_ID_CIPHER_DES3,
	.ce_blockmode = CE_ID_OP_CBC,
	.alg.skcipher = {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-des3-sun8i-ce",
			.cra_priority = 400,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sun8i_ce_cipher_init,
			.cra_exit = sun8i_ce_cipher_exit,
		},
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.setkey = sun8i_ce_des3_setkey,
		.encrypt = sun8i_ce_skencrypt,
		.decrypt = sun8i_ce_skdecrypt,
	}
},
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.ce_algo_id = CE_ID_CIPHER_DES3,
	.ce_blockmode = CE_ID_OP_ECB,
	.alg.skcipher = {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-des3-sun8i-ce",
			.cra_priority = 400,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sun8i_ce_cipher_init,
			.cra_exit = sun8i_ce_cipher_exit,
		},
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.setkey = sun8i_ce_des3_setkey,
		.encrypt = sun8i_ce_skencrypt,
		.decrypt = sun8i_ce_skdecrypt,
	}
},
};

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
{
	struct sun8i_ce_dev *ce = seq->private;
	int i;

	for (i = 0; i < MAXFLOW; i++)
		seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			seq_printf(seq, "%s %s %lu %lu\n",
				   ce_algs[i].alg.skcipher.base.cra_driver_name,
				   ce_algs[i].alg.skcipher.base.cra_name,
				   ce_algs[i].stat_req, ce_algs[i].stat_fb);
			break;
		}
	}
	return 0;
}

static int sun8i_ce_dbgfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sun8i_ce_dbgfs_read, inode->i_private);
}

static const struct file_operations sun8i_ce_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = sun8i_ce_dbgfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif

static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
{
	while (i >= 0) {
		crypto_engine_exit(ce->chanlist[i].engine);
		if (ce->chanlist[i].tl)
			dma_free_coherent(ce->dev, sizeof(struct ce_task),
					  ce->chanlist[i].tl,
					  ce->chanlist[i].t_phy);
		i--;
	}
}

/*
 * Allocate the channel list structure
 */
static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
{
	int i, err;

	ce->chanlist = devm_kcalloc(ce->dev, MAXFLOW,
				    sizeof(struct sun8i_ce_flow), GFP_KERNEL);
	if (!ce->chanlist)
		return -ENOMEM;

	for (i = 0; i < MAXFLOW; i++) {
		init_completion(&ce->chanlist[i].complete);

		ce->chanlist[i].engine = crypto_engine_alloc_init(ce->dev, true);
		if (!ce->chanlist[i].engine) {
			dev_err(ce->dev, "Cannot allocate engine\n");
			i--;
			err = -ENOMEM;
			goto error_engine;
		}
		err = crypto_engine_start(ce->chanlist[i].engine);
		if (err) {
			dev_err(ce->dev, "Cannot start engine\n");
			goto error_engine;
		}
		ce->chanlist[i].tl = dma_alloc_coherent(ce->dev,
							sizeof(struct ce_task),
							&ce->chanlist[i].t_phy,
							GFP_KERNEL);
		if (!ce->chanlist[i].tl) {
			dev_err(ce->dev, "Cannot get DMA memory for task %d\n",
				i);
			err = -ENOMEM;
			goto error_engine;
		}
	}
	return 0;
error_engine:
	sun8i_ce_free_chanlist(ce, i);
	return err;
}

/*
 * Power management strategy: The device is suspended unless a TFM exists for
 * one of the algorithms offered by this driver.
 */
static int sun8i_ce_pm_suspend(struct device *dev)
{
	struct sun8i_ce_dev *ce = dev_get_drvdata(dev);
	int i;

	reset_control_assert(ce->reset);
	for (i = 0; i < CE_MAX_CLOCKS; i++)
		clk_disable_unprepare(ce->ceclks[i]);
	return 0;
}

static int sun8i_ce_pm_resume(struct device *dev)
{
	struct sun8i_ce_dev *ce = dev_get_drvdata(dev);
	int err, i;

	for (i = 0; i < CE_MAX_CLOCKS; i++) {
		if (!ce->variant->ce_clks[i].name)
			continue;
		err = clk_prepare_enable(ce->ceclks[i]);
		if (err) {
			dev_err(ce->dev, "Cannot prepare_enable %s\n",
				ce->variant->ce_clks[i].name);
			goto error;
		}
	}
	err = reset_control_deassert(ce->reset);
	if (err) {
		dev_err(ce->dev, "Cannot deassert reset control\n");
		goto error;
	}
	return 0;
error:
	sun8i_ce_pm_suspend(dev);
	return err;
}

static const struct dev_pm_ops sun8i_ce_pm_ops = {
	SET_RUNTIME_PM_OPS(sun8i_ce_pm_suspend, sun8i_ce_pm_resume, NULL)
};

static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce)
{
	int err;

	pm_runtime_use_autosuspend(ce->dev);
	pm_runtime_set_autosuspend_delay(ce->dev, 2000);

	err = pm_runtime_set_suspended(ce->dev);
	if (err)
		return err;
	pm_runtime_enable(ce->dev);
	return err;
}

static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce)
{
	pm_runtime_disable(ce->dev);
}
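
/*
 * sun8i_ce_get_clks() - acquire the clocks listed by the variant.
 * When the variant specifies a target frequency, the clock is set to it;
 * a warning is printed if the current rate exceeds the datasheet maximum.
 */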
static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce)
{
	unsigned long cr;
	int err, i;

	for (i = 0; i < CE_MAX_CLOCKS; i++) {
		if (!ce->variant->ce_clks[i].name)
			continue;
		ce->ceclks[i] = devm_clk_get(ce->dev, ce->variant->ce_clks[i].name);
		if (IS_ERR(ce->ceclks[i])) {
			err = PTR_ERR(ce->ceclks[i]);
			dev_err(ce->dev, "Cannot get %s CE clock err=%d\n",
				ce->variant->ce_clks[i].name, err);
			return err;
		}
		cr = clk_get_rate(ce->ceclks[i]);
		if (!cr)
			return -EINVAL;
		if (ce->variant->ce_clks[i].freq > 0 &&
		    cr != ce->variant->ce_clks[i].freq) {
			dev_info(ce->dev, "Set %s clock to %lu (%lu MHz) from %lu (%lu MHz)\n",
				 ce->variant->ce_clks[i].name,
				 ce->variant->ce_clks[i].freq,
				 ce->variant->ce_clks[i].freq / 1000000,
				 cr, cr / 1000000);
			err = clk_set_rate(ce->ceclks[i], ce->variant->ce_clks[i].freq);
			if (err)
				dev_err(ce->dev, "Failed to set %s clock rate to %lu Hz\n",
					ce->variant->ce_clks[i].name,
					ce->variant->ce_clks[i].freq);
		}
		if (ce->variant->ce_clks[i].max_freq > 0 &&
		    cr > ce->variant->ce_clks[i].max_freq)
			dev_warn(ce->dev, "Frequency for %s (%lu Hz) is higher than datasheet's recommendation (%lu Hz)\n",
				 ce->variant->ce_clks[i].name, cr,
				 ce->variant->ce_clks[i].max_freq);
	}
	return 0;
}
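
/*
 * sun8i_ce_register_algs() - register with the crypto API each algorithm in
 * ce_algs[] whose cipher and block mode are supported by the current variant.
 * Unsupported entries get their ce pointer cleared so they are skipped on
 * unregistration.
 */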
static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
{
	int ce_method, err, id, i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		ce_algs[i].ce = ce;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			id = ce_algs[i].ce_algo_id;
			ce_method = ce->variant->alg_cipher[id];
			if (ce_method == CE_ID_NOTSUPP) {
				dev_dbg(ce->dev,
					"DEBUG: Algo of %s not supported\n",
					ce_algs[i].alg.skcipher.base.cra_name);
				ce_algs[i].ce = NULL;
				break;
			}
			id = ce_algs[i].ce_blockmode;
			ce_method = ce->variant->op_mode[id];
			if (ce_method == CE_ID_NOTSUPP) {
				dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n",
					ce_algs[i].alg.skcipher.base.cra_name);
				ce_algs[i].ce = NULL;
				break;
			}
			dev_info(ce->dev, "Register %s\n",
				 ce_algs[i].alg.skcipher.base.cra_name);
			err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
			if (err) {
				dev_err(ce->dev, "ERROR: Failed to register %s\n",
					ce_algs[i].alg.skcipher.base.cra_name);
				ce_algs[i].ce = NULL;
				return err;
			}
			break;
		default:
			ce_algs[i].ce = NULL;
			dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
		}
	}
	return 0;
}

static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "Unregister %d %s\n", i,
				 ce_algs[i].alg.skcipher.base.cra_name);
			crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
			break;
		}
	}
}
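
/*
 * sun8i_ce_probe() - map the device, set up clocks and reset, allocate the
 * flow channels, install the Non-secure IRQ handler and register the
 * algorithms. The engine is left runtime-suspended until it is needed.
 */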
static int sun8i_ce_probe(struct platform_device *pdev)
{
	struct sun8i_ce_dev *ce;
	int err, irq;
	u32 v;

	ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	ce->dev = &pdev->dev;
	platform_set_drvdata(pdev, ce);

	ce->variant = of_device_get_match_data(&pdev->dev);
	if (!ce->variant) {
		dev_err(&pdev->dev, "Missing Crypto Engine variant\n");
		return -EINVAL;
	}

	ce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ce->base))
		return PTR_ERR(ce->base);

	err = sun8i_ce_get_clks(ce);
	if (err)
		return err;

	/* Get Non Secure IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ce->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(ce->reset)) {
		if (PTR_ERR(ce->reset) == -EPROBE_DEFER)
			return PTR_ERR(ce->reset);
		dev_err(&pdev->dev, "No reset control found\n");
		return PTR_ERR(ce->reset);
	}

	mutex_init(&ce->mlock);

	err = sun8i_ce_allocate_chanlist(ce);
	if (err)
		return err;

	err = sun8i_ce_pm_init(ce);
	if (err)
		goto error_pm;

	err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0,
			       "sun8i-ce-ns", ce);
	if (err) {
		dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
		goto error_irq;
	}

	err = sun8i_ce_register_algs(ce);
	if (err)
		goto error_alg;

	err = pm_runtime_get_sync(ce->dev);
	if (err < 0)
		goto error_alg;

	v = readl(ce->base + CE_CTR);
	v >>= CE_DIE_ID_SHIFT;
	v &= CE_DIE_ID_MASK;
	dev_info(&pdev->dev, "CryptoEngine Die ID %x\n", v);

	pm_runtime_put_sync(ce->dev);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	/* Ignore error of debugfs */
	ce->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
	ce->dbgfs_stats = debugfs_create_file("stats", 0444,
					      ce->dbgfs_dir, ce,
					      &sun8i_ce_debugfs_fops);
#endif

	return 0;
error_alg:
	sun8i_ce_unregister_algs(ce);
error_irq:
	sun8i_ce_pm_exit(ce);
error_pm:
	sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
	return err;
}

static int sun8i_ce_remove(struct platform_device *pdev)
{
	struct sun8i_ce_dev *ce = platform_get_drvdata(pdev);

	sun8i_ce_unregister_algs(ce);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	debugfs_remove_recursive(ce->dbgfs_dir);
#endif

	sun8i_ce_free_chanlist(ce, MAXFLOW - 1);

	sun8i_ce_pm_exit(ce);
	return 0;
}

static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
	{ .compatible = "allwinner,sun8i-h3-crypto",
	  .data = &ce_h3_variant },
	{ .compatible = "allwinner,sun8i-r40-crypto",
	  .data = &ce_r40_variant },
	{ .compatible = "allwinner,sun50i-a64-crypto",
	  .data = &ce_a64_variant },
	{ .compatible = "allwinner,sun50i-h5-crypto",
	  .data = &ce_h5_variant },
	{ .compatible = "allwinner,sun50i-h6-crypto",
	  .data = &ce_h6_variant },
	{}
};
MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);

static struct platform_driver sun8i_ce_driver = {
	.probe = sun8i_ce_probe,
	.remove = sun8i_ce_remove,
	.driver = {
		.name = "sun8i-ce",
		.pm = &sun8i_ce_pm_ops,
		.of_match_table = sun8i_ce_crypto_of_match_table,
	},
};

module_platform_driver(sun8i_ce_driver);

MODULE_DESCRIPTION("Allwinner Crypto Engine cryptographic offloader");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");