1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * sun8i-ss-core.c - hardware cryptographic offloader for 4 * Allwinner A80/A83T SoC 5 * 6 * Copyright (C) 2015-2019 Corentin Labbe <clabbe.montjoie@gmail.com> 7 * 8 * Core file which registers crypto algorithms supported by the SecuritySystem 9 * 10 * You could find a link for the datasheet in Documentation/arm/sunxi.rst 11 */ 12 #include <linux/clk.h> 13 #include <linux/crypto.h> 14 #include <linux/delay.h> 15 #include <linux/dma-mapping.h> 16 #include <linux/interrupt.h> 17 #include <linux/io.h> 18 #include <linux/irq.h> 19 #include <linux/module.h> 20 #include <linux/of.h> 21 #include <linux/of_device.h> 22 #include <linux/platform_device.h> 23 #include <linux/pm_runtime.h> 24 #include <linux/reset.h> 25 #include <crypto/internal/skcipher.h> 26 27 #include "sun8i-ss.h" 28 29 static const struct ss_variant ss_a80_variant = { 30 .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, 31 }, 32 .op_mode = { SS_OP_ECB, SS_OP_CBC, 33 }, 34 .ss_clks = { 35 { "bus", 0, 300 * 1000 * 1000 }, 36 { "mod", 0, 300 * 1000 * 1000 }, 37 } 38 }; 39 40 static const struct ss_variant ss_a83t_variant = { 41 .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, 42 }, 43 .op_mode = { SS_OP_ECB, SS_OP_CBC, 44 }, 45 .ss_clks = { 46 { "bus", 0, 300 * 1000 * 1000 }, 47 { "mod", 0, 300 * 1000 * 1000 }, 48 } 49 }; 50 51 /* 52 * sun8i_ss_get_engine_number() get the next channel slot 53 * This is a simple round-robin way of getting the next channel 54 */ 55 int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss) 56 { 57 return atomic_inc_return(&ss->flow) % MAXFLOW; 58 } 59 60 int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, 61 const char *name) 62 { 63 int flow = rctx->flow; 64 u32 v = 1; 65 int i; 66 67 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 68 ss->flows[flow].stat_req++; 69 #endif 70 71 /* choose between stream0/stream1 */ 72 if (flow) 73 v |= SS_FLOW1; 74 else 75 v |= SS_FLOW0; 76 77 v |= rctx->op_mode; 78 v |= rctx->method; 79 80 if (rctx->op_dir) 81 v |= SS_DECRYPTION; 82 83 switch (rctx->keylen) { 84 case 128 / 8: 85 v |= SS_AES_128BITS << 7; 86 break; 87 case 192 / 8: 88 v |= SS_AES_192BITS << 7; 89 break; 90 case 256 / 8: 91 v |= SS_AES_256BITS << 7; 92 break; 93 } 94 95 for (i = 0; i < MAX_SG; i++) { 96 if (!rctx->t_dst[i].addr) 97 break; 98 99 mutex_lock(&ss->mlock); 100 writel(rctx->p_key, ss->base + SS_KEY_ADR_REG); 101 102 if (i == 0) { 103 if (rctx->p_iv) 104 writel(rctx->p_iv, ss->base + SS_IV_ADR_REG); 105 } else { 106 if (rctx->biv) { 107 if (rctx->op_dir == SS_ENCRYPTION) 108 writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG); 109 else 110 writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG); 111 } 112 } 113 114 dev_dbg(ss->dev, 115 "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x opmode=%x opdir=%x srclen=%d\n", 116 i, flow, name, v, 117 rctx->t_src[i].len, rctx->t_dst[i].len, 118 rctx->method, rctx->op_mode, 119 rctx->op_dir, rctx->t_src[i].len); 120 121 writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG); 122 writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG); 123 writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG); 124 125 reinit_completion(&ss->flows[flow].complete); 126 ss->flows[flow].status = 0; 127 wmb(); 128 129 writel(v, ss->base + SS_CTL_REG); 130 mutex_unlock(&ss->mlock); 131 wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, 132 msecs_to_jiffies(2000)); 133 if (ss->flows[flow].status == 0) { 134 dev_err(ss->dev, "DMA timeout for %s\n", name); 135 return -EFAULT; 136 } 137 } 138 139 return 0; 140 } 141 142 static irqreturn_t ss_irq_handler(int irq, void *data) 143 { 144 struct sun8i_ss_dev *ss = (struct sun8i_ss_dev *)data; 145 int flow = 0; 146 u32 p; 147 148 p = readl(ss->base + SS_INT_STA_REG); 149 for (flow = 0; flow < MAXFLOW; flow++) { 150 if (p & (BIT(flow))) { 151 writel(BIT(flow), ss->base + SS_INT_STA_REG); 152 ss->flows[flow].status = 1; 153 complete(&ss->flows[flow].complete); 154 } 155 } 156 157 return IRQ_HANDLED; 158 } 159 160 static struct sun8i_ss_alg_template ss_algs[] = { 161 { 162 .type = CRYPTO_ALG_TYPE_SKCIPHER, 163 .ss_algo_id = SS_ID_CIPHER_AES, 164 .ss_blockmode = SS_ID_OP_CBC, 165 .alg.skcipher = { 166 .base = { 167 .cra_name = "cbc(aes)", 168 .cra_driver_name = "cbc-aes-sun8i-ss", 169 .cra_priority = 400, 170 .cra_blocksize = AES_BLOCK_SIZE, 171 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 172 CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | 173 CRYPTO_ALG_NEED_FALLBACK, 174 .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), 175 .cra_module = THIS_MODULE, 176 .cra_alignmask = 0xf, 177 .cra_init = sun8i_ss_cipher_init, 178 .cra_exit = sun8i_ss_cipher_exit, 179 }, 180 .min_keysize = AES_MIN_KEY_SIZE, 181 .max_keysize = AES_MAX_KEY_SIZE, 182 .ivsize = AES_BLOCK_SIZE, 183 .setkey = sun8i_ss_aes_setkey, 184 .encrypt = sun8i_ss_skencrypt, 185 .decrypt = sun8i_ss_skdecrypt, 186 } 187 }, 188 { 189 .type = CRYPTO_ALG_TYPE_SKCIPHER, 190 .ss_algo_id = SS_ID_CIPHER_AES, 191 .ss_blockmode = SS_ID_OP_ECB, 192 .alg.skcipher = { 193 .base = { 194 .cra_name = "ecb(aes)", 195 .cra_driver_name = "ecb-aes-sun8i-ss", 196 .cra_priority = 400, 197 .cra_blocksize = AES_BLOCK_SIZE, 198 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 199 CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | 200 CRYPTO_ALG_NEED_FALLBACK, 201 .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), 202 .cra_module = THIS_MODULE, 203 .cra_alignmask = 0xf, 204 .cra_init = sun8i_ss_cipher_init, 205 .cra_exit = sun8i_ss_cipher_exit, 206 }, 207 .min_keysize = AES_MIN_KEY_SIZE, 208 .max_keysize = AES_MAX_KEY_SIZE, 209 .setkey = sun8i_ss_aes_setkey, 210 .encrypt = sun8i_ss_skencrypt, 211 .decrypt = sun8i_ss_skdecrypt, 212 } 213 }, 214 { 215 .type = CRYPTO_ALG_TYPE_SKCIPHER, 216 .ss_algo_id = SS_ID_CIPHER_DES3, 217 .ss_blockmode = SS_ID_OP_CBC, 218 .alg.skcipher = { 219 .base = { 220 .cra_name = "cbc(des3_ede)", 221 .cra_driver_name = "cbc-des3-sun8i-ss", 222 .cra_priority = 400, 223 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 224 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 225 CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | 226 CRYPTO_ALG_NEED_FALLBACK, 227 .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), 228 .cra_module = THIS_MODULE, 229 .cra_alignmask = 0xf, 230 .cra_init = sun8i_ss_cipher_init, 231 .cra_exit = sun8i_ss_cipher_exit, 232 }, 233 .min_keysize = DES3_EDE_KEY_SIZE, 234 .max_keysize = DES3_EDE_KEY_SIZE, 235 .ivsize = DES3_EDE_BLOCK_SIZE, 236 .setkey = sun8i_ss_des3_setkey, 237 .encrypt = sun8i_ss_skencrypt, 238 .decrypt = sun8i_ss_skdecrypt, 239 } 240 }, 241 { 242 .type = CRYPTO_ALG_TYPE_SKCIPHER, 243 .ss_algo_id = SS_ID_CIPHER_DES3, 244 .ss_blockmode = SS_ID_OP_ECB, 245 .alg.skcipher = { 246 .base = { 247 .cra_name = "ecb(des3_ede)", 248 .cra_driver_name = "ecb-des3-sun8i-ss", 249 .cra_priority = 400, 250 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 251 .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | 252 CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | 253 CRYPTO_ALG_NEED_FALLBACK, 254 .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), 255 .cra_module = THIS_MODULE, 256 .cra_alignmask = 0xf, 257 .cra_init = sun8i_ss_cipher_init, 258 .cra_exit = sun8i_ss_cipher_exit, 259 }, 260 .min_keysize = DES3_EDE_KEY_SIZE, 261 .max_keysize = DES3_EDE_KEY_SIZE, 262 .setkey = sun8i_ss_des3_setkey, 263 .encrypt = sun8i_ss_skencrypt, 264 .decrypt = sun8i_ss_skdecrypt, 265 } 266 }, 267 }; 268 269 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 270 static int sun8i_ss_dbgfs_read(struct seq_file *seq, void *v) 271 { 272 struct sun8i_ss_dev *ss = seq->private; 273 int i; 274 275 for (i = 0; i < MAXFLOW; i++) 276 seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req); 277 278 for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { 279 if (!ss_algs[i].ss) 280 continue; 281 switch (ss_algs[i].type) { 282 case CRYPTO_ALG_TYPE_SKCIPHER: 283 seq_printf(seq, "%s %s %lu %lu\n", 284 ss_algs[i].alg.skcipher.base.cra_driver_name, 285 ss_algs[i].alg.skcipher.base.cra_name, 286 ss_algs[i].stat_req, ss_algs[i].stat_fb); 287 break; 288 } 289 } 290 return 0; 291 } 292 293 static int sun8i_ss_dbgfs_open(struct inode *inode, struct file *file) 294 { 295 return single_open(file, sun8i_ss_dbgfs_read, inode->i_private); 296 } 297 298 static const struct file_operations sun8i_ss_debugfs_fops = { 299 .owner = THIS_MODULE, 300 .open = sun8i_ss_dbgfs_open, 301 .read = seq_read, 302 .llseek = seq_lseek, 303 .release = single_release, 304 }; 305 #endif 306 307 static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i) 308 { 309 while (i >= 0) { 310 crypto_engine_exit(ss->flows[i].engine); 311 i--; 312 } 313 } 314 315 /* 316 * Allocate the flow list structure 317 */ 318 static int allocate_flows(struct sun8i_ss_dev *ss) 319 { 320 int i, err; 321 322 ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow), 323 GFP_KERNEL); 324 if (!ss->flows) 325 return -ENOMEM; 326 327 for (i = 0; i < MAXFLOW; i++) { 328 init_completion(&ss->flows[i].complete); 329 330 ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true); 331 if (!ss->flows[i].engine) { 332 dev_err(ss->dev, "Cannot allocate engine\n"); 333 i--; 334 err = -ENOMEM; 335 goto error_engine; 336 } 337 err = crypto_engine_start(ss->flows[i].engine); 338 if (err) { 339 dev_err(ss->dev, "Cannot start engine\n"); 340 goto error_engine; 341 } 342 } 343 return 0; 344 error_engine: 345 sun8i_ss_free_flows(ss, i); 346 return err; 347 } 348 349 /* 350 * Power management strategy: The device is suspended unless a TFM exists for 351 * one of the algorithms proposed by this driver. 352 */ 353 static int sun8i_ss_pm_suspend(struct device *dev) 354 { 355 struct sun8i_ss_dev *ss = dev_get_drvdata(dev); 356 int i; 357 358 reset_control_assert(ss->reset); 359 for (i = 0; i < SS_MAX_CLOCKS; i++) 360 clk_disable_unprepare(ss->ssclks[i]); 361 return 0; 362 } 363 364 static int sun8i_ss_pm_resume(struct device *dev) 365 { 366 struct sun8i_ss_dev *ss = dev_get_drvdata(dev); 367 int err, i; 368 369 for (i = 0; i < SS_MAX_CLOCKS; i++) { 370 if (!ss->variant->ss_clks[i].name) 371 continue; 372 err = clk_prepare_enable(ss->ssclks[i]); 373 if (err) { 374 dev_err(ss->dev, "Cannot prepare_enable %s\n", 375 ss->variant->ss_clks[i].name); 376 goto error; 377 } 378 } 379 err = reset_control_deassert(ss->reset); 380 if (err) { 381 dev_err(ss->dev, "Cannot deassert reset control\n"); 382 goto error; 383 } 384 /* enable interrupts for all flows */ 385 writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG); 386 387 return 0; 388 error: 389 sun8i_ss_pm_suspend(dev); 390 return err; 391 } 392 393 static const struct dev_pm_ops sun8i_ss_pm_ops = { 394 SET_RUNTIME_PM_OPS(sun8i_ss_pm_suspend, sun8i_ss_pm_resume, NULL) 395 }; 396 397 static int sun8i_ss_pm_init(struct sun8i_ss_dev *ss) 398 { 399 int err; 400 401 pm_runtime_use_autosuspend(ss->dev); 402 pm_runtime_set_autosuspend_delay(ss->dev, 2000); 403 404 err = pm_runtime_set_suspended(ss->dev); 405 if (err) 406 return err; 407 pm_runtime_enable(ss->dev); 408 return err; 409 } 410 411 static void sun8i_ss_pm_exit(struct sun8i_ss_dev *ss) 412 { 413 pm_runtime_disable(ss->dev); 414 } 415 416 static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss) 417 { 418 int ss_method, err, id, i; 419 420 for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { 421 ss_algs[i].ss = ss; 422 switch (ss_algs[i].type) { 423 case CRYPTO_ALG_TYPE_SKCIPHER: 424 id = ss_algs[i].ss_algo_id; 425 ss_method = ss->variant->alg_cipher[id]; 426 if (ss_method == SS_ID_NOTSUPP) { 427 dev_info(ss->dev, 428 "DEBUG: Algo of %s not supported\n", 429 ss_algs[i].alg.skcipher.base.cra_name); 430 ss_algs[i].ss = NULL; 431 break; 432 } 433 id = ss_algs[i].ss_blockmode; 434 ss_method = ss->variant->op_mode[id]; 435 if (ss_method == SS_ID_NOTSUPP) { 436 dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n", 437 ss_algs[i].alg.skcipher.base.cra_name); 438 ss_algs[i].ss = NULL; 439 break; 440 } 441 dev_info(ss->dev, "DEBUG: Register %s\n", 442 ss_algs[i].alg.skcipher.base.cra_name); 443 err = crypto_register_skcipher(&ss_algs[i].alg.skcipher); 444 if (err) { 445 dev_err(ss->dev, "Fail to register %s\n", 446 ss_algs[i].alg.skcipher.base.cra_name); 447 ss_algs[i].ss = NULL; 448 return err; 449 } 450 break; 451 default: 452 ss_algs[i].ss = NULL; 453 dev_err(ss->dev, "ERROR: tried to register an unknown algo\n"); 454 } 455 } 456 return 0; 457 } 458 459 static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss) 460 { 461 int i; 462 463 for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { 464 if (!ss_algs[i].ss) 465 continue; 466 switch (ss_algs[i].type) { 467 case CRYPTO_ALG_TYPE_SKCIPHER: 468 dev_info(ss->dev, "Unregister %d %s\n", i, 469 ss_algs[i].alg.skcipher.base.cra_name); 470 crypto_unregister_skcipher(&ss_algs[i].alg.skcipher); 471 break; 472 } 473 } 474 } 475 476 static int sun8i_ss_get_clks(struct sun8i_ss_dev *ss) 477 { 478 unsigned long cr; 479 int err, i; 480 481 for (i = 0; i < SS_MAX_CLOCKS; i++) { 482 if (!ss->variant->ss_clks[i].name) 483 continue; 484 ss->ssclks[i] = devm_clk_get(ss->dev, ss->variant->ss_clks[i].name); 485 if (IS_ERR(ss->ssclks[i])) { 486 err = PTR_ERR(ss->ssclks[i]); 487 dev_err(ss->dev, "Cannot get %s SS clock err=%d\n", 488 ss->variant->ss_clks[i].name, err); 489 return err; 490 } 491 cr = clk_get_rate(ss->ssclks[i]); 492 if (!cr) 493 return -EINVAL; 494 if (ss->variant->ss_clks[i].freq > 0 && 495 cr != ss->variant->ss_clks[i].freq) { 496 dev_info(ss->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n", 497 ss->variant->ss_clks[i].name, 498 ss->variant->ss_clks[i].freq, 499 ss->variant->ss_clks[i].freq / 1000000, 500 cr, cr / 1000000); 501 err = clk_set_rate(ss->ssclks[i], ss->variant->ss_clks[i].freq); 502 if (err) 503 dev_err(ss->dev, "Fail to set %s clk speed to %lu hz\n", 504 ss->variant->ss_clks[i].name, 505 ss->variant->ss_clks[i].freq); 506 } 507 if (ss->variant->ss_clks[i].max_freq > 0 && 508 cr > ss->variant->ss_clks[i].max_freq) 509 dev_warn(ss->dev, "Frequency for %s (%lu hz) is higher than datasheet's recommendation (%lu hz)", 510 ss->variant->ss_clks[i].name, cr, 511 ss->variant->ss_clks[i].max_freq); 512 } 513 return 0; 514 } 515 516 static int sun8i_ss_probe(struct platform_device *pdev) 517 { 518 struct sun8i_ss_dev *ss; 519 int err, irq; 520 u32 v; 521 522 ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL); 523 if (!ss) 524 return -ENOMEM; 525 526 ss->dev = &pdev->dev; 527 platform_set_drvdata(pdev, ss); 528 529 ss->variant = of_device_get_match_data(&pdev->dev); 530 if (!ss->variant) { 531 dev_err(&pdev->dev, "Missing Crypto Engine variant\n"); 532 return -EINVAL; 533 } 534 535 ss->base = devm_platform_ioremap_resource(pdev, 0); 536 if (IS_ERR(ss->base)) 537 return PTR_ERR(ss->base); 538 539 err = sun8i_ss_get_clks(ss); 540 if (err) 541 return err; 542 543 irq = platform_get_irq(pdev, 0); 544 if (irq < 0) 545 return irq; 546 547 ss->reset = devm_reset_control_get(&pdev->dev, NULL); 548 if (IS_ERR(ss->reset)) { 549 if (PTR_ERR(ss->reset) == -EPROBE_DEFER) 550 return PTR_ERR(ss->reset); 551 dev_err(&pdev->dev, "No reset control found\n"); 552 return PTR_ERR(ss->reset); 553 } 554 555 mutex_init(&ss->mlock); 556 557 err = allocate_flows(ss); 558 if (err) 559 return err; 560 561 err = sun8i_ss_pm_init(ss); 562 if (err) 563 goto error_pm; 564 565 err = devm_request_irq(&pdev->dev, irq, ss_irq_handler, 0, "sun8i-ss", ss); 566 if (err) { 567 dev_err(ss->dev, "Cannot request SecuritySystem IRQ (err=%d)\n", err); 568 goto error_irq; 569 } 570 571 err = sun8i_ss_register_algs(ss); 572 if (err) 573 goto error_alg; 574 575 err = pm_runtime_get_sync(ss->dev); 576 if (err < 0) 577 goto error_alg; 578 579 v = readl(ss->base + SS_CTL_REG); 580 v >>= SS_DIE_ID_SHIFT; 581 v &= SS_DIE_ID_MASK; 582 dev_info(&pdev->dev, "Security System Die ID %x\n", v); 583 584 pm_runtime_put_sync(ss->dev); 585 586 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 587 /* Ignore error of debugfs */ 588 ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL); 589 ss->dbgfs_stats = debugfs_create_file("stats", 0444, 590 ss->dbgfs_dir, ss, 591 &sun8i_ss_debugfs_fops); 592 #endif 593 594 return 0; 595 error_alg: 596 sun8i_ss_unregister_algs(ss); 597 error_irq: 598 sun8i_ss_pm_exit(ss); 599 error_pm: 600 sun8i_ss_free_flows(ss, MAXFLOW - 1); 601 return err; 602 } 603 604 static int sun8i_ss_remove(struct platform_device *pdev) 605 { 606 struct sun8i_ss_dev *ss = platform_get_drvdata(pdev); 607 608 sun8i_ss_unregister_algs(ss); 609 610 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG 611 debugfs_remove_recursive(ss->dbgfs_dir); 612 #endif 613 614 sun8i_ss_free_flows(ss, MAXFLOW - 1); 615 616 sun8i_ss_pm_exit(ss); 617 618 return 0; 619 } 620 621 static const struct of_device_id sun8i_ss_crypto_of_match_table[] = { 622 { .compatible = "allwinner,sun8i-a83t-crypto", 623 .data = &ss_a83t_variant }, 624 { .compatible = "allwinner,sun9i-a80-crypto", 625 .data = &ss_a80_variant }, 626 {} 627 }; 628 MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table); 629 630 static struct platform_driver sun8i_ss_driver = { 631 .probe = sun8i_ss_probe, 632 .remove = sun8i_ss_remove, 633 .driver = { 634 .name = "sun8i-ss", 635 .pm = &sun8i_ss_pm_ops, 636 .of_match_table = sun8i_ss_crypto_of_match_table, 637 }, 638 }; 639 640 module_platform_driver(sun8i_ss_driver); 641 642 MODULE_DESCRIPTION("Allwinner SecuritySystem cryptographic offloader"); 643 MODULE_LICENSE("GPL"); 644 MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>"); 645