
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA).
 * The driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 */
#include <linux/dma-mapping.h>

/* Limit of the crypto queue before reaching the backlog */
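/*
 * mv_cesa_dequeue_req_locked(): with the engine lock held, grab the current
 * backlog entry and pop the next request off the engine's crypto queue.
 */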
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
        *backlog = crypto_get_backlog(&engine->queue);
        req = crypto_dequeue_request(&engine->queue);
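/*
 * mv_cesa_rearm_engine(): if the engine is idle, dequeue the next request
 * under the engine lock, notify any backlogged request with -EINPROGRESS and
 * start processing through the per-transform step() hook.
 */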
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
        spin_lock_bh(&engine->lock);
        if (!engine->req) {
                req = mv_cesa_dequeue_req_locked(engine, &backlog);
                engine->req = req;
        spin_unlock_bh(&engine->lock);

        crypto_request_complete(backlog, -EINPROGRESS);

        ctx = crypto_tfm_ctx(req->tfm);
        ctx->ops->step(req);
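/*
 * mv_cesa_std_process(): run the transform's process() hook on the current
 * request; once it succeeds, complete the request and put it on the engine's
 * completion list, while -EINPROGRESS triggers another step().
 */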
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
        req = engine->req;
        ctx = crypto_tfm_ctx(req->tfm);
        res = ctx->ops->process(req, status);

                ctx->ops->complete(req);
                mv_cesa_engine_enqueue_complete_request(engine, req);
        } else if (res == -EINPROGRESS) {
                ctx->ops->step(req);
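/*
 * mv_cesa_int_process(): dispatch to the TDMA path when a DMA chain is
 * pending, otherwise fall back to standard processing.
 */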
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
        if (engine->chain.first && engine->chain.last)
                return mv_cesa_tdma_process(engine, status);

        return mv_cesa_std_process(engine, status);
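/*
 * mv_cesa_complete_req(): helper used when a request finishes; it releases
 * per-request resources through the transform's cleanup() hook.
 */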
static inline void
mv_cesa_complete_req(...)
        ctx->ops->cleanup(req);
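/*
 * mv_cesa_int(): per-engine interrupt handler. Read and acknowledge the
 * pending status bits, let mv_cesa_int_process() handle them, detach the
 * request from the engine once it is no longer in progress, rearm the engine
 * with the next queued request and drain the completion list.
 */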
        struct mv_cesa_engine *engine = priv;

        mask = mv_cesa_get_int_mask(engine);
        status = readl(engine->regs + CESA_SA_INT_STATUS);

        writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
        writel(~status, engine->regs + CESA_SA_INT_STATUS);

        res = mv_cesa_int_process(engine, status & mask);

        spin_lock_bh(&engine->lock);
        req = engine->req;
        if (res != -EINPROGRESS)
                engine->req = NULL;
        spin_unlock_bh(&engine->lock);

        ctx = crypto_tfm_ctx(req->tfm);

        if (res && res != -EINPROGRESS)

        mv_cesa_rearm_engine(engine);

        req = mv_cesa_engine_dequeue_complete_request(engine);

        ctx = crypto_tfm_ctx(req->tfm);
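/*
 * mv_cesa_queue_req(): enqueue a request on its engine's crypto queue under
 * the engine lock, chain it to the pending TDMA descriptors when it was
 * accepted (-EINPROGRESS) or backlogged (-EBUSY), and rearm the engine so it
 * picks the request up.
 */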
        struct mv_cesa_engine *engine = creq->engine;

        spin_lock_bh(&engine->lock);
        ret = crypto_enqueue_request(&engine->queue, req);
            (ret == -EINPROGRESS || ret == -EBUSY))
                mv_cesa_tdma_chain(engine, creq);
        spin_unlock_bh(&engine->lock);

        if (ret != -EINPROGRESS)

        mv_cesa_rearm_engine(engine);

        return -EINPROGRESS;
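/*
 * mv_cesa_add_algs(): register every skcipher and ahash algorithm advertised
 * by the SoC capabilities; on failure, unwind the registrations already done.
 */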
        for (i = 0; i < cesa->caps->ncipher_algs; i++) {
                ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);

        for (i = 0; i < cesa->caps->nahash_algs; i++) {
                ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);

                crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
        i = cesa->caps->ncipher_algs;

                crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);
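/*
 * mv_cesa_remove_algs(): unregister all previously registered ahash and
 * skcipher algorithms.
 */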
        for (i = 0; i < cesa->caps->nahash_algs; i++)
                crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

        for (i = 0; i < cesa->caps->ncipher_algs; i++)
                crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
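/*
 * Device tree match table: each compatible string maps to the capability
 * structure describing that SoC's CESA flavour.
 */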
        { .compatible = "marvell,orion-crypto", .data = &orion_caps },
        { .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
        { .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
        { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
        { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
        { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
        { .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
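/*
 * mv_cesa_conf_mbus_windows(): program one TDMA address decoding window per
 * DRAM chip select so the engine's DMA can reach system memory.
 */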
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
        void __iomem *iobase = engine->regs;

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                writel(((cs->size - 1) & 0xffff0000) |
                       (cs->mbus_attr << 8) |
                       (dram->mbus_dram_target_id << 4) | 1,
                writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
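/*
 * mv_cesa_dev_dma_init(): when the SoC has a TDMA engine, create the managed
 * DMA pools used for TDMA descriptors, operation contexts, the hash cache and
 * padding buffers.
 */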
        struct device *dev = cesa->dev;

        if (!cesa->caps->has_tdma)

                return -ENOMEM;

        dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
        if (!dma->tdma_desc_pool)
                return -ENOMEM;

        dma->op_pool = dmam_pool_create("cesa_op", dev,
        if (!dma->op_pool)
                return -ENOMEM;

        dma->cache_pool = dmam_pool_create("cesa_cache", dev,
        if (!dma->cache_pool)
                return -ENOMEM;

        dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
        if (!dma->padding_pool)
                return -ENOMEM;

        cesa->dma = dma;
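/*
 * mv_cesa_get_sram(): obtain the per-engine SRAM either from a
 * "marvell,crypto-srams" gen_pool or, failing that, by ioremapping the SRAM
 * resource and DMA-mapping it for the engine.
 */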
        struct mv_cesa_engine *engine = &cesa->engines[idx];

        engine->pool = of_gen_pool_get(cesa->dev->of_node,
                                       "marvell,crypto-srams", idx);
        if (engine->pool) {
                engine->sram_pool = gen_pool_dma_alloc(engine->pool,
                                                       cesa->sram_size,
                                                       &engine->sram_dma);
                if (engine->sram_pool)

                engine->pool = NULL;
                return -ENOMEM;

        if (cesa->caps->nengines > 1) {

        if (!res || resource_size(res) < cesa->sram_size)
                return -EINVAL;

        engine->sram = devm_ioremap_resource(cesa->dev, res);
        if (IS_ERR(engine->sram))
                return PTR_ERR(engine->sram);

        engine->sram_dma = dma_map_resource(cesa->dev, res->start,
                                            cesa->sram_size,
        if (dma_mapping_error(cesa->dev, engine->sram_dma))
                return -ENOMEM;
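/*
 * mv_cesa_put_sram(): release the engine's SRAM, either back to the gen_pool
 * or by unmapping the DMA mapping set up at probe time.
 */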
        struct mv_cesa_engine *engine = &cesa->engines[idx];

        if (engine->pool)
                gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
                              cesa->sram_size);

        dma_unmap_resource(cesa->dev, engine->sram_dma,
                           cesa->sram_size, DMA_BIDIRECTIONAL, 0);
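/*
 * mv_cesa_probe(): match the SoC capabilities from the device tree, map the
 * register space, read the SRAM size, then bring up every engine: clocks,
 * MBus windows, interrupt and IRQ affinity, crypto queue and completion
 * list. The error path disables the clocks and drops the affinity hints.
 */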
        struct device *dev = &pdev->dev;

                dev_err(&pdev->dev, "Only one CESA device authorized\n");
                return -EEXIST;

        if (dev->of_node) {
                match = of_match_node(mv_cesa_of_match_table, dev->of_node);
                if (!match || !match->data)
                        return -ENOTSUPP;

                caps = match->data;

                return -ENOMEM;

        cesa->caps = caps;
        cesa->dev = dev;

        of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
        cesa->sram_size = sram_size;
        cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
        if (!cesa->engines)
                return -ENOMEM;

        spin_lock_init(&cesa->lock);

        cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
        if (IS_ERR(cesa->regs))
                return PTR_ERR(cesa->regs);

        for (i = 0; i < caps->nengines; i++) {
                struct mv_cesa_engine *engine = &cesa->engines[i];

                engine->id = i;
                spin_lock_init(&engine->lock);

                engine->irq = irq;

                engine->clk = devm_clk_get(dev, res_name);
                if (IS_ERR(engine->clk)) {
                        engine->clk = devm_clk_get(dev, NULL);
                        if (IS_ERR(engine->clk))
                                engine->clk = NULL;

                engine->zclk = devm_clk_get(dev, res_name);
                if (IS_ERR(engine->zclk))
                        engine->zclk = NULL;

                ret = clk_prepare_enable(engine->clk);
                ret = clk_prepare_enable(engine->zclk);

                engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

                if (dram && cesa->caps->has_tdma)
                        mv_cesa_conf_mbus_windows(engine, dram);

                writel(0, engine->regs + CESA_SA_INT_STATUS);
                       engine->regs + CESA_SA_CFG);
                writel(engine->sram_dma & CESA_SA_SRAM_MSK,
                       engine->regs + CESA_SA_DESC_P0);

                                       dev_name(&pdev->dev),
                                       engine);

                cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);

                crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
                atomic_set(&engine->load, 0);
                INIT_LIST_HEAD(&engine->complete_queue);

        for (i = 0; i < caps->nengines; i++) {
                clk_disable_unprepare(cesa->engines[i].zclk);
                clk_disable_unprepare(cesa->engines[i].clk);

                if (cesa->engines[i].irq > 0)
                        irq_set_affinity_hint(cesa->engines[i].irq, NULL);
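/*
 * mv_cesa_remove(): tear down every engine on driver removal: disable its
 * clocks and drop the IRQ affinity hint.
 */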
        for (i = 0; i < cesa->caps->nengines; i++) {
                clk_disable_unprepare(cesa->engines[i].zclk);
                clk_disable_unprepare(cesa->engines[i].clk);

                irq_set_affinity_hint(cesa->engines[i].irq, NULL);
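/* Platform driver registration and module metadata. */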
                .name = "marvell-cesa",

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");