xref: /openbmc/linux/drivers/crypto/qce/core.c (revision ab589bac)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5	0x05
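/*
 * The software queue holds at most one pending request; anything submitted
 * beyond that is backlogged (or rejected) by crypto_enqueue_request().
 */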
#define QCE_QUEUE_LENGTH	1

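/*
 * Algorithm families built on top of the engine. Each entry supplies
 * registration hooks and a handler for requests of its crypto type.
 */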
static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

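/* Dispatch an async request to the ops entry matching its transform type. */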
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

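/*
 * Enqueue a new request (if any) and, when the engine is idle, pull the next
 * request off the queue and hand it to the matching algorithm handler. The
 * return value reflects the enqueue status of @req, not the dequeued request.
 */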
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

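	/* a backlogged request has moved into the queue; notify its submitter */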
	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

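/*
 * Completion tasklet: report the result of the current request to its
 * submitter and kick the queue to start the next one.
 */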
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

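/*
 * The two helpers below are installed as the device's async_req_enqueue and
 * async_req_done callbacks in qce_crypto_probe(), letting the algorithm
 * implementations submit work and signal completion.
 */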
static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

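/*
 * Read the hardware revision and reject anything other than a v5.x engine
 * with a non-zero minor version, then set the burst size and pipe pair.
 */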
static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

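/*
 * Probe: map the registers, set a 32-bit DMA mask, enable the core/iface/bus
 * clocks, request the DMA channels, verify the engine version and finally
 * register the crypto algorithms.
 */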
static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}

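/* Undo probe: stop the tasklet, then release algs, DMA channels and clocks. */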
static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");