// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-cipher.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256-bit keys
 * in ECB mode.
 */

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sl3516-ce.h"

/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;
	struct scatterlist *in_sg;
	struct scatterlist *out_sg;
	struct scatterlist *sg;

	if (areq->cryptlen == 0 || areq->cryptlen % 16) {
		ce->fallback_mod16++;
		return true;
	}

	/*
	 * check if we have enough descriptors for TX
	 * Note: TX needs one control desc for each SG
	 */
	if (sg_nents(areq->src) > MAXDESC / 2) {
		ce->fallback_sg_count_tx++;
		return true;
	}
	/* check if we have enough descriptors for RX */
	if (sg_nents(areq->dst) > MAXDESC) {
		ce->fallback_sg_count_rx++;
		return true;
	}

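	/*
	 * Each source and destination SG entry must be a multiple of the
	 * AES block size (16 bytes) and 16-byte aligned, otherwise the
	 * request is punted to the software fallback.
	 */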
	sg = areq->src;
	while (sg) {
		if ((sg->length % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if ((sg_dma_len(sg) % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, 16)) {
			ce->fallback_align16++;
			return true;
		}
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if ((sg->length % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if ((sg_dma_len(sg) % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, 16)) {
			ce->fallback_align16++;
			return true;
		}
		sg = sg_next(sg);
	}

	/* source and destination need the same number of SGs, with the same lengths */
	in_sg = areq->src;
	out_sg = areq->dst;
	while (in_sg && out_sg) {
		if (in_sg->length != out_sg->length) {
			ce->fallback_not_same_len++;
			return true;
		}
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
	}
	if (in_sg || out_sg)
		return true;

	return false;
}

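/* sl3516_ce_cipher_fallback - handle a request with the software fallback tfm */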
static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sl3516_ce_alg_template *algt;
	int err;

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
	algt->stat_fb++;

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir == CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

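/*
 * sl3516_ce_cipher - map the request for DMA, describe it to the CE and
 * run it on the hardware
 */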
static int sl3516_ce_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sl3516_ce_alg_template *algt;
	struct scatterlist *sg;
	unsigned int todo, len;
	struct pkt_control_ecb *ecb;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int i;

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

	algt->stat_req++;

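	/*
	 * Map the scatterlists for DMA. When the request is in-place
	 * (src == dst), a single bidirectional mapping is shared for
	 * both directions.
	 */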
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAXDESC) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

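	/*
	 * Record the DMA address and length of each mapped source and
	 * destination SG entry in the request context; sl3516_ce_run_task()
	 * later uses these entries to program the engine descriptors.
	 * Zero-length entries are skipped.
	 */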
	len = areq->cryptlen;
	i = 0;
	sg = areq->src;
	while (i < nr_sgs && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgs_next;
		rctx->t_src[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_src[i].len = todo;
		dev_dbg(ce->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgs_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d/%u nr_sgs=%d\n", len, areq->cryptlen, nr_sgs);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->dst;
	while (i < nr_sgd && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgd_next;
		rctx->t_dst[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_dst[i].len = todo;
		dev_dbg(ce->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
		len -= todo;
		i++;

sgd_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

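	/*
	 * Build the packet control structure that tells the engine which
	 * operation to perform; only AES-ECB is currently supported.
	 */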
	switch (algt->mode) {
	case ECB_AES:
		rctx->pctrllen = sizeof(struct pkt_control_ecb);
		ecb = (struct pkt_control_ecb *)ce->pctrl;

		rctx->tqflag = TQ0_TYPE_CTRL;
		rctx->tqflag |= TQ1_CIPHER;
		ecb->control.op_mode = rctx->op_dir;
		ecb->control.cipher_algorithm = ECB_AES;
		ecb->cipher.header_len = 0;
		ecb->cipher.algorithm_len = areq->cryptlen;
		/* the key is stored as big-endian 32-bit words */
		cpu_to_be32_array((__be32 *)ecb->key, (u32 *)op->key, op->keylen / 4);
		rctx->h = &ecb->cipher;

		rctx->tqflag |= TQ4_KEY0;
		rctx->tqflag |= TQ5_KEY4;
		rctx->tqflag |= TQ6_KEY6;
		/* aesnk is the key length in 32-bit words */
		ecb->control.aesnk = op->keylen / 4;
		break;
	}

	rctx->nr_sgs = nr_sgs;
	rctx->nr_sgd = nr_sgd;
	err = sl3516_ce_run_task(ce, rctx, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
			     DMA_FROM_DEVICE);
	}

theend:

	return err;
}

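/*
 * sl3516_ce_handle_cipher_request - crypto engine callback that processes
 * one queued skcipher request and reports its completion
 */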
int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = sl3516_ce_cipher(breq);
	/* the request must be finalized with bottom halves disabled */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

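/*
 * sl3516_ce_skdecrypt - queue a decryption request on the crypto engine,
 * or handle it with the software fallback when the CE cannot process it
 */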
int sl3516_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;

	memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
	rctx->op_dir = CE_DECRYPTION;

	if (sl3516_ce_need_fallback(areq))
		return sl3516_ce_cipher_fallback(areq);

	engine = op->ce->engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

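/*
 * sl3516_ce_skencrypt - queue an encryption request on the crypto engine,
 * or handle it with the software fallback when the CE cannot process it
 */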
int sl3516_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;

	memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
	rctx->op_dir = CE_ENCRYPTION;

	if (sl3516_ce_need_fallback(areq))
		return sl3516_ce_cipher_fallback(areq);

	engine = op->ce->engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

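/*
 * sl3516_ce_cipher_init - allocate the software fallback tfm and take a
 * runtime PM reference on the device for the lifetime of the tfm
 */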
int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sl3516_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

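/*
 * sl3516_ce_cipher_exit - free the key and the fallback tfm, and drop the
 * runtime PM reference taken in sl3516_ce_cipher_init()
 */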
void sl3516_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

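/*
 * sl3516_ce_aes_setkey - validate the AES key length, keep a copy of the key
 * and pass it to the fallback tfm as well
 */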
int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	/* propagate the request flags and the new key to the fallback tfm */
	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}