// SPDX-License-Identifier: GPL-2.0
/*
 * amlogic-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
 *
 * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher, with 128, 192 and 256 bit
 * keysizes, in CBC and ECB mode.
 */

#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <crypto/internal/skcipher.h>
#include "amlogic-gxl.h"

/* Pick the next flow in a simple round-robin over the MAXFLOW channels */
static int get_engine_number(struct meson_dev *mc)
{
	return atomic_inc_return(&mc->flow) % MAXFLOW;
}

/*
 * The engine cannot handle zero-length requests, mismatched src/dst SG
 * geometry, entries that are not a multiple of the AES block size, or
 * buffers that are not 32-bit aligned; such requests use the fallback.
 */
static bool meson_cipher_need_fallback(struct skcipher_request *areq)
{
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;

	if (areq->cryptlen == 0)
		return true;

	if (sg_nents(src_sg) != sg_nents(dst_sg))
		return true;

	/* KEY/IV take up 3 descriptors */
	if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
		return true;

	while (src_sg && dst_sg) {
		if ((src_sg->length % 16) != 0)
			return true;
		if ((dst_sg->length % 16) != 0)
			return true;
		if (src_sg->length != dst_sg->length)
			return true;
		if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
			return true;
		if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
			return true;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

	return false;
}
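
/*
 * A brief illustration of the constraints above (a reading of the checks,
 * not authoritative hardware documentation): the engine consumes exactly
 * one descriptor per SG entry and cannot split or merge entries, so the
 * source and destination lists must mirror each other entry by entry, in
 * AES-block-sized (16 byte), word-aligned chunks, with 3 of the MAXDESC
 * descriptors reserved for the key and IV.
 */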

static int meson_cipher_do_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif
	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);

	if (rctx->op_dir == MESON_DECRYPT)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

static int meson_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct meson_dev *mc = op->mc;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;
	int flow = rctx->flow;
	unsigned int todo, eat, len;
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;
	struct meson_desc *desc;
	int nr_sgs, nr_sgd;
	int i, err = 0;
	unsigned int keyivlen, ivsize, offset, tloffset;
	dma_addr_t phykeyiv;
	void *backup_iv = NULL, *bkeyiv;
	u32 v;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);

	dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, crypto_skcipher_ivsize(tfm),
		op->keylen, flow);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	algt->stat_req++;
	mc->chanlist[flow].stat_req++;
#endif

	/*
	 * The hardware expects a list of meson_desc structures.
	 * The first two descriptors store the key, the third stores
	 * the IV, hence the 48-byte (3 * 16) bounce buffer below.
	 */
	bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
	if (!bkeyiv)
		return -ENOMEM;
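
	/*
	 * Sketch of the bkeyiv layout, as implied by the memcpy() offsets
	 * below (not taken from hardware documentation):
	 *
	 *   bytes  0..31 : AES key, zero padded up to 32 bytes
	 *   bytes 32..47 : IV, for the CBC case
	 */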

	memcpy(bkeyiv, op->key, op->keylen);
	keyivlen = op->keylen;

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		if (ivsize > areq->cryptlen) {
			dev_err(mc->dev, "invalid ivsize=%u vs len=%u\n",
				ivsize, areq->cryptlen);
			err = -EINVAL;
			goto theend;
		}
		memcpy(bkeyiv + 32, areq->iv, ivsize);
		keyivlen = 48;
		if (rctx->op_dir == MESON_DECRYPT) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				goto theend;
			}
			/*
			 * Save the last ciphertext block: it becomes the
			 * chaining IV, but an in-place decryption would
			 * overwrite it before we can read it back.
			 */
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
	}
	/* A 192-bit key still occupies two full 16-byte key descriptors */
	if (keyivlen == 24)
		keyivlen = 32;

	phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
				  DMA_TO_DEVICE);
	err = dma_mapping_error(mc->dev, phykeyiv);
	if (err) {
		dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
		goto theend;
	}

	tloffset = 0;
	eat = 0;
	i = 0;
	while (keyivlen > eat) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));
		todo = min(keyivlen - eat, 16u);
		desc->t_src = cpu_to_le32(phykeyiv + i * 16);
		desc->t_dst = cpu_to_le32(i * 16);
		/*
		 * Always program a full 16-byte block: the bounce buffer is
		 * zero-allocated and 48 bytes long, so this never over-reads.
		 */
		v = (MODE_KEY << 20) | DESC_OWN | 16;
		desc->t_status = cpu_to_le32(v);

		eat += todo;
		i++;
		tloffset++;
	}
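
	/*
	 * t_status bit usage, reconstructed from the expressions in this
	 * function (the flag values themselves live in amlogic-gxl.h):
	 *
	 *   low bits     : transfer length in bytes
	 *   bits 20..25  : mode (MODE_KEY, MODE_AES_128, ...)
	 *   bits 26..    : block mode (ECB/CBC)
	 *   DESC_OWN / DESC_ENCRYPTION / DESC_LAST : flags OR-ed on top
	 */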

	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (!nr_sgs) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (!nr_sgs || nr_sgs > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			/* do not leak a successful mapping */
			if (nr_sgs)
				dma_unmap_sg(mc->dev, areq->src,
					     sg_nents(areq->src),
					     DMA_TO_DEVICE);
			goto theend;
		}
		nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (!nr_sgd || nr_sgd > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
			err = -EINVAL;
			if (nr_sgd)
				dma_unmap_sg(mc->dev, areq->dst,
					     sg_nents(areq->dst),
					     DMA_FROM_DEVICE);
			dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src),
				     DMA_TO_DEVICE);
			goto theend;
		}
	}

	src_sg = areq->src;
	dst_sg = areq->dst;
	len = areq->cryptlen;
	while (src_sg) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));

		desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
		desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
		todo = min(len, sg_dma_len(src_sg));
		v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
		if (rctx->op_dir)
			v |= DESC_ENCRYPTION;
		len -= todo;

		/* Mark the final descriptor so the engine stops there */
		if (!sg_next(src_sg))
			v |= DESC_LAST;
		desc->t_status = cpu_to_le32(v);
		tloffset++;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

	reinit_completion(&mc->chanlist[flow].complete);
	mc->chanlist[flow].status = 0;
	/*
	 * Kick the flow: write the physical address of the descriptor table,
	 * OR-ing in bit 1 (presumably a start bit; the register layout is
	 * not publicly documented). The flow's interrupt handler sets
	 * ->status and signals the completion when done.
	 */
	writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
	wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
						  msecs_to_jiffies(500));
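
	/*
	 * If the wait is interrupted or hits the 500 ms timeout, ->status
	 * stays 0 and the check below turns the request into an error; the
	 * return value of wait_for_completion_interruptible_timeout() is not
	 * inspected, ->status alone decides the outcome.
	 */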
	if (mc->chanlist[flow].status == 0) {
		dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
		err = -EINVAL;
	}

	dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);

	if (areq->src == areq->dst) {
		dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
		dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		/*
		 * Propagate the chaining IV back to the caller: the saved
		 * last ciphertext block for decryption, the last block of
		 * the result for encryption.
		 */
		if (rctx->op_dir == MESON_DECRYPT) {
			memcpy(areq->iv, backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}
theend:
	kfree_sensitive(bkeyiv);
	kfree_sensitive(backup_iv);

	return err;
}

static int meson_handle_cipher_request(struct crypto_engine *engine,
				       void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = meson_cipher(breq);
	/* request completion callbacks expect to run with BHs disabled */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();

	/* errors are reported via the finalize call, not the return value */
	return 0;
}

int meson_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_DECRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int meson_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_ENCRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int meson_cipher_init(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct meson_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);

	memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	op->mc = algt->mc;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	/* reserve room for the fallback request behind our own context */
	sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	op->enginectx.op.do_one_request = meson_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	return 0;
}

void meson_cipher_exit(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
}

int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_dev *mc = op->mc;

	switch (keylen) {
	case 128 / 8:
		op->keymode = MODE_AES_128;
		break;
	case 192 / 8:
		op->keymode = MODE_AES_192;
		break;
	case 256 / 8:
		op->keymode = MODE_AES_256;
		break;
	default:
		dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}