// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for AES ciphers with 128/192/256-bit keys and
 * for 3DES in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi/README
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"

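/*
 * Check whether the hardware can handle the request or whether it must
 * be routed to the software fallback: the CE cannot process more than
 * MAX_SG scatterlist entries, zero-length or non-16-byte-multiple
 * requests, requests shorter than the IV, or scatterlist entries whose
 * offset or length is not word-aligned.
 */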
static bool sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;

	if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
		return true;

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
		return true;

	if (areq->cryptlen == 0 || areq->cryptlen % 16)
		return true;

	sg = areq->src;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}

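/*
 * Process the request with the software fallback skcipher, preserving
 * the direction recorded in the request context. The fallback counter
 * is only maintained when debugfs support is compiled in.
 */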
static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
#endif
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif

	skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
	skcipher_request_set_callback(subreq, areq->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(subreq);
	else
		err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);
	return err;
}

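/*
 * Program one CE task for the request: build the task descriptor,
 * DMA-map the key, the IV bounce buffer and the data scatterlists, run
 * the task on the chosen flow, then release every mapping in reverse
 * order on the way out.
 */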
static int sun8i_ce_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	dma_addr_t addr_iv = 0, addr_key = 0;
	void *backup_iv = NULL;
	u32 common, sym;
	int flow, i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif

	flow = rctx->flow;

	chan = &ce->chanlist[flow];

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need the length in bytes, older ones in words */
	if (ce->variant->has_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}

	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;

	chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
	chan->op_dir = rctx->op_dir;
	chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
	chan->keylen = op->keylen;

	addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = cpu_to_le32(addr_key);

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		chan->ivlen = ivsize;
		chan->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
		if (!chan->bounce_iv) {
			err = -ENOMEM;
			goto theend_key;
		}
		if (rctx->op_dir & CE_DECRYPTION) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				/* do not leak the bounce buffer on error */
				kfree(chan->bounce_iv);
				chan->bounce_iv = NULL;
				goto theend_key;
			}
			/* save the last ciphertext block, it is the next IV */
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
		memcpy(chan->bounce_iv, areq->iv, ivsize);
		addr_iv = dma_map_single(ce->dev, chan->bounce_iv, chan->ivlen,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, addr_iv)) {
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			/* keep the zero sentinel so cleanup skips the unmap */
			addr_iv = 0;
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = cpu_to_le32(addr_iv);
	}

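	/*
	 * Map the data scatterlists: one bidirectional mapping is enough
	 * for in-place requests, otherwise src and dst are mapped
	 * separately.
	 */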
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

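	/*
	 * Fill the task descriptor with the DMA address of each mapped
	 * fragment; the hardware expects fragment lengths in 32-bit words.
	 */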
	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, todo / 4, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, todo / 4, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	chan->timeout = areq->cryptlen;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		/* dst may have never been mapped if its dma_map_sg() failed */
		if (nr_sgd > 0)
			dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (addr_iv)
			dma_unmap_single(ce->dev, addr_iv, chan->ivlen,
					 DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, backup_iv, ivsize);
			kzfree(backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		kfree(chan->bounce_iv);
	}

theend_key:
	dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

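/*
 * crypto_engine do_one_request() callback: run the cipher and signal
 * completion of the request to the engine.
 */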
static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = sun8i_ce_cipher(breq);
	crypto_finalize_skcipher_request(engine, breq, err);

	return 0;
}

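/*
 * skcipher decrypt/encrypt entry points: requests the hardware cannot
 * handle are processed synchronously by the fallback, all others are
 * queued to the crypto engine of the selected flow.
 */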
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

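/*
 * Initialize the transform context: allocate the software fallback,
 * register the crypto_engine callbacks and take a PM runtime reference
 * so the device stays powered while the transform exists.
 */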
int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	op->ce = algt->ce;

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx);

	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));

	op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	/* pm_runtime_get_sync() bumps the usage count even on failure */
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_sync_skcipher(op->fallback_tfm);
	return err;
}

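/*
 * Tear down the transform context: scrub and free the key copy, free
 * the fallback and drop the PM runtime reference taken at init time.
 */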
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	if (op->key) {
		memzero_explicit(op->key, op->keylen);
		kfree(op->key);
	}
	crypto_free_sync_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

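/*
 * Set the AES key: check the length, keep a DMA-able copy of the key
 * for the hardware and propagate the key to the fallback transform.
 */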
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
	case 192 / 8:
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	if (op->key) {
		memzero_explicit(op->key, op->keylen);
		kfree(op->key);
	}
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

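/*
 * Set the 3DES key: let the crypto core validate it, then keep a
 * DMA-able copy for the hardware and propagate the key to the fallback
 * transform.
 */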
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	if (op->key) {
		memzero_explicit(op->key, op->keylen);
		kfree(op->key);
	}
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}