// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");
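
/*
 * The 0644 permissions expose this threshold at runtime; assuming the
 * driver is built as the qcrypto module, something like
 *
 *	echo 0 > /sys/module/qcrypto/parameters/aes_sw_max_len
 *
 * would route every AES request to the hardware.
 */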

static LIST_HEAD(skcipher_algs);

static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}

static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

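	/*
	 * In-place requests share a single mapping for src and dst, so it
	 * must be bidirectional; out-of-place requests map src towards the
	 * device and dst away from it.
	 */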
	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG.\n");
		/* dst_nents already holds a negative errno; do not negate it */
		return rctx->dst_nents;
	}

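	/* Reserve one extra entry for the result buffer appended below. */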
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

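	/*
	 * Append the result buffer after the payload in the destination
	 * table, so the engine delivers its status dump (including the
	 * updated IV/counter read back in qce_skcipher_done()) as part of
	 * the same transfer.
	 */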
	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	/* dma_map_sg() returns 0 on failure, never a negative errno */
	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!ret) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!ret) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	unsigned int __keylen;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	/*
	 * AES XTS key1 = key2 not supported by crypto engine.
	 * Revisit to request a fallback cipher in this case.
	 */
	if (IS_XTS(flags)) {
		__keylen = keylen >> 1;
		if (!memcmp(key, key + __keylen, __keylen))
			return -ENOKEY;
	} else {
		__keylen = keylen;
	}

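	/*
	 * The engine handles only 128- and 256-bit AES keys; a 192-bit key
	 * is not copied here, and such requests are served entirely by the
	 * fallback cipher (see qce_skcipher_crypt()).
	 */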
	switch (__keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	case AES_KEYSIZE_192:
		break;
	default:
		return -EINVAL;
	}

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}

static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	u32 _key[6];
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	/*
	 * The crypto engine does not support any two keys being the same
	 * for triple DES algorithms, and verify_skcipher_des3_key() does
	 * not check for all of the conditions below. Return -ENOKEY when
	 * any two keys match. Revisit to see if a fallback cipher is
	 * needed to handle this condition.
	 */
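	/*
	 * Each XOR pair below covers one 64-bit DES key; an expression is
	 * zero exactly when the two keys it compares are identical
	 * (K1 == K2, K2 == K3 or K1 == K3).
	 */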
	memcpy(_key, key, DES3_EDE_KEY_SIZE);
	if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
	    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
	    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
		return -ENOKEY;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/*
	 * Hand the request to the fallback cipher whenever the engine
	 * cannot handle it: an unsupported key size (AES-192), a request
	 * short enough that software is faster (<= aes_sw_max_len), or an
	 * AES-XTS length that is larger than QCE_SECTOR_SIZE but not a
	 * multiple of it, which makes the QCE hang.
	 */
	if (IS_AES(rctx->flags) &&
	    (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	      req->cryptlen <= aes_sw_max_len) ||
	     (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
	      req->cryptlen % QCE_SECTOR_SIZE))) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}

static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}

static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

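	/* Leave room for the fallback's own request at the tail of rctx. */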
	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= 1,
		.chunksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE * 2,
		.max_keysize	= AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};
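
/*
 * A minimal sketch of how these algorithms are reached (illustrative
 * only, not part of this driver): allocating the generic name picks the
 * highest-priority implementation, which is the qce one (priority 300)
 * once registration below has run. The key buffer and error handling
 * here are placeholders.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
 *	...
 *	crypto_free_skcipher(tfm);
 */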

static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				       struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize		= def->blocksize;
	alg->chunksize			= def->chunksize;
	alg->ivsize			= def->ivsize;
	alg->min_keysize		= def->min_keysize;
	alg->max_keysize		= def->max_keysize;
	alg->setkey			= IS_3DES(def->flags) ? qce_des3_setkey :
					  IS_DES(def->flags) ? qce_des_setkey :
					  qce_skcipher_setkey;
	alg->encrypt			= qce_skcipher_encrypt;
	alg->decrypt			= qce_skcipher_decrypt;

	alg->base.cra_priority		= 300;
	alg->base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY |
					  CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize		= sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask		= 0;
	alg->base.cra_module		= THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags	|= CRYPTO_ALG_NEED_FALLBACK;
		alg->init		= qce_skcipher_init_fallback;
		alg->exit		= qce_skcipher_exit;
	} else {
		alg->init		= qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		/* print before kfree(): alg points into tmpl */
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}
	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}

const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};