// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");
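/*
 * Note: the 0644 mode above makes this threshold writable at runtime, e.g.
 * under /sys/module/<module name>/parameters/aes_sw_max_len.
 */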

static LIST_HEAD(skcipher_algs);

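/*
 * DMA completion callback: tear down the DMA mappings, read back the engine
 * status, copy the updated IV/counter out of the result buffer and complete
 * the asynchronous request.
 */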
static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(qce, error);
}

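/*
 * Prepare one skcipher request for the crypto engine: count and DMA-map the
 * source/destination scatterlists, append the result buffer to the
 * destination table, then program the DMA channels and start the engine.
 */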
static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(skcipher);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG entries\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG entries\n");
		return rctx->dst_nents;
	}

	/* Reserve one extra entry for the result buffer scatterlist */
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	/* dma_map_sg() returns 0 on failure, never a negative errno */
	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!ret) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!ret) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

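/*
 * AES setkey: keys the engine supports (128/256 bit) are copied into the
 * context; the software fallback is always keyed as well, since it handles
 * AES-192 and the XTS length corner cases routed to it at request time.
 */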
static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	unsigned int __keylen;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	/*
	 * AES XTS key1 = key2 not supported by crypto engine.
	 * Revisit to request a fallback cipher in this case.
	 */
	if (IS_XTS(flags)) {
		__keylen = keylen >> 1;
		if (!memcmp(key, key + __keylen, __keylen))
			return -ENOKEY;
	} else {
		__keylen = keylen;
	}

	switch (__keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	case AES_KEYSIZE_192:
		/* handled entirely by the fallback cipher */
		break;
	default:
		return -EINVAL;
	}

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}

static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	u32 _key[6];
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	/*
	 * The crypto engine does not support any two keys
	 * being the same for triple des algorithms.
	 * verify_skcipher_des3_key() does not check for all the
	 * below conditions. Return -ENOKEY in case any two keys
	 * are the same. Revisit to see if a fallback cipher
	 * is needed to handle this condition.
	 */
	memcpy(_key, key, DES3_EDE_KEY_SIZE);
	/* Each XORed word pair is zero iff the corresponding 64-bit keys match */
	if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
	    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
	    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
		return -ENOKEY;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

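/*
 * Common encrypt/decrypt path: validate the request, then either hand it to
 * the software fallback or enqueue it for the crypto engine.
 */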
static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	unsigned int blocksize = crypto_skcipher_blocksize(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/* The crypto engine (CE) does not handle 0 length messages */
	if (!req->cryptlen)
		return 0;

	/*
	 * ECB and CBC algorithms require message lengths to be
	 * multiples of the block size.
	 */
	if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
		if (!IS_ALIGNED(req->cryptlen, blocksize))
			return -EINVAL;

	/*
	 * Conditions for requesting a fallback cipher:
	 * - AES-192 (not supported by the CE)
	 * - AES-XTS requests with len <= aes_sw_max_len bytes (using the CE
	 *   for small requests is not recommended)
	 * - AES-XTS requests with len > QCE_SECTOR_SIZE that is not a
	 *   multiple of it (revisit whether this check is needed on all
	 *   versions of the CE)
	 */
	if (IS_AES(rctx->flags) &&
	    ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	    (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
	    (req->cryptlen > QCE_SECTOR_SIZE &&
	    req->cryptlen % QCE_SECTOR_SIZE))))) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}

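/*
 * The non-fallback variant sizes the request context to end right before the
 * trailing fallback_req member; the fallback variant extends it by the
 * fallback cipher's own request size.
 */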
static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}

static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

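/*
 * struct qce_skcipher_def - static description of one skcipher algorithm
 * @flags: QCE_ALG_* and QCE_MODE_* bits selecting algorithm and mode
 * @name: generic algorithm name (cra_name)
 * @drv_name: driver-specific algorithm name (cra_driver_name)
 * @blocksize: block size advertised to the crypto API
 * @chunksize: chunk size for stream modes such as CTR
 * @ivsize: IV size in bytes
 * @min_keysize: minimum key size in bytes
 * @max_keysize: maximum key size in bytes
 */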
struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= 1,
		.chunksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE * 2,
		.max_keysize	= AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};

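/*
 * Allocate a template for one algorithm definition, fill in the skcipher_alg
 * callbacks and constraints, and register it with the crypto API; AES
 * variants additionally request a software fallback.
 */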
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				       struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize		= def->blocksize;
	alg->chunksize			= def->chunksize;
	alg->ivsize			= def->ivsize;
	alg->min_keysize		= def->min_keysize;
	alg->max_keysize		= def->max_keysize;
	alg->setkey			= IS_3DES(def->flags) ? qce_des3_setkey :
					  IS_DES(def->flags) ? qce_des_setkey :
					  qce_skcipher_setkey;
	alg->encrypt			= qce_skcipher_encrypt;
	alg->decrypt			= qce_skcipher_decrypt;

	alg->base.cra_priority		= 300;
	alg->base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY |
					  CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize		= sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask		= 0;
	alg->base.cra_module		= THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags    |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init		= qce_skcipher_init_fallback;
		alg->exit		= qce_skcipher_exit;
	} else {
		alg->init		= qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		/* log before kfree(): alg points into tmpl */
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}

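/*
 * Entry points used by the qce core to register/unregister this algorithm
 * family and to dispatch queued skcipher requests.
 */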
const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};