xref: /openbmc/linux/drivers/crypto/qce/skcipher.c (revision dc6a81c3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static LIST_HEAD(skcipher_algs);

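/*
 * DMA completion callback for a finished skcipher request: stop the DMA
 * channels, unmap the scatterlists, release the destination sg table, read
 * back the updated IV/counter from the result dump buffer and report the
 * result to the qce core.
 */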
static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}

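/*
 * Prepare one skcipher request for the engine: build a destination sg table
 * with the result dump buffer appended as its last entry, DMA-map the source
 * and destination lists, set up the DMA transfers with qce_skcipher_done()
 * as the completion callback and finally start the crypto block via
 * qce_start().
 */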
static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid numbers of dst SG.\n");
		return rctx->dst_nents;
	}

	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, rctx->dst_nents - 1);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, 1);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (ret < 0)
		goto error_free;

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (ret < 0)
			goto error_unmap_dst;
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

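/*
 * AES setkey: the engine handles only 128- and 256-bit keys (per half for
 * XTS), so any other length is programmed into the software fallback cipher
 * instead.
 */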
static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		break;
	default:
		goto fallback;
	}

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
fallback:
	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}

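/* DES setkey: validate the key with the common helper, then cache it. */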
static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

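/* 3DES setkey: validate the key with the common helper, then cache it. */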
static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

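/*
 * Common encrypt/decrypt path: AES requests with a key length the engine
 * cannot handle (anything but 128/256 bit) run synchronously on the fallback
 * cipher; all other requests are queued to the qce core.
 */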
static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	if (IS_AES(rctx->flags) && keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_256) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
				crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}

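/* Clear the per-tfm context and reserve room for the request context. */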
static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
	return 0;
}

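/*
 * AES transforms also allocate a sync skcipher fallback, looked up by
 * algorithm name, for key sizes the hardware does not support.
 */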
static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	qce_skcipher_init(tfm);
	ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
						   0, CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
}

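/* Static description of one skcipher algorithm exposed by this driver. */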
struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

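/*
 * Algorithms registered by this driver: AES in ECB/CBC/CTR/XTS mode,
 * DES and 3DES in ECB/CBC mode.
 */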
static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= 1,
		.chunksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE * 2,
		.max_keysize	= AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};

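/*
 * Allocate a template for one algorithm definition, fill in the skcipher_alg
 * callbacks and parameters, register it with the crypto API and keep it on
 * the local list so it can be unregistered later.
 */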
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				       struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize		= def->blocksize;
	alg->chunksize			= def->chunksize;
	alg->ivsize			= def->ivsize;
	alg->min_keysize		= def->min_keysize;
	alg->max_keysize		= def->max_keysize;
	alg->setkey			= IS_3DES(def->flags) ? qce_des3_setkey :
					  IS_DES(def->flags) ? qce_des_setkey :
					  qce_skcipher_setkey;
	alg->encrypt			= qce_skcipher_encrypt;
	alg->decrypt			= qce_skcipher_decrypt;

	alg->base.cra_priority		= 300;
	alg->base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize		= sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask		= 0;
	alg->base.cra_module		= THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags    |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init		= qce_skcipher_init_fallback;
		alg->exit		= qce_skcipher_exit;
	} else {
		alg->init		= qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

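/* Unregister and free every algorithm previously added to skcipher_algs. */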
static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

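/*
 * Register all entries of skcipher_def[]; on failure unregister whatever was
 * already added.
 */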
static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}

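/*
 * Entry points used by the qce core to register the algorithms and to
 * handle queued requests.
 */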
const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};