// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit-field offsets and masks */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CTX_DEV(ctx)	(&(ctx)->sec->qm.pdev->dev)

static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_active_devs;

/* Cyclically pick an encrypt or decrypt queue to balance the TFM's load */
static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

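/*
 * Allocate a per-queue request ID from the IDR and publish the request
 * in the queue's req_list so the completion callback can find it.
 */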
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (req_id < 0) {
		dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;
	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (req_id < 0 || req_id >= QM_Q_DEPTH) {
		dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

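/*
 * Completion callback invoked by the QM layer for each finished BD:
 * look up the request by its tag, check the done/flag bits, then
 * unmap the buffers and run the algorithm-specific callback.
 */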
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_sqe *bd = resp;
	u16 done, flag;
	u8 type;
	struct sec_req *req;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (type == SEC_BD_TYPE2) {
		req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
		req->err_type = bd->type2.error_type;

		done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
		flag = (le16_to_cpu(bd->type2.done_flag) &
				   SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
		if (req->err_type || done != 0x1 || flag != 0x2)
			dev_err(SEC_CTX_DEV(req->ctx),
				"err_type[%d],done[%d],flag[%d]\n",
				req->err_type, done, flag);
	} else {
		pr_err("err bd type [%d]\n", type);
		return;
	}

	atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt);

	req->ctx->req_op->buf_unmap(req->ctx, req);

	req->ctx->req_op->callback(req->ctx, req);
}

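/*
 * Queue one BD (sqe) to the hardware. -EBUSY from the QM layer becomes
 * -ENOBUFS; on success the return value tells the caller whether the
 * request was accepted normally (-EINPROGRESS) or as fake busy (-EBUSY).
 */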
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	mutex_unlock(&qp_ctx->req_lock);
	atomic64_inc(&ctx->sec->debug.dfx.send_cnt);

	if (ret == -EBUSY)
		return -ENOBUFS;

	if (!ret) {
		if (atomic_read(&req->fake_busy))
			ret = -EBUSY;
		else
			ret = -EINPROGRESS;
	}

	return ret;
}

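/*
 * Set up one queue-pair context: create and start a QP, allocate the
 * request list, hardware SGL pools and algorithm resources for it.
 */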
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	atomic_set(&qp_ctx->pending_reqs, 0);
	idr_init(&qp_ctx->req_idr);

	qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
	if (!qp_ctx->req_list)
		goto err_destroy_idr;

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_free_req_list;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = ctx->req_op->resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	ctx->req_op->resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_free_req_list:
	kfree(qp_ctx->req_list);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	hisi_qm_release_qp(qp);

	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	hisi_qm_stop_qp(qp_ctx->qp);
	ctx->req_op->resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
	kfree(qp_ctx->req_list);
	hisi_qm_release_qp(qp_ctx->qp);
}

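/*
 * TFM init: bind the context to a SEC device near the current NUMA node,
 * create one QP context per configured queue and allocate the
 * DMA-coherent cipher key buffer.
 */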
static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx;
	struct sec_dev *sec;
	struct device *dev;
	struct hisi_qm *qm;
	int i, ret;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));

	sec = sec_find_device(cpu_to_node(smp_processor_id()));
	if (!sec) {
		pr_err("find no Hisilicon SEC device!\n");
		return -ENODEV;
	}
	ctx->sec = sec;
	qm = &sec->qm;
	dev = &qm->pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 0x1;

	/* Half of the queue depth is used as the fake-busy request limit per queue. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 0x1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx)
		return -ENOMEM;

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	c_ctx = &ctx->c_ctx;
	c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
	if (c_ctx->ivsize > SEC_IV_SIZE) {
		dev_err(dev, "get error iv size!\n");
		ret = -EINVAL;
		goto err_sec_release_qp_ctx;
	}
	c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key) {
		ret = -ENOMEM;
		goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
	return ret;
}

static void sec_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int i = 0;

	if (c_ctx->c_key) {
		dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
				  c_ctx->c_key, c_ctx->c_key_dma);
		c_ctx->c_key = NULL;
	}

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
}

static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

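/*
 * Common setkey path: validate XTS keys, record algorithm and mode,
 * translate the key length into the hardware key-size encoding and
 * copy the key into the DMA-coherent key buffer.
 */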
static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg  = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

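/* Generate one thin setkey wrapper per algorithm/mode combination */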
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

static int sec_skcipher_get_res(struct sec_ctx *ctx,
				struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_cipher_res *c_res = qp_ctx->alg_meta_data;
	struct sec_cipher_req *c_req = &req->c_req;
	int req_id = req->req_id;

	c_req->c_ivin = c_res[req_id].c_ivin;
	c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;

	return 0;
}

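/*
 * Allocate per-queue cipher resources: one DMA-coherent IV buffer per
 * queue slot, carved out of a single SEC_TOTAL_IV_SZ allocation.
 */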
static int sec_skcipher_resource_alloc(struct sec_ctx *ctx,
				       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_cipher_res *res;
	int i;

	res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					   &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin) {
		kfree(res);
		return -ENOMEM;
	}

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}
	qp_ctx->alg_meta_data = res;

	return 0;
}

static void sec_skcipher_resource_free(struct sec_ctx *ctx,
				      struct sec_qp_ctx *qp_ctx)
{
	struct sec_cipher_res *res = qp_ctx->alg_meta_data;
	struct device *dev = SEC_CTX_DEV(ctx);

	if (!res)
		return;

	dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma);
	kfree(res);
}

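/*
 * Map source and destination scatterlists to hardware SGLs from the
 * per-queue pools; in-place requests reuse the input mapping for output.
 */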
static int sec_skcipher_map(struct device *dev, struct sec_req *req,
			    struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;

	return sec_skcipher_map(SEC_CTX_DEV(ctx), req,
				c_req->sk_req->src, c_req->sk_req->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_cipher_req *c_req = &req->c_req;
	struct skcipher_request *sk_req = c_req->sk_req;

	if (sk_req->dst != sk_req->src)
		hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in);

	hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (ret)
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (ret)
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);

	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	c_req->c_len = sk_req->cryptlen;
	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

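/*
 * Fill a type-2 BD (sqe) for the cipher request: key/IV/data addresses,
 * mode, key size, direction, scene, SGL address types and the request
 * tag used to match the completion.
 */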
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 de = 0;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

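/*
 * For CBC, propagate the chaining IV: copy the last ciphertext block
 * into sk_req->iv, reading from dst after encryption or from src
 * (saved before the data is decrypted).
 */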
static void sec_update_iv(struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	size_t sz;

	if (req->c_req.encrypt)
		sgl = sk_req->dst;
	else
		sgl = sk_req->src;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
				iv_size, sk_req->cryptlen - iv_size);
	if (sz != iv_size)
		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	atomic_dec(&qp_ctx->pending_reqs);
	sec_free_req_id(req);

	/* Copy out the updated IV after CBC-mode encryption */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req);

	/* Fake-busy requests were reported as -EBUSY, so signal backlog progress first */
	if (atomic_cmpxchg(&req->fake_busy, 1, 0) == 1)
		sk_req->base.complete(&sk_req->base, -EINPROGRESS);

	sk_req->base.complete(&sk_req->base, req->err_type);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	atomic_dec(&qp_ctx->pending_reqs);
	sec_free_req_id(req);
	sec_put_queue_id(ctx, req);
}

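/*
 * Prepare a request for submission: pick a queue, allocate a request ID,
 * decide whether it counts as fake busy and fetch per-slot resources.
 */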
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int issue_id, ret;

	/* Pick a queue cyclically to balance the load */
	issue_id = sec_get_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[issue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (req->req_id < 0) {
		sec_put_queue_id(ctx, req);
		return req->req_id;
	}

	if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
		atomic_set(&req->fake_busy, 1);
	else
		atomic_set(&req->fake_busy, 0);

	ret = ctx->req_op->get_res(ctx, req);
	if (ret) {
		/* sec_request_uninit() already drops pending_reqs */
		sec_request_uninit(ctx, req);
		dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n");
	}

	return ret;
}

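/*
 * Main request path: init, map and fill the BD, then hand it to the
 * hardware; on failure, undo the mapping and restore the caller's IV.
 */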
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = sec_request_init(ctx, req);
	if (ret)
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (ret)
		goto err_uninit_req;

	/* For CBC decryption, save the last ciphertext block as the next IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req);

	ret = ctx->req_op->bd_send(ctx, req);
	if (ret != -EBUSY && ret != -EINPROGRESS) {
		dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the caller's original IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
		       ctx->c_ctx.ivsize);

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);

	return ret;
}

static struct sec_req_op sec_req_ops_tbl = {
	.get_res	= sec_skcipher_get_res,
	.resource_alloc	= sec_skcipher_resource_alloc,
	.resource_free	= sec_skcipher_resource_free,
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_req_ops_tbl;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_exit(tfm);
}

static int sec_skcipher_param_check(struct sec_ctx *ctx,
				    struct skcipher_request *sk_req)
{
	u8 c_alg = ctx->c_ctx.c_alg;
	struct device *dev = SEC_CTX_DEV(ctx);

	if (!sk_req->src || !sk_req->dst) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}

	if (c_alg == SEC_CALG_3DES) {
		if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}

	dev_err(dev, "skcipher algorithm error!\n");
	return -EINVAL;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	ret = sec_skcipher_param_check(ctx, sk_req);
	if (ret)
		return ret;

	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

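/* Template for declaring one skcipher_alg entry backed by the SEC hardware */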
#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_algs[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

int sec_register_to_crypto(void)
{
	int ret = 0;

	/* Register the algorithms only once, for the first active device */
	mutex_lock(&sec_algs_lock);
	if (++sec_active_devs == 1)
		ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	mutex_unlock(&sec_algs_lock);

	return ret;
}

void sec_unregister_from_crypto(void)
{
	mutex_lock(&sec_algs_lock);
	if (--sec_active_devs == 0)
		crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	mutex_unlock(&sec_algs_lock);
}