// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC SQE (BD) bit-field offsets and masks */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

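/*
 * Pbuffer layout note (a worked example, not authoritative): assuming
 * SEC_IV_SIZE is 24 (see sec.h) and 4 KiB pages, one package below is
 * 512 + 24 + 2 * 64 = 664 bytes, so SEC_PBUF_NUM works out to 6 packages
 * per page and the requests left over from QM_Q_DEPTH fall into
 * SEC_PBUF_LEFT_SZ.
 */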
#define SEC_PBUF_SZ			512
#define SEC_PBUF_IV_OFFSET		SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET		(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
			SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
			SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
			SEC_PBUF_LEFT_SZ)

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1

/* Pick an en/decrypt queue cyclically to balance load across the TFM's queues */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

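/*
 * Allocate a per-queue request id. The id doubles as the BD "tag"
 * written in sec_skcipher_bd_fill() and is used by sec_req_cb() to map
 * a hardware response back to its software request.
 */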
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

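/*
 * For AEAD decryption: the MAC computed by hardware sits in out_mac;
 * compare it against the MAC carried at the tail of the source SGL.
 */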
static int sec_aead_verify(struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->aead_req.out_mac;
	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
	struct scatterlist *sgl = aead_req->src;
	size_t sz;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
				aead_req->cryptlen + aead_req->assoclen -
				authsize);
	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
		dev_err(req->ctx->dev, "aead verify failure!\n");
		return -EBADMSG;
	}

	return 0;
}

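/*
 * Per-BD completion callback invoked by the QM layer: validate the BD,
 * look up the originating request via its tag, check the done/flag
 * bits, then unmap buffers and complete the request.
 */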
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	struct sec_sqe *bd = resp;
	struct sec_ctx *ctx;
	struct sec_req *req;
	u16 done, flag;
	int err = 0;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (unlikely(type != SEC_BD_TYPE2)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}
	req->err_type = bd->type2.error_type;
	ctx = req->ctx;
	done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	flag = (le16_to_cpu(bd->type2.done_flag) &
		SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	if (unlikely(req->err_type || done != SEC_SQE_DONE ||
	    (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
	    (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
		dev_err_ratelimited(ctx->dev,
			"err_type[%d],done[%d],flag[%d]\n",
			req->err_type, done, flag);
		err = -EIO;
		atomic64_inc(&dfx->done_flag_cnt);
	}

	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
		err = sec_aead_verify(req);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

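/*
 * Send one BD to hardware. Returns -EINPROGRESS on success; -EBUSY when
 * the queue is past the fake-request limit (the request is backlogged
 * only if MAY_BACKLOG is set); -ENOBUFS when the send itself fails on a
 * full queue.
 */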
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		mutex_unlock(&qp_ctx->req_lock);
		return -EBUSY;
	}
	mutex_unlock(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/*
 * Allocate DMA memory resources: res[0] owns one coherent buffer covering
 * the whole queue depth; res[1..] are carved out of it at fixed offsets.
 */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pre-mapped pbuffer is used for small
 * packets (< 512 bytes) instead of per-request IOMMU translation.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				&res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * Each SEC_PBUF_PKG contains the data pbuf, IV and out_mac,
	 * laid out as <SEC_PBUF|SEC_IV|SEC_MAC>.
	 * Every page holds SEC_PBUF_NUM (six) packages; the sec_qp_ctx
	 * needs QM_Q_DEPTH of them, so SEC_PBUF_PAGE_NUM full pages
	 * (plus the leftover) make up SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = ctx->dev;
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Cannot create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of the queue depth is used as the fake-request limit. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("invalid skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;

err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg  = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}
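
/*
 * Each GEN_SEC_SETKEY_FUNC() use below expands to a thin wrapper, e.g.
 * sec_setkey_aes_ecb(), binding one (algorithm, mode) pair to
 * sec_skcipher_setkey() for use as a tfm's .setkey hook.
 */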

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

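/*
 * For pbuf-eligible requests, copy the scattered source into the
 * per-request pbuf so the hardware sees one contiguous, pre-mapped
 * DMA buffer instead of an SGL.
 */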
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}

	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;

	if (!c_req->c_in_dma) {
		dev_err(dev, "fail to set pbuffer address!\n");
		return -ENOMEM;
	}

	c_req->c_out_dma = c_req->c_in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
			struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(dev, "copy pbuf data to dst error!\n");
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	int ret;

	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

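/*
 * Set the HMAC key. Per the standard HMAC construction, a key longer
 * than the hash block size is first digested down to digestsize bytes.
 */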
static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return -EINVAL;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	if (req->use_pbuf)
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
	else
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	if (req->use_pbuf)
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	else
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

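/*
 * For CBC, the next IV is the last ciphertext block: copy the final
 * iv_size bytes of ciphertext (dst when encrypting, src when
 * decrypting) into the request's IV buffer.
 */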
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
				cryptlen - iv_size);
	if (unlikely(sz != iv_size))
		dev_err(req->ctx->dev, "copy output iv error!\n");
}

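/*
 * Pop one backlogged request once the queue has drained below the
 * fake-request limit; the caller completes it with -EINPROGRESS.
 */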
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
				struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	mutex_lock(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	mutex_unlock(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* Update the output IV on successful CBC-mode encryption */
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
						-EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}

static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}

static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
			       struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill error!\n");
		return ret;
	}

	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);

		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* For CBC decryption, grab the output IV before src is overwritten */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user's IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}

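/*
 * Operation sets plugged into the generic sec_process() flow:
 * buf_map -> do_transfer -> bd_fill -> bd_send, with callback run
 * from sec_req_cb() on completion.
 */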
static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_copy_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(ctx->dev, "invalid aead iv size!\n");
		return -EINVAL;
	}

	ctx->req_op = &sec_aead_req_ops;
	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}
	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
		req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	/* Support AES only */
	if (unlikely(c_alg != SEC_CALG_AES)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}
	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;

	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(dev, "aead crypto length error!\n");
		return -EINVAL;
	}

	return 0;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
			sec_aead_ctx_exit, blksize, ivsize, authsize)

static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};

int sec_register_to_crypto(struct hisi_qm *qm)
{
	int ret;

	/* To avoid repeated registration */
	ret = crypto_register_skciphers(sec_skciphers,
					ARRAY_SIZE(sec_skciphers));
	if (ret)
		return ret;

	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	if (ret)
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));
	return ret;
}
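
/*
 * Usage sketch (illustrative, not part of this driver): once registered,
 * these algorithms are reachable by name through the kernel crypto API,
 * with the "hisi_sec_" implementations selected when SEC_PRIORITY wins:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */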

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}