1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <crypto/akcipher.h>
4 #include <crypto/curve25519.h>
5 #include <crypto/dh.h>
6 #include <crypto/ecc_curve.h>
7 #include <crypto/ecdh.h>
8 #include <crypto/internal/akcipher.h>
9 #include <crypto/internal/kpp.h>
10 #include <crypto/internal/rsa.h>
11 #include <crypto/kpp.h>
12 #include <crypto/scatterwalk.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/fips.h>
15 #include <linux/module.h>
16 #include <linux/time.h>
17 #include "hpre.h"
18 
19 struct hpre_ctx;
20 
21 #define HPRE_CRYPTO_ALG_PRI	1000
22 #define HPRE_ALIGN_SZ		64
23 #define HPRE_BITS_2_BYTES_SHIFT	3
24 #define HPRE_RSA_512BITS_KSZ	64
25 #define HPRE_RSA_1536BITS_KSZ	192
26 #define HPRE_CRT_PRMS		5
27 #define HPRE_CRT_Q		2
28 #define HPRE_CRT_P		3
29 #define HPRE_CRT_INV		4
30 #define HPRE_DH_G_FLAG		0x02
31 #define HPRE_TRY_SEND_TIMES	100
32 #define HPRE_INVLD_REQ_ID		(-1)
33 #define HPRE_DEV(ctx)		(&((ctx)->qp->qm->pdev->dev))
34 
35 #define HPRE_SQE_ALG_BITS	5
36 #define HPRE_SQE_DONE_SHIFT	30
37 #define HPRE_DH_MAX_P_SZ	512
38 
39 #define HPRE_DFX_SEC_TO_US	1000000
40 #define HPRE_DFX_US_TO_NS	1000
41 
/* size in bytes of the curve order n */
43 #define HPRE_ECC_NIST_P192_N_SIZE	24
44 #define HPRE_ECC_NIST_P256_N_SIZE	32
45 
46 /* size in bytes */
47 #define HPRE_ECC_HW256_KSZ_B	32
48 
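/*
 * Per-request completion callback. It is invoked from hpre_alg_cb() with the
 * context and the raw response SQE once the hardware has finished the task.
 */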
49 typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
50 
51 struct hpre_rsa_ctx {
52 	/* low address: e--->n */
53 	char *pubkey;
54 	dma_addr_t dma_pubkey;
55 
56 	/* low address: d--->n */
57 	char *prikey;
58 	dma_addr_t dma_prikey;
59 
60 	/* low address: dq->dp->q->p->qinv */
61 	char *crt_prikey;
62 	dma_addr_t dma_crt_prikey;
63 
64 	struct crypto_akcipher *soft_tfm;
65 };
66 
67 struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 * low address: xa--->p, refer to the HiSilicon HPRE user manual
	 */
76 	char *xa_p;
77 	dma_addr_t dma_xa_p;
78 
79 	char *g; /* m */
80 	dma_addr_t dma_g;
81 };
82 
83 struct hpre_ecdh_ctx {
84 	/* low address: p->a->k->b */
85 	unsigned char *p;
86 	dma_addr_t dma_p;
87 
88 	/* low address: x->y */
89 	unsigned char *g;
90 	dma_addr_t dma_g;
91 };
92 
93 struct hpre_curve25519_ctx {
94 	/* low address: p->a->k */
95 	unsigned char *p;
96 	dma_addr_t dma_p;
97 
98 	/* gx coordinate */
99 	unsigned char *g;
100 	dma_addr_t dma_g;
101 };
102 
103 struct hpre_ctx {
104 	struct hisi_qp *qp;
105 	struct hpre_asym_request **req_list;
106 	struct hpre *hpre;
107 	spinlock_t req_lock;
108 	unsigned int key_sz;
109 	bool crt_g2_mode;
110 	struct idr req_idr;
111 	union {
112 		struct hpre_rsa_ctx rsa;
113 		struct hpre_dh_ctx dh;
114 		struct hpre_ecdh_ctx ecdh;
115 		struct hpre_curve25519_ctx curve25519;
116 	};
117 	/* for ecc algorithms */
118 	unsigned int curve_id;
119 };
120 
121 struct hpre_asym_request {
122 	char *src;
123 	char *dst;
124 	struct hpre_sqe req;
125 	struct hpre_ctx *ctx;
126 	union {
127 		struct akcipher_request *rsa;
128 		struct kpp_request *dh;
129 		struct kpp_request *ecdh;
130 		struct kpp_request *curve25519;
131 	} areq;
132 	int err;
133 	int req_id;
134 	hpre_cb cb;
135 	struct timespec64 req_time;
136 };
137 
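/*
 * Request IDs index into ctx->req_list and are carried in the SQE 'tag'
 * field, so the completion path can find the originating request again.
 * The per-ctx IDR limits the number of in-flight requests to the queue depth.
 */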
138 static int hpre_alloc_req_id(struct hpre_ctx *ctx)
139 {
140 	unsigned long flags;
141 	int id;
142 
143 	spin_lock_irqsave(&ctx->req_lock, flags);
144 	id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
145 	spin_unlock_irqrestore(&ctx->req_lock, flags);
146 
147 	return id;
148 }
149 
150 static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
151 {
152 	unsigned long flags;
153 
154 	spin_lock_irqsave(&ctx->req_lock, flags);
155 	idr_remove(&ctx->req_idr, req_id);
156 	spin_unlock_irqrestore(&ctx->req_lock, flags);
157 }
158 
159 static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
160 {
161 	struct hpre_ctx *ctx;
162 	struct hpre_dfx *dfx;
163 	int id;
164 
165 	ctx = hpre_req->ctx;
166 	id = hpre_alloc_req_id(ctx);
167 	if (unlikely(id < 0))
168 		return -EINVAL;
169 
170 	ctx->req_list[id] = hpre_req;
171 	hpre_req->req_id = id;
172 
173 	dfx = ctx->hpre->debug.dfx;
174 	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
175 		ktime_get_ts64(&hpre_req->req_time);
176 
177 	return id;
178 }
179 
180 static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
181 {
182 	struct hpre_ctx *ctx = hpre_req->ctx;
183 	int id = hpre_req->req_id;
184 
185 	if (hpre_req->req_id >= 0) {
186 		hpre_req->req_id = HPRE_INVLD_REQ_ID;
187 		ctx->req_list[id] = NULL;
188 		hpre_free_req_id(ctx, id);
189 	}
190 }
191 
192 static struct hisi_qp *hpre_get_qp_and_start(u8 type)
193 {
194 	struct hisi_qp *qp;
195 	int ret;
196 
197 	qp = hpre_create_qp(type);
198 	if (!qp) {
199 		pr_err("Can not create hpre qp!\n");
200 		return ERR_PTR(-ENODEV);
201 	}
202 
203 	ret = hisi_qm_start_qp(qp, 0);
204 	if (ret < 0) {
205 		hisi_qm_free_qps(&qp, 1);
206 		pci_err(qp->qm->pdev, "Can not start qp!\n");
207 		return ERR_PTR(-EINVAL);
208 	}
209 
210 	return qp;
211 }
212 
213 static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
214 				  struct scatterlist *data, unsigned int len,
215 				  int is_src, dma_addr_t *tmp)
216 {
217 	struct hpre_ctx *ctx = hpre_req->ctx;
218 	struct device *dev = HPRE_DEV(ctx);
219 	enum dma_data_direction dma_dir;
220 
221 	if (is_src) {
222 		hpre_req->src = NULL;
223 		dma_dir = DMA_TO_DEVICE;
224 	} else {
225 		hpre_req->dst = NULL;
226 		dma_dir = DMA_FROM_DEVICE;
227 	}
228 	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
229 	if (unlikely(dma_mapping_error(dev, *tmp))) {
230 		dev_err(dev, "dma map data err!\n");
231 		return -ENOMEM;
232 	}
233 
234 	return 0;
235 }
236 
237 static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
238 				struct scatterlist *data, unsigned int len,
239 				int is_src, dma_addr_t *tmp)
240 {
241 	struct hpre_ctx *ctx = hpre_req->ctx;
242 	struct device *dev = HPRE_DEV(ctx);
243 	void *ptr;
244 	int shift;
245 
246 	shift = ctx->key_sz - len;
247 	if (unlikely(shift < 0))
248 		return -EINVAL;
249 
250 	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
251 	if (unlikely(!ptr))
252 		return -ENOMEM;
253 
254 	if (is_src) {
255 		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
256 		hpre_req->src = ptr;
257 	} else {
258 		hpre_req->dst = ptr;
259 	}
260 
261 	return 0;
262 }
263 
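/*
 * Two strategies are used to hand data to the hardware: if the scatterlist is
 * a single entry of exactly key_sz bytes (and is not DH source data), it is
 * DMA-mapped in place; otherwise the data is copied into a zero-padded
 * coherent buffer so that it ends up right-aligned to the key size.
 */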
264 static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
265 			     struct scatterlist *data, unsigned int len,
266 			     int is_src, int is_dh)
267 {
268 	struct hpre_sqe *msg = &hpre_req->req;
269 	struct hpre_ctx *ctx = hpre_req->ctx;
270 	dma_addr_t tmp = 0;
271 	int ret;
272 
	/* when the data is DH's source, it has to be copied into a formatted key-sized buffer */
274 	if ((sg_is_last(data) && len == ctx->key_sz) &&
275 	    ((is_dh && !is_src) || !is_dh))
276 		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
277 	else
278 		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
279 
280 	if (unlikely(ret))
281 		return ret;
282 
283 	if (is_src)
284 		msg->in = cpu_to_le64(tmp);
285 	else
286 		msg->out = cpu_to_le64(tmp);
287 
288 	return 0;
289 }
290 
291 static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
292 				 struct hpre_asym_request *req,
293 				 struct scatterlist *dst,
294 				 struct scatterlist *src)
295 {
296 	struct device *dev = HPRE_DEV(ctx);
297 	struct hpre_sqe *sqe = &req->req;
298 	dma_addr_t tmp;
299 
300 	tmp = le64_to_cpu(sqe->in);
301 
302 	if (src) {
303 		if (req->src)
304 			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
305 		else
306 			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
307 	}
308 
309 	tmp = le64_to_cpu(sqe->out);
310 
311 	if (req->dst) {
312 		if (dst)
313 			scatterwalk_map_and_copy(req->dst, dst, 0,
314 						 ctx->key_sz, 1);
315 		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
316 	} else {
317 		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
318 	}
319 }
320 
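/*
 * Parse a response SQE: look up the originating request by its tag, detach it
 * from the context, and translate the 'done' and error fields of dw0 into a
 * return value for the completion callback.
 */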
321 static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
322 				void **kreq)
323 {
324 	struct device *dev = HPRE_DEV(ctx);
325 	struct hpre_asym_request *req;
326 	unsigned int err, done, alg;
327 	int id;
328 
329 #define HPRE_NO_HW_ERR		0
330 #define HPRE_HW_TASK_DONE	3
#define HPRE_HW_ERR_MASK	0x7ff
#define HPRE_SQE_DONE_MASK	0x3
#define HPRE_ALG_TYPE_MASK	0x1f
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HPRE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HPRE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	alg = le32_to_cpu(sqe->dw0) & HPRE_ALG_TYPE_MASK;
349 	dev_err_ratelimited(dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
350 		alg, done, err);
351 
352 	return -EINVAL;
353 }
354 
355 static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
356 {
357 	struct hpre *hpre;
358 
359 	if (!ctx || !qp || qlen < 0)
360 		return -EINVAL;
361 
362 	spin_lock_init(&ctx->req_lock);
363 	ctx->qp = qp;
364 
365 	hpre = container_of(ctx->qp->qm, struct hpre, qm);
366 	ctx->hpre = hpre;
367 	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
368 	if (!ctx->req_list)
369 		return -ENOMEM;
370 	ctx->key_sz = 0;
371 	ctx->crt_g2_mode = false;
372 	idr_init(&ctx->req_idr);
373 
374 	return 0;
375 }
376 
377 static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
378 {
379 	if (is_clear_all) {
380 		idr_destroy(&ctx->req_idr);
381 		kfree(ctx->req_list);
382 		hisi_qm_free_qps(&ctx->qp, 1);
383 	}
384 
385 	ctx->crt_g2_mode = false;
386 	ctx->key_sz = 0;
387 }
388 
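/*
 * Compare a request's elapsed time, in microseconds, against the configured
 * overtime threshold; the result only feeds the driver's DFX counters.
 */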
389 static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
390 			       u64 overtime_thrhld)
391 {
392 	struct timespec64 reply_time;
393 	u64 time_use_us;
394 
395 	ktime_get_ts64(&reply_time);
396 	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
397 		HPRE_DFX_SEC_TO_US +
398 		(reply_time.tv_nsec - req->req_time.tv_nsec) /
399 		HPRE_DFX_US_TO_NS;
400 
401 	if (time_use_us <= overtime_thrhld)
402 		return false;
403 
404 	return true;
405 }
406 
407 static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
408 {
409 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
410 	struct hpre_asym_request *req;
411 	struct kpp_request *areq;
412 	u64 overtime_thrhld;
413 	int ret;
414 
415 	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
416 	areq = req->areq.dh;
417 	areq->dst_len = ctx->key_sz;
418 
419 	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
420 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
421 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
422 
423 	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
424 	kpp_request_complete(areq, ret);
425 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
426 }
427 
428 static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
429 {
430 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
431 	struct hpre_asym_request *req;
432 	struct akcipher_request *areq;
433 	u64 overtime_thrhld;
434 	int ret;
435 
436 	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
437 
438 	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
439 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
440 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
441 
442 	areq = req->areq.rsa;
443 	areq->dst_len = ctx->key_sz;
444 	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
445 	akcipher_request_complete(areq, ret);
446 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
447 }
448 
449 static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
450 {
451 	struct hpre_ctx *ctx = qp->qp_ctx;
452 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
453 	struct hpre_sqe *sqe = resp;
454 	struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
455 
456 	if (unlikely(!req)) {
457 		atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
458 		return;
459 	}
460 
461 	req->cb(ctx, resp);
462 }
463 
464 static void hpre_stop_qp_and_put(struct hisi_qp *qp)
465 {
466 	hisi_qm_stop_qp(qp);
467 	hisi_qm_free_qps(&qp, 1);
468 }
469 
470 static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
471 {
472 	struct hisi_qp *qp;
473 	int ret;
474 
475 	qp = hpre_get_qp_and_start(type);
476 	if (IS_ERR(qp))
477 		return PTR_ERR(qp);
478 
479 	qp->qp_ctx = ctx;
480 	qp->req_cb = hpre_alg_cb;
481 
482 	ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
483 	if (ret)
484 		hpre_stop_qp_and_put(qp);
485 
486 	return ret;
487 }
488 
489 static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
490 {
491 	struct hpre_asym_request *h_req;
492 	struct hpre_sqe *msg;
493 	int req_id;
494 	void *tmp;
495 
496 	if (is_rsa) {
497 		struct akcipher_request *akreq = req;
498 
499 		if (akreq->dst_len < ctx->key_sz) {
500 			akreq->dst_len = ctx->key_sz;
501 			return -EOVERFLOW;
502 		}
503 
504 		tmp = akcipher_request_ctx(akreq);
505 		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
506 		h_req->cb = hpre_rsa_cb;
507 		h_req->areq.rsa = akreq;
508 		msg = &h_req->req;
509 		memset(msg, 0, sizeof(*msg));
510 	} else {
511 		struct kpp_request *kreq = req;
512 
513 		if (kreq->dst_len < ctx->key_sz) {
514 			kreq->dst_len = ctx->key_sz;
515 			return -EOVERFLOW;
516 		}
517 
518 		tmp = kpp_request_ctx(kreq);
519 		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
520 		h_req->cb = hpre_dh_cb;
521 		h_req->areq.dh = kreq;
522 		msg = &h_req->req;
523 		memset(msg, 0, sizeof(*msg));
524 		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
525 	}
526 
527 	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
528 	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
529 	h_req->ctx = ctx;
530 
531 	req_id = hpre_add_req_to_ctx(h_req);
532 	if (req_id < 0)
533 		return -EBUSY;
534 
535 	msg->tag = cpu_to_le16((u16)req_id);
536 
537 	return 0;
538 }
539 
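/*
 * Push an SQE to the queue pair, retrying a bounded number of times while the
 * queue reports -EBUSY. Send attempts and busy retries are recorded in the
 * DFX statistics.
 */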
540 static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
541 {
542 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
543 	int ctr = 0;
544 	int ret;
545 
546 	do {
547 		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
548 		ret = hisi_qp_send(ctx->qp, msg);
549 		if (ret != -EBUSY)
550 			break;
551 		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
552 	} while (ctr++ < HPRE_TRY_SEND_TIMES);
553 
554 	if (likely(!ret))
555 		return ret;
556 
557 	if (ret != -EBUSY)
558 		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
559 
560 	return ret;
561 }
562 
563 static int hpre_dh_compute_value(struct kpp_request *req)
564 {
565 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
566 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
567 	void *tmp = kpp_request_ctx(req);
568 	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
569 	struct hpre_sqe *msg = &hpre_req->req;
570 	int ret;
571 
572 	ret = hpre_msg_request_set(ctx, req, false);
573 	if (unlikely(ret))
574 		return ret;
575 
576 	if (req->src) {
577 		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
578 		if (unlikely(ret))
579 			goto clear_all;
580 	} else {
581 		msg->in = cpu_to_le64(ctx->dh.dma_g);
582 	}
583 
584 	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
585 	if (unlikely(ret))
586 		goto clear_all;
587 
588 	if (ctx->crt_g2_mode && !req->src)
589 		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
590 	else
591 		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
592 
593 	/* success */
594 	ret = hpre_send(ctx, msg);
595 	if (likely(!ret))
596 		return -EINPROGRESS;
597 
598 clear_all:
599 	hpre_rm_req_from_ctx(hpre_req);
600 	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
601 
602 	return ret;
603 }
604 
605 static int hpre_is_dh_params_length_valid(unsigned int key_sz)
606 {
607 #define _HPRE_DH_GRP1		768
608 #define _HPRE_DH_GRP2		1024
609 #define _HPRE_DH_GRP5		1536
610 #define _HPRE_DH_GRP14		2048
611 #define _HPRE_DH_GRP15		3072
612 #define _HPRE_DH_GRP16		4096
613 	switch (key_sz) {
614 	case _HPRE_DH_GRP1:
615 	case _HPRE_DH_GRP2:
616 	case _HPRE_DH_GRP5:
617 	case _HPRE_DH_GRP14:
618 	case _HPRE_DH_GRP15:
619 	case _HPRE_DH_GRP16:
620 		return 0;
621 	}
622 
623 	return -EINVAL;
624 }
625 
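/*
 * The DH key material lives in one coherent buffer of twice the key size: the
 * private value xa occupies the low half (right-aligned when shorter than
 * key_sz) and the prime p occupies the high half. The generator g, when it is
 * not 2, is kept in a separate right-aligned buffer.
 */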
626 static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
627 {
628 	struct device *dev = HPRE_DEV(ctx);
629 	unsigned int sz;
630 
631 	if (params->p_size > HPRE_DH_MAX_P_SZ)
632 		return -EINVAL;
633 
634 	if (hpre_is_dh_params_length_valid(params->p_size <<
635 					   HPRE_BITS_2_BYTES_SHIFT))
636 		return -EINVAL;
637 
638 	sz = ctx->key_sz = params->p_size;
639 	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
640 					  &ctx->dh.dma_xa_p, GFP_KERNEL);
641 	if (!ctx->dh.xa_p)
642 		return -ENOMEM;
643 
644 	memcpy(ctx->dh.xa_p + sz, params->p, sz);
645 
646 	/* If g equals 2 don't copy it */
647 	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
648 		ctx->crt_g2_mode = true;
649 		return 0;
650 	}
651 
652 	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
653 	if (!ctx->dh.g) {
654 		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
655 				  ctx->dh.dma_xa_p);
656 		ctx->dh.xa_p = NULL;
657 		return -ENOMEM;
658 	}
659 
660 	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
661 
662 	return 0;
663 }
664 
665 static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
666 {
667 	struct device *dev = HPRE_DEV(ctx);
668 	unsigned int sz = ctx->key_sz;
669 
670 	if (is_clear_all)
671 		hisi_qm_stop_qp(ctx->qp);
672 
673 	if (ctx->dh.g) {
674 		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
675 		ctx->dh.g = NULL;
676 	}
677 
678 	if (ctx->dh.xa_p) {
679 		memzero_explicit(ctx->dh.xa_p, sz);
680 		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
681 				  ctx->dh.dma_xa_p);
682 		ctx->dh.xa_p = NULL;
683 	}
684 
685 	hpre_ctx_clear(ctx, is_clear_all);
686 }
687 
688 static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
689 			      unsigned int len)
690 {
691 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
692 	struct dh params;
693 	int ret;
694 
695 	if (crypto_dh_decode_key(buf, len, &params) < 0)
696 		return -EINVAL;
697 
698 	/* Free old secret if any */
699 	hpre_dh_clear_ctx(ctx, false);
700 
701 	ret = hpre_dh_set_params(ctx, &params);
702 	if (ret < 0)
703 		goto err_clear_ctx;
704 
705 	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
706 	       params.key_size);
707 
708 	return 0;
709 
710 err_clear_ctx:
711 	hpre_dh_clear_ctx(ctx, false);
712 	return ret;
713 }
714 
715 static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
716 {
717 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
718 
719 	return ctx->key_sz;
720 }
721 
722 static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
723 {
724 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
725 
726 	return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
727 }
728 
729 static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
730 {
731 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
732 
733 	hpre_dh_clear_ctx(ctx, true);
734 }
735 
736 static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
737 {
738 	while (!**ptr && *len) {
739 		(*ptr)++;
740 		(*len)--;
741 	}
742 }
743 
744 static bool hpre_rsa_key_size_is_support(unsigned int len)
745 {
746 	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
747 
748 #define _RSA_1024BITS_KEY_WDTH		1024
749 #define _RSA_2048BITS_KEY_WDTH		2048
750 #define _RSA_3072BITS_KEY_WDTH		3072
751 #define _RSA_4096BITS_KEY_WDTH		4096
752 
753 	switch (bits) {
754 	case _RSA_1024BITS_KEY_WDTH:
755 	case _RSA_2048BITS_KEY_WDTH:
756 	case _RSA_3072BITS_KEY_WDTH:
757 	case _RSA_4096BITS_KEY_WDTH:
758 		return true;
759 	default:
760 		return false;
761 	}
762 }
763 
764 static int hpre_rsa_enc(struct akcipher_request *req)
765 {
766 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
767 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
768 	void *tmp = akcipher_request_ctx(req);
769 	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
770 	struct hpre_sqe *msg = &hpre_req->req;
771 	int ret;
772 
773 	/* For 512 and 1536 bits key size, use soft tfm instead */
774 	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
775 	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
776 		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
777 		ret = crypto_akcipher_encrypt(req);
778 		akcipher_request_set_tfm(req, tfm);
779 		return ret;
780 	}
781 
782 	if (unlikely(!ctx->rsa.pubkey))
783 		return -EINVAL;
784 
785 	ret = hpre_msg_request_set(ctx, req, true);
786 	if (unlikely(ret))
787 		return ret;
788 
789 	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
790 	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
791 
792 	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
793 	if (unlikely(ret))
794 		goto clear_all;
795 
796 	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
797 	if (unlikely(ret))
798 		goto clear_all;
799 
800 	/* success */
801 	ret = hpre_send(ctx, msg);
802 	if (likely(!ret))
803 		return -EINPROGRESS;
804 
805 clear_all:
806 	hpre_rm_req_from_ctx(hpre_req);
807 	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
808 
809 	return ret;
810 }
811 
812 static int hpre_rsa_dec(struct akcipher_request *req)
813 {
814 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
815 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
816 	void *tmp = akcipher_request_ctx(req);
817 	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
818 	struct hpre_sqe *msg = &hpre_req->req;
819 	int ret;
820 
821 	/* For 512 and 1536 bits key size, use soft tfm instead */
822 	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
823 	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
824 		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
825 		ret = crypto_akcipher_decrypt(req);
826 		akcipher_request_set_tfm(req, tfm);
827 		return ret;
828 	}
829 
830 	if (unlikely(!ctx->rsa.prikey))
831 		return -EINVAL;
832 
833 	ret = hpre_msg_request_set(ctx, req, true);
834 	if (unlikely(ret))
835 		return ret;
836 
837 	if (ctx->crt_g2_mode) {
838 		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
839 		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
840 				       HPRE_ALG_NC_CRT);
841 	} else {
842 		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
843 		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
844 				       HPRE_ALG_NC_NCRT);
845 	}
846 
847 	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
848 	if (unlikely(ret))
849 		goto clear_all;
850 
851 	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
852 	if (unlikely(ret))
853 		goto clear_all;
854 
855 	/* success */
856 	ret = hpre_send(ctx, msg);
857 	if (likely(!ret))
858 		return -EINPROGRESS;
859 
860 clear_all:
861 	hpre_rm_req_from_ctx(hpre_req);
862 	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
863 
864 	return ret;
865 }
866 
867 static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
868 			  size_t vlen, bool private)
869 {
870 	const char *ptr = value;
871 
872 	hpre_rsa_drop_leading_zeros(&ptr, &vlen);
873 
874 	ctx->key_sz = vlen;
875 
	/* if an unsupported key size is provided, fall back to the software tfm */
877 	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
878 		return 0;
879 
880 	ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
881 					     &ctx->rsa.dma_pubkey,
882 					     GFP_KERNEL);
883 	if (!ctx->rsa.pubkey)
884 		return -ENOMEM;
885 
886 	if (private) {
887 		ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
888 						     &ctx->rsa.dma_prikey,
889 						     GFP_KERNEL);
890 		if (!ctx->rsa.prikey) {
891 			dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
892 					  ctx->rsa.pubkey,
893 					  ctx->rsa.dma_pubkey);
894 			ctx->rsa.pubkey = NULL;
895 			return -ENOMEM;
896 		}
897 		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
898 	}
899 	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
900 
	/* Use the HPRE hardware to do RSA */
902 	return 1;
903 }
904 
905 static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
906 			  size_t vlen)
907 {
908 	const char *ptr = value;
909 
910 	hpre_rsa_drop_leading_zeros(&ptr, &vlen);
911 
912 	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
913 		return -EINVAL;
914 
915 	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
916 
917 	return 0;
918 }
919 
920 static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
921 			  size_t vlen)
922 {
923 	const char *ptr = value;
924 
925 	hpre_rsa_drop_leading_zeros(&ptr, &vlen);
926 
927 	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
928 		return -EINVAL;
929 
930 	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
931 
932 	return 0;
933 }
934 
935 static int hpre_crt_para_get(char *para, size_t para_sz,
936 			     const char *raw, size_t raw_sz)
937 {
938 	const char *ptr = raw;
939 	size_t len = raw_sz;
940 
941 	hpre_rsa_drop_leading_zeros(&ptr, &len);
942 	if (!len || len > para_sz)
943 		return -EINVAL;
944 
945 	memcpy(para + para_sz - len, ptr, len);
946 
947 	return 0;
948 }
949 
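/*
 * CRT private key buffer layout, from low address to high address, each field
 * taking half the key size (this matches the HPRE_CRT_* offsets used below):
 *
 *	| dq | dp | q | p | qinv |
 */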
950 static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
951 {
952 	unsigned int hlf_ksz = ctx->key_sz >> 1;
953 	struct device *dev = HPRE_DEV(ctx);
954 	u64 offset;
955 	int ret;
956 
957 	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
958 					&ctx->rsa.dma_crt_prikey,
959 					GFP_KERNEL);
960 	if (!ctx->rsa.crt_prikey)
961 		return -ENOMEM;
962 
963 	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
964 				rsa_key->dq, rsa_key->dq_sz);
965 	if (ret)
966 		goto free_key;
967 
968 	offset = hlf_ksz;
969 	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
970 				rsa_key->dp, rsa_key->dp_sz);
971 	if (ret)
972 		goto free_key;
973 
974 	offset = hlf_ksz * HPRE_CRT_Q;
975 	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
976 				rsa_key->q, rsa_key->q_sz);
977 	if (ret)
978 		goto free_key;
979 
980 	offset = hlf_ksz * HPRE_CRT_P;
981 	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
982 				rsa_key->p, rsa_key->p_sz);
983 	if (ret)
984 		goto free_key;
985 
986 	offset = hlf_ksz * HPRE_CRT_INV;
987 	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
988 				rsa_key->qinv, rsa_key->qinv_sz);
989 	if (ret)
990 		goto free_key;
991 
992 	ctx->crt_g2_mode = true;
993 
994 	return 0;
995 
996 free_key:
997 	offset = hlf_ksz * HPRE_CRT_PRMS;
998 	memzero_explicit(ctx->rsa.crt_prikey, offset);
999 	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
1000 			  ctx->rsa.dma_crt_prikey);
1001 	ctx->rsa.crt_prikey = NULL;
1002 	ctx->crt_g2_mode = false;
1003 
1004 	return ret;
1005 }
1006 
/* If is_clear_all is set, all the resources of the QP are released as well. */
1008 static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
1009 {
1010 	unsigned int half_key_sz = ctx->key_sz >> 1;
1011 	struct device *dev = HPRE_DEV(ctx);
1012 
1013 	if (is_clear_all)
1014 		hisi_qm_stop_qp(ctx->qp);
1015 
1016 	if (ctx->rsa.pubkey) {
1017 		dma_free_coherent(dev, ctx->key_sz << 1,
1018 				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
1019 		ctx->rsa.pubkey = NULL;
1020 	}
1021 
1022 	if (ctx->rsa.crt_prikey) {
1023 		memzero_explicit(ctx->rsa.crt_prikey,
1024 				 half_key_sz * HPRE_CRT_PRMS);
1025 		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
1026 				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
1027 		ctx->rsa.crt_prikey = NULL;
1028 	}
1029 
1030 	if (ctx->rsa.prikey) {
1031 		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
1032 		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
1033 				  ctx->rsa.dma_prikey);
1034 		ctx->rsa.prikey = NULL;
1035 	}
1036 
1037 	hpre_ctx_clear(ctx, is_clear_all);
1038 }
1039 
/*
 * Judge whether the key is in CRT form:
 * CRT: return true, N-CRT: return false.
 */
1044 static bool hpre_is_crt_key(struct rsa_key *key)
1045 {
1046 	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
1047 		  key->qinv_sz;
1048 
1049 #define LEN_OF_NCRT_PARA	5
1050 
	/*
	 * Treat the key as N-CRT when the combined length of the five CRT
	 * parameters does not exceed LEN_OF_NCRT_PARA bytes.
	 */
1052 	return len > LEN_OF_NCRT_PARA;
1053 }
1054 
1055 static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
1056 			   unsigned int keylen, bool private)
1057 {
1058 	struct rsa_key rsa_key;
1059 	int ret;
1060 
1061 	hpre_rsa_clear_ctx(ctx, false);
1062 
1063 	if (private)
1064 		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
1065 	else
1066 		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
1067 	if (ret < 0)
1068 		return ret;
1069 
1070 	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
1071 	if (ret <= 0)
1072 		return ret;
1073 
1074 	if (private) {
1075 		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
1076 		if (ret < 0)
1077 			goto free;
1078 
1079 		if (hpre_is_crt_key(&rsa_key)) {
1080 			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
1081 			if (ret < 0)
1082 				goto free;
1083 		}
1084 	}
1085 
1086 	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
1087 	if (ret < 0)
1088 		goto free;
1089 
1090 	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
1091 		ret = -EINVAL;
1092 		goto free;
1093 	}
1094 
1095 	return 0;
1096 
1097 free:
1098 	hpre_rsa_clear_ctx(ctx, false);
1099 	return ret;
1100 }
1101 
1102 static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
1103 			      unsigned int keylen)
1104 {
1105 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1106 	int ret;
1107 
1108 	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
1109 	if (ret)
1110 		return ret;
1111 
1112 	return hpre_rsa_setkey(ctx, key, keylen, false);
1113 }
1114 
1115 static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
1116 			       unsigned int keylen)
1117 {
1118 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1119 	int ret;
1120 
1121 	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
1122 	if (ret)
1123 		return ret;
1124 
1125 	return hpre_rsa_setkey(ctx, key, keylen, true);
1126 }
1127 
1128 static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
1129 {
1130 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1131 
1132 	/* For 512 and 1536 bits key size, use soft tfm instead */
1133 	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
1134 	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
1135 		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
1136 
1137 	return ctx->key_sz;
1138 }
1139 
1140 static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
1141 {
1142 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1143 	int ret;
1144 
1145 	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
1146 	if (IS_ERR(ctx->rsa.soft_tfm)) {
1147 		pr_err("Can not alloc_akcipher!\n");
1148 		return PTR_ERR(ctx->rsa.soft_tfm);
1149 	}
1150 
1151 	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
1152 	if (ret)
1153 		crypto_free_akcipher(ctx->rsa.soft_tfm);
1154 
1155 	return ret;
1156 }
1157 
1158 static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
1159 {
1160 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1161 
1162 	hpre_rsa_clear_ctx(ctx, true);
1163 	crypto_free_akcipher(ctx->rsa.soft_tfm);
1164 }
1165 
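/* Reverse a byte buffer in place to convert between little and big endian. */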
1166 static void hpre_key_to_big_end(u8 *data, int len)
1167 {
1168 	int i, j;
1169 	u8 tmp;
1170 
1171 	for (i = 0; i < len / 2; i++) {
1172 		j = len - i - 1;
1173 		tmp = data[j];
1174 		data[j] = data[i];
1175 		data[i] = tmp;
1176 	}
1177 }
1178 
1179 static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
1180 			       bool is_ecdh)
1181 {
1182 	struct device *dev = HPRE_DEV(ctx);
1183 	unsigned int sz = ctx->key_sz;
1184 	unsigned int shift = sz << 1;
1185 
1186 	if (is_clear_all)
1187 		hisi_qm_stop_qp(ctx->qp);
1188 
1189 	if (is_ecdh && ctx->ecdh.p) {
1190 		/* ecdh: p->a->k->b */
1191 		memzero_explicit(ctx->ecdh.p + shift, sz);
1192 		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1193 		ctx->ecdh.p = NULL;
1194 	} else if (!is_ecdh && ctx->curve25519.p) {
1195 		/* curve25519: p->a->k */
1196 		memzero_explicit(ctx->curve25519.p + shift, sz);
1197 		dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
1198 				  ctx->curve25519.dma_p);
1199 		ctx->curve25519.p = NULL;
1200 	}
1201 
1202 	hpre_ctx_clear(ctx, is_clear_all);
1203 }
1204 
1205 static unsigned int hpre_ecdh_supported_curve(unsigned short id)
1206 {
1207 	switch (id) {
1208 	case ECC_CURVE_NIST_P192:
1209 	case ECC_CURVE_NIST_P256:
1210 		return HPRE_ECC_HW256_KSZ_B;
1211 	default:
1212 		break;
1213 	}
1214 
1215 	return 0;
1216 }
1217 
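/*
 * Copy a curve parameter, stored as an array of u64 digits, into a cur_sz
 * byte buffer and convert it to the big-endian form the hardware consumes.
 */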
1218 static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
1219 {
1220 	unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
1221 	u8 i = 0;
1222 
1223 	while (i < ndigits - 1) {
1224 		memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
1225 		i++;
1226 	}
1227 
1228 	memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
1229 	hpre_key_to_big_end((u8 *)addr, cur_sz);
1230 }
1231 
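/*
 * Fill the curve parameters into the ecdh.p buffer (p, a and b, each in its
 * own key_sz slot, with the private key k in between) and the base point
 * coordinates x and y into the ecdh.g buffer. A full-length private key that
 * is not smaller than the curve order n is rejected.
 */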
1232 static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
1233 				unsigned int cur_sz)
1234 {
1235 	unsigned int shifta = ctx->key_sz << 1;
1236 	unsigned int shiftb = ctx->key_sz << 2;
1237 	void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
1238 	void *a = ctx->ecdh.p + shifta - cur_sz;
1239 	void *b = ctx->ecdh.p + shiftb - cur_sz;
1240 	void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
1241 	void *y = ctx->ecdh.g + shifta - cur_sz;
1242 	const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
1243 	char *n;
1244 
1245 	if (unlikely(!curve))
1246 		return -EINVAL;
1247 
1248 	n = kzalloc(ctx->key_sz, GFP_KERNEL);
1249 	if (!n)
1250 		return -ENOMEM;
1251 
1252 	fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
1253 	fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
1254 	fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
1255 	fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
1256 	fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
1257 	fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
1258 
1259 	if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
1260 		kfree(n);
1261 		return -EINVAL;
1262 	}
1263 
1264 	kfree(n);
1265 	return 0;
1266 }
1267 
1268 static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
1269 {
1270 	switch (id) {
1271 	case ECC_CURVE_NIST_P192:
1272 		return HPRE_ECC_NIST_P192_N_SIZE;
1273 	case ECC_CURVE_NIST_P256:
1274 		return HPRE_ECC_NIST_P256_N_SIZE;
1275 	default:
1276 		break;
1277 	}
1278 
1279 	return 0;
1280 }
1281 
1282 static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
1283 {
1284 	struct device *dev = HPRE_DEV(ctx);
1285 	unsigned int sz, shift, curve_sz;
1286 	int ret;
1287 
1288 	ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
1289 	if (!ctx->key_sz)
1290 		return -EINVAL;
1291 
1292 	curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1293 	if (!curve_sz || params->key_size > curve_sz)
1294 		return -EINVAL;
1295 
1296 	sz = ctx->key_sz;
1297 
1298 	if (!ctx->ecdh.p) {
1299 		ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
1300 						 GFP_KERNEL);
1301 		if (!ctx->ecdh.p)
1302 			return -ENOMEM;
1303 	}
1304 
1305 	shift = sz << 2;
1306 	ctx->ecdh.g = ctx->ecdh.p + shift;
1307 	ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
1308 
1309 	ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
1310 	if (ret) {
1311 		dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
1312 		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1313 		ctx->ecdh.p = NULL;
1314 		return ret;
1315 	}
1316 
1317 	return 0;
1318 }
1319 
1320 static bool hpre_key_is_zero(char *key, unsigned short key_sz)
1321 {
1322 	int i;
1323 
1324 	for (i = 0; i < key_sz; i++)
1325 		if (key[i])
1326 			return false;
1327 
1328 	return true;
1329 }
1330 
1331 static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
1332 				unsigned int len)
1333 {
1334 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1335 	struct device *dev = HPRE_DEV(ctx);
1336 	unsigned int sz, sz_shift;
1337 	struct ecdh params;
1338 	int ret;
1339 
1340 	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
1341 		dev_err(dev, "failed to decode ecdh key!\n");
1342 		return -EINVAL;
1343 	}
1344 
1345 	if (hpre_key_is_zero(params.key, params.key_size)) {
1346 		dev_err(dev, "Invalid hpre key!\n");
1347 		return -EINVAL;
1348 	}
1349 
1350 	hpre_ecc_clear_ctx(ctx, false, true);
1351 
1352 	ret = hpre_ecdh_set_param(ctx, &params);
1353 	if (ret < 0) {
1354 		dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
1355 		return ret;
1356 	}
1357 
1358 	sz = ctx->key_sz;
1359 	sz_shift = (sz << 1) + sz - params.key_size;
1360 	memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
1361 
1362 	return 0;
1363 }
1364 
1365 static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
1366 				      struct hpre_asym_request *req,
1367 				      struct scatterlist *dst,
1368 				      struct scatterlist *src)
1369 {
1370 	struct device *dev = HPRE_DEV(ctx);
1371 	struct hpre_sqe *sqe = &req->req;
1372 	dma_addr_t dma;
1373 
1374 	dma = le64_to_cpu(sqe->in);
1375 
1376 	if (src && req->src)
1377 		dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
1378 
1379 	dma = le64_to_cpu(sqe->out);
1380 
1381 	if (req->dst)
1382 		dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
1383 	if (dst)
1384 		dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
1385 }
1386 
1387 static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
1388 {
1389 	unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1390 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1391 	struct hpre_asym_request *req = NULL;
1392 	struct kpp_request *areq;
1393 	u64 overtime_thrhld;
1394 	char *p;
1395 	int ret;
1396 
1397 	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1398 	areq = req->areq.ecdh;
1399 	areq->dst_len = ctx->key_sz << 1;
1400 
1401 	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1402 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1403 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1404 
1405 	p = sg_virt(areq->dst);
1406 	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
1407 	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
1408 
1409 	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1410 	kpp_request_complete(areq, ret);
1411 
1412 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1413 }
1414 
1415 static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
1416 				     struct kpp_request *req)
1417 {
1418 	struct hpre_asym_request *h_req;
1419 	struct hpre_sqe *msg;
1420 	int req_id;
1421 	void *tmp;
1422 
1423 	if (req->dst_len < ctx->key_sz << 1) {
1424 		req->dst_len = ctx->key_sz << 1;
1425 		return -EINVAL;
1426 	}
1427 
1428 	tmp = kpp_request_ctx(req);
1429 	h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1430 	h_req->cb = hpre_ecdh_cb;
1431 	h_req->areq.ecdh = req;
1432 	msg = &h_req->req;
1433 	memset(msg, 0, sizeof(*msg));
1434 	msg->key = cpu_to_le64(ctx->ecdh.dma_p);
1435 
1436 	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1437 	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1438 	h_req->ctx = ctx;
1439 
1440 	req_id = hpre_add_req_to_ctx(h_req);
1441 	if (req_id < 0)
1442 		return -EBUSY;
1443 
1444 	msg->tag = cpu_to_le16((u16)req_id);
1445 	return 0;
1446 }
1447 
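/*
 * The peer public key arrives as gx followed by gy. Both coordinates are
 * copied into a coherent buffer and right-aligned within their own key_sz
 * slot before being handed to the hardware.
 */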
1448 static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
1449 				   struct scatterlist *data, unsigned int len)
1450 {
1451 	struct hpre_sqe *msg = &hpre_req->req;
1452 	struct hpre_ctx *ctx = hpre_req->ctx;
1453 	struct device *dev = HPRE_DEV(ctx);
1454 	unsigned int tmpshift;
1455 	dma_addr_t dma = 0;
1456 	void *ptr;
1457 	int shift;
1458 
	/* Src_data includes gx and gy. */
1460 	shift = ctx->key_sz - (len >> 1);
1461 	if (unlikely(shift < 0))
1462 		return -EINVAL;
1463 
1464 	ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
1465 	if (unlikely(!ptr))
1466 		return -ENOMEM;
1467 
1468 	tmpshift = ctx->key_sz << 1;
1469 	scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
1470 	memcpy(ptr + shift, ptr + tmpshift, len >> 1);
1471 	memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
1472 
1473 	hpre_req->src = ptr;
1474 	msg->in = cpu_to_le64(dma);
1475 	return 0;
1476 }
1477 
1478 static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
1479 				   struct scatterlist *data, unsigned int len)
1480 {
1481 	struct hpre_sqe *msg = &hpre_req->req;
1482 	struct hpre_ctx *ctx = hpre_req->ctx;
1483 	struct device *dev = HPRE_DEV(ctx);
1484 	dma_addr_t dma = 0;
1485 
1486 	if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
1487 		dev_err(dev, "data or data length is illegal!\n");
1488 		return -EINVAL;
1489 	}
1490 
1491 	hpre_req->dst = NULL;
1492 	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1493 	if (unlikely(dma_mapping_error(dev, dma))) {
1494 		dev_err(dev, "dma map data err!\n");
1495 		return -ENOMEM;
1496 	}
1497 
1498 	msg->out = cpu_to_le64(dma);
1499 	return 0;
1500 }
1501 
1502 static int hpre_ecdh_compute_value(struct kpp_request *req)
1503 {
1504 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1505 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1506 	struct device *dev = HPRE_DEV(ctx);
1507 	void *tmp = kpp_request_ctx(req);
1508 	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1509 	struct hpre_sqe *msg = &hpre_req->req;
1510 	int ret;
1511 
1512 	ret = hpre_ecdh_msg_request_set(ctx, req);
1513 	if (unlikely(ret)) {
1514 		dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
1515 		return ret;
1516 	}
1517 
1518 	if (req->src) {
1519 		ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
1520 		if (unlikely(ret)) {
1521 			dev_err(dev, "failed to init src data, ret = %d!\n", ret);
1522 			goto clear_all;
1523 		}
1524 	} else {
1525 		msg->in = cpu_to_le64(ctx->ecdh.dma_g);
1526 	}
1527 
1528 	ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
1529 	if (unlikely(ret)) {
1530 		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1531 		goto clear_all;
1532 	}
1533 
1534 	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
1535 	ret = hpre_send(ctx, msg);
1536 	if (likely(!ret))
1537 		return -EINPROGRESS;
1538 
1539 clear_all:
1540 	hpre_rm_req_from_ctx(hpre_req);
1541 	hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1542 	return ret;
1543 }
1544 
1545 static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
1546 {
1547 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1548 
	/* max size is the public key size, including both x and y */
1550 	return ctx->key_sz << 1;
1551 }
1552 
1553 static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
1554 {
1555 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1556 
1557 	ctx->curve_id = ECC_CURVE_NIST_P192;
1558 
1559 	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1560 }
1561 
1562 static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
1563 {
1564 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1565 
1566 	ctx->curve_id = ECC_CURVE_NIST_P256;
1567 
1568 	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1569 }
1570 
1571 static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
1572 {
1573 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1574 
1575 	hpre_ecc_clear_ctx(ctx, true, true);
1576 }
1577 
1578 static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
1579 				       unsigned int len)
1580 {
1581 	u8 secret[CURVE25519_KEY_SIZE] = { 0 };
1582 	unsigned int sz = ctx->key_sz;
1583 	const struct ecc_curve *curve;
1584 	unsigned int shift = sz << 1;
1585 	void *p;
1586 
	/*
	 * The key from 'buf' is in little-endian byte order. Preprocess it as
	 * described in RFC 7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
	 * then convert it to big-endian form. Only then does the result match
	 * the software curve25519 implementation in the crypto subsystem.
	 */
1593 	memcpy(secret, buf, len);
1594 	curve25519_clamp_secret(secret);
1595 	hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
1596 
1597 	p = ctx->curve25519.p + sz - len;
1598 
1599 	curve = ecc_get_curve25519();
1600 
1601 	/* fill curve parameters */
1602 	fill_curve_param(p, curve->p, len, curve->g.ndigits);
1603 	fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
1604 	memcpy(p + shift, secret, len);
1605 	fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
1606 	memzero_explicit(secret, CURVE25519_KEY_SIZE);
1607 }
1608 
1609 static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
1610 				     unsigned int len)
1611 {
1612 	struct device *dev = HPRE_DEV(ctx);
1613 	unsigned int sz = ctx->key_sz;
1614 	unsigned int shift = sz << 1;
1615 
1616 	/* p->a->k->gx */
1617 	if (!ctx->curve25519.p) {
1618 		ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
1619 						       &ctx->curve25519.dma_p,
1620 						       GFP_KERNEL);
1621 		if (!ctx->curve25519.p)
1622 			return -ENOMEM;
1623 	}
1624 
1625 	ctx->curve25519.g = ctx->curve25519.p + shift + sz;
1626 	ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
1627 
1628 	hpre_curve25519_fill_curve(ctx, buf, len);
1629 
1630 	return 0;
1631 }
1632 
1633 static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
1634 				      unsigned int len)
1635 {
1636 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1637 	struct device *dev = HPRE_DEV(ctx);
1638 	int ret = -EINVAL;
1639 
1640 	if (len != CURVE25519_KEY_SIZE ||
1641 	    !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "key is null or key length is not 32 bytes!\n");
1643 		return ret;
1644 	}
1645 
1646 	/* Free old secret if any */
1647 	hpre_ecc_clear_ctx(ctx, false, false);
1648 
1649 	ctx->key_sz = CURVE25519_KEY_SIZE;
1650 	ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
1651 	if (ret) {
1652 		dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
1653 		hpre_ecc_clear_ctx(ctx, false, false);
1654 		return ret;
1655 	}
1656 
1657 	return 0;
1658 }
1659 
1660 static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
1661 					    struct hpre_asym_request *req,
1662 					    struct scatterlist *dst,
1663 					    struct scatterlist *src)
1664 {
1665 	struct device *dev = HPRE_DEV(ctx);
1666 	struct hpre_sqe *sqe = &req->req;
1667 	dma_addr_t dma;
1668 
1669 	dma = le64_to_cpu(sqe->in);
1670 
1671 	if (src && req->src)
1672 		dma_free_coherent(dev, ctx->key_sz, req->src, dma);
1673 
1674 	dma = le64_to_cpu(sqe->out);
1675 
1676 	if (req->dst)
1677 		dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
1678 	if (dst)
1679 		dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
1680 }
1681 
1682 static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
1683 {
1684 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1685 	struct hpre_asym_request *req = NULL;
1686 	struct kpp_request *areq;
1687 	u64 overtime_thrhld;
1688 	int ret;
1689 
1690 	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1691 	areq = req->areq.curve25519;
1692 	areq->dst_len = ctx->key_sz;
1693 
1694 	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1695 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1696 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1697 
1698 	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
1699 
1700 	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1701 	kpp_request_complete(areq, ret);
1702 
1703 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1704 }
1705 
1706 static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
1707 					   struct kpp_request *req)
1708 {
1709 	struct hpre_asym_request *h_req;
1710 	struct hpre_sqe *msg;
1711 	int req_id;
1712 	void *tmp;
1713 
1714 	if (unlikely(req->dst_len < ctx->key_sz)) {
1715 		req->dst_len = ctx->key_sz;
1716 		return -EINVAL;
1717 	}
1718 
1719 	tmp = kpp_request_ctx(req);
1720 	h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1721 	h_req->cb = hpre_curve25519_cb;
1722 	h_req->areq.curve25519 = req;
1723 	msg = &h_req->req;
1724 	memset(msg, 0, sizeof(*msg));
1725 	msg->key = cpu_to_le64(ctx->curve25519.dma_p);
1726 
1727 	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1728 	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1729 	h_req->ctx = ctx;
1730 
1731 	req_id = hpre_add_req_to_ctx(h_req);
1732 	if (req_id < 0)
1733 		return -EBUSY;
1734 
1735 	msg->tag = cpu_to_le16((u16)req_id);
1736 	return 0;
1737 }
1738 
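/*
 * Reduce a big-endian value known to lie in [p, 2^255 - 1] modulo
 * p = 2^255 - 19. In that range the value and p differ only in the last
 * byte, so the reduction is simply clearing the leading bytes and
 * subtracting 0xed from the final one.
 */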
1739 static void hpre_curve25519_src_modulo_p(u8 *ptr)
1740 {
1741 	int i;
1742 
1743 	for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
1744 		ptr[i] = 0;
1745 
	/* The remainder is ptr's last byte minus 0xed (the last byte of p) */
1747 	ptr[i] -= 0xed;
1748 }
1749 
1750 static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
1751 				    struct scatterlist *data, unsigned int len)
1752 {
1753 	struct hpre_sqe *msg = &hpre_req->req;
1754 	struct hpre_ctx *ctx = hpre_req->ctx;
1755 	struct device *dev = HPRE_DEV(ctx);
1756 	u8 p[CURVE25519_KEY_SIZE] = { 0 };
1757 	const struct ecc_curve *curve;
1758 	dma_addr_t dma = 0;
1759 	u8 *ptr;
1760 
1761 	if (len != CURVE25519_KEY_SIZE) {
		dev_err(dev, "src_data len is not 32 bytes, len = %u!\n", len);
1763 		return -EINVAL;
1764 	}
1765 
1766 	ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
1767 	if (unlikely(!ptr))
1768 		return -ENOMEM;
1769 
1770 	scatterwalk_map_and_copy(ptr, data, 0, len, 0);
1771 
1772 	if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
1773 		dev_err(dev, "gx is null!\n");
1774 		goto err;
1775 	}
1776 
	/*
	 * Src_data (gx) is in little-endian byte order. The MSB of its final
	 * byte is masked as described in RFC 7748, then the data is converted
	 * to big-endian form so the HPRE hardware can consume it.
	 */
1782 	ptr[31] &= 0x7f;
1783 	hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
1784 
1785 	curve = ecc_get_curve25519();
1786 
1787 	fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
1788 
	/*
	 * When src_data lies in [2^255 - 19, 2^255 - 1] it is not smaller
	 * than p, so reduce it modulo p before using it.
	 */
1793 	if (memcmp(ptr, p, ctx->key_sz) >= 0)
1794 		hpre_curve25519_src_modulo_p(ptr);
1795 
1796 	hpre_req->src = ptr;
1797 	msg->in = cpu_to_le64(dma);
1798 	return 0;
1799 
1800 err:
1801 	dma_free_coherent(dev, ctx->key_sz, ptr, dma);
1802 	return -EINVAL;
1803 }
1804 
1805 static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
1806 				    struct scatterlist *data, unsigned int len)
1807 {
1808 	struct hpre_sqe *msg = &hpre_req->req;
1809 	struct hpre_ctx *ctx = hpre_req->ctx;
1810 	struct device *dev = HPRE_DEV(ctx);
1811 	dma_addr_t dma = 0;
1812 
1813 	if (!data || !sg_is_last(data) || len != ctx->key_sz) {
1814 		dev_err(dev, "data or data length is illegal!\n");
1815 		return -EINVAL;
1816 	}
1817 
1818 	hpre_req->dst = NULL;
1819 	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1820 	if (unlikely(dma_mapping_error(dev, dma))) {
1821 		dev_err(dev, "dma map data err!\n");
1822 		return -ENOMEM;
1823 	}
1824 
1825 	msg->out = cpu_to_le64(dma);
1826 	return 0;
1827 }
1828 
1829 static int hpre_curve25519_compute_value(struct kpp_request *req)
1830 {
1831 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1832 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1833 	struct device *dev = HPRE_DEV(ctx);
1834 	void *tmp = kpp_request_ctx(req);
1835 	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1836 	struct hpre_sqe *msg = &hpre_req->req;
1837 	int ret;
1838 
1839 	ret = hpre_curve25519_msg_request_set(ctx, req);
1840 	if (unlikely(ret)) {
1841 		dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
1842 		return ret;
1843 	}
1844 
1845 	if (req->src) {
1846 		ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
1847 		if (unlikely(ret)) {
1848 			dev_err(dev, "failed to init src data, ret = %d!\n",
1849 				ret);
1850 			goto clear_all;
1851 		}
1852 	} else {
1853 		msg->in = cpu_to_le64(ctx->curve25519.dma_g);
1854 	}
1855 
1856 	ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
1857 	if (unlikely(ret)) {
1858 		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1859 		goto clear_all;
1860 	}
1861 
1862 	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
1863 	ret = hpre_send(ctx, msg);
1864 	if (likely(!ret))
1865 		return -EINPROGRESS;
1866 
1867 clear_all:
1868 	hpre_rm_req_from_ctx(hpre_req);
1869 	hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1870 	return ret;
1871 }
1872 
1873 static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
1874 {
1875 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1876 
1877 	return ctx->key_sz;
1878 }
1879 
1880 static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
1881 {
1882 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1883 
1884 	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1885 }
1886 
1887 static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
1888 {
1889 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1890 
1891 	hpre_ecc_clear_ctx(ctx, true, false);
1892 }
1893 
1894 static struct akcipher_alg rsa = {
1895 	.sign = hpre_rsa_dec,
1896 	.verify = hpre_rsa_enc,
1897 	.encrypt = hpre_rsa_enc,
1898 	.decrypt = hpre_rsa_dec,
1899 	.set_pub_key = hpre_rsa_setpubkey,
1900 	.set_priv_key = hpre_rsa_setprivkey,
1901 	.max_size = hpre_rsa_max_size,
1902 	.init = hpre_rsa_init_tfm,
1903 	.exit = hpre_rsa_exit_tfm,
1904 	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1905 	.base = {
1906 		.cra_ctxsize = sizeof(struct hpre_ctx),
1907 		.cra_priority = HPRE_CRYPTO_ALG_PRI,
1908 		.cra_name = "rsa",
1909 		.cra_driver_name = "hpre-rsa",
1910 		.cra_module = THIS_MODULE,
1911 	},
1912 };
1913 
1914 static struct kpp_alg dh = {
1915 	.set_secret = hpre_dh_set_secret,
1916 	.generate_public_key = hpre_dh_compute_value,
1917 	.compute_shared_secret = hpre_dh_compute_value,
1918 	.max_size = hpre_dh_max_size,
1919 	.init = hpre_dh_init_tfm,
1920 	.exit = hpre_dh_exit_tfm,
1921 	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1922 	.base = {
1923 		.cra_ctxsize = sizeof(struct hpre_ctx),
1924 		.cra_priority = HPRE_CRYPTO_ALG_PRI,
1925 		.cra_name = "dh",
1926 		.cra_driver_name = "hpre-dh",
1927 		.cra_module = THIS_MODULE,
1928 	},
1929 };
1930 
1931 static struct kpp_alg ecdh_nist_p192 = {
1932 	.set_secret = hpre_ecdh_set_secret,
1933 	.generate_public_key = hpre_ecdh_compute_value,
1934 	.compute_shared_secret = hpre_ecdh_compute_value,
1935 	.max_size = hpre_ecdh_max_size,
1936 	.init = hpre_ecdh_nist_p192_init_tfm,
1937 	.exit = hpre_ecdh_exit_tfm,
1938 	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1939 	.base = {
1940 		.cra_ctxsize = sizeof(struct hpre_ctx),
1941 		.cra_priority = HPRE_CRYPTO_ALG_PRI,
1942 		.cra_name = "ecdh-nist-p192",
1943 		.cra_driver_name = "hpre-ecdh",
1944 		.cra_module = THIS_MODULE,
1945 	},
1946 };
1947 
1948 static struct kpp_alg ecdh_nist_p256 = {
1949 	.set_secret = hpre_ecdh_set_secret,
1950 	.generate_public_key = hpre_ecdh_compute_value,
1951 	.compute_shared_secret = hpre_ecdh_compute_value,
1952 	.max_size = hpre_ecdh_max_size,
1953 	.init = hpre_ecdh_nist_p256_init_tfm,
1954 	.exit = hpre_ecdh_exit_tfm,
1955 	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1956 	.base = {
1957 		.cra_ctxsize = sizeof(struct hpre_ctx),
1958 		.cra_priority = HPRE_CRYPTO_ALG_PRI,
1959 		.cra_name = "ecdh-nist-p256",
1960 		.cra_driver_name = "hpre-ecdh",
1961 		.cra_module = THIS_MODULE,
1962 	},
1963 };
1964 
1965 static struct kpp_alg curve25519_alg = {
1966 	.set_secret = hpre_curve25519_set_secret,
1967 	.generate_public_key = hpre_curve25519_compute_value,
1968 	.compute_shared_secret = hpre_curve25519_compute_value,
1969 	.max_size = hpre_curve25519_max_size,
1970 	.init = hpre_curve25519_init_tfm,
1971 	.exit = hpre_curve25519_exit_tfm,
1972 	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1973 	.base = {
1974 		.cra_ctxsize = sizeof(struct hpre_ctx),
1975 		.cra_priority = HPRE_CRYPTO_ALG_PRI,
1976 		.cra_name = "curve25519",
1977 		.cra_driver_name = "hpre-curve25519",
1978 		.cra_module = THIS_MODULE,
1979 	},
1980 };
1981 
1983 static int hpre_register_ecdh(void)
1984 {
1985 	int ret;
1986 
1987 	ret = crypto_register_kpp(&ecdh_nist_p192);
1988 	if (ret)
1989 		return ret;
1990 
1991 	ret = crypto_register_kpp(&ecdh_nist_p256);
1992 	if (ret) {
1993 		crypto_unregister_kpp(&ecdh_nist_p192);
1994 		return ret;
1995 	}
1996 
1997 	return 0;
1998 }
1999 
2000 static void hpre_unregister_ecdh(void)
2001 {
2002 	crypto_unregister_kpp(&ecdh_nist_p256);
2003 	crypto_unregister_kpp(&ecdh_nist_p192);
2004 }
2005 
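/*
 * RSA and DH are registered unconditionally; the ECDH and curve25519
 * algorithms need the V3 engine and are only registered when qm->ver
 * indicates HW V3 or later.
 */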
2006 int hpre_algs_register(struct hisi_qm *qm)
2007 {
2008 	int ret;
2009 
2010 	rsa.base.cra_flags = 0;
2011 	ret = crypto_register_akcipher(&rsa);
2012 	if (ret)
2013 		return ret;
2014 
2015 	ret = crypto_register_kpp(&dh);
2016 	if (ret)
2017 		goto unreg_rsa;
2018 
2019 	if (qm->ver >= QM_HW_V3) {
2020 		ret = hpre_register_ecdh();
2021 		if (ret)
2022 			goto unreg_dh;
2023 		ret = crypto_register_kpp(&curve25519_alg);
2024 		if (ret)
2025 			goto unreg_ecdh;
2026 	}
2027 	return 0;
2028 
2029 unreg_ecdh:
2030 	hpre_unregister_ecdh();
2031 unreg_dh:
2032 	crypto_unregister_kpp(&dh);
2033 unreg_rsa:
2034 	crypto_unregister_akcipher(&rsa);
2035 	return ret;
2036 }
2037 
2038 void hpre_algs_unregister(struct hisi_qm *qm)
2039 {
2040 	if (qm->ver >= QM_HW_V3) {
2041 		crypto_unregister_kpp(&curve25519_alg);
2042 		hpre_unregister_ecdh();
2043 	}
2044 
2045 	crypto_unregister_kpp(&dh);
2046 	crypto_unregister_akcipher(&rsa);
2047 }
2048