1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <crypto/akcipher.h>
4 #include <crypto/curve25519.h>
5 #include <crypto/dh.h>
6 #include <crypto/ecc_curve.h>
7 #include <crypto/ecdh.h>
8 #include <crypto/internal/akcipher.h>
9 #include <crypto/internal/kpp.h>
10 #include <crypto/internal/rsa.h>
11 #include <crypto/kpp.h>
12 #include <crypto/scatterwalk.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/fips.h>
15 #include <linux/module.h>
16 #include <linux/time.h>
17 #include "hpre.h"
18 
19 struct hpre_ctx;
20 
21 #define HPRE_CRYPTO_ALG_PRI	1000
22 #define HPRE_ALIGN_SZ		64
23 #define HPRE_BITS_2_BYTES_SHIFT	3
24 #define HPRE_RSA_512BITS_KSZ	64
25 #define HPRE_RSA_1536BITS_KSZ	192
26 #define HPRE_CRT_PRMS		5
27 #define HPRE_CRT_Q		2
28 #define HPRE_CRT_P		3
29 #define HPRE_CRT_INV		4
30 #define HPRE_DH_G_FLAG		0x02
31 #define HPRE_TRY_SEND_TIMES	100
32 #define HPRE_INVLD_REQ_ID		(-1)
33 #define HPRE_DEV(ctx)		(&((ctx)->qp->qm->pdev->dev))
34 
35 #define HPRE_SQE_ALG_BITS	5
36 #define HPRE_SQE_DONE_SHIFT	30
37 #define HPRE_DH_MAX_P_SZ	512
38 
39 #define HPRE_DFX_SEC_TO_US	1000000
40 #define HPRE_DFX_US_TO_NS	1000
41 
/* size in bytes of the curve parameter n (order of the base point) */
43 #define HPRE_ECC_NIST_P192_N_SIZE	24
44 #define HPRE_ECC_NIST_P256_N_SIZE	32
45 
46 /* size in bytes */
47 #define HPRE_ECC_HW256_KSZ_B	32
48 
49 typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
50 
51 struct hpre_rsa_ctx {
52 	/* low address: e--->n */
53 	char *pubkey;
54 	dma_addr_t dma_pubkey;
55 
56 	/* low address: d--->n */
57 	char *prikey;
58 	dma_addr_t dma_prikey;
59 
60 	/* low address: dq->dp->q->p->qinv */
61 	char *crt_prikey;
62 	dma_addr_t dma_crt_prikey;
63 
64 	struct crypto_akcipher *soft_tfm;
65 };
66 
67 struct hpre_dh_ctx {
	/*
	 * If base is g, we compute the public key:
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart's public key, we compute the
	 * shared secret:
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 * low address: d--->n, please refer to the HiSilicon HPRE UM
	 */
76 	char *xa_p;
77 	dma_addr_t dma_xa_p;
78 
79 	char *g; /* m */
80 	dma_addr_t dma_g;
81 };
82 
83 struct hpre_ecdh_ctx {
84 	/* low address: p->a->k->b */
85 	unsigned char *p;
86 	dma_addr_t dma_p;
87 
88 	/* low address: x->y */
89 	unsigned char *g;
90 	dma_addr_t dma_g;
91 };
92 
93 struct hpre_curve25519_ctx {
94 	/* low address: p->a->k */
95 	unsigned char *p;
96 	dma_addr_t dma_p;
97 
98 	/* gx coordinate */
99 	unsigned char *g;
100 	dma_addr_t dma_g;
101 };
102 
103 struct hpre_ctx {
104 	struct hisi_qp *qp;
105 	struct hpre_asym_request **req_list;
106 	struct hpre *hpre;
107 	spinlock_t req_lock;
108 	unsigned int key_sz;
109 	bool crt_g2_mode;
110 	struct idr req_idr;
111 	union {
112 		struct hpre_rsa_ctx rsa;
113 		struct hpre_dh_ctx dh;
114 		struct hpre_ecdh_ctx ecdh;
115 		struct hpre_curve25519_ctx curve25519;
116 	};
117 	/* for ecc algorithms */
118 	unsigned int curve_id;
119 };
120 
121 struct hpre_asym_request {
122 	char *src;
123 	char *dst;
124 	struct hpre_sqe req;
125 	struct hpre_ctx *ctx;
126 	union {
127 		struct akcipher_request *rsa;
128 		struct kpp_request *dh;
129 		struct kpp_request *ecdh;
130 		struct kpp_request *curve25519;
131 	} areq;
132 	int err;
133 	int req_id;
134 	hpre_cb cb;
135 	struct timespec64 req_time;
136 };
137 
138 static int hpre_alloc_req_id(struct hpre_ctx *ctx)
139 {
140 	unsigned long flags;
141 	int id;
142 
143 	spin_lock_irqsave(&ctx->req_lock, flags);
144 	id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
145 	spin_unlock_irqrestore(&ctx->req_lock, flags);
146 
147 	return id;
148 }
149 
150 static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
151 {
152 	unsigned long flags;
153 
154 	spin_lock_irqsave(&ctx->req_lock, flags);
155 	idr_remove(&ctx->req_idr, req_id);
156 	spin_unlock_irqrestore(&ctx->req_lock, flags);
157 }
158 
159 static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
160 {
161 	struct hpre_ctx *ctx;
162 	struct hpre_dfx *dfx;
163 	int id;
164 
165 	ctx = hpre_req->ctx;
166 	id = hpre_alloc_req_id(ctx);
167 	if (unlikely(id < 0))
168 		return -EINVAL;
169 
170 	ctx->req_list[id] = hpre_req;
171 	hpre_req->req_id = id;
172 
173 	dfx = ctx->hpre->debug.dfx;
174 	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
175 		ktime_get_ts64(&hpre_req->req_time);
176 
177 	return id;
178 }
179 
180 static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
181 {
182 	struct hpre_ctx *ctx = hpre_req->ctx;
183 	int id = hpre_req->req_id;
184 
185 	if (hpre_req->req_id >= 0) {
186 		hpre_req->req_id = HPRE_INVLD_REQ_ID;
187 		ctx->req_list[id] = NULL;
188 		hpre_free_req_id(ctx, id);
189 	}
190 }
191 
192 static struct hisi_qp *hpre_get_qp_and_start(u8 type)
193 {
194 	struct hisi_qp *qp;
195 	int ret;
196 
197 	qp = hpre_create_qp(type);
198 	if (!qp) {
		pr_err("Cannot create hpre qp!\n");
200 		return ERR_PTR(-ENODEV);
201 	}
202 
203 	ret = hisi_qm_start_qp(qp, 0);
204 	if (ret < 0) {
205 		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Cannot start qp!\n");
207 		return ERR_PTR(-EINVAL);
208 	}
209 
210 	return qp;
211 }
212 
213 static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
214 				  struct scatterlist *data, unsigned int len,
215 				  int is_src, dma_addr_t *tmp)
216 {
217 	struct hpre_ctx *ctx = hpre_req->ctx;
218 	struct device *dev = HPRE_DEV(ctx);
219 	enum dma_data_direction dma_dir;
220 
221 	if (is_src) {
222 		hpre_req->src = NULL;
223 		dma_dir = DMA_TO_DEVICE;
224 	} else {
225 		hpre_req->dst = NULL;
226 		dma_dir = DMA_FROM_DEVICE;
227 	}
228 	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
229 	if (unlikely(dma_mapping_error(dev, *tmp))) {
230 		dev_err(dev, "dma map data err!\n");
231 		return -ENOMEM;
232 	}
233 
234 	return 0;
235 }
236 
237 static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
238 				struct scatterlist *data, unsigned int len,
239 				int is_src, dma_addr_t *tmp)
240 {
241 	struct hpre_ctx *ctx = hpre_req->ctx;
242 	struct device *dev = HPRE_DEV(ctx);
243 	void *ptr;
244 	int shift;
245 
246 	shift = ctx->key_sz - len;
247 	if (unlikely(shift < 0))
248 		return -EINVAL;
249 
250 	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
251 	if (unlikely(!ptr))
252 		return -ENOMEM;
253 
254 	if (is_src) {
255 		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
256 		hpre_req->src = ptr;
257 	} else {
258 		hpre_req->dst = ptr;
259 	}
260 
261 	return 0;
262 }
263 
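/*
 * Set up the DMA address for one data buffer: a single-entry scatterlist of
 * exactly key_sz bytes (that is not DH source data) is mapped directly,
 * otherwise the data is copied into a coherent buffer at the proper offset.
 */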
264 static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
265 			     struct scatterlist *data, unsigned int len,
266 			     int is_src, int is_dh)
267 {
268 	struct hpre_sqe *msg = &hpre_req->req;
269 	struct hpre_ctx *ctx = hpre_req->ctx;
270 	dma_addr_t tmp = 0;
271 	int ret;
272 
	/* DH source data always needs to be formatted (copied into a padded buffer) */
274 	if ((sg_is_last(data) && len == ctx->key_sz) &&
275 	    ((is_dh && !is_src) || !is_dh))
276 		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
277 	else
278 		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
279 
280 	if (unlikely(ret))
281 		return ret;
282 
283 	if (is_src)
284 		msg->in = cpu_to_le64(tmp);
285 	else
286 		msg->out = cpu_to_le64(tmp);
287 
288 	return 0;
289 }
290 
291 static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
292 				 struct hpre_asym_request *req,
293 				 struct scatterlist *dst,
294 				 struct scatterlist *src)
295 {
296 	struct device *dev = HPRE_DEV(ctx);
297 	struct hpre_sqe *sqe = &req->req;
298 	dma_addr_t tmp;
299 
300 	tmp = le64_to_cpu(sqe->in);
301 
302 	if (src) {
303 		if (req->src)
304 			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
305 		else
306 			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
307 	}
308 
309 	tmp = le64_to_cpu(sqe->out);
310 
311 	if (req->dst) {
312 		if (dst)
313 			scatterwalk_map_and_copy(req->dst, dst, 0,
314 						 ctx->key_sz, 1);
315 		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
316 	} else {
317 		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
318 	}
319 }
320 
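/*
 * Post-process one response SQE: look up the request by its tag, remove it
 * from the context, and check the done/error fields in dw0 to decide whether
 * the hardware finished the task successfully.
 */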
321 static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
322 				void **kreq)
323 {
324 	struct hpre_asym_request *req;
325 	int err, id, done;
326 
327 #define HPRE_NO_HW_ERR		0
328 #define HPRE_HW_TASK_DONE	3
329 #define HREE_HW_ERR_MASK	0x7ff
330 #define HREE_SQE_DONE_MASK	0x3
331 	id = (int)le16_to_cpu(sqe->tag);
332 	req = ctx->req_list[id];
333 	hpre_rm_req_from_ctx(req);
334 	*kreq = req;
335 
336 	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
337 		HREE_HW_ERR_MASK;
338 
339 	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
340 		HREE_SQE_DONE_MASK;
341 
342 	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;
344 
345 	return -EINVAL;
346 }
347 
348 static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
349 {
350 	struct hpre *hpre;
351 
352 	if (!ctx || !qp || qlen < 0)
353 		return -EINVAL;
354 
355 	spin_lock_init(&ctx->req_lock);
356 	ctx->qp = qp;
357 
358 	hpre = container_of(ctx->qp->qm, struct hpre, qm);
359 	ctx->hpre = hpre;
360 	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
361 	if (!ctx->req_list)
362 		return -ENOMEM;
363 	ctx->key_sz = 0;
364 	ctx->crt_g2_mode = false;
365 	idr_init(&ctx->req_idr);
366 
367 	return 0;
368 }
369 
370 static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
371 {
372 	if (is_clear_all) {
373 		idr_destroy(&ctx->req_idr);
374 		kfree(ctx->req_list);
375 		hisi_qm_free_qps(&ctx->qp, 1);
376 	}
377 
378 	ctx->crt_g2_mode = false;
379 	ctx->key_sz = 0;
380 }
381 
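/*
 * Check whether the elapsed time since the request was issued exceeds the
 * DFX overtime threshold (in microseconds).
 */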
382 static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
383 			       u64 overtime_thrhld)
384 {
385 	struct timespec64 reply_time;
386 	u64 time_use_us;
387 
388 	ktime_get_ts64(&reply_time);
389 	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
390 		HPRE_DFX_SEC_TO_US +
391 		(reply_time.tv_nsec - req->req_time.tv_nsec) /
392 		HPRE_DFX_US_TO_NS;
393 
394 	if (time_use_us <= overtime_thrhld)
395 		return false;
396 
397 	return true;
398 }
399 
400 static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
401 {
402 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
403 	struct hpre_asym_request *req;
404 	struct kpp_request *areq;
405 	u64 overtime_thrhld;
406 	int ret;
407 
408 	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
409 	areq = req->areq.dh;
410 	areq->dst_len = ctx->key_sz;
411 
412 	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
413 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
414 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
415 
416 	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
417 	kpp_request_complete(areq, ret);
418 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
419 }
420 
421 static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
422 {
423 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
424 	struct hpre_asym_request *req;
425 	struct akcipher_request *areq;
426 	u64 overtime_thrhld;
427 	int ret;
428 
429 	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
430 
431 	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
432 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
433 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
434 
435 	areq = req->areq.rsa;
436 	areq->dst_len = ctx->key_sz;
437 	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
438 	akcipher_request_complete(areq, ret);
439 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
440 }
441 
442 static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
443 {
444 	struct hpre_ctx *ctx = qp->qp_ctx;
445 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
446 	struct hpre_sqe *sqe = resp;
447 	struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
448 
449 	if (unlikely(!req)) {
450 		atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
451 		return;
452 	}
453 
454 	req->cb(ctx, resp);
455 }
456 
457 static void hpre_stop_qp_and_put(struct hisi_qp *qp)
458 {
459 	hisi_qm_stop_qp(qp);
460 	hisi_qm_free_qps(&qp, 1);
461 }
462 
463 static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
464 {
465 	struct hisi_qp *qp;
466 	int ret;
467 
468 	qp = hpre_get_qp_and_start(type);
469 	if (IS_ERR(qp))
470 		return PTR_ERR(qp);
471 
472 	qp->qp_ctx = ctx;
473 	qp->req_cb = hpre_alg_cb;
474 
475 	ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
476 	if (ret)
477 		hpre_stop_qp_and_put(qp);
478 
479 	return ret;
480 }
481 
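/*
 * Fill the common fields of an SQE for an RSA or DH request: the asym request
 * is carved out of the crypto request's private context (aligned to
 * HPRE_ALIGN_SZ), the callback is set, and a request id is allocated and
 * stored in the SQE tag.
 */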
482 static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
483 {
484 	struct hpre_asym_request *h_req;
485 	struct hpre_sqe *msg;
486 	int req_id;
487 	void *tmp;
488 
489 	if (is_rsa) {
490 		struct akcipher_request *akreq = req;
491 
492 		if (akreq->dst_len < ctx->key_sz) {
493 			akreq->dst_len = ctx->key_sz;
494 			return -EOVERFLOW;
495 		}
496 
497 		tmp = akcipher_request_ctx(akreq);
498 		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
499 		h_req->cb = hpre_rsa_cb;
500 		h_req->areq.rsa = akreq;
501 		msg = &h_req->req;
502 		memset(msg, 0, sizeof(*msg));
503 	} else {
504 		struct kpp_request *kreq = req;
505 
506 		if (kreq->dst_len < ctx->key_sz) {
507 			kreq->dst_len = ctx->key_sz;
508 			return -EOVERFLOW;
509 		}
510 
511 		tmp = kpp_request_ctx(kreq);
512 		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
513 		h_req->cb = hpre_dh_cb;
514 		h_req->areq.dh = kreq;
515 		msg = &h_req->req;
516 		memset(msg, 0, sizeof(*msg));
517 		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
518 	}
519 
520 	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
521 	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
522 	h_req->ctx = ctx;
523 
524 	req_id = hpre_add_req_to_ctx(h_req);
525 	if (req_id < 0)
526 		return -EBUSY;
527 
528 	msg->tag = cpu_to_le16((u16)req_id);
529 
530 	return 0;
531 }
532 
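/*
 * Send an SQE to the queue pair, retrying up to HPRE_TRY_SEND_TIMES when the
 * queue is busy, and update the DFX send counters accordingly.
 */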
533 static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
534 {
535 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
536 	int ctr = 0;
537 	int ret;
538 
539 	do {
540 		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
541 		ret = hisi_qp_send(ctx->qp, msg);
542 		if (ret != -EBUSY)
543 			break;
544 		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
545 	} while (ctr++ < HPRE_TRY_SEND_TIMES);
546 
547 	if (likely(!ret))
548 		return ret;
549 
550 	if (ret != -EBUSY)
551 		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
552 
553 	return ret;
554 }
555 
556 static int hpre_dh_compute_value(struct kpp_request *req)
557 {
558 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
559 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
560 	void *tmp = kpp_request_ctx(req);
561 	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
562 	struct hpre_sqe *msg = &hpre_req->req;
563 	int ret;
564 
565 	ret = hpre_msg_request_set(ctx, req, false);
566 	if (unlikely(ret))
567 		return ret;
568 
569 	if (req->src) {
570 		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
571 		if (unlikely(ret))
572 			goto clear_all;
573 	} else {
574 		msg->in = cpu_to_le64(ctx->dh.dma_g);
575 	}
576 
577 	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
578 	if (unlikely(ret))
579 		goto clear_all;
580 
581 	if (ctx->crt_g2_mode && !req->src)
582 		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
583 	else
584 		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
585 
586 	/* success */
587 	ret = hpre_send(ctx, msg);
588 	if (likely(!ret))
589 		return -EINPROGRESS;
590 
591 clear_all:
592 	hpre_rm_req_from_ctx(hpre_req);
593 	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
594 
595 	return ret;
596 }
597 
598 static int hpre_is_dh_params_length_valid(unsigned int key_sz)
599 {
600 #define _HPRE_DH_GRP1		768
601 #define _HPRE_DH_GRP2		1024
602 #define _HPRE_DH_GRP5		1536
603 #define _HPRE_DH_GRP14		2048
604 #define _HPRE_DH_GRP15		3072
605 #define _HPRE_DH_GRP16		4096
606 	switch (key_sz) {
607 	case _HPRE_DH_GRP1:
608 	case _HPRE_DH_GRP2:
609 	case _HPRE_DH_GRP5:
610 	case _HPRE_DH_GRP14:
611 	case _HPRE_DH_GRP15:
612 	case _HPRE_DH_GRP16:
613 		return 0;
614 	}
615 
616 	return -EINVAL;
617 }
618 
619 static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
620 {
621 	struct device *dev = HPRE_DEV(ctx);
622 	unsigned int sz;
623 
624 	if (params->p_size > HPRE_DH_MAX_P_SZ)
625 		return -EINVAL;
626 
627 	if (hpre_is_dh_params_length_valid(params->p_size <<
628 					   HPRE_BITS_2_BYTES_SHIFT))
629 		return -EINVAL;
630 
631 	sz = ctx->key_sz = params->p_size;
632 	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
633 					  &ctx->dh.dma_xa_p, GFP_KERNEL);
634 	if (!ctx->dh.xa_p)
635 		return -ENOMEM;
636 
637 	memcpy(ctx->dh.xa_p + sz, params->p, sz);
638 
	/* If g equals 2, don't copy it */
640 	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
641 		ctx->crt_g2_mode = true;
642 		return 0;
643 	}
644 
645 	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
646 	if (!ctx->dh.g) {
647 		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
648 				  ctx->dh.dma_xa_p);
649 		ctx->dh.xa_p = NULL;
650 		return -ENOMEM;
651 	}
652 
653 	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
654 
655 	return 0;
656 }
657 
658 static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
659 {
660 	struct device *dev = HPRE_DEV(ctx);
661 	unsigned int sz = ctx->key_sz;
662 
663 	if (is_clear_all)
664 		hisi_qm_stop_qp(ctx->qp);
665 
666 	if (ctx->dh.g) {
667 		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
668 		ctx->dh.g = NULL;
669 	}
670 
671 	if (ctx->dh.xa_p) {
672 		memzero_explicit(ctx->dh.xa_p, sz);
673 		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
674 				  ctx->dh.dma_xa_p);
675 		ctx->dh.xa_p = NULL;
676 	}
677 
678 	hpre_ctx_clear(ctx, is_clear_all);
679 }
680 
681 static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
682 			      unsigned int len)
683 {
684 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
685 	struct dh params;
686 	int ret;
687 
688 	if (crypto_dh_decode_key(buf, len, &params) < 0)
689 		return -EINVAL;
690 
691 	/* Free old secret if any */
692 	hpre_dh_clear_ctx(ctx, false);
693 
694 	ret = hpre_dh_set_params(ctx, &params);
695 	if (ret < 0)
696 		goto err_clear_ctx;
697 
698 	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
699 	       params.key_size);
700 
701 	return 0;
702 
703 err_clear_ctx:
704 	hpre_dh_clear_ctx(ctx, false);
705 	return ret;
706 }
707 
708 static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
709 {
710 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
711 
712 	return ctx->key_sz;
713 }
714 
715 static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
716 {
717 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
718 
719 	return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
720 }
721 
722 static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
723 {
724 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
725 
726 	hpre_dh_clear_ctx(ctx, true);
727 }
728 
729 static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
730 {
731 	while (!**ptr && *len) {
732 		(*ptr)++;
733 		(*len)--;
734 	}
735 }
736 
737 static bool hpre_rsa_key_size_is_support(unsigned int len)
738 {
739 	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
740 
741 #define _RSA_1024BITS_KEY_WDTH		1024
742 #define _RSA_2048BITS_KEY_WDTH		2048
743 #define _RSA_3072BITS_KEY_WDTH		3072
744 #define _RSA_4096BITS_KEY_WDTH		4096
745 
746 	switch (bits) {
747 	case _RSA_1024BITS_KEY_WDTH:
748 	case _RSA_2048BITS_KEY_WDTH:
749 	case _RSA_3072BITS_KEY_WDTH:
750 	case _RSA_4096BITS_KEY_WDTH:
751 		return true;
752 	default:
753 		return false;
754 	}
755 }
756 
757 static int hpre_rsa_enc(struct akcipher_request *req)
758 {
759 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
760 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
761 	void *tmp = akcipher_request_ctx(req);
762 	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
763 	struct hpre_sqe *msg = &hpre_req->req;
764 	int ret;
765 
	/* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
767 	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
768 	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
769 		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
770 		ret = crypto_akcipher_encrypt(req);
771 		akcipher_request_set_tfm(req, tfm);
772 		return ret;
773 	}
774 
775 	if (unlikely(!ctx->rsa.pubkey))
776 		return -EINVAL;
777 
778 	ret = hpre_msg_request_set(ctx, req, true);
779 	if (unlikely(ret))
780 		return ret;
781 
782 	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
783 	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
784 
785 	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
786 	if (unlikely(ret))
787 		goto clear_all;
788 
789 	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
790 	if (unlikely(ret))
791 		goto clear_all;
792 
793 	/* success */
794 	ret = hpre_send(ctx, msg);
795 	if (likely(!ret))
796 		return -EINPROGRESS;
797 
798 clear_all:
799 	hpre_rm_req_from_ctx(hpre_req);
800 	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
801 
802 	return ret;
803 }
804 
805 static int hpre_rsa_dec(struct akcipher_request *req)
806 {
807 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
808 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
809 	void *tmp = akcipher_request_ctx(req);
810 	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
811 	struct hpre_sqe *msg = &hpre_req->req;
812 	int ret;
813 
	/* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
815 	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
816 	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
817 		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
818 		ret = crypto_akcipher_decrypt(req);
819 		akcipher_request_set_tfm(req, tfm);
820 		return ret;
821 	}
822 
823 	if (unlikely(!ctx->rsa.prikey))
824 		return -EINVAL;
825 
826 	ret = hpre_msg_request_set(ctx, req, true);
827 	if (unlikely(ret))
828 		return ret;
829 
830 	if (ctx->crt_g2_mode) {
831 		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
832 		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
833 				       HPRE_ALG_NC_CRT);
834 	} else {
835 		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
836 		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
837 				       HPRE_ALG_NC_NCRT);
838 	}
839 
840 	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
841 	if (unlikely(ret))
842 		goto clear_all;
843 
844 	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
845 	if (unlikely(ret))
846 		goto clear_all;
847 
848 	/* success */
849 	ret = hpre_send(ctx, msg);
850 	if (likely(!ret))
851 		return -EINPROGRESS;
852 
853 clear_all:
854 	hpre_rm_req_from_ctx(hpre_req);
855 	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
856 
857 	return ret;
858 }
859 
860 static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
861 			  size_t vlen, bool private)
862 {
863 	const char *ptr = value;
864 
865 	hpre_rsa_drop_leading_zeros(&ptr, &vlen);
866 
867 	ctx->key_sz = vlen;
868 
	/* if an unsupported key size is provided, fall back to the software tfm */
870 	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
871 		return 0;
872 
873 	ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
874 					     &ctx->rsa.dma_pubkey,
875 					     GFP_KERNEL);
876 	if (!ctx->rsa.pubkey)
877 		return -ENOMEM;
878 
879 	if (private) {
880 		ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
881 						     &ctx->rsa.dma_prikey,
882 						     GFP_KERNEL);
883 		if (!ctx->rsa.prikey) {
884 			dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
885 					  ctx->rsa.pubkey,
886 					  ctx->rsa.dma_pubkey);
887 			ctx->rsa.pubkey = NULL;
888 			return -ENOMEM;
889 		}
890 		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
891 	}
892 	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
893 
894 	/* Using hardware HPRE to do RSA */
895 	return 1;
896 }
897 
898 static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
899 			  size_t vlen)
900 {
901 	const char *ptr = value;
902 
903 	hpre_rsa_drop_leading_zeros(&ptr, &vlen);
904 
905 	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
906 		return -EINVAL;
907 
908 	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
909 
910 	return 0;
911 }
912 
913 static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
914 			  size_t vlen)
915 {
916 	const char *ptr = value;
917 
918 	hpre_rsa_drop_leading_zeros(&ptr, &vlen);
919 
920 	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
921 		return -EINVAL;
922 
923 	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
924 
925 	return 0;
926 }
927 
928 static int hpre_crt_para_get(char *para, size_t para_sz,
929 			     const char *raw, size_t raw_sz)
930 {
931 	const char *ptr = raw;
932 	size_t len = raw_sz;
933 
934 	hpre_rsa_drop_leading_zeros(&ptr, &len);
935 	if (!len || len > para_sz)
936 		return -EINVAL;
937 
938 	memcpy(para + para_sz - len, ptr, len);
939 
940 	return 0;
941 }
942 
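/*
 * Build the CRT private key buffer. The five CRT parameters are packed into
 * half-key-size slots in the order dq, dp, q, p, qinv (see HPRE_CRT_*).
 */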
943 static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
944 {
945 	unsigned int hlf_ksz = ctx->key_sz >> 1;
946 	struct device *dev = HPRE_DEV(ctx);
947 	u64 offset;
948 	int ret;
949 
950 	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
951 					&ctx->rsa.dma_crt_prikey,
952 					GFP_KERNEL);
953 	if (!ctx->rsa.crt_prikey)
954 		return -ENOMEM;
955 
956 	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
957 				rsa_key->dq, rsa_key->dq_sz);
958 	if (ret)
959 		goto free_key;
960 
961 	offset = hlf_ksz;
962 	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
963 				rsa_key->dp, rsa_key->dp_sz);
964 	if (ret)
965 		goto free_key;
966 
967 	offset = hlf_ksz * HPRE_CRT_Q;
968 	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
969 				rsa_key->q, rsa_key->q_sz);
970 	if (ret)
971 		goto free_key;
972 
973 	offset = hlf_ksz * HPRE_CRT_P;
974 	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
975 				rsa_key->p, rsa_key->p_sz);
976 	if (ret)
977 		goto free_key;
978 
979 	offset = hlf_ksz * HPRE_CRT_INV;
980 	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
981 				rsa_key->qinv, rsa_key->qinv_sz);
982 	if (ret)
983 		goto free_key;
984 
985 	ctx->crt_g2_mode = true;
986 
987 	return 0;
988 
989 free_key:
990 	offset = hlf_ksz * HPRE_CRT_PRMS;
991 	memzero_explicit(ctx->rsa.crt_prikey, offset);
992 	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
993 			  ctx->rsa.dma_crt_prikey);
994 	ctx->rsa.crt_prikey = NULL;
995 	ctx->crt_g2_mode = false;
996 
997 	return ret;
998 }
999 
/* If is_clear_all is set, all the resources of the QP will be cleaned. */
1001 static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
1002 {
1003 	unsigned int half_key_sz = ctx->key_sz >> 1;
1004 	struct device *dev = HPRE_DEV(ctx);
1005 
1006 	if (is_clear_all)
1007 		hisi_qm_stop_qp(ctx->qp);
1008 
1009 	if (ctx->rsa.pubkey) {
1010 		dma_free_coherent(dev, ctx->key_sz << 1,
1011 				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
1012 		ctx->rsa.pubkey = NULL;
1013 	}
1014 
1015 	if (ctx->rsa.crt_prikey) {
1016 		memzero_explicit(ctx->rsa.crt_prikey,
1017 				 half_key_sz * HPRE_CRT_PRMS);
1018 		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
1019 				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
1020 		ctx->rsa.crt_prikey = NULL;
1021 	}
1022 
1023 	if (ctx->rsa.prikey) {
1024 		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
1025 		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
1026 				  ctx->rsa.dma_prikey);
1027 		ctx->rsa.prikey = NULL;
1028 	}
1029 
1030 	hpre_ctx_clear(ctx, is_clear_all);
1031 }
1032 
/*
 * Determine whether the key is in CRT form:
 * CRT: return true, N-CRT: return false.
 */
1037 static bool hpre_is_crt_key(struct rsa_key *key)
1038 {
1039 	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
1040 		  key->qinv_sz;
1041 
1042 #define LEN_OF_NCRT_PARA	5
1043 
	/* for an N-CRT key, the total length of the CRT parameters does not exceed 5 bytes */
1045 	return len > LEN_OF_NCRT_PARA;
1046 }
1047 
1048 static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
1049 			   unsigned int keylen, bool private)
1050 {
1051 	struct rsa_key rsa_key;
1052 	int ret;
1053 
1054 	hpre_rsa_clear_ctx(ctx, false);
1055 
1056 	if (private)
1057 		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
1058 	else
1059 		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
1060 	if (ret < 0)
1061 		return ret;
1062 
1063 	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
1064 	if (ret <= 0)
1065 		return ret;
1066 
1067 	if (private) {
1068 		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
1069 		if (ret < 0)
1070 			goto free;
1071 
1072 		if (hpre_is_crt_key(&rsa_key)) {
1073 			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
1074 			if (ret < 0)
1075 				goto free;
1076 		}
1077 	}
1078 
1079 	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
1080 	if (ret < 0)
1081 		goto free;
1082 
1083 	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
1084 		ret = -EINVAL;
1085 		goto free;
1086 	}
1087 
1088 	return 0;
1089 
1090 free:
1091 	hpre_rsa_clear_ctx(ctx, false);
1092 	return ret;
1093 }
1094 
1095 static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
1096 			      unsigned int keylen)
1097 {
1098 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1099 	int ret;
1100 
1101 	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
1102 	if (ret)
1103 		return ret;
1104 
1105 	return hpre_rsa_setkey(ctx, key, keylen, false);
1106 }
1107 
1108 static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
1109 			       unsigned int keylen)
1110 {
1111 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1112 	int ret;
1113 
1114 	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
1115 	if (ret)
1116 		return ret;
1117 
1118 	return hpre_rsa_setkey(ctx, key, keylen, true);
1119 }
1120 
1121 static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
1122 {
1123 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1124 
	/* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
1126 	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
1127 	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
1128 		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
1129 
1130 	return ctx->key_sz;
1131 }
1132 
1133 static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
1134 {
1135 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1136 	int ret;
1137 
1138 	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
1139 	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Cannot alloc akcipher!\n");
1141 		return PTR_ERR(ctx->rsa.soft_tfm);
1142 	}
1143 
1144 	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
1145 	if (ret)
1146 		crypto_free_akcipher(ctx->rsa.soft_tfm);
1147 
1148 	return ret;
1149 }
1150 
1151 static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
1152 {
1153 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1154 
1155 	hpre_rsa_clear_ctx(ctx, true);
1156 	crypto_free_akcipher(ctx->rsa.soft_tfm);
1157 }
1158 
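/* Reverse the byte order of 'data' in place (little-endian <-> big-endian). */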
1159 static void hpre_key_to_big_end(u8 *data, int len)
1160 {
1161 	int i, j;
1162 	u8 tmp;
1163 
1164 	for (i = 0; i < len / 2; i++) {
1165 		j = len - i - 1;
1166 		tmp = data[j];
1167 		data[j] = data[i];
1168 		data[i] = tmp;
1169 	}
1170 }
1171 
1172 static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
1173 			       bool is_ecdh)
1174 {
1175 	struct device *dev = HPRE_DEV(ctx);
1176 	unsigned int sz = ctx->key_sz;
1177 	unsigned int shift = sz << 1;
1178 
1179 	if (is_clear_all)
1180 		hisi_qm_stop_qp(ctx->qp);
1181 
1182 	if (is_ecdh && ctx->ecdh.p) {
1183 		/* ecdh: p->a->k->b */
1184 		memzero_explicit(ctx->ecdh.p + shift, sz);
1185 		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1186 		ctx->ecdh.p = NULL;
1187 	} else if (!is_ecdh && ctx->curve25519.p) {
1188 		/* curve25519: p->a->k */
1189 		memzero_explicit(ctx->curve25519.p + shift, sz);
1190 		dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
1191 				  ctx->curve25519.dma_p);
1192 		ctx->curve25519.p = NULL;
1193 	}
1194 
1195 	hpre_ctx_clear(ctx, is_clear_all);
1196 }
1197 
1198 static unsigned int hpre_ecdh_supported_curve(unsigned short id)
1199 {
1200 	switch (id) {
1201 	case ECC_CURVE_NIST_P192:
1202 	case ECC_CURVE_NIST_P256:
1203 		return HPRE_ECC_HW256_KSZ_B;
1204 	default:
1205 		break;
1206 	}
1207 
1208 	return 0;
1209 }
1210 
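/*
 * Copy a curve parameter of 'ndigits' 64-bit words into 'addr' (cur_sz bytes)
 * and convert it to the big-endian form expected by the hardware.
 */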
1211 static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
1212 {
1213 	unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
1214 	u8 i = 0;
1215 
1216 	while (i < ndigits - 1) {
1217 		memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
1218 		i++;
1219 	}
1220 
1221 	memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
1222 	hpre_key_to_big_end((u8 *)addr, cur_sz);
1223 }
1224 
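/*
 * Fill the ECDH parameter buffer: ctx->ecdh.p holds p, a, k and b in
 * key_sz-sized fields, and ctx->ecdh.g holds the base point x and y.
 * A full-curve-size private key is also rejected when it is not smaller
 * than the curve order n.
 */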
1225 static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
1226 				unsigned int cur_sz)
1227 {
1228 	unsigned int shifta = ctx->key_sz << 1;
1229 	unsigned int shiftb = ctx->key_sz << 2;
1230 	void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
1231 	void *a = ctx->ecdh.p + shifta - cur_sz;
1232 	void *b = ctx->ecdh.p + shiftb - cur_sz;
1233 	void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
1234 	void *y = ctx->ecdh.g + shifta - cur_sz;
1235 	const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
1236 	char *n;
1237 
1238 	if (unlikely(!curve))
1239 		return -EINVAL;
1240 
1241 	n = kzalloc(ctx->key_sz, GFP_KERNEL);
1242 	if (!n)
1243 		return -ENOMEM;
1244 
1245 	fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
1246 	fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
1247 	fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
1248 	fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
1249 	fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
1250 	fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
1251 
1252 	if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
1253 		kfree(n);
1254 		return -EINVAL;
1255 	}
1256 
1257 	kfree(n);
1258 	return 0;
1259 }
1260 
1261 static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
1262 {
1263 	switch (id) {
1264 	case ECC_CURVE_NIST_P192:
1265 		return HPRE_ECC_NIST_P192_N_SIZE;
1266 	case ECC_CURVE_NIST_P256:
1267 		return HPRE_ECC_NIST_P256_N_SIZE;
1268 	default:
1269 		break;
1270 	}
1271 
1272 	return 0;
1273 }
1274 
1275 static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
1276 {
1277 	struct device *dev = HPRE_DEV(ctx);
1278 	unsigned int sz, shift, curve_sz;
1279 	int ret;
1280 
1281 	ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
1282 	if (!ctx->key_sz)
1283 		return -EINVAL;
1284 
1285 	curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1286 	if (!curve_sz || params->key_size > curve_sz)
1287 		return -EINVAL;
1288 
1289 	sz = ctx->key_sz;
1290 
1291 	if (!ctx->ecdh.p) {
1292 		ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
1293 						 GFP_KERNEL);
1294 		if (!ctx->ecdh.p)
1295 			return -ENOMEM;
1296 	}
1297 
1298 	shift = sz << 2;
1299 	ctx->ecdh.g = ctx->ecdh.p + shift;
1300 	ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
1301 
1302 	ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
1303 	if (ret) {
1304 		dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
1305 		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1306 		ctx->ecdh.p = NULL;
1307 		return ret;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static bool hpre_key_is_zero(char *key, unsigned short key_sz)
1314 {
1315 	int i;
1316 
1317 	for (i = 0; i < key_sz; i++)
1318 		if (key[i])
1319 			return false;
1320 
1321 	return true;
1322 }
1323 
1324 static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
1325 				unsigned int len)
1326 {
1327 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1328 	struct device *dev = HPRE_DEV(ctx);
1329 	unsigned int sz, sz_shift;
1330 	struct ecdh params;
1331 	int ret;
1332 
1333 	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
1334 		dev_err(dev, "failed to decode ecdh key!\n");
1335 		return -EINVAL;
1336 	}
1337 
1338 	if (hpre_key_is_zero(params.key, params.key_size)) {
1339 		dev_err(dev, "Invalid hpre key!\n");
1340 		return -EINVAL;
1341 	}
1342 
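	/* Free old secret if any */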
1343 	hpre_ecc_clear_ctx(ctx, false, true);
1344 
1345 	ret = hpre_ecdh_set_param(ctx, &params);
1346 	if (ret < 0) {
1347 		dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
1348 		return ret;
1349 	}
1350 
1351 	sz = ctx->key_sz;
1352 	sz_shift = (sz << 1) + sz - params.key_size;
1353 	memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
1354 
1355 	return 0;
1356 }
1357 
1358 static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
1359 				      struct hpre_asym_request *req,
1360 				      struct scatterlist *dst,
1361 				      struct scatterlist *src)
1362 {
1363 	struct device *dev = HPRE_DEV(ctx);
1364 	struct hpre_sqe *sqe = &req->req;
1365 	dma_addr_t dma;
1366 
1367 	dma = le64_to_cpu(sqe->in);
1368 
1369 	if (src && req->src)
1370 		dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
1371 
1372 	dma = le64_to_cpu(sqe->out);
1373 
1374 	if (req->dst)
1375 		dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
1376 	if (dst)
1377 		dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
1378 }
1379 
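/*
 * ECDH completion callback: the hardware writes the result point as two
 * key_sz-sized, zero-padded fields, so pack x and y back to curve size
 * before completing the request.
 */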
1380 static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
1381 {
1382 	unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1383 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1384 	struct hpre_asym_request *req = NULL;
1385 	struct kpp_request *areq;
1386 	u64 overtime_thrhld;
1387 	char *p;
1388 	int ret;
1389 
1390 	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1391 	areq = req->areq.ecdh;
1392 	areq->dst_len = ctx->key_sz << 1;
1393 
1394 	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1395 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1396 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1397 
1398 	p = sg_virt(areq->dst);
1399 	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
1400 	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
1401 
1402 	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1403 	kpp_request_complete(areq, ret);
1404 
1405 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1406 }
1407 
1408 static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
1409 				     struct kpp_request *req)
1410 {
1411 	struct hpre_asym_request *h_req;
1412 	struct hpre_sqe *msg;
1413 	int req_id;
1414 	void *tmp;
1415 
1416 	if (req->dst_len < ctx->key_sz << 1) {
1417 		req->dst_len = ctx->key_sz << 1;
1418 		return -EINVAL;
1419 	}
1420 
1421 	tmp = kpp_request_ctx(req);
1422 	h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1423 	h_req->cb = hpre_ecdh_cb;
1424 	h_req->areq.ecdh = req;
1425 	msg = &h_req->req;
1426 	memset(msg, 0, sizeof(*msg));
1427 	msg->key = cpu_to_le64(ctx->ecdh.dma_p);
1428 
1429 	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1430 	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1431 	h_req->ctx = ctx;
1432 
1433 	req_id = hpre_add_req_to_ctx(h_req);
1434 	if (req_id < 0)
1435 		return -EBUSY;
1436 
1437 	msg->tag = cpu_to_le16((u16)req_id);
1438 	return 0;
1439 }
1440 
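/*
 * Copy the peer public key (x || y, each len/2 bytes) from the scatterlist
 * into a coherent buffer, padding each coordinate to key_sz bytes.
 */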
1441 static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
1442 				   struct scatterlist *data, unsigned int len)
1443 {
1444 	struct hpre_sqe *msg = &hpre_req->req;
1445 	struct hpre_ctx *ctx = hpre_req->ctx;
1446 	struct device *dev = HPRE_DEV(ctx);
1447 	unsigned int tmpshift;
1448 	dma_addr_t dma = 0;
1449 	void *ptr;
1450 	int shift;
1451 
	/* Src_data includes gx and gy. */
1453 	shift = ctx->key_sz - (len >> 1);
1454 	if (unlikely(shift < 0))
1455 		return -EINVAL;
1456 
1457 	ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
1458 	if (unlikely(!ptr))
1459 		return -ENOMEM;
1460 
1461 	tmpshift = ctx->key_sz << 1;
1462 	scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
1463 	memcpy(ptr + shift, ptr + tmpshift, len >> 1);
1464 	memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
1465 
1466 	hpre_req->src = ptr;
1467 	msg->in = cpu_to_le64(dma);
1468 	return 0;
1469 }
1470 
1471 static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
1472 				   struct scatterlist *data, unsigned int len)
1473 {
1474 	struct hpre_sqe *msg = &hpre_req->req;
1475 	struct hpre_ctx *ctx = hpre_req->ctx;
1476 	struct device *dev = HPRE_DEV(ctx);
1477 	dma_addr_t dma = 0;
1478 
1479 	if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
1480 		dev_err(dev, "data or data length is illegal!\n");
1481 		return -EINVAL;
1482 	}
1483 
1484 	hpre_req->dst = NULL;
1485 	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1486 	if (unlikely(dma_mapping_error(dev, dma))) {
1487 		dev_err(dev, "dma map data err!\n");
1488 		return -ENOMEM;
1489 	}
1490 
1491 	msg->out = cpu_to_le64(dma);
1492 	return 0;
1493 }
1494 
1495 static int hpre_ecdh_compute_value(struct kpp_request *req)
1496 {
1497 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1498 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1499 	struct device *dev = HPRE_DEV(ctx);
1500 	void *tmp = kpp_request_ctx(req);
1501 	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1502 	struct hpre_sqe *msg = &hpre_req->req;
1503 	int ret;
1504 
1505 	ret = hpre_ecdh_msg_request_set(ctx, req);
1506 	if (unlikely(ret)) {
1507 		dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
1508 		return ret;
1509 	}
1510 
1511 	if (req->src) {
1512 		ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
1513 		if (unlikely(ret)) {
1514 			dev_err(dev, "failed to init src data, ret = %d!\n", ret);
1515 			goto clear_all;
1516 		}
1517 	} else {
1518 		msg->in = cpu_to_le64(ctx->ecdh.dma_g);
1519 	}
1520 
1521 	ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
1522 	if (unlikely(ret)) {
1523 		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1524 		goto clear_all;
1525 	}
1526 
1527 	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
1528 	ret = hpre_send(ctx, msg);
1529 	if (likely(!ret))
1530 		return -EINPROGRESS;
1531 
1532 clear_all:
1533 	hpre_rm_req_from_ctx(hpre_req);
1534 	hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1535 	return ret;
1536 }
1537 
1538 static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
1539 {
1540 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1541 
	/* max size is the pub_key_size, including both x and y */
1543 	return ctx->key_sz << 1;
1544 }
1545 
1546 static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
1547 {
1548 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1549 
1550 	ctx->curve_id = ECC_CURVE_NIST_P192;
1551 
1552 	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1553 }
1554 
1555 static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
1556 {
1557 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1558 
1559 	ctx->curve_id = ECC_CURVE_NIST_P256;
1560 
1561 	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1562 }
1563 
1564 static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
1565 {
1566 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1567 
1568 	hpre_ecc_clear_ctx(ctx, true, true);
1569 }
1570 
1571 static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
1572 				       unsigned int len)
1573 {
1574 	u8 secret[CURVE25519_KEY_SIZE] = { 0 };
1575 	unsigned int sz = ctx->key_sz;
1576 	const struct ecc_curve *curve;
1577 	unsigned int shift = sz << 1;
1578 	void *p;
1579 
	/*
	 * The key from 'buf' is in little-endian order. Preprocess it as
	 * described in RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"),
	 * then convert it to big endian. Only then does the result match the
	 * software curve25519 implementation in the crypto subsystem.
	 */
1586 	memcpy(secret, buf, len);
1587 	curve25519_clamp_secret(secret);
1588 	hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
1589 
1590 	p = ctx->curve25519.p + sz - len;
1591 
1592 	curve = ecc_get_curve25519();
1593 
1594 	/* fill curve parameters */
1595 	fill_curve_param(p, curve->p, len, curve->g.ndigits);
1596 	fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
1597 	memcpy(p + shift, secret, len);
1598 	fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
1599 	memzero_explicit(secret, CURVE25519_KEY_SIZE);
1600 }
1601 
1602 static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
1603 				     unsigned int len)
1604 {
1605 	struct device *dev = HPRE_DEV(ctx);
1606 	unsigned int sz = ctx->key_sz;
1607 	unsigned int shift = sz << 1;
1608 
1609 	/* p->a->k->gx */
1610 	if (!ctx->curve25519.p) {
1611 		ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
1612 						       &ctx->curve25519.dma_p,
1613 						       GFP_KERNEL);
1614 		if (!ctx->curve25519.p)
1615 			return -ENOMEM;
1616 	}
1617 
1618 	ctx->curve25519.g = ctx->curve25519.p + shift + sz;
1619 	ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
1620 
1621 	hpre_curve25519_fill_curve(ctx, buf, len);
1622 
1623 	return 0;
1624 }
1625 
1626 static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
1627 				      unsigned int len)
1628 {
1629 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1630 	struct device *dev = HPRE_DEV(ctx);
1631 	int ret = -EINVAL;
1632 
1633 	if (len != CURVE25519_KEY_SIZE ||
1634 	    !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "key is null or key len is not 32 bytes!\n");
1636 		return ret;
1637 	}
1638 
1639 	/* Free old secret if any */
1640 	hpre_ecc_clear_ctx(ctx, false, false);
1641 
1642 	ctx->key_sz = CURVE25519_KEY_SIZE;
1643 	ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
1644 	if (ret) {
1645 		dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
1646 		hpre_ecc_clear_ctx(ctx, false, false);
1647 		return ret;
1648 	}
1649 
1650 	return 0;
1651 }
1652 
1653 static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
1654 					    struct hpre_asym_request *req,
1655 					    struct scatterlist *dst,
1656 					    struct scatterlist *src)
1657 {
1658 	struct device *dev = HPRE_DEV(ctx);
1659 	struct hpre_sqe *sqe = &req->req;
1660 	dma_addr_t dma;
1661 
1662 	dma = le64_to_cpu(sqe->in);
1663 
1664 	if (src && req->src)
1665 		dma_free_coherent(dev, ctx->key_sz, req->src, dma);
1666 
1667 	dma = le64_to_cpu(sqe->out);
1668 
1669 	if (req->dst)
1670 		dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
1671 	if (dst)
1672 		dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
1673 }
1674 
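/*
 * Curve25519 completion callback: the hardware result is big-endian, so
 * convert it back to the little-endian form expected by the kpp API before
 * completing the request.
 */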
1675 static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
1676 {
1677 	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1678 	struct hpre_asym_request *req = NULL;
1679 	struct kpp_request *areq;
1680 	u64 overtime_thrhld;
1681 	int ret;
1682 
1683 	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1684 	areq = req->areq.curve25519;
1685 	areq->dst_len = ctx->key_sz;
1686 
1687 	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1688 	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1689 		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1690 
1691 	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
1692 
1693 	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1694 	kpp_request_complete(areq, ret);
1695 
1696 	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1697 }
1698 
1699 static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
1700 					   struct kpp_request *req)
1701 {
1702 	struct hpre_asym_request *h_req;
1703 	struct hpre_sqe *msg;
1704 	int req_id;
1705 	void *tmp;
1706 
1707 	if (unlikely(req->dst_len < ctx->key_sz)) {
1708 		req->dst_len = ctx->key_sz;
1709 		return -EINVAL;
1710 	}
1711 
1712 	tmp = kpp_request_ctx(req);
1713 	h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1714 	h_req->cb = hpre_curve25519_cb;
1715 	h_req->areq.curve25519 = req;
1716 	msg = &h_req->req;
1717 	memset(msg, 0, sizeof(*msg));
1718 	msg->key = cpu_to_le64(ctx->curve25519.dma_p);
1719 
1720 	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1721 	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1722 	h_req->ctx = ctx;
1723 
1724 	req_id = hpre_add_req_to_ctx(h_req);
1725 	if (req_id < 0)
1726 		return -EBUSY;
1727 
1728 	msg->tag = cpu_to_le16((u16)req_id);
1729 	return 0;
1730 }
1731 
1732 static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
1733 				    struct scatterlist *data, unsigned int len)
1734 {
1735 	struct hpre_sqe *msg = &hpre_req->req;
1736 	struct hpre_ctx *ctx = hpre_req->ctx;
1737 	struct device *dev = HPRE_DEV(ctx);
1738 	u8 p[CURVE25519_KEY_SIZE] = { 0 };
1739 	const struct ecc_curve *curve;
1740 	dma_addr_t dma = 0;
1741 	u8 *ptr;
1742 
1743 	if (len != CURVE25519_KEY_SIZE) {
		dev_err(dev, "src_data len is not 32 bytes, len = %u!\n", len);
1745 		return -EINVAL;
1746 	}
1747 
1748 	ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
1749 	if (unlikely(!ptr))
1750 		return -ENOMEM;
1751 
1752 	scatterwalk_map_and_copy(ptr, data, 0, len, 0);
1753 
1754 	if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
1755 		dev_err(dev, "gx is null!\n");
1756 		goto err;
1757 	}
1758 
	/*
	 * Src_data (gx) is in little-endian order; the MSB of its final byte
	 * must be masked as described in RFC 7748, and the value converted to
	 * big-endian form before hisi_hpre can use the data.
	 */
1764 	ptr[31] &= 0x7f;
1765 	hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
1766 
1767 	curve = ecc_get_curve25519();
1768 
1769 	fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
1770 	if (memcmp(ptr, p, ctx->key_sz) >= 0) {
1771 		dev_err(dev, "gx is out of p!\n");
1772 		goto err;
1773 	}
1774 
1775 	hpre_req->src = ptr;
1776 	msg->in = cpu_to_le64(dma);
1777 	return 0;
1778 
1779 err:
1780 	dma_free_coherent(dev, ctx->key_sz, ptr, dma);
1781 	return -EINVAL;
1782 }
1783 
1784 static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
1785 				    struct scatterlist *data, unsigned int len)
1786 {
1787 	struct hpre_sqe *msg = &hpre_req->req;
1788 	struct hpre_ctx *ctx = hpre_req->ctx;
1789 	struct device *dev = HPRE_DEV(ctx);
1790 	dma_addr_t dma = 0;
1791 
1792 	if (!data || !sg_is_last(data) || len != ctx->key_sz) {
1793 		dev_err(dev, "data or data length is illegal!\n");
1794 		return -EINVAL;
1795 	}
1796 
1797 	hpre_req->dst = NULL;
1798 	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1799 	if (unlikely(dma_mapping_error(dev, dma))) {
1800 		dev_err(dev, "dma map data err!\n");
1801 		return -ENOMEM;
1802 	}
1803 
1804 	msg->out = cpu_to_le64(dma);
1805 	return 0;
1806 }
1807 
1808 static int hpre_curve25519_compute_value(struct kpp_request *req)
1809 {
1810 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1811 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1812 	struct device *dev = HPRE_DEV(ctx);
1813 	void *tmp = kpp_request_ctx(req);
1814 	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1815 	struct hpre_sqe *msg = &hpre_req->req;
1816 	int ret;
1817 
1818 	ret = hpre_curve25519_msg_request_set(ctx, req);
1819 	if (unlikely(ret)) {
1820 		dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
1821 		return ret;
1822 	}
1823 
1824 	if (req->src) {
1825 		ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
1826 		if (unlikely(ret)) {
1827 			dev_err(dev, "failed to init src data, ret = %d!\n",
1828 				ret);
1829 			goto clear_all;
1830 		}
1831 	} else {
1832 		msg->in = cpu_to_le64(ctx->curve25519.dma_g);
1833 	}
1834 
1835 	ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
1836 	if (unlikely(ret)) {
1837 		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1838 		goto clear_all;
1839 	}
1840 
1841 	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
1842 	ret = hpre_send(ctx, msg);
1843 	if (likely(!ret))
1844 		return -EINPROGRESS;
1845 
1846 clear_all:
1847 	hpre_rm_req_from_ctx(hpre_req);
1848 	hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1849 	return ret;
1850 }
1851 
1852 static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
1853 {
1854 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1855 
1856 	return ctx->key_sz;
1857 }
1858 
1859 static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
1860 {
1861 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1862 
1863 	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1864 }
1865 
1866 static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
1867 {
1868 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1869 
1870 	hpre_ecc_clear_ctx(ctx, true, false);
1871 }
1872 
1873 static struct akcipher_alg rsa = {
1874 	.sign = hpre_rsa_dec,
1875 	.verify = hpre_rsa_enc,
1876 	.encrypt = hpre_rsa_enc,
1877 	.decrypt = hpre_rsa_dec,
1878 	.set_pub_key = hpre_rsa_setpubkey,
1879 	.set_priv_key = hpre_rsa_setprivkey,
1880 	.max_size = hpre_rsa_max_size,
1881 	.init = hpre_rsa_init_tfm,
1882 	.exit = hpre_rsa_exit_tfm,
1883 	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1884 	.base = {
1885 		.cra_ctxsize = sizeof(struct hpre_ctx),
1886 		.cra_priority = HPRE_CRYPTO_ALG_PRI,
1887 		.cra_name = "rsa",
1888 		.cra_driver_name = "hpre-rsa",
1889 		.cra_module = THIS_MODULE,
1890 	},
1891 };
1892 
1893 static struct kpp_alg dh = {
1894 	.set_secret = hpre_dh_set_secret,
1895 	.generate_public_key = hpre_dh_compute_value,
1896 	.compute_shared_secret = hpre_dh_compute_value,
1897 	.max_size = hpre_dh_max_size,
1898 	.init = hpre_dh_init_tfm,
1899 	.exit = hpre_dh_exit_tfm,
1900 	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1901 	.base = {
1902 		.cra_ctxsize = sizeof(struct hpre_ctx),
1903 		.cra_priority = HPRE_CRYPTO_ALG_PRI,
1904 		.cra_name = "dh",
1905 		.cra_driver_name = "hpre-dh",
1906 		.cra_module = THIS_MODULE,
1907 	},
1908 };
1909 
1910 static struct kpp_alg ecdh_nist_p192 = {
1911 	.set_secret = hpre_ecdh_set_secret,
1912 	.generate_public_key = hpre_ecdh_compute_value,
1913 	.compute_shared_secret = hpre_ecdh_compute_value,
1914 	.max_size = hpre_ecdh_max_size,
1915 	.init = hpre_ecdh_nist_p192_init_tfm,
1916 	.exit = hpre_ecdh_exit_tfm,
1917 	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1918 	.base = {
1919 		.cra_ctxsize = sizeof(struct hpre_ctx),
1920 		.cra_priority = HPRE_CRYPTO_ALG_PRI,
1921 		.cra_name = "ecdh-nist-p192",
1922 		.cra_driver_name = "hpre-ecdh",
1923 		.cra_module = THIS_MODULE,
1924 	},
1925 };
1926 
1927 static struct kpp_alg ecdh_nist_p256 = {
1928 	.set_secret = hpre_ecdh_set_secret,
1929 	.generate_public_key = hpre_ecdh_compute_value,
1930 	.compute_shared_secret = hpre_ecdh_compute_value,
1931 	.max_size = hpre_ecdh_max_size,
1932 	.init = hpre_ecdh_nist_p256_init_tfm,
1933 	.exit = hpre_ecdh_exit_tfm,
1934 	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1935 	.base = {
1936 		.cra_ctxsize = sizeof(struct hpre_ctx),
1937 		.cra_priority = HPRE_CRYPTO_ALG_PRI,
1938 		.cra_name = "ecdh-nist-p256",
1939 		.cra_driver_name = "hpre-ecdh",
1940 		.cra_module = THIS_MODULE,
1941 	},
1942 };
1943 
1944 static struct kpp_alg curve25519_alg = {
1945 	.set_secret = hpre_curve25519_set_secret,
1946 	.generate_public_key = hpre_curve25519_compute_value,
1947 	.compute_shared_secret = hpre_curve25519_compute_value,
1948 	.max_size = hpre_curve25519_max_size,
1949 	.init = hpre_curve25519_init_tfm,
1950 	.exit = hpre_curve25519_exit_tfm,
1951 	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
1952 	.base = {
1953 		.cra_ctxsize = sizeof(struct hpre_ctx),
1954 		.cra_priority = HPRE_CRYPTO_ALG_PRI,
1955 		.cra_name = "curve25519",
1956 		.cra_driver_name = "hpre-curve25519",
1957 		.cra_module = THIS_MODULE,
1958 	},
1959 };
1960 
1961 
1962 static int hpre_register_ecdh(void)
1963 {
1964 	int ret;
1965 
1966 	ret = crypto_register_kpp(&ecdh_nist_p192);
1967 	if (ret)
1968 		return ret;
1969 
1970 	ret = crypto_register_kpp(&ecdh_nist_p256);
1971 	if (ret) {
1972 		crypto_unregister_kpp(&ecdh_nist_p192);
1973 		return ret;
1974 	}
1975 
1976 	return 0;
1977 }
1978 
1979 static void hpre_unregister_ecdh(void)
1980 {
1981 	crypto_unregister_kpp(&ecdh_nist_p256);
1982 	crypto_unregister_kpp(&ecdh_nist_p192);
1983 }
1984 
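/*
 * Register the HPRE algorithms: RSA and DH are always available, while the
 * ECDH and Curve25519 algorithms require HPRE v3 (qm->ver >= QM_HW_V3).
 */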
1985 int hpre_algs_register(struct hisi_qm *qm)
1986 {
1987 	int ret;
1988 
1989 	rsa.base.cra_flags = 0;
1990 	ret = crypto_register_akcipher(&rsa);
1991 	if (ret)
1992 		return ret;
1993 
1994 	ret = crypto_register_kpp(&dh);
1995 	if (ret)
1996 		goto unreg_rsa;
1997 
1998 	if (qm->ver >= QM_HW_V3) {
1999 		ret = hpre_register_ecdh();
2000 		if (ret)
2001 			goto unreg_dh;
2002 		ret = crypto_register_kpp(&curve25519_alg);
2003 		if (ret)
2004 			goto unreg_ecdh;
2005 	}
2006 	return 0;
2007 
2008 unreg_ecdh:
2009 	hpre_unregister_ecdh();
2010 unreg_dh:
2011 	crypto_unregister_kpp(&dh);
2012 unreg_rsa:
2013 	crypto_unregister_akcipher(&rsa);
2014 	return ret;
2015 }
2016 
2017 void hpre_algs_unregister(struct hisi_qm *qm)
2018 {
2019 	if (qm->ver >= QM_HW_V3) {
2020 		crypto_unregister_kpp(&curve25519_alg);
2021 		hpre_unregister_ecdh();
2022 	}
2023 
2024 	crypto_unregister_kpp(&dh);
2025 	crypto_unregister_akcipher(&rsa);
2026 }
2027