// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/dh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID		(-1)
#define HPRE_DEV(ctx)		(&((ctx)->qp->qm->pdev->dev))

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};

struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 */
	char *xa_p; /* low address: x--->p, please refer to the HiSilicon HPRE UM */
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
};

struct hpre_ctx {
	struct hisi_qp *qp;
	struct hpre_asym_request **req_list;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
	};
};

struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
};

static DEFINE_MUTEX(hpre_alg_lock);
static unsigned int hpre_active_devs;

static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}

static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}

static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	return id;
}

static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}

static struct hisi_qp *hpre_get_qp_and_start(void)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_create_qp();
	if (!qp) {
		pr_err("Cannot create hpre qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Cannot start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}

static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data),
			      len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}

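/*
 * Set up the DMA address for one data buffer of an SQE. A single-entry
 * scatterlist of exactly key_sz bytes is mapped in place, except for DH
 * source data, which is always copied ("formatted") into a key_sz-sized
 * coherent bounce buffer with the data right-aligned. The resulting DMA
 * address is written to the SQE's in or out field.
 */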
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* DH source data must always be formatted (copied into a padded buffer) */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len,
					  is_src, &tmp);
	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}

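/*
 * Undo the mappings set up by hpre_hw_data_init(): bounce buffers are freed
 * (after copying the result back into the destination scatterlist), directly
 * mapped scatterlists are simply unmapped.
 */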
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = HPRE_DEV(ctx);
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(!tmp))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz,
					  req->src, tmp);
		else
			dma_unmap_single(dev, tmp,
					 ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(!tmp))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}

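/*
 * Post handler for a completed SQE: look up the request by the SQE tag,
 * detach it from the context and hand it back via *kreq, then translate
 * the hardware done/error bits in dw0 into 0 or -EINVAL.
 */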
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	int err, id, done;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HPRE_HW_ERR_MASK	0x7ff
#define HPRE_SQE_DONE_MASK	0x3
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HPRE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HPRE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	return -EINVAL;
}

static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;

	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_free_qps(&ctx->qp, 1);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}

static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
}

static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_sqe *sqe = resp;

	ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp);
}

static int hpre_ctx_init(struct hpre_ctx *ctx)
{
	struct hisi_qp *qp;

	qp = hpre_get_qp_and_start();
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
}

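/*
 * Common SQE setup for RSA and DH requests: check that the destination
 * buffer can hold key_sz bytes (otherwise report the required size via
 * dst_len and return -EOVERFLOW), zero the SQE, record the completion
 * callback, encode the key size, and allocate a request id that is stored
 * in the SQE tag.
 */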
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p);
	}

	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}

#ifdef CONFIG_CRYPTO_DH
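/*
 * Backs both generate_public_key and compute_shared_secret: without req->src
 * the public key is generated (HPRE_ALG_DH_G2 is used when g == 2), with
 * req->src holding the peer's public key the shared secret is computed. The
 * SQE is retried up to HPRE_TRY_SEND_TIMES while the queue is busy.
 */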
static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	}

	return -EINVAL;
}

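/*
 * The xa_p buffer is twice the key size: the private key xa lives in the
 * low half (right-aligned), the prime p in the high half. When g equals 2
 * no g buffer is allocated and crt_g2_mode is used instead.
 */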
static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}

static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return hpre_ctx_init(ctx);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}
#endif

static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	/* check the remaining length first so an empty value is never dereferenced */
	while (*len && !**ptr) {
		(*ptr)++;
		(*len)--;
	}
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH		1024
#define _RSA_2048BITS_KEY_WDTH		2048
#define _RSA_3072BITS_KEY_WDTH		3072
#define _RSA_4096BITS_KEY_WDTH		4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}

static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	/* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ctr = 0;
	int ret;

	/* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	do {
		ret = hisi_qp_send(ctx->qp, msg);
	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);

	/* success */
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

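/*
 * Store the modulus n in the high half of the pubkey (and, for private keys,
 * prikey) buffer. Returns 1 when the key size is supported by the hardware,
 * 0 when the caller should fall back to the soft tfm, or a negative errno on
 * allocation failure.
 */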
static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if an unsupported key size is provided, fall back to the soft tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Use the HPRE hardware engine for RSA */
	return 1;
}

static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}

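/*
 * Pack the five CRT parameters into one coherent buffer, each right-aligned
 * within half the key size, in the order dq, dp, q, p, qinv (matching the
 * HPRE_CRT_* offsets). crt_g2_mode doubles as the "CRT key loaded" flag for
 * RSA.
 */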
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
					&ctx->rsa.dma_crt_prikey,
					GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}

/* If is_clear_all is set, all resources related to the QP are freed as well. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * Judge whether the key is a CRT key:
 * CRT: return true, N-CRT: return false.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* an N-CRT key's CRT parameters add up to no more than 5 bytes */
	return len > LEN_OF_NCRT_PARA;
}

static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}

static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}

static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512-bit and 1536-bit key sizes, use the soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}

static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Cannot alloc akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	ret = hpre_ctx_init(ctx);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}

static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

#ifdef CONFIG_CRYPTO_DH
static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};
#endif

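/*
 * Registration is reference counted per HPRE device: the algorithms are
 * registered when the first device comes up and unregistered when the last
 * one goes away. hpre_active_devs is protected by hpre_alg_lock.
 */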
int hpre_algs_register(void)
{
	int ret = 0;

	mutex_lock(&hpre_alg_lock);
	if (++hpre_active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
#ifdef CONFIG_CRYPTO_DH
		ret = crypto_register_kpp(&dh);
		if (ret) {
			crypto_unregister_akcipher(&rsa);
			goto unlock;
		}
#endif
	}

unlock:
	mutex_unlock(&hpre_alg_lock);
	return ret;
}

void hpre_algs_unregister(void)
{
	mutex_lock(&hpre_alg_lock);
	if (--hpre_active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
#ifdef CONFIG_CRYPTO_DH
		crypto_unregister_kpp(&dh);
#endif
	}
	mutex_unlock(&hpre_alg_lock);
}