// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
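
/*
 * Note: 4096 bits / 8 = 512 bytes, so CAAM_RSA_MAX_INPUT_SIZE is simply the
 * byte size of the largest supported modulus. Inputs shorter than the modulus
 * are left-padded with bytes taken from zero_buffer (below), so at most
 * CAAM_RSA_MAX_INPUT_SIZE - 1 padding bytes are ever needed.
 */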

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}
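
/*
 * CAAM supports three RSA private key representations (see the PDB layouts in
 * caampkc.h): form 1 uses (n, d), form 2 uses (d, p, q) plus two temporary
 * buffers, and form 3 uses the CRT values (p, q, dP, dQ, qInv) plus the same
 * temporaries. Each form has a matching unmap helper below that releases
 * exactly the DMA mappings created by the corresponding set_rsa_priv_f*_pdb().
 */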

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	bool has_bklog;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	/* Latch the backlog flag before the descriptor is freed below */
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If there is no backlog flag, the completion of the request is done
	 * by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	bool has_bklog;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	/* Latch the backlog flag before the descriptor is freed below */
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
		break;
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If there is no backlog flag, the completion of the request is done
	 * by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes that can be
 * stripped from a given scatterlist.
 *
 * @sgl   : scatterlist to scan
 * @nbytes: maximum number of bytes (zeros) that may be stripped
 * @flags : operation flags for the sg mapping iterator
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
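
/*
 * Example (illustrative): for a scatterlist holding { 0x00, 0x00, 0x01, 0x02 }
 * and nbytes = 3, the walk stops at the first non-zero byte and the function
 * returns 2, i.e. two leading zeros may be stripped without touching the
 * significant part of the integer.
 */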

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * The input src is shorter than the key modulus n, so zero
		 * padding will be prepended to the input.
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	if (src_nents < 0)
		return ERR_PTR(src_nents);

	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
	if (dst_nents < 0)
		return ERR_PTR(dst_nents);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}
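
/*
 * Memory layout of the extended descriptor allocated above (one kzalloc):
 *
 *   +----------------------+
 *   | struct rsa_edesc     |
 *   +----------------------+
 *   | hw desc (desclen)    |  <- hw_desc[], job descriptor built later
 *   +----------------------+
 *   | sec4 S/G table       |  <- edesc->sec4_sg, sec4_sg_bytes long
 *   +----------------------+
 */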

static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}
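
/*
 * The sgf word built above packs three things: the scatter/gather flags
 * (RSA_PDB_SGF_F/G) indicating whether f/g point at hardware S/G tables, the
 * public exponent length (e_sz << RSA_PDB_E_SHIFT) and the modulus length
 * (n_sz) in the low bits, matching the RSA encrypt PDB layout consumed by the
 * CAAM protocol descriptor.
 */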

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}
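
/*
 * The unmap_* error labels above unwind the DMA mappings in exact reverse
 * order of creation, so a failure at any dma_map_single() call leaves no
 * mapping behind; rsa_priv_f2_unmap() releases the same set on the success
 * path.
 */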

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlogged requests are sent to the crypto engine, since the
	 * others can be handled by CAAM directly, if free, especially since
	 * the JR has up to 1024 entries (more than the 10 entries of the
	 * crypto engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
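
/*
 * Both enqueue paths above report the request as in flight: caam_jr_enqueue()
 * returns -EINPROGRESS once the job is on the ring, and the crypto engine
 * transfer returns -EINPROGRESS or -EBUSY (the latter when the request was
 * backlogged). Any other return value means the request never reached the
 * hardware, hence the unmap/free cleanup before propagating the error.
 */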

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	/* Check the remaining length first, to avoid reading past the buffer */
	while (*nbytes && !**ptr) {
		(*ptr)++;
		(*nbytes)--;
	}
}
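
/*
 * Example (illustrative): for ptr -> { 0x00, 0x00, 0x01, 0x02 } and
 * nbytes = 4, the helper advances ptr by two bytes and leaves nbytes = 2,
 * so the caller sees the minimal big-endian representation { 0x01, 0x02 }.
 */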

/**
 * caam_read_rsa_crt - Read a dP, dQ or qInv CRT member.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
 * prime, since BER encoding uses the minimum number of bytes to encode an
 * integer. The decoded values therefore have to be zero-padded back to the
 * prime's length.
 *
 * @ptr   : pointer to the {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of the {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of the corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}
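
/*
 * Example (illustrative): a dP value that BER-decodes to 31 bytes for a
 * 32-byte prime p is copied to offset 1 of a zeroed 32-byte buffer, i.e. it
 * is left-padded with a single zero byte to the length the CAAM PDB expects.
 */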

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}
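
/*
 * The 4096-bit ceiling mirrors CAAM_RSA_MAX_INPUT_SIZE (512 bytes); callers
 * pass raw_key.n_sz << 3 to convert the modulus size from bytes to bits.
 */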

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	}
};
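
/*
 * A cra_priority of 3000 makes this implementation preferred over the generic
 * software "rsa" (which, at the time of writing, registers with a much lower
 * priority), so "rsa" users transparently get hardware offload once the
 * algorithm is registered.
 */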

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);
	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}