1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * caam - Freescale FSL CAAM support for Public Key Cryptography
4  *
5  * Copyright 2016 Freescale Semiconductor, Inc.
6  * Copyright 2018-2019 NXP
7  *
8  * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
9  * all the desired key parameters as well as the input and output pointers.
10  */
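/*
 * For orientation, each job built below is, roughly, a descriptor header,
 * then the operation-specific Protocol Data Block (PDB) carrying the key
 * material and data pointers, then a PROTOCOL OPERATION command (a sketch
 * only; see pkc_desc.c and pdb.h for the authoritative layouts):
 *
 *	HEADER    (descriptor length, start index past the PDB)
 *	PDB       (sgf/sizes word, f_dma, g_dma, n_dma, e_dma, f_len, ...)
 *	OPERATION (unidirectional protocol command, e.g. RSA public-key op)
 */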
11 #include "compat.h"
12 #include "regs.h"
13 #include "intern.h"
14 #include "jr.h"
15 #include "error.h"
16 #include "desc_constr.h"
17 #include "sg_sw_sec4.h"
18 #include "caampkc.h"
19 
20 #define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
21 #define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
22 				 SIZEOF_RSA_PRIV_F1_PDB)
23 #define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
24 				 SIZEOF_RSA_PRIV_F2_PDB)
25 #define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
26 				 SIZEOF_RSA_PRIV_F3_PDB)
27 #define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
28 
29 /* buffer filled with zeros, used for padding */
30 static u8 *zero_buffer;
31 
32 /*
33  * flag used to avoid a double free of resources in case
34  * algorithm registration was unsuccessful
35  */
36 static bool init_done;
37 
38 struct caam_akcipher_alg {
39 	struct akcipher_alg akcipher;
40 	bool registered;
41 };
42 
43 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
44 			 struct akcipher_request *req)
45 {
46 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
47 
48 	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
49 	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);
50 
51 	if (edesc->sec4_sg_bytes)
52 		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
53 				 DMA_TO_DEVICE);
54 }
55 
56 static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
57 			  struct akcipher_request *req)
58 {
59 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
60 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
61 	struct caam_rsa_key *key = &ctx->key;
62 	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
63 
64 	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
65 	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
66 }
67 
68 static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
69 			      struct akcipher_request *req)
70 {
71 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
72 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
73 	struct caam_rsa_key *key = &ctx->key;
74 	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
75 
76 	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
77 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
78 }
79 
80 static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
81 			      struct akcipher_request *req)
82 {
83 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
84 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
85 	struct caam_rsa_key *key = &ctx->key;
86 	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
87 	size_t p_sz = key->p_sz;
88 	size_t q_sz = key->q_sz;
89 
90 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
91 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
92 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
93 	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
94 	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
95 }
96 
97 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
98 			      struct akcipher_request *req)
99 {
100 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
101 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
102 	struct caam_rsa_key *key = &ctx->key;
103 	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
104 	size_t p_sz = key->p_sz;
105 	size_t q_sz = key->q_sz;
106 
107 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
108 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
109 	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
110 	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
111 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
112 	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
113 	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
114 }
115 
116 /* RSA Job Completion handler */
117 static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
118 {
119 	struct akcipher_request *req = context;
120 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
121 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
122 	struct rsa_edesc *edesc;
123 	int ecode = 0;
124 	bool has_bklog;
125 
126 	if (err)
127 		ecode = caam_jr_strstatus(dev, err);
128 
129 	edesc = req_ctx->edesc;
130 	has_bklog = edesc->bklog;
131 
132 	rsa_pub_unmap(dev, edesc, req);
133 	rsa_io_unmap(dev, edesc, req);
134 	kfree(edesc);
135 
136 	/*
137 	 * If the backlog flag is not set, the completion of the request
138 	 * is handled by CAAM, not by the crypto engine.
139 	 */
140 	if (!has_bklog)
141 		akcipher_request_complete(req, ecode);
142 	else
143 		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
144 }
145 
146 static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
147 			    void *context)
148 {
149 	struct akcipher_request *req = context;
150 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
151 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
152 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
153 	struct caam_rsa_key *key = &ctx->key;
154 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
155 	struct rsa_edesc *edesc;
156 	int ecode = 0;
157 	bool has_bklog;
158 
159 	if (err)
160 		ecode = caam_jr_strstatus(dev, err);
161 
162 	edesc = req_ctx->edesc;
163 	has_bklog = edesc->bklog;
164 
165 	switch (key->priv_form) {
166 	case FORM1:
167 		rsa_priv_f1_unmap(dev, edesc, req);
168 		break;
169 	case FORM2:
170 		rsa_priv_f2_unmap(dev, edesc, req);
171 		break;
172 	case FORM3:
173 		rsa_priv_f3_unmap(dev, edesc, req);
174 	}
175 
176 	rsa_io_unmap(dev, edesc, req);
177 	kfree(edesc);
178 
179 	/*
180 	 * If the backlog flag is not set, the completion of the request
181 	 * is handled by CAAM, not by the crypto engine.
182 	 */
183 	if (!has_bklog)
184 		akcipher_request_complete(req, ecode);
185 	else
186 		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
187 }
188 
189 /**
190  * caam_rsa_count_leading_zeros - Count leading zeros to strip from an sgl
191  *
192  * @sgl   : scatterlist to count zeros from
193  * @nbytes: maximum number of zero bytes to strip
194  * @flags : operation flags
195  */
196 static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
197 					unsigned int nbytes,
198 					unsigned int flags)
199 {
200 	struct sg_mapping_iter miter;
201 	int lzeros, ents;
202 	unsigned int len;
203 	unsigned int tbytes = nbytes;
204 	const u8 *buff;
205 
206 	ents = sg_nents_for_len(sgl, nbytes);
207 	if (ents < 0)
208 		return ents;
209 
210 	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
211 
212 	lzeros = 0;
213 	len = 0;
214 	while (nbytes > 0) {
215 		/* do not strip more than given bytes */
216 		while (len && !*buff && lzeros < nbytes) {
217 			lzeros++;
218 			len--;
219 			buff++;
220 		}
221 
222 		if (len && *buff)
223 			break;
224 
225 		sg_miter_next(&miter);
226 		buff = miter.addr;
227 		len = miter.length;
228 
229 		nbytes -= lzeros;
230 		lzeros = 0;
231 	}
232 
233 	miter.consumed = lzeros;
234 	sg_miter_stop(&miter);
235 	nbytes -= lzeros;
236 
237 	return tbytes - nbytes;
238 }
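/*
 * Worked example (hypothetical values): for req->src_len = 518 and a
 * 512-byte modulus, the caller passes nbytes = 6. For input starting
 * { 0x00, 0x00, 0x12, ... } the function returns 2; for eight leading
 * zero bytes it returns 6, i.e. never more than nbytes.
 */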
239 
240 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
241 					 size_t desclen)
242 {
243 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
244 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
245 	struct device *dev = ctx->dev;
246 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
247 	struct caam_rsa_key *key = &ctx->key;
248 	struct rsa_edesc *edesc;
249 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
250 		       GFP_KERNEL : GFP_ATOMIC;
251 	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
252 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
253 	int src_nents, dst_nents;
254 	int mapped_src_nents, mapped_dst_nents;
255 	unsigned int diff_size = 0;
256 	int lzeros;
257 
258 	if (req->src_len > key->n_sz) {
259 		/*
260 		 * strip leading zeros and
261 		 * return the number of zeros to skip
262 		 */
263 		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
264 						      key->n_sz, sg_flags);
265 		if (lzeros < 0)
266 			return ERR_PTR(lzeros);
267 
268 		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
269 						      lzeros);
270 		req_ctx->fixup_src_len = req->src_len - lzeros;
271 	} else {
272 		/*
273 		 * input src is shorter than the key modulus n,
274 		 * so there will be zero padding
275 		 */
276 		diff_size = key->n_sz - req->src_len;
277 		req_ctx->fixup_src = req->src;
278 		req_ctx->fixup_src_len = req->src_len;
279 	}
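	/*
	 * Example (hypothetical sizes): for a 4096-bit key (n_sz = 512) and
	 * req->src_len = 500, diff_size = 12 and the first sec4_sg entry
	 * built below points at 12 bytes of zero_buffer (via
	 * ctx->padding_dma), left-padding the input to the modulus size.
	 */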
280 
281 	src_nents = sg_nents_for_len(req_ctx->fixup_src,
282 				     req_ctx->fixup_src_len);
283 	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
284 
285 	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
286 				      DMA_TO_DEVICE);
287 	if (unlikely(!mapped_src_nents)) {
288 		dev_err(dev, "unable to map source\n");
289 		return ERR_PTR(-ENOMEM);
290 	}
291 	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
292 				      DMA_FROM_DEVICE);
293 	if (unlikely(!mapped_dst_nents)) {
294 		dev_err(dev, "unable to map destination\n");
295 		goto src_fail;
296 	}
297 
298 	if (!diff_size && mapped_src_nents == 1)
299 		sec4_sg_len = 0; /* no need for an input hw s/g table */
300 	else
301 		sec4_sg_len = mapped_src_nents + !!diff_size;
302 	sec4_sg_index = sec4_sg_len;
303 
304 	if (mapped_dst_nents > 1)
305 		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
306 	else
307 		sec4_sg_len = pad_sg_nents(sec4_sg_len);
308 
309 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
310 
311 	/* allocate space for base edesc, hw desc commands and link tables */
312 	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
313 			GFP_DMA | flags);
314 	if (!edesc)
315 		goto dst_fail;
316 
317 	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
318 	if (diff_size)
319 		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
320 				   0);
321 
322 	if (sec4_sg_index)
323 		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
324 				   edesc->sec4_sg + !!diff_size, 0);
325 
326 	if (mapped_dst_nents > 1)
327 		sg_to_sec4_sg_last(req->dst, req->dst_len,
328 				   edesc->sec4_sg + sec4_sg_index, 0);
329 
330 	/* Save nents for later use in Job Descriptor */
331 	edesc->src_nents = src_nents;
332 	edesc->dst_nents = dst_nents;
333 
334 	req_ctx->edesc = edesc;
335 
336 	if (!sec4_sg_bytes)
337 		return edesc;
338 
339 	edesc->mapped_src_nents = mapped_src_nents;
340 	edesc->mapped_dst_nents = mapped_dst_nents;
341 
342 	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
343 					    sec4_sg_bytes, DMA_TO_DEVICE);
344 	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
345 		dev_err(dev, "unable to map S/G table\n");
346 		goto sec4_sg_fail;
347 	}
348 
349 	edesc->sec4_sg_bytes = sec4_sg_bytes;
350 
351 	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
352 			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
353 			     edesc->sec4_sg_bytes, 1);
354 
355 	return edesc;
356 
357 sec4_sg_fail:
358 	kfree(edesc);
359 dst_fail:
360 	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
361 src_fail:
362 	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
363 	return ERR_PTR(-ENOMEM);
364 }
365 
366 static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
367 {
368 	struct akcipher_request *req = container_of(areq,
369 						    struct akcipher_request,
370 						    base);
371 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
372 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
373 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
374 	struct device *jrdev = ctx->dev;
375 	u32 *desc = req_ctx->edesc->hw_desc;
376 	int ret;
377 
378 	req_ctx->edesc->bklog = true;
379 
380 	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);
381 
382 	if (ret != -EINPROGRESS) {
383 		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
384 		rsa_io_unmap(jrdev, req_ctx->edesc, req);
385 		kfree(req_ctx->edesc);
386 	} else {
387 		ret = 0;
388 	}
389 
390 	return ret;
391 }
392 
393 static int set_rsa_pub_pdb(struct akcipher_request *req,
394 			   struct rsa_edesc *edesc)
395 {
396 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
397 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
398 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
399 	struct caam_rsa_key *key = &ctx->key;
400 	struct device *dev = ctx->dev;
401 	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
402 	int sec4_sg_index = 0;
403 
404 	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
405 	if (dma_mapping_error(dev, pdb->n_dma)) {
406 		dev_err(dev, "Unable to map RSA modulus memory\n");
407 		return -ENOMEM;
408 	}
409 
410 	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
411 	if (dma_mapping_error(dev, pdb->e_dma)) {
412 		dev_err(dev, "Unable to map RSA public exponent memory\n");
413 		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
414 		return -ENOMEM;
415 	}
416 
417 	if (edesc->mapped_src_nents > 1) {
418 		pdb->sgf |= RSA_PDB_SGF_F;
419 		pdb->f_dma = edesc->sec4_sg_dma;
420 		sec4_sg_index += edesc->mapped_src_nents;
421 	} else {
422 		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
423 	}
424 
425 	if (edesc->mapped_dst_nents > 1) {
426 		pdb->sgf |= RSA_PDB_SGF_G;
427 		pdb->g_dma = edesc->sec4_sg_dma +
428 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
429 	} else {
430 		pdb->g_dma = sg_dma_address(req->dst);
431 	}
432 
433 	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
434 	pdb->f_len = req_ctx->fixup_src_len;
435 
436 	return 0;
437 }
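/*
 * The PDB sgf word set above multiplexes the scatter/gather flags with the
 * operand sizes: e_sz is shifted into its field, n_sz occupies the low bits,
 * and RSA_PDB_SGF_F/RSA_PDB_SGF_G mark f_dma/g_dma as pointing to hardware
 * s/g tables instead of contiguous buffers. The private-key PDB setters
 * below follow the same pattern with d_sz, p_sz and q_sz.
 */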
438 
439 static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
440 			       struct rsa_edesc *edesc)
441 {
442 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
443 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
444 	struct caam_rsa_key *key = &ctx->key;
445 	struct device *dev = ctx->dev;
446 	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
447 	int sec4_sg_index = 0;
448 
449 	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
450 	if (dma_mapping_error(dev, pdb->n_dma)) {
451 		dev_err(dev, "Unable to map modulus memory\n");
452 		return -ENOMEM;
453 	}
454 
455 	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
456 	if (dma_mapping_error(dev, pdb->d_dma)) {
457 		dev_err(dev, "Unable to map RSA private exponent memory\n");
458 		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
459 		return -ENOMEM;
460 	}
461 
462 	if (edesc->mapped_src_nents > 1) {
463 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
464 		pdb->g_dma = edesc->sec4_sg_dma;
465 		sec4_sg_index += edesc->mapped_src_nents;
466 
467 	} else {
468 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
469 
470 		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
471 	}
472 
473 	if (edesc->mapped_dst_nents > 1) {
474 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
475 		pdb->f_dma = edesc->sec4_sg_dma +
476 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
477 	} else {
478 		pdb->f_dma = sg_dma_address(req->dst);
479 	}
480 
481 	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
482 
483 	return 0;
484 }
485 
486 static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
487 			       struct rsa_edesc *edesc)
488 {
489 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
490 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
491 	struct caam_rsa_key *key = &ctx->key;
492 	struct device *dev = ctx->dev;
493 	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
494 	int sec4_sg_index = 0;
495 	size_t p_sz = key->p_sz;
496 	size_t q_sz = key->q_sz;
497 
498 	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
499 	if (dma_mapping_error(dev, pdb->d_dma)) {
500 		dev_err(dev, "Unable to map RSA private exponent memory\n");
501 		return -ENOMEM;
502 	}
503 
504 	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
505 	if (dma_mapping_error(dev, pdb->p_dma)) {
506 		dev_err(dev, "Unable to map RSA prime factor p memory\n");
507 		goto unmap_d;
508 	}
509 
510 	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
511 	if (dma_mapping_error(dev, pdb->q_dma)) {
512 		dev_err(dev, "Unable to map RSA prime factor q memory\n");
513 		goto unmap_p;
514 	}
515 
516 	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
517 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
518 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
519 		goto unmap_q;
520 	}
521 
522 	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
523 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
524 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
525 		goto unmap_tmp1;
526 	}
527 
528 	if (edesc->mapped_src_nents > 1) {
529 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
530 		pdb->g_dma = edesc->sec4_sg_dma;
531 		sec4_sg_index += edesc->mapped_src_nents;
532 	} else {
533 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
534 
535 		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
536 	}
537 
538 	if (edesc->mapped_dst_nents > 1) {
539 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
540 		pdb->f_dma = edesc->sec4_sg_dma +
541 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
542 	} else {
543 		pdb->f_dma = sg_dma_address(req->dst);
544 	}
545 
546 	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
547 	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
548 
549 	return 0;
550 
551 unmap_tmp1:
552 	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
553 unmap_q:
554 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
555 unmap_p:
556 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
557 unmap_d:
558 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
559 
560 	return -ENOMEM;
561 }
562 
563 static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
564 			       struct rsa_edesc *edesc)
565 {
566 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
567 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
568 	struct caam_rsa_key *key = &ctx->key;
569 	struct device *dev = ctx->dev;
570 	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
571 	int sec4_sg_index = 0;
572 	size_t p_sz = key->p_sz;
573 	size_t q_sz = key->q_sz;
574 
575 	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
576 	if (dma_mapping_error(dev, pdb->p_dma)) {
577 		dev_err(dev, "Unable to map RSA prime factor p memory\n");
578 		return -ENOMEM;
579 	}
580 
581 	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
582 	if (dma_mapping_error(dev, pdb->q_dma)) {
583 		dev_err(dev, "Unable to map RSA prime factor q memory\n");
584 		goto unmap_p;
585 	}
586 
587 	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
588 	if (dma_mapping_error(dev, pdb->dp_dma)) {
589 		dev_err(dev, "Unable to map RSA exponent dp memory\n");
590 		goto unmap_q;
591 	}
592 
593 	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
594 	if (dma_mapping_error(dev, pdb->dq_dma)) {
595 		dev_err(dev, "Unable to map RSA exponent dq memory\n");
596 		goto unmap_dp;
597 	}
598 
599 	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
600 	if (dma_mapping_error(dev, pdb->c_dma)) {
601 		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
602 		goto unmap_dq;
603 	}
604 
605 	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
606 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
607 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
608 		goto unmap_qinv;
609 	}
610 
611 	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
612 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
613 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
614 		goto unmap_tmp1;
615 	}
616 
617 	if (edesc->mapped_src_nents > 1) {
618 		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
619 		pdb->g_dma = edesc->sec4_sg_dma;
620 		sec4_sg_index += edesc->mapped_src_nents;
621 	} else {
622 		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
623 
624 		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
625 	}
626 
627 	if (edesc->mapped_dst_nents > 1) {
628 		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
629 		pdb->f_dma = edesc->sec4_sg_dma +
630 			     sec4_sg_index * sizeof(struct sec4_sg_entry);
631 	} else {
632 		pdb->f_dma = sg_dma_address(req->dst);
633 	}
634 
635 	pdb->sgf |= key->n_sz;
636 	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
637 
638 	return 0;
639 
640 unmap_tmp1:
641 	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
642 unmap_qinv:
643 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
644 unmap_dq:
645 	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
646 unmap_dp:
647 	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
648 unmap_q:
649 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
650 unmap_p:
651 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
652 
653 	return -ENOMEM;
654 }
655 
656 static int akcipher_enqueue_req(struct device *jrdev,
657 				void (*cbk)(struct device *jrdev, u32 *desc,
658 					    u32 err, void *context),
659 				struct akcipher_request *req)
660 {
661 	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
662 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
663 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
664 	struct caam_rsa_key *key = &ctx->key;
665 	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
666 	struct rsa_edesc *edesc = req_ctx->edesc;
667 	u32 *desc = edesc->hw_desc;
668 	int ret;
669 
670 	req_ctx->akcipher_op_done = cbk;
671 	/*
672 	 * Only backlog requests are sent to the crypto engine, since the
673 	 * others can be handled by CAAM, if free, especially since the JR
674 	 * has up to 1024 entries (more than the crypto engine's 10 entries).
675 	 */
676 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
677 		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
678 								 req);
679 	else
680 		ret = caam_jr_enqueue(jrdev, desc, cbk, req);
681 
682 	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
683 		switch (key->priv_form) {
684 		case FORM1:
685 			rsa_priv_f1_unmap(jrdev, edesc, req);
686 			break;
687 		case FORM2:
688 			rsa_priv_f2_unmap(jrdev, edesc, req);
689 			break;
690 		case FORM3:
691 			rsa_priv_f3_unmap(jrdev, edesc, req);
692 			break;
693 		default:
694 			rsa_pub_unmap(jrdev, edesc, req);
695 		}
696 		rsa_io_unmap(jrdev, edesc, req);
697 		kfree(edesc);
698 	}
699 
700 	return ret;
701 }
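/*
 * A caller opts into the backlog path through the generic crypto API, e.g.
 * (illustrative sketch; my_done_cb/my_ctx are placeholders):
 *
 *	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      my_done_cb, my_ctx);
 *
 * in which case -EBUSY from the engine means the request was queued as
 * backlog rather than dropped.
 */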
702 
703 static int caam_rsa_enc(struct akcipher_request *req)
704 {
705 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
706 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
707 	struct caam_rsa_key *key = &ctx->key;
708 	struct device *jrdev = ctx->dev;
709 	struct rsa_edesc *edesc;
710 	int ret;
711 
712 	if (unlikely(!key->n || !key->e))
713 		return -EINVAL;
714 
715 	if (req->dst_len < key->n_sz) {
716 		req->dst_len = key->n_sz;
717 		dev_err(jrdev, "Output buffer length less than parameter n\n");
718 		return -EOVERFLOW;
719 	}
720 
721 	/* Allocate extended descriptor */
722 	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
723 	if (IS_ERR(edesc))
724 		return PTR_ERR(edesc);
725 
726 	/* Set RSA Encrypt Protocol Data Block */
727 	ret = set_rsa_pub_pdb(req, edesc);
728 	if (ret)
729 		goto init_fail;
730 
731 	/* Initialize Job Descriptor */
732 	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
733 
734 	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);
735 
736 init_fail:
737 	rsa_io_unmap(jrdev, edesc, req);
738 	kfree(edesc);
739 	return ret;
740 }
741 
742 static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
743 {
744 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
745 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
746 	struct device *jrdev = ctx->dev;
747 	struct rsa_edesc *edesc;
748 	int ret;
749 
750 	/* Allocate extended descriptor */
751 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
752 	if (IS_ERR(edesc))
753 		return PTR_ERR(edesc);
754 
755 	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
756 	ret = set_rsa_priv_f1_pdb(req, edesc);
757 	if (ret)
758 		goto init_fail;
759 
760 	/* Initialize Job Descriptor */
761 	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
762 
763 	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
764 
765 init_fail:
766 	rsa_io_unmap(jrdev, edesc, req);
767 	kfree(edesc);
768 	return ret;
769 }
770 
771 static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
772 {
773 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
774 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
775 	struct device *jrdev = ctx->dev;
776 	struct rsa_edesc *edesc;
777 	int ret;
778 
779 	/* Allocate extended descriptor */
780 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
781 	if (IS_ERR(edesc))
782 		return PTR_ERR(edesc);
783 
784 	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
785 	ret = set_rsa_priv_f2_pdb(req, edesc);
786 	if (ret)
787 		goto init_fail;
788 
789 	/* Initialize Job Descriptor */
790 	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
791 
792 	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
793 
794 init_fail:
795 	rsa_io_unmap(jrdev, edesc, req);
796 	kfree(edesc);
797 	return ret;
798 }
799 
800 static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
801 {
802 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
803 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
804 	struct device *jrdev = ctx->dev;
805 	struct rsa_edesc *edesc;
806 	int ret;
807 
808 	/* Allocate extended descriptor */
809 	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
810 	if (IS_ERR(edesc))
811 		return PTR_ERR(edesc);
812 
813 	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
814 	ret = set_rsa_priv_f3_pdb(req, edesc);
815 	if (ret)
816 		goto init_fail;
817 
818 	/* Initialize Job Descriptor */
819 	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
820 
821 	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);
822 
823 init_fail:
824 	rsa_io_unmap(jrdev, edesc, req);
825 	kfree(edesc);
826 	return ret;
827 }
828 
829 static int caam_rsa_dec(struct akcipher_request *req)
830 {
831 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
832 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
833 	struct caam_rsa_key *key = &ctx->key;
834 	int ret;
835 
836 	if (unlikely(!key->n || !key->d))
837 		return -EINVAL;
838 
839 	if (req->dst_len < key->n_sz) {
840 		req->dst_len = key->n_sz;
841 		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
842 		return -EOVERFLOW;
843 	}
844 
845 	if (key->priv_form == FORM3)
846 		ret = caam_rsa_dec_priv_f3(req);
847 	else if (key->priv_form == FORM2)
848 		ret = caam_rsa_dec_priv_f2(req);
849 	else
850 		ret = caam_rsa_dec_priv_f1(req);
851 
852 	return ret;
853 }
854 
855 static void caam_rsa_free_key(struct caam_rsa_key *key)
856 {
857 	kfree_sensitive(key->d);
858 	kfree_sensitive(key->p);
859 	kfree_sensitive(key->q);
860 	kfree_sensitive(key->dp);
861 	kfree_sensitive(key->dq);
862 	kfree_sensitive(key->qinv);
863 	kfree_sensitive(key->tmp1);
864 	kfree_sensitive(key->tmp2);
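	/* e and n are public values, so a plain kfree() is sufficient */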
865 	kfree(key->e);
866 	kfree(key->n);
867 	memset(key, 0, sizeof(*key));
868 }
869 
870 static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
871 {
872 	while (!**ptr && *nbytes) {
873 		(*ptr)++;
874 		(*nbytes)--;
875 	}
876 }
877 
878 /**
879  * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
880  * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
881  * length, as BER encoding requires that the minimum number of bytes be used
882  * to encode the integer. The decoded dP, dQ and qInv values therefore have
883  * to be zero-padded to the appropriate length.
884  *
885  * @ptr   : pointer to {dP, dQ, qInv} CRT member
886  * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
887  * @dstlen: length in bytes of corresponding p or q prime factor
888  */
889 static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
890 {
891 	u8 *dst;
892 
893 	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
894 	if (!nbytes)
895 		return NULL;
896 
897 	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
898 	if (!dst)
899 		return NULL;
900 
901 	memcpy(dst + (dstlen - nbytes), ptr, nbytes);
902 
903 	return dst;
904 }
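/*
 * Example (hypothetical values): for p_sz = 128 and a dP that BER-decodes
 * to 127 bytes, the returned buffer is 128 bytes long with dst[0] == 0x00
 * and the 127 significant bytes right-aligned.
 */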
905 
906 /**
907  * caam_read_raw_data - Read a raw byte stream as a positive integer.
908  * The function skips the buffer's leading zeros, copies the remaining data
909  * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
910  * the address of the new buffer.
911  *
912  * @buf   : The data to read
913  * @nbytes: The amount of data to read
914  */
915 static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
916 {
918 	caam_rsa_drop_leading_zeros(&buf, nbytes);
919 	if (!*nbytes)
920 		return NULL;
921 
922 	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
923 }
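/*
 * Example (hypothetical values): for buf = { 0x00, 0x00, 0x01, 0x23 } and
 * *nbytes == 4, the function returns a 2-byte copy { 0x01, 0x23 } and sets
 * *nbytes to 2.
 */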
924 
925 static int caam_rsa_check_key_length(unsigned int len)
926 {
927 	if (len > 4096)
928 		return -EINVAL;
929 	return 0;
930 }
931 
932 static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
933 				unsigned int keylen)
934 {
935 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
936 	struct rsa_key raw_key = {NULL};
937 	struct caam_rsa_key *rsa_key = &ctx->key;
938 	int ret;
939 
940 	/* Free the old RSA key if any */
941 	caam_rsa_free_key(rsa_key);
942 
943 	ret = rsa_parse_pub_key(&raw_key, key, keylen);
944 	if (ret)
945 		return ret;
946 
947 	/* Copy the key into the DMA zone */
948 	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
949 	if (!rsa_key->e)
950 		goto err;
951 
952 	/*
953 	 * Skip leading zeros and copy the positive integer to a buffer
954 	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
955 	 * expects a positive integer for the RSA modulus and uses its length as
956 	 * decryption output length.
957 	 */
958 	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
959 	if (!rsa_key->n)
960 		goto err;
961 
962 	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
963 		caam_rsa_free_key(rsa_key);
964 		return -EINVAL;
965 	}
966 
967 	rsa_key->e_sz = raw_key.e_sz;
968 	rsa_key->n_sz = raw_key.n_sz;
969 
970 	return 0;
971 err:
972 	caam_rsa_free_key(rsa_key);
973 	return -ENOMEM;
974 }
975 
976 static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
977 				       struct rsa_key *raw_key)
978 {
979 	struct caam_rsa_key *rsa_key = &ctx->key;
980 	size_t p_sz = raw_key->p_sz;
981 	size_t q_sz = raw_key->q_sz;
982 
983 	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
984 	if (!rsa_key->p)
985 		return;
986 	rsa_key->p_sz = p_sz;
987 
988 	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
989 	if (!rsa_key->q)
990 		goto free_p;
991 	rsa_key->q_sz = q_sz;
992 
993 	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
994 	if (!rsa_key->tmp1)
995 		goto free_q;
996 
997 	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
998 	if (!rsa_key->tmp2)
999 		goto free_tmp1;
1000 
1001 	rsa_key->priv_form = FORM2;
1002 
1003 	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
1004 	if (!rsa_key->dp)
1005 		goto free_tmp2;
1006 
1007 	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
1008 	if (!rsa_key->dq)
1009 		goto free_dp;
1010 
1011 	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
1012 					  q_sz);
1013 	if (!rsa_key->qinv)
1014 		goto free_dq;
1015 
1016 	rsa_key->priv_form = FORM3;
1017 
1018 	return;
1019 
1020 free_dq:
1021 	kfree_sensitive(rsa_key->dq);
1022 free_dp:
1023 	kfree_sensitive(rsa_key->dp);
1024 free_tmp2:
1025 	kfree_sensitive(rsa_key->tmp2);
1026 free_tmp1:
1027 	kfree_sensitive(rsa_key->tmp1);
1028 free_q:
1029 	kfree_sensitive(rsa_key->q);
1030 free_p:
1031 	kfree_sensitive(rsa_key->p);
1032 }
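/*
 * The resulting private key form ladder: FORM1 uses only (n, d), FORM2 adds
 * (p, q), and FORM3 adds the CRT members (dP, dQ, qInv). The function above
 * settles on the highest form for which all components could be read and
 * buffered, falling back to FORM1 if even p or q cannot be set up.
 */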
1033 
1034 static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
1035 				 unsigned int keylen)
1036 {
1037 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1038 	struct rsa_key raw_key = {NULL};
1039 	struct caam_rsa_key *rsa_key = &ctx->key;
1040 	int ret;
1041 
1042 	/* Free the old RSA key if any */
1043 	caam_rsa_free_key(rsa_key);
1044 
1045 	ret = rsa_parse_priv_key(&raw_key, key, keylen);
1046 	if (ret)
1047 		return ret;
1048 
1049 	/* Copy the key into the DMA zone */
1050 	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
1051 	if (!rsa_key->d)
1052 		goto err;
1053 
1054 	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
1055 	if (!rsa_key->e)
1056 		goto err;
1057 
1058 	/*
1059 	 * Skip leading zeros and copy the positive integer to a buffer
1060 	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
1061 	 * expects a positive integer for the RSA modulus and uses its length as
1062 	 * decryption output length.
1063 	 */
1064 	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
1065 	if (!rsa_key->n)
1066 		goto err;
1067 
1068 	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
1069 		caam_rsa_free_key(rsa_key);
1070 		return -EINVAL;
1071 	}
1072 
1073 	rsa_key->d_sz = raw_key.d_sz;
1074 	rsa_key->e_sz = raw_key.e_sz;
1075 	rsa_key->n_sz = raw_key.n_sz;
1076 
1077 	caam_rsa_set_priv_key_form(ctx, &raw_key);
1078 
1079 	return 0;
1080 
1081 err:
1082 	caam_rsa_free_key(rsa_key);
1083 	return -ENOMEM;
1084 }
1085 
1086 static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
1087 {
1088 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1089 
1090 	return ctx->key.n_sz;
1091 }
1092 
1093 /* Per-session public key crypto driver context creation function */
1094 static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
1095 {
1096 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1097 
1098 	ctx->dev = caam_jr_alloc();
1099 
1100 	if (IS_ERR(ctx->dev)) {
1101 		pr_err("Job Ring Device allocation for transform failed\n");
1102 		return PTR_ERR(ctx->dev);
1103 	}
1104 
1105 	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
1106 					  CAAM_RSA_MAX_INPUT_SIZE - 1,
1107 					  DMA_TO_DEVICE);
1108 	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
1109 		dev_err(ctx->dev, "unable to map padding\n");
1110 		caam_jr_free(ctx->dev);
1111 		return -ENOMEM;
1112 	}
1113 
1114 	ctx->enginectx.op.do_one_request = akcipher_do_one_req;
1115 
1116 	return 0;
1117 }
1118 
1119 /* Per-session public key crypto driver context cleanup function */
1120 static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
1121 {
1122 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1123 	struct caam_rsa_key *key = &ctx->key;
1124 
1125 	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
1126 			 1, DMA_TO_DEVICE);
1127 	caam_rsa_free_key(key);
1128 	caam_jr_free(ctx->dev);
1129 }
1130 
1131 static struct caam_akcipher_alg caam_rsa = {
1132 	.akcipher = {
1133 		.encrypt = caam_rsa_enc,
1134 		.decrypt = caam_rsa_dec,
1135 		.set_pub_key = caam_rsa_set_pub_key,
1136 		.set_priv_key = caam_rsa_set_priv_key,
1137 		.max_size = caam_rsa_max_size,
1138 		.init = caam_rsa_init_tfm,
1139 		.exit = caam_rsa_exit_tfm,
1140 		.reqsize = sizeof(struct caam_rsa_req_ctx),
1141 		.base = {
1142 			.cra_name = "rsa",
1143 			.cra_driver_name = "rsa-caam",
1144 			.cra_priority = 3000,
1145 			.cra_module = THIS_MODULE,
1146 			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
1147 		},
1148 	}
1149 };
1150 
1151 /* Public Key Cryptography module initialization handler */
1152 int caam_pkc_init(struct device *ctrldev)
1153 {
1154 	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1155 	u32 pk_inst;
1156 	int err;

1157 	init_done = false;
1158 
1159 	/* Determine public key hardware accelerator presence. */
1160 	if (priv->era < 10)
1161 		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1162 			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
1163 	else
1164 		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
1165 
1166 	/* Do not register algorithms if PKHA is not present. */
1167 	if (!pk_inst)
1168 		return 0;
1169 
1170 	/* allocate zero buffer, used for padding input */
1171 	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
1172 			      GFP_KERNEL);
1173 	if (!zero_buffer)
1174 		return -ENOMEM;
1175 
1176 	err = crypto_register_akcipher(&caam_rsa.akcipher);
1177 
1178 	if (err) {
1179 		kfree(zero_buffer);
1180 		dev_warn(ctrldev, "%s alg registration failed\n",
1181 			 caam_rsa.akcipher.base.cra_driver_name);
1182 	} else {
1183 		init_done = true;
1184 		caam_rsa.registered = true;
1185 		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
1186 	}
1187 
1188 	return err;
1189 }
1190 
1191 void caam_pkc_exit(void)
1192 {
1193 	if (!init_done)
1194 		return;
1195 
1196 	if (caam_rsa.registered)
1197 		crypto_unregister_akcipher(&caam_rsa.akcipher);
1198 
1199 	kfree(zero_buffer);
1200 }
1201