// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
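
/*
 * Illustrative usage sketch (not part of this driver): callers reach the
 * routines below through the generic akcipher API. Identifiers such as
 * der_key, der_key_len, src_sg, dst_sg and req_done are placeholders, not
 * symbols defined in this file; error handling is elided.
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req;
 *
 *	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
 *	req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      req_done, NULL);
 *	akcipher_request_set_crypt(req, src_sg, dst_sg, src_len, dst_len);
 *	crypto_akcipher_encrypt(req);
 */
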
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
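
/*
 * A note on the sizes above (a sketch of the assumed descriptor layout):
 * each job descriptor built here is a header command plus a protocol
 * operation command, hence 2 * CAAM_CMD_SZ, wrapped around the protocol
 * data block. A 4096-bit modulus is 4096 / 8 = 512 bytes, the largest
 * input the driver accepts.
 */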

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If the backlog flag is not set, the request is completed directly
	 * by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If the backlog flag is not set, the request is completed directly
	 * by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes of a given
 *                                scatterlist so that they can be stripped
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of leading zero bytes to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
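
/*
 * Worked example (illustrative): if the scatterlist data begins with the
 * bytes { 0x00, 0x00, 0x17, ... } and nbytes = 3, the scan above stops at
 * 0x17 and the function returns 2, the number of leading zero bytes the
 * caller should skip.
 */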

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * the input src is shorter than the key modulus n,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}
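
	/*
	 * Example (illustrative): for a 2048-bit key, key->n_sz = 256. A
	 * 258-byte src is expected to begin with two zero bytes, which are
	 * skipped via fixup_src; a 250-byte src instead gets diff_size = 6
	 * zero bytes prepended through the input hw s/g table.
	 */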

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
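
	/*
	 * Illustrative sizing: with diff_size != 0, three mapped source
	 * entries and two mapped destination entries, the input table takes
	 * 3 + 1 = 4 entries (one extra for the zero padding) and the output
	 * table pad_sg_nents(2) entries, all carved from the single
	 * sec4_sg_bytes allocation below.
	 */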

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}
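
/*
 * Illustrative PDB packing: for a 2048-bit key with a 3-byte public
 * exponent, the final sgf assignment above yields
 * (3 << RSA_PDB_E_SHIFT) | 256, i.e. the exponent and modulus byte
 * lengths packed into one word alongside the RSA_PDB_SGF_* flags.
 */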

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlog requests are sent to the crypto engine, since the
	 * others can be handled by CAAM, if free, especially since the JR
	 * has up to 1024 entries (more than the 10 entries of the crypto
	 * engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}
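
/*
 * For reference, the private-key forms dispatched above (per the CAAM RSA
 * protocol): FORM1 operates on (n, d), FORM2 adds the prime factors (p, q),
 * and FORM3 is the full CRT form (p, q, dP, dQ, qInv). The richest form the
 * key material allows is chosen in caam_rsa_set_priv_key_form().
 */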

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
 * length, since BER encoding requires the minimum number of bytes to be
 * used to encode an integer. The decoded dP, dQ and qInv values therefore
 * have to be zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}
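
/*
 * Example (illustrative): with a 1024-bit p (dstlen = 128), a dP that
 * BER-decodes to 126 bytes is copied to offset 2 of a zeroed 128-byte
 * buffer, restoring the fixed-width operand the hardware expects.
 */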

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy the key into the DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}
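
/*
 * Note on the staged promotion above: priv_form is bumped to FORM2 only
 * once p, q and the scratch buffers are in place, and to FORM3 only after
 * all three CRT members decode successfully.
 */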

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy the key into the DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session pkc driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}
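
/*
 * Note: the padding buffer is mapped once per tfm for the worst case of
 * CAAM_RSA_MAX_INPUT_SIZE - 1 = 511 zero bytes, so whatever diff_size the
 * request path computes can be served from it without remapping.
 */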

/* Per-session pkc driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	}
};

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->ctrl->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. In
		 * that case the instance count is non-zero, but the
		 * CHA_VER_MISC_PKHA_NO_CRYPT bit is set to indicate that
		 * encryption and decryption are not supported; only signing
		 * and verifying are.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}
1213