// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

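/*
 * Job descriptor buffer sizes: each descriptor is two command words (the
 * descriptor header plus the protocol operation command) followed by the
 * hardware Protocol Data Block (PDB) for the chosen RSA operation.
 */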
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
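/*
 * The four completion callbacks below are invoked by the job ring driver
 * once the hardware has finished (or failed) a job: they unmap all DMA
 * resources, free the extended descriptor and complete the akcipher
 * request with the job status.
 */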
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

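/*
 * The RSA engine operates on big-endian integers no longer than the
 * modulus; padding schemes such as pkcs1pad can hand us input with leading
 * zero bytes, so count them here and skip past them before mapping the
 * source scatterlist.
 */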
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
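	/*
	 * buff/len start out empty, so the first pass through the loop
	 * fetches the first chunk from the mapping iterator before any
	 * bytes are examined.
	 */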
	while (nbytes > 0) {
		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

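/*
 * The extended descriptor is a single allocation holding the rsa_edesc
 * bookkeeping structure, the hardware job descriptor (desclen bytes) and,
 * when the I/O is scattered, the sec4 S/G link table right behind it.
 */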
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int lzeros;

	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
	if (lzeros < 0)
		return ERR_PTR(lzeros);

	req->src_len -= lzeros;
	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

	src_nents = sg_nents_for_len(req->src, req->src_len);
	if (src_nents < 0)
		return ERR_PTR(src_nents);

	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
	if (dst_nents < 0)
		return ERR_PTR(dst_nents);

	if (src_nents > 1)
		sec4_sg_len = src_nents;

	if (dst_nents > 1)
		sec4_sg_len += pad_sg_nents(dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

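/*
 * In the PDB setup helpers below, the "sgf" word carries both the
 * scatter/gather flags (set whenever input or output go through a sec4
 * S/G table) and the packed key component lengths, e.g. e_sz and n_sz
 * for the public key PDB.
 */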
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

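/*
 * When the destination buffer is too small, the request handlers below
 * follow the akcipher convention: req->dst_len is updated to the required
 * size (the modulus length) before returning -EOVERFLOW, so the caller
 * can retry with a large enough buffer.
 */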
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

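/*
 * CAAM knows three RSA private key representations: form 1 is the plain
 * (n, d) pair, form 2 adds the prime factors p and q, and form 3 is the
 * full CRT key (p, q, dP, dQ, qInv). Decryption dispatches on the most
 * capable form that setkey managed to assemble.
 */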
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

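/*
 * Private key material is zeroed before being freed (kzfree); the modulus
 * and public exponent are not secret, so plain kfree is enough for them.
 */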
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (*nbytes && !**ptr) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q
 * prime factor, as BER encoding requires that the minimum number of bytes be
 * used to encode an integer. The decoded dP, dQ, qInv values therefore have
 * to be zero-padded back to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

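/*
 * The length check is done on the modulus size in bits (callers pass
 * n_sz << 3); 4096-bit moduli are the largest keys handled here.
 */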
static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

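/*
 * Try to assemble the most capable private key representation: prime
 * factors plus scratch buffers upgrade the key to form 2, and the three
 * CRT members upgrade it further to form 3. On failure the key simply
 * stays at the last form that was fully assembled.
 */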
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		return;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

	/* CRT member failures leave the key usable at FORM2 */
free_dq:
	kzfree(rsa_key->dq);
	rsa_key->dq = NULL;
free_dp:
	kzfree(rsa_key->dp);
	rsa_key->dp = NULL;
	return;

	/* earlier failures fall back to FORM1; clear pointers so that
	 * caam_rsa_free_key() cannot free them a second time
	 */
free_tmp1:
	kzfree(rsa_key->tmp1);
	rsa_key->tmp1 = NULL;
free_q:
	kzfree(rsa_key->q);
	rsa_key->q = NULL;
free_p:
	kzfree(rsa_key->p);
	rsa_key->p = NULL;
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session pkc driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}

/* Per-session pkc driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.reqsize = sizeof(struct caam_rsa_req_ctx),
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};
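/*
 * Usage sketch (illustrative only, not part of this driver): once the
 * algorithm is registered, kernel users reach it through the generic
 * akcipher API, for example:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_akcipher_set_pub_key(tfm, key, keylen);
 *	akcipher_request_set_crypt(req, src_sg, dst_sg, src_len, dst_len);
 *	crypto_akcipher_encrypt(req);
 *
 * crypto_akcipher_encrypt() returns -EINPROGRESS once the job has been
 * enqueued on a job ring; completion is reported through the request's
 * callback (error handling omitted above for brevity).
 */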

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst;
	int err;

	/* Determine public key hardware accelerator presence. */
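	/*
	 * Era < 10 parts report CHA instantiation in the perfmon CHA ID
	 * registers; newer parts expose it in per-CHA version registers.
	 */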
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}

void caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}