// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
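
/*
 * All four job completion callbacks below follow the same pattern: decode a
 * non-zero CAAM status word for logging via caam_jr_strstatus(), unmap the
 * PDB and the I/O buffers, free the extended descriptor and complete the
 * request with the status reported by the hardware.
 */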

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes that need to
 *                                be stripped from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zero bytes to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than the given number of bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
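
/*
 * rsa_edesc_alloc() builds the per-request extended descriptor: it normalizes
 * the input to the modulus size (dropping leading zeros when the source is
 * longer than n, or prepending zeros from the pre-mapped padding buffer when
 * it is shorter), allocates the hw job descriptor and the sec4 S/G link table
 * in a single DMA-able buffer, and DMA-maps the source and destination
 * scatterlists.
 */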

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is shorter than the key modulus n,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (!diff_size && src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;
	if (dst_nents > 1)
		sec4_sg_len += pad_sg_nents(dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}
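
/*
 * The set_rsa_*_pdb() helpers fill the Protocol Data Block embedded in the
 * RSA protocol job descriptor: each key component is DMA-mapped on its own,
 * while the input/output pointers (f/g) either reference the single mapped
 * scatterlist entry directly or point into the sec4 S/G table built by
 * rsa_edesc_alloc(), with the RSA_PDB_SGF_* flags telling the CAAM which
 * layout to expect.
 */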

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}
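
/*
 * Private key forms #2 and #3 additionally hand the prime factors to the
 * hardware, together with two scratch buffers (tmp1/tmp2, sized like p and
 * q) that are mapped bidirectionally so the CAAM can use them for
 * intermediate values.
 */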

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}
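
/*
 * The CAAM supports three RSA private key representations: form #1 uses the
 * pair (n, d), form #2 uses (p, q, d) and form #3 uses the CRT quintuple
 * (p, q, dP, dQ, qInv). caam_rsa_dec() below dispatches to the decrypt path
 * matching the form established at setkey time.
 */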

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than the corresponding p, q length,
 * as the BER-encoding requires that the minimum number of bytes be used to
 * encode the integer. The decoded dP, dQ, qInv values therefore have to be
 * zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read; updated to the length left after
 *          stripping the leading zeros
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}
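
/*
 * Setkey copies the exponent(s) verbatim into GFP_DMA buffers, while the
 * modulus is stored with its leading zeros stripped: n_sz then defines both
 * the operation size reported to callers via max_size() and the length of
 * the CAAM output. Keys larger than 4096 bits are rejected, which also keeps
 * n_sz within CAAM_RSA_MAX_INPUT_SIZE.
 */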

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The CAAM descriptors
	 * expect a positive integer for the RSA modulus and use its length
	 * as the output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}
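
/*
 * caam_rsa_set_priv_key_form() above is best effort: if any of the extra
 * components cannot be allocated or decoded, it frees what it already set up
 * and leaves priv_form at FORM1, so decryption still works using only
 * (n, d), just without the prime-factor/CRT acceleration.
 */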

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session public key crypto driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	return 0;
}

/* Per-session public key crypto driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.reqsize = sizeof(struct caam_rsa_req_ctx),
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};
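
/*
 * The zero buffer backing ctx->padding_dma is CAAM_RSA_MAX_INPUT_SIZE - 1
 * bytes on the assumption that the source is at least one byte long, so the
 * left padding (n_sz - src_len) never reaches the full modulus size.
 */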

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst;
	int err;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa);
	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	} else {
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	kfree(zero_buffer);
	crypto_unregister_akcipher(&caam_rsa);
}