// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/dh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID	(-1)
#define HPRE_DEV(ctx)		(&((ctx)->qp->qm->pdev->dev))

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

#define HPRE_DFX_SEC_TO_US	1000000
#define HPRE_DFX_US_TO_NS	1000

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};

struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 */
	char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
};

struct hpre_ctx {
	struct hisi_qp *qp;
	struct hpre_asym_request **req_list;
	struct hpre *hpre;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
	};
};

struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
	struct timespec64 req_time;
};
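/*
 * Note: a struct hpre_asym_request is not allocated on its own; it lives
 * inside the per-request context area the crypto API reserves for every
 * akcipher/kpp request (.reqsize below is sizeof(struct hpre_asym_request)
 * + HPRE_ALIGN_SZ). The request handlers recover it with
 *
 *	void *tmp = akcipher_request_ctx(req);	// or kpp_request_ctx()
 *	struct hpre_asym_request *h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
 *
 * so the extra HPRE_ALIGN_SZ bytes in reqsize guarantee the aligned
 * pointer still lies within the reserved area.
 */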
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}

static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}

static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	struct hpre_dfx *dfx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	dfx = ctx->hpre->debug.dfx;
	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
		ktime_get_ts64(&hpre_req->req_time);

	return id;
}

static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}

static struct hisi_qp *hpre_get_qp_and_start(void)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_create_qp();
	if (!qp) {
		pr_err("Can not create hpre qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}

static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = HPRE_DEV(ctx);
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}

static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* The source of a DH operation is always copied into a key-sized buffer */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);

	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}

static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = HPRE_DEV(ctx);
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(!tmp))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(!tmp))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}
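/*
 * Two ways of handing operands to the engine are used above:
 *
 * - hpre_get_data_dma_addr(): the scatterlist is a single, key-sized
 *   entry, so it is DMA-mapped in place and req->src/req->dst stays NULL;
 * - hpre_prepare_dma_buf(): otherwise a key-sized coherent buffer is
 *   allocated, source data is copied to its tail (offset key_sz - len),
 *   and req->src/req->dst records the kernel address of that buffer.
 *
 * hpre_hw_data_clr_all() relies on req->src/req->dst being non-NULL to
 * tell which of the two mappings has to be undone for each direction.
 */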
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	int err, id, done;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HPRE_HW_ERR_MASK	0x7ff
#define HPRE_SQE_DONE_MASK	0x3
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HPRE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HPRE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	return -EINVAL;
}

static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	struct hpre *hpre;

	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;

	hpre = container_of(ctx->qp->qm, struct hpre, qm);
	ctx->hpre = hpre;
	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_free_qps(&ctx->qp, 1);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}

static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
			       u64 overtime_thrhld)
{
	struct timespec64 reply_time;
	u64 time_use_us;

	ktime_get_ts64(&reply_time);
	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
		HPRE_DFX_SEC_TO_US +
		(reply_time.tv_nsec - req->req_time.tv_nsec) /
		HPRE_DFX_US_TO_NS;

	if (time_use_us <= overtime_thrhld)
		return false;

	return true;
}

static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
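/*
 * hpre_alg_cb() below is the completion handler installed on the QP in
 * hpre_ctx_init(). The hardware echoes back the 16-bit tag written into
 * the SQE by hpre_msg_request_set(); the tag is the request id allocated
 * from ctx->req_idr and indexes ctx->req_list, from which the request is
 * dispatched to hpre_rsa_cb() or hpre_dh_cb() through req->cb.
 */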
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_sqe *sqe = resp;
	struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];

	if (unlikely(!req)) {
		atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
		return;
	}

	req->cb(ctx, resp);
}

static int hpre_ctx_init(struct hpre_ctx *ctx)
{
	struct hisi_qp *qp;

	qp = hpre_get_qp_and_start();
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
}

static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
	}

	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}

static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	int ctr = 0;
	int ret;

	do {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		ret = hisi_qp_send(ctx->qp, msg);
		if (ret != -EBUSY)
			break;
		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
	} while (ctr++ < HPRE_TRY_SEND_TIMES);

	if (likely(!ret))
		return ret;

	if (ret != -EBUSY)
		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

	return ret;
}

#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	} else {
		msg->in = cpu_to_le64(ctx->dh.dma_g);
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
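/*
 * Only moduli matching the well-known IKE/MODP group sizes are accepted:
 * groups 1, 2 and 5 (RFC 2409) and 14, 15 and 16 (RFC 3526), i.e. 768,
 * 1024, 1536, 2048, 3072 and 4096 bits.
 */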
static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	}

	return -EINVAL;
}

static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}

static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = HPRE_DEV(ctx);
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return hpre_ctx_init(ctx);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}
#endif
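/*
 * Illustrative sketch only (not part of this driver): roughly how kernel
 * code could exercise a "dh" kpp implementation such as the one above.
 * Buffer names are made up and error handling is omitted.
 *
 *	struct crypto_kpp *tfm = crypto_alloc_kpp("dh", 0, 0);
 *	struct dh p = {
 *		.key = xa, .key_size = xa_len,
 *		.p = prime, .p_size = prime_len,
 *		.g = gen, .g_size = gen_len,
 *	};
 *	unsigned int len = crypto_dh_key_len(&p);
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *
 *	crypto_dh_encode_key(buf, len, &p);
 *	crypto_kpp_set_secret(tfm, buf, len);
 *
 * A kpp_request with only an output scatterlist then computes the public
 * key ya = g^xa mod p via crypto_kpp_generate_public_key(); setting the
 * peer's public key as the input computes the shared secret with
 * crypto_kpp_compute_shared_secret().
 */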
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH	1024
#define _RSA_2048BITS_KEY_WDTH	2048
#define _RSA_3072BITS_KEY_WDTH	3072
#define _RSA_4096BITS_KEY_WDTH	4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}

static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}
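/*
 * Note that ctx->crt_g2_mode doubles as the "CRT private key loaded"
 * flag for RSA: hpre_rsa_setkey_crt() sets it, and hpre_rsa_dec() above
 * then selects HPRE_ALG_NC_CRT with the five-parameter CRT key instead
 * of HPRE_ALG_NC_NCRT with the plain d/n key.
 */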
static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if invalid key size provided, we use software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}

static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}

static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
						 &ctx->rsa.dma_crt_prikey,
						 GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}
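/*
 * Layout of the CRT private-key buffer built by hpre_rsa_setkey_crt()
 * above. Each field occupies hlf_ksz = key_sz / 2 bytes; each raw
 * parameter has its leading zeros stripped and is copied to the tail of
 * its slot:
 *
 *	offset 0		dq
 *	offset hlf_ksz		dp
 *	offset 2 * hlf_ksz	q	(HPRE_CRT_Q)
 *	offset 3 * hlf_ksz	p	(HPRE_CRT_P)
 *	offset 4 * hlf_ksz	qinv	(HPRE_CRT_INV)
 *
 * matching the "low address: dq->dp->q->p->qinv" note on struct
 * hpre_rsa_ctx.
 */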
/* If is_clear_all is set, all resources of the QP are released as well. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = HPRE_DEV(ctx);

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * Determine whether the key is a CRT key:
 * CRT: return true, non-CRT: return false.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* A non-CRT key carries at most LEN_OF_NCRT_PARA bytes of CRT parameters */
	return len > LEN_OF_NCRT_PARA;
}

static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}

static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}

static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}
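/*
 * The akcipher registered below uses cra_name "rsa" with priority
 * HPRE_CRYPTO_ALG_PRI, so it is preferred over the generic software
 * implementation whenever this module is loaded. Key sizes the engine
 * does not support (512 and 1536 bits) are still handled transparently
 * through ctx->rsa.soft_tfm, the "rsa-generic" fallback allocated in
 * hpre_rsa_init_tfm() below.
 */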
static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	ret = hpre_ctx_init(ctx);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}

static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

#ifdef CONFIG_CRYPTO_DH
static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};
#endif

int hpre_algs_register(void)
{
	int ret;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		return ret;
#ifdef CONFIG_CRYPTO_DH
	ret = crypto_register_kpp(&dh);
	if (ret)
		crypto_unregister_akcipher(&rsa);
#endif

	return ret;
}

void hpre_algs_unregister(void)
{
	crypto_unregister_akcipher(&rsa);
#ifdef CONFIG_CRYPTO_DH
	crypto_unregister_kpp(&dh);
#endif
}