// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID	(-1)

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

#define HPRE_DFX_SEC_TO_US	1000000
#define HPRE_DFX_US_TO_NS	1000

/* due to nist p521 */
#define HPRE_ECC_MAX_KSZ	66

/* size in bytes of the n prime */
#define HPRE_ECC_NIST_P192_N_SIZE	24
#define HPRE_ECC_NIST_P256_N_SIZE	32
#define HPRE_ECC_NIST_P384_N_SIZE	48

/* size in bytes */
#define HPRE_ECC_HW256_KSZ_B	32
#define HPRE_ECC_HW384_KSZ_B	48

/* capability register mask of driver */
#define HPRE_DRV_RSA_MASK_CAP		BIT(0)
#define HPRE_DRV_DH_MASK_CAP		BIT(1)
#define HPRE_DRV_ECDH_MASK_CAP		BIT(2)
#define HPRE_DRV_X25519_MASK_CAP	BIT(5)

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};

struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 * low address: d--->n, please refer to Hisilicon HPRE UM
	 */
	char *xa_p;
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
};

struct hpre_ecdh_ctx {
	/* low address: p->a->k->b */
	unsigned char *p;
	dma_addr_t dma_p;

	/* low address: x->y */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_curve25519_ctx {
	/* low address: p->a->k */
	unsigned char *p;
	dma_addr_t dma_p;

	/* gx coordinate */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_ctx {
	struct hisi_qp *qp;
	struct device *dev;
	struct hpre_asym_request **req_list;
	struct hpre *hpre;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
		struct hpre_ecdh_ctx ecdh;
		struct hpre_curve25519_ctx curve25519;
	};
	/* for ecc algorithms */
	unsigned int curve_id;
};

struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
		struct kpp_request *ecdh;
		struct kpp_request *curve25519;
	} areq;
	int err;
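	/* index into ctx->req_list while queued, HPRE_INVLD_REQ_ID otherwise */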
	int req_id;
	hpre_cb cb;
	struct timespec64 req_time;
};

static inline unsigned int hpre_align_sz(void)
{
	return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
}

static inline unsigned int hpre_align_pd(void)
{
	return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}

static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}

static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	struct hpre_dfx *dfx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	dfx = ctx->hpre->debug.dfx;
	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
		ktime_get_ts64(&hpre_req->req_time);

	return id;
}

static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}

static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_create_qp(type);
	if (!qp) {
		pr_err("Can not create hpre qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}

static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct device *dev = hpre_req->ctx->dev;
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}

static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* when the data is dh's source, we should format it */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);

	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}

static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}

static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	unsigned int err, done, alg;
	int id;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HREE_HW_ERR_MASK	GENMASK(10, 0)
#define HREE_SQE_DONE_MASK	GENMASK(1, 0)
#define HREE_ALG_TYPE_MASK	GENMASK(4, 0)
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HREE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HREE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
	dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
			    alg, done, err);

	return -EINVAL;
}

static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	struct hpre *hpre;

	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;
	ctx->dev = &qp->qm->pdev->dev;

	hpre = container_of(ctx->qp->qm, struct hpre, qm);
	ctx->hpre = hpre;
	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_free_qps(&ctx->qp, 1);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}

static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
			       u64 overtime_thrhld)
{
	struct timespec64 reply_time;
	u64 time_use_us;

	ktime_get_ts64(&reply_time);
	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
		HPRE_DFX_SEC_TO_US +
		(reply_time.tv_nsec - req->req_time.tv_nsec) /
		HPRE_DFX_US_TO_NS;

	if (time_use_us <= overtime_thrhld)
		return false;

	return true;
}

static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_sqe *sqe = resp;
	struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];

	if (unlikely(!req)) {
		atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
		return;
	}

	req->cb(ctx, resp);
}

static void hpre_stop_qp_and_put(struct hisi_qp *qp)
{
	hisi_qm_stop_qp(qp);
	hisi_qm_free_qps(&qp, 1);
}

static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_get_qp_and_start(type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
	if (ret)
		hpre_stop_qp_and_put(qp);

	return ret;
}

static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
	}

	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;
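
	/*
	 * Reserve a request id; it is echoed back in the completion SQE's
	 * 'tag' field so the callback can locate this request again.
	 */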
	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}

static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	int ctr = 0;
	int ret;

	do {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		ret = hisi_qp_send(ctx->qp, msg);
		if (ret != -EBUSY)
			break;
		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
	} while (ctr++ < HPRE_TRY_SEND_TIMES);

	if (likely(!ret))
		return ret;

	if (ret != -EBUSY)
		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

	return ret;
}

static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	} else {
		msg->in = cpu_to_le64(ctx->dh.dma_g);
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	default:
		return -EINVAL;
	}
}

static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}

static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}

static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH		1024
#define _RSA_2048BITS_KEY_WDTH		2048
#define _RSA_3072BITS_KEY_WDTH		3072
#define _RSA_4096BITS_KEY_WDTH		4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}

static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if invalid key size provided, we use software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(ctx->dev, vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}

static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;
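
	/* right-align the CRT parameter in its slot; the leading bytes stay zero */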
	memcpy(para + para_sz - len, ptr, len);

	return 0;
}

static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
						 &ctx->rsa.dma_crt_prikey,
						 GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}

/* If is_clear_all is set, all resources of the QP are cleaned up as well. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * Judge whether the key is in CRT form:
 * CRT: return true, N-CRT: return false.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* an N-CRT key carries less than 5 bytes of CRT parameters in total */
	return len > LEN_OF_NCRT_PARA;
}

static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}

static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}

static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}

static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +
			     hpre_align_pd());

	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}

static void hpre_key_to_big_end(u8 *data, int len)
{
	int i, j;

	for (i = 0; i < len / 2; i++) {
		j = len - i - 1;
		swap(data[j], data[i]);
	}
}

static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
			       bool is_ecdh)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (is_ecdh && ctx->ecdh.p) {
		/* ecdh: p->a->k->b */
		memzero_explicit(ctx->ecdh.p + shift, sz);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
	} else if (!is_ecdh && ctx->curve25519.p) {
		/* curve25519: p->a->k */
		memzero_explicit(ctx->curve25519.p + shift, sz);
		dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
				  ctx->curve25519.dma_p);
		ctx->curve25519.p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * HPRE supports curves of 192/224/256/384/521 bits and rounds the width up:
 * bits <= 256 -> 256; 256 < bits <= 384 -> 384; 384 < bits <= 576 -> 576.
 * If the parameter bit width is smaller, software pads the high-order
 * bytes with zeros, so TASK_LENGTH1 is 0x3/0x5/0x8.
 */
static unsigned int hpre_ecdh_supported_curve(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_HW256_KSZ_B;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_HW384_KSZ_B;
	default:
		break;
	}

	return 0;
}

static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
{
	unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
	u8 i = 0;

	while (i < ndigits - 1) {
		memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
		i++;
	}

	memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
	hpre_key_to_big_end((u8 *)addr, cur_sz);
}

static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
				unsigned int cur_sz)
{
	unsigned int shifta = ctx->key_sz << 1;
	unsigned int shiftb = ctx->key_sz << 2;
	void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
	void *a = ctx->ecdh.p + shifta - cur_sz;
	void *b = ctx->ecdh.p + shiftb - cur_sz;
	void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
	void *y = ctx->ecdh.g + shifta - cur_sz;
	const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
	char *n;

	if (unlikely(!curve))
		return -EINVAL;

	n = kzalloc(ctx->key_sz, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
	fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
	fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
	fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
	fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
	fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);

	if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
		kfree(n);
		return -EINVAL;
	}

	kfree(n);
	return 0;
}

static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
		return HPRE_ECC_NIST_P192_N_SIZE;
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_NIST_P256_N_SIZE;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_NIST_P384_N_SIZE;
	default:
		break;
	}

	return 0;
}

static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz, shift, curve_sz;
	int ret;

	ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
	if (!ctx->key_sz)
		return -EINVAL;

	curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	if (!curve_sz || params->key_size > curve_sz)
		return -EINVAL;

	sz = ctx->key_sz;

	if (!ctx->ecdh.p) {
		ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
						 GFP_KERNEL);
		if (!ctx->ecdh.p)
			return -ENOMEM;
	}

	shift = sz << 2;
	ctx->ecdh.g = ctx->ecdh.p + shift;
	ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;

	ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
	if (ret) {
		dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
		return ret;
	}

	return 0;
}

static bool hpre_key_is_zero(char *key, unsigned short key_sz)
{
	int i;

	for (i = 0; i < key_sz; i++)
		if (key[i])
			return false;

	return true;
}

static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	int ret;

	ret = crypto_get_default_rng();
	if (ret) {
		dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
		return ret;
	}

	ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
				   params->key_size);
	crypto_put_default_rng();
	if (ret)
		dev_err(dev, "failed to get rng, ret = %d!\n", ret);

	return ret;
}

static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
				unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	char key[HPRE_ECC_MAX_KSZ];
	unsigned int sz, sz_shift;
	struct ecdh params;
	int ret;

	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
		dev_err(dev, "failed to decode ecdh key!\n");
		return -EINVAL;
	}

	/* Use stdrng to generate private key */
	if (!params.key || !params.key_size) {
		params.key = key;
		params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
		ret = ecdh_gen_privkey(ctx, &params);
		if (ret)
			return ret;
	}

	if (hpre_key_is_zero(params.key, params.key_size)) {
		dev_err(dev, "Invalid hpre key!\n");
		return -EINVAL;
	}

	hpre_ecc_clear_ctx(ctx, false, true);

	ret = hpre_ecdh_set_param(ctx, &params);
	if (ret < 0) {
		dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
		return ret;
	}

	sz = ctx->key_sz;
	sz_shift = (sz << 1) + sz - params.key_size;
	memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);

	return 0;
}

static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
				      struct hpre_asym_request *req,
				      struct scatterlist *dst,
				      struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
}

static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
{
	unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	char *p;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.ecdh;
	areq->dst_len = ctx->key_sz << 1;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	p = sg_virt(areq->dst);
	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);

	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
				     struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (req->dst_len < ctx->key_sz << 1) {
		req->dst_len = ctx->key_sz << 1;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_ecdh_cb;
	h_req->areq.ecdh = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->ecdh.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	unsigned int tmpshift;
	dma_addr_t dma = 0;
	void *ptr;
	int shift;

	/* Src_data includes gx and gy. */
	shift = ctx->key_sz - (len >> 1);
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	tmpshift = ctx->key_sz << 1;
	scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
	memcpy(ptr + shift, ptr + tmpshift, len >> 1);
	memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;
}

static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

static int hpre_ecdh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_ecdh_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n", ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->ecdh.dma_g);
	}

	ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}

static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	/* max size is the pub_key_size, including x and y */
	return ctx->key_sz << 1;
}

static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P192;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P256;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P384;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, true);
}

static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
				       unsigned int len)
{
	u8 secret[CURVE25519_KEY_SIZE] = { 0 };
	unsigned int sz = ctx->key_sz;
	const struct ecc_curve *curve;
	unsigned int shift = sz << 1;
	void *p;

	/*
	 * The key from 'buf' is in little-endian, we should preprocess it as
	 * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
	 * then convert it to big endian. Only in this way, the result can be
	 * the same as the software curve-25519 that exists in crypto.
	 */
	memcpy(secret, buf, len);
	curve25519_clamp_secret(secret);
	hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);

	p = ctx->curve25519.p + sz - len;

	curve = ecc_get_curve25519();

	/* fill curve parameters */
	fill_curve_param(p, curve->p, len, curve->g.ndigits);
	fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
	memcpy(p + shift, secret, len);
	fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
	memzero_explicit(secret, CURVE25519_KEY_SIZE);
}

static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
				     unsigned int len)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	/* p->a->k->gx */
	if (!ctx->curve25519.p) {
		ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
						       &ctx->curve25519.dma_p,
						       GFP_KERNEL);
		if (!ctx->curve25519.p)
			return -ENOMEM;
	}

	ctx->curve25519.g = ctx->curve25519.p + shift + sz;
	ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;

	hpre_curve25519_fill_curve(ctx, buf, len);

	return 0;
}

static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
				      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	int ret = -EINVAL;

	if (len != CURVE25519_KEY_SIZE ||
	    !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "key is null or key len is not 32bytes!\n");
		return ret;
	}

	/* Free old secret if any */
	hpre_ecc_clear_ctx(ctx, false, false);

	ctx->key_sz = CURVE25519_KEY_SIZE;
	ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
	if (ret) {
		dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
		hpre_ecc_clear_ctx(ctx, false, false);
		return ret;
	}

	return 0;
}

static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
					    struct hpre_asym_request *req,
					    struct scatterlist *dst,
					    struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
}

static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.curve25519;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);

	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
					   struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (unlikely(req->dst_len < ctx->key_sz)) {
		req->dst_len = ctx->key_sz;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_curve25519_cb;
	h_req->areq.curve25519 = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->curve25519.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

static void hpre_curve25519_src_modulo_p(u8 *ptr)
{
	int i;

	for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
		ptr[i] = 0;

	/* The modulus is ptr's last byte minus '0xed' (last byte of p) */
	ptr[i] -= 0xed;
}

static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	u8 p[CURVE25519_KEY_SIZE] = { 0 };
	const struct ecc_curve *curve;
	dma_addr_t dma = 0;
	u8 *ptr;

	if (len != CURVE25519_KEY_SIZE) {
		dev_err(dev, "src_data len is not 32bytes, len = %u!\n", len);
		return -EINVAL;
	}

	ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	scatterwalk_map_and_copy(ptr, data, 0, len, 0);

	if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "gx is null!\n");
		goto err;
	}

	/*
	 * Src_data(gx) is in little-endian order, MSB in the final byte should
	 * be masked as described in RFC7748, then transform it to big-endian
	 * form, then hisi_hpre can use the data.
1878 */ 1879 ptr[31] &= 0x7f; 1880 hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE); 1881 1882 curve = ecc_get_curve25519(); 1883 1884 fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits); 1885 1886 /* 1887 * When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p, 1888 * we get its modulus to p, and then use it. 1889 */ 1890 if (memcmp(ptr, p, ctx->key_sz) == 0) { 1891 dev_err(dev, "gx is p!\n"); 1892 goto err; 1893 } else if (memcmp(ptr, p, ctx->key_sz) > 0) { 1894 hpre_curve25519_src_modulo_p(ptr); 1895 } 1896 1897 hpre_req->src = ptr; 1898 msg->in = cpu_to_le64(dma); 1899 return 0; 1900 1901 err: 1902 dma_free_coherent(dev, ctx->key_sz, ptr, dma); 1903 return -EINVAL; 1904 } 1905 1906 static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req, 1907 struct scatterlist *data, unsigned int len) 1908 { 1909 struct hpre_sqe *msg = &hpre_req->req; 1910 struct hpre_ctx *ctx = hpre_req->ctx; 1911 struct device *dev = ctx->dev; 1912 dma_addr_t dma; 1913 1914 if (!data || !sg_is_last(data) || len != ctx->key_sz) { 1915 dev_err(dev, "data or data length is illegal!\n"); 1916 return -EINVAL; 1917 } 1918 1919 hpre_req->dst = NULL; 1920 dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE); 1921 if (unlikely(dma_mapping_error(dev, dma))) { 1922 dev_err(dev, "dma map data err!\n"); 1923 return -ENOMEM; 1924 } 1925 1926 msg->out = cpu_to_le64(dma); 1927 return 0; 1928 } 1929 1930 static int hpre_curve25519_compute_value(struct kpp_request *req) 1931 { 1932 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); 1933 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); 1934 struct device *dev = ctx->dev; 1935 void *tmp = kpp_request_ctx(req); 1936 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); 1937 struct hpre_sqe *msg = &hpre_req->req; 1938 int ret; 1939 1940 ret = hpre_curve25519_msg_request_set(ctx, req); 1941 if (unlikely(ret)) { 1942 dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret); 1943 return ret; 1944 } 1945 1946 if (req->src) { 1947 ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len); 1948 if (unlikely(ret)) { 1949 dev_err(dev, "failed to init src data, ret = %d!\n", 1950 ret); 1951 goto clear_all; 1952 } 1953 } else { 1954 msg->in = cpu_to_le64(ctx->curve25519.dma_g); 1955 } 1956 1957 ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len); 1958 if (unlikely(ret)) { 1959 dev_err(dev, "failed to init dst data, ret = %d!\n", ret); 1960 goto clear_all; 1961 } 1962 1963 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL); 1964 ret = hpre_send(ctx, msg); 1965 if (likely(!ret)) 1966 return -EINPROGRESS; 1967 1968 clear_all: 1969 hpre_rm_req_from_ctx(hpre_req); 1970 hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); 1971 return ret; 1972 } 1973 1974 static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm) 1975 { 1976 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); 1977 1978 return ctx->key_sz; 1979 } 1980 1981 static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm) 1982 { 1983 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); 1984 1985 kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); 1986 1987 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); 1988 } 1989 1990 static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm) 1991 { 1992 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); 1993 1994 hpre_ecc_clear_ctx(ctx, true, false); 1995 } 1996 1997 static struct akcipher_alg rsa = { 1998 .sign = hpre_rsa_dec, 1999 .verify = hpre_rsa_enc, 2000 .encrypt = hpre_rsa_enc, 
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg ecdh_curves[] = {
	{
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p192_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p192",
			.cra_driver_name = "hpre-ecdh-nist-p192",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p256_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p256",
			.cra_driver_name = "hpre-ecdh-nist-p256",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p384_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p384",
			.cra_driver_name = "hpre-ecdh-nist-p384",
			.cra_module = THIS_MODULE,
		},
	}
};

static struct kpp_alg curve25519_alg = {
	.set_secret = hpre_curve25519_set_secret,
	.generate_public_key = hpre_curve25519_compute_value,
	.compute_shared_secret = hpre_curve25519_compute_value,
	.max_size = hpre_curve25519_max_size,
	.init = hpre_curve25519_init_tfm,
	.exit = hpre_curve25519_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "curve25519",
		.cra_driver_name = "hpre-curve25519",
		.cra_module = THIS_MODULE,
	},
};

static int hpre_register_rsa(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return 0;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_rsa(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return;

	crypto_unregister_akcipher(&rsa);
}

static int hpre_register_dh(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&dh);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_dh(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return;

	crypto_unregister_kpp(&dh);
}

static int hpre_register_ecdh(struct hisi_qm *qm)
{
	int ret, i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return 0;

	for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
		ret = crypto_register_kpp(&ecdh_curves[i]);
		if (ret) {
			dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
				ecdh_curves[i].base.cra_name, ret);
			goto unreg_kpp;
		}
	}

	return 0;

unreg_kpp:
	for (--i; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);

	return ret;
}

static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
	int i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return;

	for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);
}

static int hpre_register_x25519(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&curve25519_alg);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_x25519(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return;

	crypto_unregister_kpp(&curve25519_alg);
}

int hpre_algs_register(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_register_rsa(qm);
	if (ret)
		return ret;

	ret = hpre_register_dh(qm);
	if (ret)
		goto unreg_rsa;

	ret = hpre_register_ecdh(qm);
	if (ret)
		goto unreg_dh;

	ret = hpre_register_x25519(qm);
	if (ret)
		goto unreg_ecdh;

	return ret;

unreg_ecdh:
	hpre_unregister_ecdh(qm);
unreg_dh:
	hpre_unregister_dh(qm);
unreg_rsa:
	hpre_unregister_rsa(qm);
	return ret;
}

void hpre_algs_unregister(struct hisi_qm *qm)
{
	hpre_unregister_x25519(qm);
	hpre_unregister_ecdh(qm);
	hpre_unregister_dh(qm);
	hpre_unregister_rsa(qm);
}