1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2019 HiSilicon Limited. */ 3 4 #include <crypto/aes.h> 5 #include <crypto/algapi.h> 6 #include <crypto/authenc.h> 7 #include <crypto/des.h> 8 #include <crypto/hash.h> 9 #include <crypto/internal/aead.h> 10 #include <crypto/sha.h> 11 #include <crypto/skcipher.h> 12 #include <crypto/xts.h> 13 #include <linux/crypto.h> 14 #include <linux/dma-mapping.h> 15 #include <linux/idr.h> 16 17 #include "sec.h" 18 #include "sec_crypto.h" 19 20 #define SEC_PRIORITY 4001 21 #define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE) 22 #define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE) 23 #define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE) 24 #define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE) 25 26 /* SEC sqe(bd) bit operational relative MACRO */ 27 #define SEC_DE_OFFSET 1 28 #define SEC_CIPHER_OFFSET 4 29 #define SEC_SCENE_OFFSET 3 30 #define SEC_DST_SGL_OFFSET 2 31 #define SEC_SRC_SGL_OFFSET 7 32 #define SEC_CKEY_OFFSET 9 33 #define SEC_CMODE_OFFSET 12 34 #define SEC_AKEY_OFFSET 5 35 #define SEC_AEAD_ALG_OFFSET 11 36 #define SEC_AUTH_OFFSET 6 37 38 #define SEC_FLAG_OFFSET 7 39 #define SEC_FLAG_MASK 0x0780 40 #define SEC_TYPE_MASK 0x0F 41 #define SEC_DONE_MASK 0x0001 42 43 #define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) 44 #define SEC_SGL_SGE_NR 128 45 #define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev) 46 #define SEC_CIPHER_AUTH 0xfe 47 #define SEC_AUTH_CIPHER 0x1 48 #define SEC_MAX_MAC_LEN 64 49 #define SEC_MAX_AAD_LEN 65535 50 #define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH) 51 52 #define SEC_PBUF_SZ 512 53 #define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ 54 #define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE) 55 #define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \ 56 SEC_MAX_MAC_LEN * 2) 57 #define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG) 58 #define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM) 59 #define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \ 60 SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM)) 61 #define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \ 62 SEC_PBUF_LEFT_SZ) 63 64 #define SEC_SQE_LEN_RATE 4 65 #define SEC_SQE_CFLAG 2 66 #define SEC_SQE_AEAD_FLAG 3 67 #define SEC_SQE_DONE 0x1 68 69 static atomic_t sec_active_devs; 70 71 /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ 72 static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) 73 { 74 if (req->c_req.encrypt) 75 return (u32)atomic_inc_return(&ctx->enc_qcyclic) % 76 ctx->hlf_q_num; 77 78 return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num + 79 ctx->hlf_q_num; 80 } 81 82 static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req) 83 { 84 if (req->c_req.encrypt) 85 atomic_dec(&ctx->enc_qcyclic); 86 else 87 atomic_dec(&ctx->dec_qcyclic); 88 } 89 90 static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) 91 { 92 int req_id; 93 94 mutex_lock(&qp_ctx->req_lock); 95 96 req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 97 0, QM_Q_DEPTH, GFP_ATOMIC); 98 mutex_unlock(&qp_ctx->req_lock); 99 if (unlikely(req_id < 0)) { 100 dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n"); 101 return req_id; 102 } 103 104 req->qp_ctx = qp_ctx; 105 qp_ctx->req_list[req_id] = req; 106 return req_id; 107 } 108 109 static void sec_free_req_id(struct sec_req *req) 110 { 111 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 112 int req_id = req->req_id; 113 114 if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) { 115 dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n"); 116 return; 117 } 118 119 qp_ctx->req_list[req_id] = NULL; 120 req->qp_ctx = NULL; 121 122 mutex_lock(&qp_ctx->req_lock); 123 idr_remove(&qp_ctx->req_idr, req_id); 124 mutex_unlock(&qp_ctx->req_lock); 125 } 126 127 static int sec_aead_verify(struct sec_req *req) 128 { 129 struct aead_request *aead_req = req->aead_req.aead_req; 130 struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); 131 size_t authsize = crypto_aead_authsize(tfm); 132 u8 *mac_out = req->aead_req.out_mac; 133 u8 *mac = mac_out + SEC_MAX_MAC_LEN; 134 struct scatterlist *sgl = aead_req->src; 135 size_t sz; 136 137 sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize, 138 aead_req->cryptlen + aead_req->assoclen - 139 authsize); 140 if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) { 141 dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n"); 142 return -EBADMSG; 143 } 144 145 return 0; 146 } 147 148 static void sec_req_cb(struct hisi_qp *qp, void *resp) 149 { 150 struct sec_qp_ctx *qp_ctx = qp->qp_ctx; 151 struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx; 152 struct sec_sqe *bd = resp; 153 struct sec_ctx *ctx; 154 struct sec_req *req; 155 u16 done, flag; 156 int err = 0; 157 u8 type; 158 159 type = bd->type_cipher_auth & SEC_TYPE_MASK; 160 if (unlikely(type != SEC_BD_TYPE2)) { 161 atomic64_inc(&dfx->err_bd_cnt); 162 pr_err("err bd type [%d]\n", type); 163 return; 164 } 165 166 req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; 167 if (unlikely(!req)) { 168 atomic64_inc(&dfx->invalid_req_cnt); 169 atomic_inc(&qp->qp_status.used); 170 return; 171 } 172 req->err_type = bd->type2.error_type; 173 ctx = req->ctx; 174 done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; 175 flag = (le16_to_cpu(bd->type2.done_flag) & 176 SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; 177 if (unlikely(req->err_type || done != SEC_SQE_DONE || 178 (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) || 179 (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) { 180 dev_err(SEC_CTX_DEV(ctx), 181 "err_type[%d],done[%d],flag[%d]\n", 182 req->err_type, done, flag); 183 err = -EIO; 184 atomic64_inc(&dfx->done_flag_cnt); 185 } 186 187 if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt) 188 err = sec_aead_verify(req); 189 190 atomic64_inc(&dfx->recv_cnt); 191 192 ctx->req_op->buf_unmap(ctx, req); 193 194 ctx->req_op->callback(ctx, req, err); 195 } 196 197 static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) 198 { 199 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 200 int ret; 201 202 if (ctx->fake_req_limit <= 203 atomic_read(&qp_ctx->qp->qp_status.used) && 204 !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) 205 return -EBUSY; 206 207 mutex_lock(&qp_ctx->req_lock); 208 ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); 209 210 if (ctx->fake_req_limit <= 211 atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { 212 list_add_tail(&req->backlog_head, &qp_ctx->backlog); 213 atomic64_inc(&ctx->sec->debug.dfx.send_cnt); 214 atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); 215 mutex_unlock(&qp_ctx->req_lock); 216 return -EBUSY; 217 } 218 mutex_unlock(&qp_ctx->req_lock); 219 220 if (unlikely(ret == -EBUSY)) 221 return -ENOBUFS; 222 223 if (likely(!ret)) { 224 ret = -EINPROGRESS; 225 atomic64_inc(&ctx->sec->debug.dfx.send_cnt); 226 } 227 228 return ret; 229 } 230 231 /* Get DMA memory resources */ 232 static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res) 233 { 234 int i; 235 236 res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, 237 &res->c_ivin_dma, GFP_KERNEL); 238 if (!res->c_ivin) 239 return -ENOMEM; 240 241 for (i = 1; i < QM_Q_DEPTH; i++) { 242 res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; 243 res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; 244 } 245 246 return 0; 247 } 248 249 static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res) 250 { 251 if (res->c_ivin) 252 dma_free_coherent(dev, SEC_TOTAL_IV_SZ, 253 res->c_ivin, res->c_ivin_dma); 254 } 255 256 static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res) 257 { 258 int i; 259 260 res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1, 261 &res->out_mac_dma, GFP_KERNEL); 262 if (!res->out_mac) 263 return -ENOMEM; 264 265 for (i = 1; i < QM_Q_DEPTH; i++) { 266 res[i].out_mac_dma = res->out_mac_dma + 267 i * (SEC_MAX_MAC_LEN << 1); 268 res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1); 269 } 270 271 return 0; 272 } 273 274 static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res) 275 { 276 if (res->out_mac) 277 dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1, 278 res->out_mac, res->out_mac_dma); 279 } 280 281 static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res) 282 { 283 if (res->pbuf) 284 dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ, 285 res->pbuf, res->pbuf_dma); 286 } 287 288 /* 289 * To improve performance, pbuffer is used for 290 * small packets (< 512Bytes) as IOMMU translation using. 291 */ 292 static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) 293 { 294 int pbuf_page_offset; 295 int i, j, k; 296 297 res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ, 298 &res->pbuf_dma, GFP_KERNEL); 299 if (!res->pbuf) 300 return -ENOMEM; 301 302 /* 303 * SEC_PBUF_PKG contains data pbuf, iv and 304 * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC> 305 * Every PAGE contains six SEC_PBUF_PKG 306 * The sec_qp_ctx contains QM_Q_DEPTH numbers of SEC_PBUF_PKG 307 * So we need SEC_PBUF_PAGE_NUM numbers of PAGE 308 * for the SEC_TOTAL_PBUF_SZ 309 */ 310 for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) { 311 pbuf_page_offset = PAGE_SIZE * i; 312 for (j = 0; j < SEC_PBUF_NUM; j++) { 313 k = i * SEC_PBUF_NUM + j; 314 if (k == QM_Q_DEPTH) 315 break; 316 res[k].pbuf = res->pbuf + 317 j * SEC_PBUF_PKG + pbuf_page_offset; 318 res[k].pbuf_dma = res->pbuf_dma + 319 j * SEC_PBUF_PKG + pbuf_page_offset; 320 } 321 } 322 return 0; 323 } 324 325 static int sec_alg_resource_alloc(struct sec_ctx *ctx, 326 struct sec_qp_ctx *qp_ctx) 327 { 328 struct device *dev = SEC_CTX_DEV(ctx); 329 struct sec_alg_res *res = qp_ctx->res; 330 int ret; 331 332 ret = sec_alloc_civ_resource(dev, res); 333 if (ret) 334 return ret; 335 336 if (ctx->alg_type == SEC_AEAD) { 337 ret = sec_alloc_mac_resource(dev, res); 338 if (ret) 339 goto alloc_fail; 340 } 341 if (ctx->pbuf_supported) { 342 ret = sec_alloc_pbuf_resource(dev, res); 343 if (ret) { 344 dev_err(dev, "fail to alloc pbuf dma resource!\n"); 345 goto alloc_fail; 346 } 347 } 348 349 return 0; 350 alloc_fail: 351 sec_free_civ_resource(dev, res); 352 353 return ret; 354 } 355 356 static void sec_alg_resource_free(struct sec_ctx *ctx, 357 struct sec_qp_ctx *qp_ctx) 358 { 359 struct device *dev = SEC_CTX_DEV(ctx); 360 361 sec_free_civ_resource(dev, qp_ctx->res); 362 363 if (ctx->pbuf_supported) 364 sec_free_pbuf_resource(dev, qp_ctx->res); 365 if (ctx->alg_type == SEC_AEAD) 366 sec_free_mac_resource(dev, qp_ctx->res); 367 } 368 369 static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, 370 int qp_ctx_id, int alg_type) 371 { 372 struct device *dev = SEC_CTX_DEV(ctx); 373 struct sec_qp_ctx *qp_ctx; 374 struct hisi_qp *qp; 375 int ret = -ENOMEM; 376 377 qp_ctx = &ctx->qp_ctx[qp_ctx_id]; 378 qp = ctx->qps[qp_ctx_id]; 379 qp->req_type = 0; 380 qp->qp_ctx = qp_ctx; 381 qp->req_cb = sec_req_cb; 382 qp_ctx->qp = qp; 383 qp_ctx->ctx = ctx; 384 385 mutex_init(&qp_ctx->req_lock); 386 idr_init(&qp_ctx->req_idr); 387 INIT_LIST_HEAD(&qp_ctx->backlog); 388 389 qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, 390 SEC_SGL_SGE_NR); 391 if (IS_ERR(qp_ctx->c_in_pool)) { 392 dev_err(dev, "fail to create sgl pool for input!\n"); 393 goto err_destroy_idr; 394 } 395 396 qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH, 397 SEC_SGL_SGE_NR); 398 if (IS_ERR(qp_ctx->c_out_pool)) { 399 dev_err(dev, "fail to create sgl pool for output!\n"); 400 goto err_free_c_in_pool; 401 } 402 403 ret = sec_alg_resource_alloc(ctx, qp_ctx); 404 if (ret) 405 goto err_free_c_out_pool; 406 407 ret = hisi_qm_start_qp(qp, 0); 408 if (ret < 0) 409 goto err_queue_free; 410 411 return 0; 412 413 err_queue_free: 414 sec_alg_resource_free(ctx, qp_ctx); 415 err_free_c_out_pool: 416 hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); 417 err_free_c_in_pool: 418 hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); 419 err_destroy_idr: 420 idr_destroy(&qp_ctx->req_idr); 421 422 return ret; 423 } 424 425 static void sec_release_qp_ctx(struct sec_ctx *ctx, 426 struct sec_qp_ctx *qp_ctx) 427 { 428 struct device *dev = SEC_CTX_DEV(ctx); 429 430 hisi_qm_stop_qp(qp_ctx->qp); 431 sec_alg_resource_free(ctx, qp_ctx); 432 433 hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); 434 hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); 435 436 idr_destroy(&qp_ctx->req_idr); 437 } 438 439 static int sec_ctx_base_init(struct sec_ctx *ctx) 440 { 441 struct sec_dev *sec; 442 int i, ret; 443 444 ctx->qps = sec_create_qps(); 445 if (!ctx->qps) { 446 pr_err("Can not create sec qps!\n"); 447 return -ENODEV; 448 } 449 450 sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); 451 ctx->sec = sec; 452 ctx->hlf_q_num = sec->ctx_q_num >> 1; 453 454 ctx->pbuf_supported = ctx->sec->iommu_used; 455 456 /* Half of queue depth is taken as fake requests limit in the queue. */ 457 ctx->fake_req_limit = QM_Q_DEPTH >> 1; 458 ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), 459 GFP_KERNEL); 460 if (!ctx->qp_ctx) 461 return -ENOMEM; 462 463 for (i = 0; i < sec->ctx_q_num; i++) { 464 ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0); 465 if (ret) 466 goto err_sec_release_qp_ctx; 467 } 468 469 return 0; 470 err_sec_release_qp_ctx: 471 for (i = i - 1; i >= 0; i--) 472 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); 473 474 sec_destroy_qps(ctx->qps, sec->ctx_q_num); 475 kfree(ctx->qp_ctx); 476 return ret; 477 } 478 479 static void sec_ctx_base_uninit(struct sec_ctx *ctx) 480 { 481 int i; 482 483 for (i = 0; i < ctx->sec->ctx_q_num; i++) 484 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); 485 486 sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num); 487 kfree(ctx->qp_ctx); 488 } 489 490 static int sec_cipher_init(struct sec_ctx *ctx) 491 { 492 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 493 494 c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, 495 &c_ctx->c_key_dma, GFP_KERNEL); 496 if (!c_ctx->c_key) 497 return -ENOMEM; 498 499 return 0; 500 } 501 502 static void sec_cipher_uninit(struct sec_ctx *ctx) 503 { 504 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 505 506 memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); 507 dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, 508 c_ctx->c_key, c_ctx->c_key_dma); 509 } 510 511 static int sec_auth_init(struct sec_ctx *ctx) 512 { 513 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; 514 515 a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, 516 &a_ctx->a_key_dma, GFP_KERNEL); 517 if (!a_ctx->a_key) 518 return -ENOMEM; 519 520 return 0; 521 } 522 523 static void sec_auth_uninit(struct sec_ctx *ctx) 524 { 525 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; 526 527 memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE); 528 dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, 529 a_ctx->a_key, a_ctx->a_key_dma); 530 } 531 532 static int sec_skcipher_init(struct crypto_skcipher *tfm) 533 { 534 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 535 int ret; 536 537 ctx->alg_type = SEC_SKCIPHER; 538 crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); 539 ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); 540 if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { 541 dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n"); 542 return -EINVAL; 543 } 544 545 ret = sec_ctx_base_init(ctx); 546 if (ret) 547 return ret; 548 549 ret = sec_cipher_init(ctx); 550 if (ret) 551 goto err_cipher_init; 552 553 return 0; 554 err_cipher_init: 555 sec_ctx_base_uninit(ctx); 556 557 return ret; 558 } 559 560 static void sec_skcipher_uninit(struct crypto_skcipher *tfm) 561 { 562 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 563 564 sec_cipher_uninit(ctx); 565 sec_ctx_base_uninit(ctx); 566 } 567 568 static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx, 569 const u32 keylen, 570 const enum sec_cmode c_mode) 571 { 572 switch (keylen) { 573 case SEC_DES3_2KEY_SIZE: 574 c_ctx->c_key_len = SEC_CKEY_3DES_2KEY; 575 break; 576 case SEC_DES3_3KEY_SIZE: 577 c_ctx->c_key_len = SEC_CKEY_3DES_3KEY; 578 break; 579 default: 580 return -EINVAL; 581 } 582 583 return 0; 584 } 585 586 static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx, 587 const u32 keylen, 588 const enum sec_cmode c_mode) 589 { 590 if (c_mode == SEC_CMODE_XTS) { 591 switch (keylen) { 592 case SEC_XTS_MIN_KEY_SIZE: 593 c_ctx->c_key_len = SEC_CKEY_128BIT; 594 break; 595 case SEC_XTS_MAX_KEY_SIZE: 596 c_ctx->c_key_len = SEC_CKEY_256BIT; 597 break; 598 default: 599 pr_err("hisi_sec2: xts mode key error!\n"); 600 return -EINVAL; 601 } 602 } else { 603 switch (keylen) { 604 case AES_KEYSIZE_128: 605 c_ctx->c_key_len = SEC_CKEY_128BIT; 606 break; 607 case AES_KEYSIZE_192: 608 c_ctx->c_key_len = SEC_CKEY_192BIT; 609 break; 610 case AES_KEYSIZE_256: 611 c_ctx->c_key_len = SEC_CKEY_256BIT; 612 break; 613 default: 614 pr_err("hisi_sec2: aes key error!\n"); 615 return -EINVAL; 616 } 617 } 618 619 return 0; 620 } 621 622 static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, 623 const u32 keylen, const enum sec_calg c_alg, 624 const enum sec_cmode c_mode) 625 { 626 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 627 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 628 int ret; 629 630 if (c_mode == SEC_CMODE_XTS) { 631 ret = xts_verify_key(tfm, key, keylen); 632 if (ret) { 633 dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n"); 634 return ret; 635 } 636 } 637 638 c_ctx->c_alg = c_alg; 639 c_ctx->c_mode = c_mode; 640 641 switch (c_alg) { 642 case SEC_CALG_3DES: 643 ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode); 644 break; 645 case SEC_CALG_AES: 646 case SEC_CALG_SM4: 647 ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); 648 break; 649 default: 650 return -EINVAL; 651 } 652 653 if (ret) { 654 dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n"); 655 return ret; 656 } 657 658 memcpy(c_ctx->c_key, key, keylen); 659 660 return 0; 661 } 662 663 #define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \ 664 static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\ 665 u32 keylen) \ 666 { \ 667 return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \ 668 } 669 670 GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB) 671 GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC) 672 GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS) 673 674 GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB) 675 GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC) 676 677 GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS) 678 GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC) 679 680 static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, 681 struct scatterlist *src) 682 { 683 struct aead_request *aead_req = req->aead_req.aead_req; 684 struct sec_cipher_req *c_req = &req->c_req; 685 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 686 struct device *dev = SEC_CTX_DEV(ctx); 687 int copy_size, pbuf_length; 688 int req_id = req->req_id; 689 690 if (ctx->alg_type == SEC_AEAD) 691 copy_size = aead_req->cryptlen + aead_req->assoclen; 692 else 693 copy_size = c_req->c_len; 694 695 pbuf_length = sg_copy_to_buffer(src, sg_nents(src), 696 qp_ctx->res[req_id].pbuf, 697 copy_size); 698 699 if (unlikely(pbuf_length != copy_size)) { 700 dev_err(dev, "copy src data to pbuf error!\n"); 701 return -EINVAL; 702 } 703 704 c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma; 705 706 if (!c_req->c_in_dma) { 707 dev_err(dev, "fail to set pbuffer address!\n"); 708 return -ENOMEM; 709 } 710 711 c_req->c_out_dma = c_req->c_in_dma; 712 713 return 0; 714 } 715 716 static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, 717 struct scatterlist *dst) 718 { 719 struct aead_request *aead_req = req->aead_req.aead_req; 720 struct sec_cipher_req *c_req = &req->c_req; 721 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 722 struct device *dev = SEC_CTX_DEV(ctx); 723 int copy_size, pbuf_length; 724 int req_id = req->req_id; 725 726 if (ctx->alg_type == SEC_AEAD) 727 copy_size = c_req->c_len + aead_req->assoclen; 728 else 729 copy_size = c_req->c_len; 730 731 pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), 732 qp_ctx->res[req_id].pbuf, 733 copy_size); 734 735 if (unlikely(pbuf_length != copy_size)) 736 dev_err(dev, "copy pbuf data to dst error!\n"); 737 738 } 739 740 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, 741 struct scatterlist *src, struct scatterlist *dst) 742 { 743 struct sec_cipher_req *c_req = &req->c_req; 744 struct sec_aead_req *a_req = &req->aead_req; 745 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 746 struct sec_alg_res *res = &qp_ctx->res[req->req_id]; 747 struct device *dev = SEC_CTX_DEV(ctx); 748 int ret; 749 750 if (req->use_pbuf) { 751 ret = sec_cipher_pbuf_map(ctx, req, src); 752 c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET; 753 c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET; 754 if (ctx->alg_type == SEC_AEAD) { 755 a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET; 756 a_req->out_mac_dma = res->pbuf_dma + 757 SEC_PBUF_MAC_OFFSET; 758 } 759 760 return ret; 761 } 762 c_req->c_ivin = res->c_ivin; 763 c_req->c_ivin_dma = res->c_ivin_dma; 764 if (ctx->alg_type == SEC_AEAD) { 765 a_req->out_mac = res->out_mac; 766 a_req->out_mac_dma = res->out_mac_dma; 767 } 768 769 c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, 770 qp_ctx->c_in_pool, 771 req->req_id, 772 &c_req->c_in_dma); 773 774 if (IS_ERR(c_req->c_in)) { 775 dev_err(dev, "fail to dma map input sgl buffers!\n"); 776 return PTR_ERR(c_req->c_in); 777 } 778 779 if (dst == src) { 780 c_req->c_out = c_req->c_in; 781 c_req->c_out_dma = c_req->c_in_dma; 782 } else { 783 c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst, 784 qp_ctx->c_out_pool, 785 req->req_id, 786 &c_req->c_out_dma); 787 788 if (IS_ERR(c_req->c_out)) { 789 dev_err(dev, "fail to dma map output sgl buffers!\n"); 790 hisi_acc_sg_buf_unmap(dev, src, c_req->c_in); 791 return PTR_ERR(c_req->c_out); 792 } 793 } 794 795 return 0; 796 } 797 798 static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, 799 struct scatterlist *src, struct scatterlist *dst) 800 { 801 struct sec_cipher_req *c_req = &req->c_req; 802 struct device *dev = SEC_CTX_DEV(ctx); 803 804 if (req->use_pbuf) { 805 sec_cipher_pbuf_unmap(ctx, req, dst); 806 } else { 807 if (dst != src) 808 hisi_acc_sg_buf_unmap(dev, src, c_req->c_in); 809 810 hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out); 811 } 812 } 813 814 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req) 815 { 816 struct skcipher_request *sq = req->c_req.sk_req; 817 818 return sec_cipher_map(ctx, req, sq->src, sq->dst); 819 } 820 821 static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) 822 { 823 struct skcipher_request *sq = req->c_req.sk_req; 824 825 sec_cipher_unmap(ctx, req, sq->src, sq->dst); 826 } 827 828 static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx, 829 struct crypto_authenc_keys *keys) 830 { 831 switch (keys->enckeylen) { 832 case AES_KEYSIZE_128: 833 c_ctx->c_key_len = SEC_CKEY_128BIT; 834 break; 835 case AES_KEYSIZE_192: 836 c_ctx->c_key_len = SEC_CKEY_192BIT; 837 break; 838 case AES_KEYSIZE_256: 839 c_ctx->c_key_len = SEC_CKEY_256BIT; 840 break; 841 default: 842 pr_err("hisi_sec2: aead aes key error!\n"); 843 return -EINVAL; 844 } 845 memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen); 846 847 return 0; 848 } 849 850 static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, 851 struct crypto_authenc_keys *keys) 852 { 853 struct crypto_shash *hash_tfm = ctx->hash_tfm; 854 int blocksize, ret; 855 856 if (!keys->authkeylen) { 857 pr_err("hisi_sec2: aead auth key error!\n"); 858 return -EINVAL; 859 } 860 861 blocksize = crypto_shash_blocksize(hash_tfm); 862 if (keys->authkeylen > blocksize) { 863 ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey, 864 keys->authkeylen, ctx->a_key); 865 if (ret) { 866 pr_err("hisi_sec2: aead auth digest error!\n"); 867 return -EINVAL; 868 } 869 ctx->a_key_len = blocksize; 870 } else { 871 memcpy(ctx->a_key, keys->authkey, keys->authkeylen); 872 ctx->a_key_len = keys->authkeylen; 873 } 874 875 return 0; 876 } 877 878 static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, 879 const u32 keylen, const enum sec_hash_alg a_alg, 880 const enum sec_calg c_alg, 881 const enum sec_mac_len mac_len, 882 const enum sec_cmode c_mode) 883 { 884 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 885 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 886 struct crypto_authenc_keys keys; 887 int ret; 888 889 ctx->a_ctx.a_alg = a_alg; 890 ctx->c_ctx.c_alg = c_alg; 891 ctx->a_ctx.mac_len = mac_len; 892 c_ctx->c_mode = c_mode; 893 894 if (crypto_authenc_extractkeys(&keys, key, keylen)) 895 goto bad_key; 896 897 ret = sec_aead_aes_set_key(c_ctx, &keys); 898 if (ret) { 899 dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n"); 900 goto bad_key; 901 } 902 903 ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); 904 if (ret) { 905 dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n"); 906 goto bad_key; 907 } 908 909 return 0; 910 bad_key: 911 memzero_explicit(&keys, sizeof(struct crypto_authenc_keys)); 912 913 return -EINVAL; 914 } 915 916 917 #define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \ 918 static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \ 919 u32 keylen) \ 920 { \ 921 return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\ 922 } 923 924 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, 925 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC) 926 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, 927 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC) 928 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, 929 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC) 930 931 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req) 932 { 933 struct aead_request *aq = req->aead_req.aead_req; 934 935 return sec_cipher_map(ctx, req, aq->src, aq->dst); 936 } 937 938 static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) 939 { 940 struct aead_request *aq = req->aead_req.aead_req; 941 942 sec_cipher_unmap(ctx, req, aq->src, aq->dst); 943 } 944 945 static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req) 946 { 947 int ret; 948 949 ret = ctx->req_op->buf_map(ctx, req); 950 if (unlikely(ret)) 951 return ret; 952 953 ctx->req_op->do_transfer(ctx, req); 954 955 ret = ctx->req_op->bd_fill(ctx, req); 956 if (unlikely(ret)) 957 goto unmap_req_buf; 958 959 return ret; 960 961 unmap_req_buf: 962 ctx->req_op->buf_unmap(ctx, req); 963 964 return ret; 965 } 966 967 static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req) 968 { 969 ctx->req_op->buf_unmap(ctx, req); 970 } 971 972 static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req) 973 { 974 struct skcipher_request *sk_req = req->c_req.sk_req; 975 struct sec_cipher_req *c_req = &req->c_req; 976 977 memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize); 978 } 979 980 static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) 981 { 982 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 983 struct sec_cipher_req *c_req = &req->c_req; 984 struct sec_sqe *sec_sqe = &req->sec_sqe; 985 u8 scene, sa_type, da_type; 986 u8 bd_type, cipher; 987 u8 de = 0; 988 989 memset(sec_sqe, 0, sizeof(struct sec_sqe)); 990 991 sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); 992 sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); 993 sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma); 994 sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); 995 996 sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) << 997 SEC_CMODE_OFFSET); 998 sec_sqe->type2.c_alg = c_ctx->c_alg; 999 sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) << 1000 SEC_CKEY_OFFSET); 1001 1002 bd_type = SEC_BD_TYPE2; 1003 if (c_req->encrypt) 1004 cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET; 1005 else 1006 cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET; 1007 sec_sqe->type_cipher_auth = bd_type | cipher; 1008 1009 if (req->use_pbuf) 1010 sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET; 1011 else 1012 sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET; 1013 scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET; 1014 if (c_req->c_in_dma != c_req->c_out_dma) 1015 de = 0x1 << SEC_DE_OFFSET; 1016 1017 sec_sqe->sds_sa_type = (de | scene | sa_type); 1018 1019 /* Just set DST address type */ 1020 if (req->use_pbuf) 1021 da_type = SEC_PBUF << SEC_DST_SGL_OFFSET; 1022 else 1023 da_type = SEC_SGL << SEC_DST_SGL_OFFSET; 1024 sec_sqe->sdm_addr_type |= da_type; 1025 1026 sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); 1027 sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id); 1028 1029 return 0; 1030 } 1031 1032 static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) 1033 { 1034 struct aead_request *aead_req = req->aead_req.aead_req; 1035 struct skcipher_request *sk_req = req->c_req.sk_req; 1036 u32 iv_size = req->ctx->c_ctx.ivsize; 1037 struct scatterlist *sgl; 1038 unsigned int cryptlen; 1039 size_t sz; 1040 u8 *iv; 1041 1042 if (req->c_req.encrypt) 1043 sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst; 1044 else 1045 sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src; 1046 1047 if (alg_type == SEC_SKCIPHER) { 1048 iv = sk_req->iv; 1049 cryptlen = sk_req->cryptlen; 1050 } else { 1051 iv = aead_req->iv; 1052 cryptlen = aead_req->cryptlen; 1053 } 1054 1055 sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, 1056 cryptlen - iv_size); 1057 if (unlikely(sz != iv_size)) 1058 dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); 1059 } 1060 1061 static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, 1062 struct sec_qp_ctx *qp_ctx) 1063 { 1064 struct sec_req *backlog_req = NULL; 1065 1066 mutex_lock(&qp_ctx->req_lock); 1067 if (ctx->fake_req_limit >= 1068 atomic_read(&qp_ctx->qp->qp_status.used) && 1069 !list_empty(&qp_ctx->backlog)) { 1070 backlog_req = list_first_entry(&qp_ctx->backlog, 1071 typeof(*backlog_req), backlog_head); 1072 list_del(&backlog_req->backlog_head); 1073 } 1074 mutex_unlock(&qp_ctx->req_lock); 1075 1076 return backlog_req; 1077 } 1078 1079 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, 1080 int err) 1081 { 1082 struct skcipher_request *sk_req = req->c_req.sk_req; 1083 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 1084 struct skcipher_request *backlog_sk_req; 1085 struct sec_req *backlog_req; 1086 1087 sec_free_req_id(req); 1088 1089 /* IV output at encrypto of CBC mode */ 1090 if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) 1091 sec_update_iv(req, SEC_SKCIPHER); 1092 1093 while (1) { 1094 backlog_req = sec_back_req_clear(ctx, qp_ctx); 1095 if (!backlog_req) 1096 break; 1097 1098 backlog_sk_req = backlog_req->c_req.sk_req; 1099 backlog_sk_req->base.complete(&backlog_sk_req->base, 1100 -EINPROGRESS); 1101 atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); 1102 } 1103 1104 1105 sk_req->base.complete(&sk_req->base, err); 1106 } 1107 1108 static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req) 1109 { 1110 struct aead_request *aead_req = req->aead_req.aead_req; 1111 struct sec_cipher_req *c_req = &req->c_req; 1112 1113 memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize); 1114 } 1115 1116 static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, 1117 struct sec_req *req, struct sec_sqe *sec_sqe) 1118 { 1119 struct sec_aead_req *a_req = &req->aead_req; 1120 struct sec_cipher_req *c_req = &req->c_req; 1121 struct aead_request *aq = a_req->aead_req; 1122 1123 sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); 1124 1125 sec_sqe->type2.mac_key_alg = 1126 cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE); 1127 1128 sec_sqe->type2.mac_key_alg |= 1129 cpu_to_le32((u32)((ctx->a_key_len) / 1130 SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); 1131 1132 sec_sqe->type2.mac_key_alg |= 1133 cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); 1134 1135 sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET; 1136 1137 if (dir) 1138 sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; 1139 else 1140 sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; 1141 1142 sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen); 1143 1144 sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); 1145 1146 sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); 1147 } 1148 1149 static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req) 1150 { 1151 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; 1152 struct sec_sqe *sec_sqe = &req->sec_sqe; 1153 int ret; 1154 1155 ret = sec_skcipher_bd_fill(ctx, req); 1156 if (unlikely(ret)) { 1157 dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n"); 1158 return ret; 1159 } 1160 1161 sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe); 1162 1163 return 0; 1164 } 1165 1166 static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) 1167 { 1168 struct aead_request *a_req = req->aead_req.aead_req; 1169 struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); 1170 struct sec_aead_req *aead_req = &req->aead_req; 1171 struct sec_cipher_req *c_req = &req->c_req; 1172 size_t authsize = crypto_aead_authsize(tfm); 1173 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 1174 struct aead_request *backlog_aead_req; 1175 struct sec_req *backlog_req; 1176 size_t sz; 1177 1178 if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) 1179 sec_update_iv(req, SEC_AEAD); 1180 1181 /* Copy output mac */ 1182 if (!err && c_req->encrypt) { 1183 struct scatterlist *sgl = a_req->dst; 1184 1185 sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), 1186 aead_req->out_mac, 1187 authsize, a_req->cryptlen + 1188 a_req->assoclen); 1189 1190 if (unlikely(sz != authsize)) { 1191 dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n"); 1192 err = -EINVAL; 1193 } 1194 } 1195 1196 sec_free_req_id(req); 1197 1198 while (1) { 1199 backlog_req = sec_back_req_clear(c, qp_ctx); 1200 if (!backlog_req) 1201 break; 1202 1203 backlog_aead_req = backlog_req->aead_req.aead_req; 1204 backlog_aead_req->base.complete(&backlog_aead_req->base, 1205 -EINPROGRESS); 1206 atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); 1207 } 1208 1209 a_req->base.complete(&a_req->base, err); 1210 } 1211 1212 static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) 1213 { 1214 sec_free_req_id(req); 1215 sec_free_queue_id(ctx, req); 1216 } 1217 1218 static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) 1219 { 1220 struct sec_qp_ctx *qp_ctx; 1221 int queue_id; 1222 1223 /* To load balance */ 1224 queue_id = sec_alloc_queue_id(ctx, req); 1225 qp_ctx = &ctx->qp_ctx[queue_id]; 1226 1227 req->req_id = sec_alloc_req_id(req, qp_ctx); 1228 if (unlikely(req->req_id < 0)) { 1229 sec_free_queue_id(ctx, req); 1230 return req->req_id; 1231 } 1232 1233 return 0; 1234 } 1235 1236 static int sec_process(struct sec_ctx *ctx, struct sec_req *req) 1237 { 1238 struct sec_cipher_req *c_req = &req->c_req; 1239 int ret; 1240 1241 ret = sec_request_init(ctx, req); 1242 if (unlikely(ret)) 1243 return ret; 1244 1245 ret = sec_request_transfer(ctx, req); 1246 if (unlikely(ret)) 1247 goto err_uninit_req; 1248 1249 /* Output IV as decrypto */ 1250 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) 1251 sec_update_iv(req, ctx->alg_type); 1252 1253 ret = ctx->req_op->bd_send(ctx, req); 1254 if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || 1255 (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { 1256 dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n"); 1257 goto err_send_req; 1258 } 1259 1260 return ret; 1261 1262 err_send_req: 1263 /* As failing, restore the IV from user */ 1264 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) { 1265 if (ctx->alg_type == SEC_SKCIPHER) 1266 memcpy(req->c_req.sk_req->iv, c_req->c_ivin, 1267 ctx->c_ctx.ivsize); 1268 else 1269 memcpy(req->aead_req.aead_req->iv, c_req->c_ivin, 1270 ctx->c_ctx.ivsize); 1271 } 1272 1273 sec_request_untransfer(ctx, req); 1274 err_uninit_req: 1275 sec_request_uninit(ctx, req); 1276 1277 return ret; 1278 } 1279 1280 static const struct sec_req_op sec_skcipher_req_ops = { 1281 .buf_map = sec_skcipher_sgl_map, 1282 .buf_unmap = sec_skcipher_sgl_unmap, 1283 .do_transfer = sec_skcipher_copy_iv, 1284 .bd_fill = sec_skcipher_bd_fill, 1285 .bd_send = sec_bd_send, 1286 .callback = sec_skcipher_callback, 1287 .process = sec_process, 1288 }; 1289 1290 static const struct sec_req_op sec_aead_req_ops = { 1291 .buf_map = sec_aead_sgl_map, 1292 .buf_unmap = sec_aead_sgl_unmap, 1293 .do_transfer = sec_aead_copy_iv, 1294 .bd_fill = sec_aead_bd_fill, 1295 .bd_send = sec_bd_send, 1296 .callback = sec_aead_callback, 1297 .process = sec_process, 1298 }; 1299 1300 static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm) 1301 { 1302 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 1303 1304 ctx->req_op = &sec_skcipher_req_ops; 1305 1306 return sec_skcipher_init(tfm); 1307 } 1308 1309 static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm) 1310 { 1311 sec_skcipher_uninit(tfm); 1312 } 1313 1314 static int sec_aead_init(struct crypto_aead *tfm) 1315 { 1316 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1317 int ret; 1318 1319 crypto_aead_set_reqsize(tfm, sizeof(struct sec_req)); 1320 ctx->alg_type = SEC_AEAD; 1321 ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); 1322 if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { 1323 dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n"); 1324 return -EINVAL; 1325 } 1326 1327 ctx->req_op = &sec_aead_req_ops; 1328 ret = sec_ctx_base_init(ctx); 1329 if (ret) 1330 return ret; 1331 1332 ret = sec_auth_init(ctx); 1333 if (ret) 1334 goto err_auth_init; 1335 1336 ret = sec_cipher_init(ctx); 1337 if (ret) 1338 goto err_cipher_init; 1339 1340 return ret; 1341 1342 err_cipher_init: 1343 sec_auth_uninit(ctx); 1344 err_auth_init: 1345 sec_ctx_base_uninit(ctx); 1346 1347 return ret; 1348 } 1349 1350 static void sec_aead_exit(struct crypto_aead *tfm) 1351 { 1352 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1353 1354 sec_cipher_uninit(ctx); 1355 sec_auth_uninit(ctx); 1356 sec_ctx_base_uninit(ctx); 1357 } 1358 1359 static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) 1360 { 1361 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1362 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; 1363 int ret; 1364 1365 ret = sec_aead_init(tfm); 1366 if (ret) { 1367 pr_err("hisi_sec2: aead init error!\n"); 1368 return ret; 1369 } 1370 1371 auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); 1372 if (IS_ERR(auth_ctx->hash_tfm)) { 1373 dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n"); 1374 sec_aead_exit(tfm); 1375 return PTR_ERR(auth_ctx->hash_tfm); 1376 } 1377 1378 return 0; 1379 } 1380 1381 static void sec_aead_ctx_exit(struct crypto_aead *tfm) 1382 { 1383 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1384 1385 crypto_free_shash(ctx->a_ctx.hash_tfm); 1386 sec_aead_exit(tfm); 1387 } 1388 1389 static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm) 1390 { 1391 return sec_aead_ctx_init(tfm, "sha1"); 1392 } 1393 1394 static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm) 1395 { 1396 return sec_aead_ctx_init(tfm, "sha256"); 1397 } 1398 1399 static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) 1400 { 1401 return sec_aead_ctx_init(tfm, "sha512"); 1402 } 1403 1404 static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) 1405 { 1406 struct skcipher_request *sk_req = sreq->c_req.sk_req; 1407 struct device *dev = SEC_CTX_DEV(ctx); 1408 u8 c_alg = ctx->c_ctx.c_alg; 1409 1410 if (unlikely(!sk_req->src || !sk_req->dst)) { 1411 dev_err(dev, "skcipher input param error!\n"); 1412 return -EINVAL; 1413 } 1414 sreq->c_req.c_len = sk_req->cryptlen; 1415 1416 if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) 1417 sreq->use_pbuf = true; 1418 else 1419 sreq->use_pbuf = false; 1420 1421 if (c_alg == SEC_CALG_3DES) { 1422 if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) { 1423 dev_err(dev, "skcipher 3des input length error!\n"); 1424 return -EINVAL; 1425 } 1426 return 0; 1427 } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) { 1428 if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) { 1429 dev_err(dev, "skcipher aes input length error!\n"); 1430 return -EINVAL; 1431 } 1432 return 0; 1433 } 1434 1435 dev_err(dev, "skcipher algorithm error!\n"); 1436 return -EINVAL; 1437 } 1438 1439 static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) 1440 { 1441 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); 1442 struct sec_req *req = skcipher_request_ctx(sk_req); 1443 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 1444 int ret; 1445 1446 if (!sk_req->cryptlen) 1447 return 0; 1448 1449 req->flag = sk_req->base.flags; 1450 req->c_req.sk_req = sk_req; 1451 req->c_req.encrypt = encrypt; 1452 req->ctx = ctx; 1453 1454 ret = sec_skcipher_param_check(ctx, req); 1455 if (unlikely(ret)) 1456 return -EINVAL; 1457 1458 return ctx->req_op->process(ctx, req); 1459 } 1460 1461 static int sec_skcipher_encrypt(struct skcipher_request *sk_req) 1462 { 1463 return sec_skcipher_crypto(sk_req, true); 1464 } 1465 1466 static int sec_skcipher_decrypt(struct skcipher_request *sk_req) 1467 { 1468 return sec_skcipher_crypto(sk_req, false); 1469 } 1470 1471 #define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \ 1472 sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\ 1473 {\ 1474 .base = {\ 1475 .cra_name = sec_cra_name,\ 1476 .cra_driver_name = "hisi_sec_"sec_cra_name,\ 1477 .cra_priority = SEC_PRIORITY,\ 1478 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\ 1479 .cra_blocksize = blk_size,\ 1480 .cra_ctxsize = sizeof(struct sec_ctx),\ 1481 .cra_module = THIS_MODULE,\ 1482 },\ 1483 .init = ctx_init,\ 1484 .exit = ctx_exit,\ 1485 .setkey = sec_set_key,\ 1486 .decrypt = sec_skcipher_decrypt,\ 1487 .encrypt = sec_skcipher_encrypt,\ 1488 .min_keysize = sec_min_key_size,\ 1489 .max_keysize = sec_max_key_size,\ 1490 .ivsize = iv_size,\ 1491 }, 1492 1493 #define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \ 1494 max_key_size, blk_size, iv_size) \ 1495 SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \ 1496 sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size) 1497 1498 static struct skcipher_alg sec_skciphers[] = { 1499 SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, 1500 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, 1501 AES_BLOCK_SIZE, 0) 1502 1503 SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, 1504 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, 1505 AES_BLOCK_SIZE, AES_BLOCK_SIZE) 1506 1507 SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, 1508 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, 1509 AES_BLOCK_SIZE, AES_BLOCK_SIZE) 1510 1511 SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, 1512 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, 1513 DES3_EDE_BLOCK_SIZE, 0) 1514 1515 SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, 1516 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, 1517 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE) 1518 1519 SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, 1520 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, 1521 AES_BLOCK_SIZE, AES_BLOCK_SIZE) 1522 1523 SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, 1524 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, 1525 AES_BLOCK_SIZE, AES_BLOCK_SIZE) 1526 }; 1527 1528 static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) 1529 { 1530 u8 c_alg = ctx->c_ctx.c_alg; 1531 struct aead_request *req = sreq->aead_req.aead_req; 1532 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1533 size_t authsize = crypto_aead_authsize(tfm); 1534 1535 if (unlikely(!req->src || !req->dst || !req->cryptlen || 1536 req->assoclen > SEC_MAX_AAD_LEN)) { 1537 dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n"); 1538 return -EINVAL; 1539 } 1540 1541 if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <= 1542 SEC_PBUF_SZ) 1543 sreq->use_pbuf = true; 1544 else 1545 sreq->use_pbuf = false; 1546 1547 /* Support AES only */ 1548 if (unlikely(c_alg != SEC_CALG_AES)) { 1549 dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n"); 1550 return -EINVAL; 1551 1552 } 1553 if (sreq->c_req.encrypt) 1554 sreq->c_req.c_len = req->cryptlen; 1555 else 1556 sreq->c_req.c_len = req->cryptlen - authsize; 1557 1558 if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { 1559 dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n"); 1560 return -EINVAL; 1561 } 1562 1563 return 0; 1564 } 1565 1566 static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) 1567 { 1568 struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); 1569 struct sec_req *req = aead_request_ctx(a_req); 1570 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1571 int ret; 1572 1573 req->flag = a_req->base.flags; 1574 req->aead_req.aead_req = a_req; 1575 req->c_req.encrypt = encrypt; 1576 req->ctx = ctx; 1577 1578 ret = sec_aead_param_check(ctx, req); 1579 if (unlikely(ret)) 1580 return -EINVAL; 1581 1582 return ctx->req_op->process(ctx, req); 1583 } 1584 1585 static int sec_aead_encrypt(struct aead_request *a_req) 1586 { 1587 return sec_aead_crypto(a_req, true); 1588 } 1589 1590 static int sec_aead_decrypt(struct aead_request *a_req) 1591 { 1592 return sec_aead_crypto(a_req, false); 1593 } 1594 1595 #define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\ 1596 ctx_exit, blk_size, iv_size, max_authsize)\ 1597 {\ 1598 .base = {\ 1599 .cra_name = sec_cra_name,\ 1600 .cra_driver_name = "hisi_sec_"sec_cra_name,\ 1601 .cra_priority = SEC_PRIORITY,\ 1602 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\ 1603 .cra_blocksize = blk_size,\ 1604 .cra_ctxsize = sizeof(struct sec_ctx),\ 1605 .cra_module = THIS_MODULE,\ 1606 },\ 1607 .init = ctx_init,\ 1608 .exit = ctx_exit,\ 1609 .setkey = sec_set_key,\ 1610 .decrypt = sec_aead_decrypt,\ 1611 .encrypt = sec_aead_encrypt,\ 1612 .ivsize = iv_size,\ 1613 .maxauthsize = max_authsize,\ 1614 } 1615 1616 #define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\ 1617 SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\ 1618 sec_aead_ctx_exit, blksize, ivsize, authsize) 1619 1620 static struct aead_alg sec_aeads[] = { 1621 SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", 1622 sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init, 1623 AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), 1624 1625 SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", 1626 sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init, 1627 AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), 1628 1629 SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", 1630 sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init, 1631 AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), 1632 }; 1633 1634 int sec_register_to_crypto(void) 1635 { 1636 int ret = 0; 1637 1638 /* To avoid repeat register */ 1639 if (atomic_add_return(1, &sec_active_devs) == 1) { 1640 ret = crypto_register_skciphers(sec_skciphers, 1641 ARRAY_SIZE(sec_skciphers)); 1642 if (ret) 1643 return ret; 1644 1645 ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); 1646 if (ret) 1647 goto reg_aead_fail; 1648 } 1649 1650 return ret; 1651 1652 reg_aead_fail: 1653 crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers)); 1654 1655 return ret; 1656 } 1657 1658 void sec_unregister_from_crypto(void) 1659 { 1660 if (atomic_sub_return(1, &sec_active_devs) == 0) { 1661 crypto_unregister_skciphers(sec_skciphers, 1662 ARRAY_SIZE(sec_skciphers)); 1663 crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); 1664 } 1665 } 1666