// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit operation related macros */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CTX_DEV(ctx)	(&(ctx)->sec->qm.pdev->dev)
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
			SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
			SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
			SEC_PBUF_LEFT_SZ)

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1

/* Pick an en/decrypt queue cyclically to balance load over the TFM's queues */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

static int sec_aead_verify(struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->aead_req.out_mac;
	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
	struct scatterlist *sgl = aead_req->src;
	size_t sz;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
				aead_req->cryptlen + aead_req->assoclen -
				authsize);
	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
		dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
		return -EBADMSG;
	}

	return 0;
}

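/*
 * QP completion callback: check the BD type and done/flag bits, verify the
 * MAC for AEAD decryption, then unmap the request buffers and complete the
 * request through the request ops.
 */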
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	struct sec_sqe *bd = resp;
	struct sec_ctx *ctx;
	struct sec_req *req;
	u16 done, flag;
	int err = 0;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (unlikely(type != SEC_BD_TYPE2)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}
	req->err_type = bd->type2.error_type;
	ctx = req->ctx;
	done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	flag = (le16_to_cpu(bd->type2.done_flag) &
		SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	if (unlikely(req->err_type || done != SEC_SQE_DONE ||
	    (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
	    (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
		dev_err(SEC_CTX_DEV(ctx),
			"err_type[%d],done[%d],flag[%d]\n",
			req->err_type, done, flag);
		err = -EIO;
		atomic64_inc(&dfx->done_flag_cnt);
	}

	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
		err = sec_aead_verify(req);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

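/*
 * Send one BD to the hardware queue. Once the fake request limit is
 * reached, requests that may backlog are tracked on qp_ctx->backlog and
 * -EBUSY is returned; a normal successful send returns -EINPROGRESS.
 */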
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		mutex_unlock(&qp_ctx->req_lock);
		return -EBUSY;
	}
	mutex_unlock(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pbuffer is used for small packets (< 512 bytes)
 * when IOMMU translation is in use.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * Each SEC_PBUF_PKG holds the data pbuf, the IV and the out_mac:
	 * <SEC_PBUF|SEC_IV|SEC_MAC>.
	 * Every page holds SEC_PBUF_NUM (six) of these packages, and the
	 * sec_qp_ctx needs QM_Q_DEPTH of them, so SEC_PBUF_PAGE_NUM pages
	 * make up SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_alg_res *res = qp_ctx->res;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

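/*
 * Set up one queue pair context: bind the QP to its completion callback,
 * create the hardware SGL pools for input and output, allocate the
 * per-request IV/MAC/pbuf resources and start the QP.
 */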
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

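/*
 * Common TFM context setup: create the queue pairs, split them evenly
 * between encryption and decryption, enable pbuf only when the IOMMU is
 * in use, and initialise one sec_qp_ctx per queue.
 */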
static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of the queue depth is taken as the fake request limit. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);

	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;

err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

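/*
 * Small-packet fast path: copy the source scatterlist into the request's
 * pbuf and run the operation in place (c_out_dma == c_in_dma).
 */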
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = SEC_CTX_DEV(ctx);
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf,
					copy_size);

	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}

	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;

	if (!c_req->c_in_dma) {
		dev_err(dev, "fail to set pbuffer address!\n");
		return -ENOMEM;
	}

	c_req->c_out_dma = c_req->c_in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = SEC_CTX_DEV(ctx);
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf,
					  copy_size);

	if (unlikely(pbuf_length != copy_size))
		dev_err(dev, "copy pbuf data to dst error!\n");
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = SEC_CTX_DEV(ctx);
	int ret;

	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = SEC_CTX_DEV(ctx);

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

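/*
 * Authentication keys longer than the hash block size are digested first,
 * as HMAC requires; shorter keys are copied as-is.
 */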
static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return -EINVAL;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

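/*
 * Fill a type2 BD for a cipher request: key/IV/source/destination DMA
 * addresses, cipher mode and key length, direction, SGL or pbuf address
 * types, the DE bit when src and dst differ, data length and request tag.
 */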
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	if (req->use_pbuf)
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
	else
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	if (req->use_pbuf)
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	else
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

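/*
 * For CBC the next IV is the last ciphertext block: copy it from the
 * destination when encrypting, or from the source when decrypting.
 */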
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
				cryptlen - iv_size);
	if (unlikely(sz != iv_size))
		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}

static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
					  struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	mutex_lock(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	mutex_unlock(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* IV output at encryption of CBC mode */
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
						-EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}

static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}

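/*
 * Add the authentication fields to a type2 BD: key address, MAC and key
 * lengths in SEC_SQE_LEN_RATE units, hash algorithm, cipher-then-auth or
 * auth-then-cipher ordering by direction, auth length and MAC address.
 */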
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
		return ret;
	}

	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);

		if (unlikely(sz != authsize)) {
			dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}

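/*
 * Common request path: pick a queue and request id, map the buffers and
 * fill the BD, save the output IV before CBC decryption, then send the BD.
 * On failure the IV is restored and everything is unwound.
 */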
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* Save the output IV before decryption */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the IV from the user */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_copy_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
		return -EINVAL;
	}

	ctx->req_op = &sec_aead_req_ops;
	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = SEC_CTX_DEV(ctx);
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}
	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

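/*
 * Validate an AEAD request: src/dst/cryptlen must be set, the AAD must fit
 * in SEC_MAX_AAD_LEN, only AES is supported, and the cipher length (minus
 * the authentication tag on decryption) must be block aligned.
 */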
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	u8 c_alg = ctx->c_ctx.c_alg;
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);

	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
		req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	/* Support AES only */
	if (unlikely(c_alg != SEC_CALG_AES)) {
		dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
		return -EINVAL;
	}
	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;

	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
		return -EINVAL;
	}

	return 0;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
			 sec_aead_ctx_exit, blksize, ivsize, authsize)

static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};

int sec_register_to_crypto(void)
{
	int ret;

	/* To avoid repeated registration */
	ret = crypto_register_skciphers(sec_skciphers,
					ARRAY_SIZE(sec_skciphers));
	if (ret)
		return ret;

	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	if (ret)
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));

	return ret;
}

void sec_unregister_from_crypto(void)
{
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}