// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* Bit-field offsets and masks within the SEC sqe (BD) */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_DE_OFFSET_V3	9
#define SEC_SCENE_OFFSET_V3	5
#define SEC_CKEY_OFFSET_V3	13
#define SEC_CTR_CNT_OFFSET	25
#define SEC_CTR_CNT_ROLLOVER	2
#define SEC_SRC_SGL_OFFSET_V3	11
#define SEC_DST_SGL_OFFSET_V3	14
#define SEC_CALG_OFFSET_V3	4
#define SEC_AKEY_OFFSET_V3	9
#define SEC_MAC_OFFSET_V3	4
#define SEC_AUTH_ALG_OFFSET_V3	15
#define SEC_CIPHER_AUTH_V3	0xbf
#define SEC_AUTH_CIPHER_V3	0x40
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001
#define SEC_ICV_MASK		0x000E
#define SEC_SQE_LEN_RATE_MASK	0x3

#define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_MAX_CCM_AAD_LEN	65279
#define SEC_TOTAL_MAC_SZ(depth)	(SEC_MAX_MAC_LEN * (depth))

#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE + \
				 SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ(depth)		(SEC_PBUF_PKG * ((depth) - \
					 SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
					 SEC_PBUF_LEFT_SZ(depth))
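/*
 * Worked example of the pbuf sizing above (illustrative only; it assumes
 * 4 KB pages and SEC_IV_SIZE = 24 from sec.h, and a queue depth of 256):
 *
 *   SEC_PBUF_PKG            = 512 + 24 + 64 * 2          = 664 bytes
 *   SEC_PBUF_NUM            = 4096 / 664                 = 6 packages per page
 *   SEC_PBUF_PAGE_NUM(256)  = 256 / 6                    = 42 full pages
 *   SEC_PBUF_LEFT_SZ(256)   = 664 * (256 - 42 * 6)       = 2656 bytes
 *   SEC_TOTAL_PBUF_SZ(256)  = 42 * 4096 + 2656           = 174688 bytes
 *
 * i.e. one contiguous DMA-coherent area holds SEC_PBUF_NUM <data|IV|MAC>
 * packages per page, plus a partial page covering the remainder of the
 * queue depth.
 */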
#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1
#define SEC_ICV_ERR		0x2
#define MIN_MAC_LEN		4
#define MAC_LEN_MASK		0x1U
#define MAX_INPUT_DATA_LEN	0xFFFE00
#define BITS_MASK		0xFF
#define BYTE_BITS		0x8
#define SEC_XTS_NAME_SZ		0x3
#define IV_CM_CAL_NUM		2
#define IV_CL_MASK		0x7
#define IV_CL_MIN		2
#define IV_CL_MID		4
#define IV_CL_MAX		8
#define IV_FLAGS_OFFSET		0x6
#define IV_CM_OFFSET		0x3
#define IV_LAST_BYTE1		1
#define IV_LAST_BYTE2		2
#define IV_LAST_BYTE_MASK	0xFF
#define IV_CTR_INIT		0x1
#define IV_BYTE_OFFSET		0x8

struct sec_skcipher {
	u64 alg_msk;
	struct skcipher_alg alg;
};

struct sec_aead {
	u64 alg_msk;
	struct aead_alg alg;
};

/* Pick an encrypt/decrypt queue cyclically to balance load across a TFM's queues */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) % ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
		ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}
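/*
 * Example of the split above (illustrative, assuming ctx->hlf_q_num == 4,
 * i.e. eight queues assigned to the TFM in total):
 *
 *   encrypt requests cycle through qp_ctx[0..3]
 *   decrypt requests cycle through qp_ctx[4..7]
 *
 * so encryption and decryption never compete for the same queue, and
 * successive requests in the same direction are spread round-robin.
 */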
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	spin_lock_bh(&qp_ctx->req_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_bh(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	spin_lock_bh(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	spin_unlock_bh(&qp_ctx->req_lock);
}

static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
{
	struct sec_sqe *bd = resp;

	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd->type2.done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le16_to_cpu(bd->type2.tag);
	status->err_type = bd->type2.error_type;

	return bd->type_cipher_auth & SEC_TYPE_MASK;
}

static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
	struct sec_sqe3 *bd3 = resp;

	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd3->done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le64_to_cpu(bd3->tag);
	status->err_type = bd3->error_type;

	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
}
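/*
 * Example of decoding a completed BD's done_flag word with the masks
 * defined above (the value is hypothetical, for illustration only):
 *
 *   done_flag = 0x0101
 *     done = 0x0101 & SEC_DONE_MASK (0x0001)           = 1  -> BD finished
 *     icv  = (0x0101 & SEC_ICV_MASK (0x000E)) >> 1     = 0  -> no ICV error
 *     flag = (0x0101 & SEC_FLAG_MASK (0x0780)) >> 7    = 2  -> SEC_SQE_CFLAG
 *
 * A skcipher completion is expected to carry flag == SEC_SQE_CFLAG, and an
 * AEAD completion flag == SEC_SQE_AEAD_FLAG with icv != SEC_ICV_ERR, which
 * is what sec_cb_status_check() below verifies.
 */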
static int sec_cb_status_check(struct sec_req *req,
			       struct bd_status *status)
{
	struct sec_ctx *ctx = req->ctx;

	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
				    req->err_type, status->done);
		return -EIO;
	}

	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
					    status->flag);
			return -EIO;
		}
	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
			     status->icv == SEC_ICV_ERR)) {
			dev_err_ratelimited(ctx->dev,
					    "flag[%u], icv[%u]\n",
					    status->flag, status->icv);
			return -EBADMSG;
		}
	}

	return 0;
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	u8 type_supported = qp_ctx->ctx->type_supported;
	struct bd_status status;
	struct sec_ctx *ctx;
	struct sec_req *req;
	int err;
	u8 type;

	if (type_supported == SEC_BD_TYPE2) {
		type = pre_parse_finished_bd(&status, resp);
		req = qp_ctx->req_list[status.tag];
	} else {
		type = pre_parse_finished_bd3(&status, resp);
		req = (void *)(uintptr_t)status.tag;
	}

	if (unlikely(type != type_supported)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%u]\n", type);
		return;
	}

	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}

	req->err_type = status.err_type;
	ctx = req->ctx;
	err = sec_cb_status_check(req, &status);
	if (err)
		atomic64_inc(&dfx->done_flag_cnt);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	spin_lock_bh(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		spin_unlock_bh(&qp_ctx->req_lock);
		return -EBUSY;
	}
	spin_unlock_bh(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}
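/*
 * Note on the return-code contract of sec_bd_send() above (a summary of the
 * code, not new behaviour):
 *
 *   -EBUSY       queue usage is at or above ctx->fake_req_limit. Either the
 *                BD was rejected before sending (no MAY_BACKLOG at the first
 *                check), or it was sent and the request parked on
 *                qp_ctx->backlog, to be acknowledged with an -EINPROGRESS
 *                completion from the callback path once the queue drains.
 *   -ENOBUFS     hisi_qp_send() itself found the hardware queue full.
 *   -EINPROGRESS the BD was enqueued; completion arrives via sec_req_cb().
 *
 * sec_process() accepts -EINPROGRESS, and -EBUSY only when the request set
 * CRYPTO_TFM_REQ_MAY_BACKLOG; any other result is unwound as a send error.
 */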
/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
					 &res->a_ivin_dma, GFP_KERNEL);
	if (!res->a_ivin)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->a_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
				  res->a_ivin, res->a_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < q_depth; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pre-mapped pbuf is used for small packets
 * (< 512 bytes) when IOMMU translation is in use.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	u16 q_depth = res->depth;
	int size = SEC_PBUF_PAGE_NUM(q_depth);
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * Each SEC_PBUF_PKG holds the data pbuf, the IV and the
	 * out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
	 * Every page holds SEC_PBUF_NUM packages, and the qp_ctx needs one
	 * package per queue-depth entry, so SEC_PBUF_PAGE_NUM full pages
	 * plus a partial page make up SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= size; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == q_depth)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_aiv_resource(dev, res);
		if (ret)
			goto alloc_aiv_fail;

		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_mac_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_mac_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_aiv_resource(dev, res);
alloc_aiv_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
				     struct sec_qp_ctx *qp_ctx)
{
	u16 q_depth = qp_ctx->qp->sq_depth;
	struct device *dev = ctx->dev;
	int ret = -ENOMEM;

	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
	if (!qp_ctx->req_list)
		return ret;

	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
	if (!qp_ctx->res)
		goto err_free_req_list;
	qp_ctx->res->depth = q_depth;

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_free_res;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	return 0;

err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_free_res:
	kfree(qp_ctx->res);
err_free_req_list:
	kfree(qp_ctx->req_list);
	return ret;
}

static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_alg_resource_free(ctx, qp_ctx);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
	kfree(qp_ctx->res);
	kfree(qp_ctx->req_list);
}

static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	qp->req_cb = sec_req_cb;

	spin_lock_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx);
	if (ret)
		goto err_destroy_idr;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_resource_free;

	return 0;

err_resource_free:
	sec_free_qp_ctx_resource(ctx, qp_ctx);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	hisi_qm_stop_qp(qp_ctx->qp);
	sec_free_qp_ctx_resource(ctx, qp_ctx);
	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of the queue depth is taken as the fake-request limit of the queue.
*/ 605 ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1; 606 ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), 607 GFP_KERNEL); 608 if (!ctx->qp_ctx) { 609 ret = -ENOMEM; 610 goto err_destroy_qps; 611 } 612 613 for (i = 0; i < sec->ctx_q_num; i++) { 614 ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0); 615 if (ret) 616 goto err_sec_release_qp_ctx; 617 } 618 619 return 0; 620 621 err_sec_release_qp_ctx: 622 for (i = i - 1; i >= 0; i--) 623 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); 624 kfree(ctx->qp_ctx); 625 err_destroy_qps: 626 sec_destroy_qps(ctx->qps, sec->ctx_q_num); 627 return ret; 628 } 629 630 static void sec_ctx_base_uninit(struct sec_ctx *ctx) 631 { 632 int i; 633 634 for (i = 0; i < ctx->sec->ctx_q_num; i++) 635 sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); 636 637 sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num); 638 kfree(ctx->qp_ctx); 639 } 640 641 static int sec_cipher_init(struct sec_ctx *ctx) 642 { 643 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 644 645 c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, 646 &c_ctx->c_key_dma, GFP_KERNEL); 647 if (!c_ctx->c_key) 648 return -ENOMEM; 649 650 return 0; 651 } 652 653 static void sec_cipher_uninit(struct sec_ctx *ctx) 654 { 655 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 656 657 memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); 658 dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, 659 c_ctx->c_key, c_ctx->c_key_dma); 660 } 661 662 static int sec_auth_init(struct sec_ctx *ctx) 663 { 664 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; 665 666 a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, 667 &a_ctx->a_key_dma, GFP_KERNEL); 668 if (!a_ctx->a_key) 669 return -ENOMEM; 670 671 return 0; 672 } 673 674 static void sec_auth_uninit(struct sec_ctx *ctx) 675 { 676 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; 677 678 memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE); 679 dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, 680 a_ctx->a_key, a_ctx->a_key_dma); 681 } 682 683 static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm) 684 { 685 const char *alg = crypto_tfm_alg_name(&tfm->base); 686 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 687 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 688 689 c_ctx->fallback = false; 690 691 /* Currently, only XTS mode need fallback tfm when using 192bit key */ 692 if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ))) 693 return 0; 694 695 c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, 696 CRYPTO_ALG_NEED_FALLBACK); 697 if (IS_ERR(c_ctx->fbtfm)) { 698 pr_err("failed to alloc xts mode fallback tfm!\n"); 699 return PTR_ERR(c_ctx->fbtfm); 700 } 701 702 return 0; 703 } 704 705 static int sec_skcipher_init(struct crypto_skcipher *tfm) 706 { 707 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 708 int ret; 709 710 ctx->alg_type = SEC_SKCIPHER; 711 crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); 712 ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); 713 if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { 714 pr_err("get error skcipher iv size!\n"); 715 return -EINVAL; 716 } 717 718 ret = sec_ctx_base_init(ctx); 719 if (ret) 720 return ret; 721 722 ret = sec_cipher_init(ctx); 723 if (ret) 724 goto err_cipher_init; 725 726 ret = sec_skcipher_fbtfm_init(tfm); 727 if (ret) 728 goto err_fbtfm_init; 729 730 return 0; 731 732 err_fbtfm_init: 733 sec_cipher_uninit(ctx); 734 err_cipher_init: 735 sec_ctx_base_uninit(ctx); 736 return ret; 737 } 738 739 static void sec_skcipher_uninit(struct crypto_skcipher *tfm) 740 { 741 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 742 743 if (ctx->c_ctx.fbtfm) 744 
crypto_free_sync_skcipher(ctx->c_ctx.fbtfm); 745 746 sec_cipher_uninit(ctx); 747 sec_ctx_base_uninit(ctx); 748 } 749 750 static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, 751 const u32 keylen, 752 const enum sec_cmode c_mode) 753 { 754 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 755 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 756 int ret; 757 758 ret = verify_skcipher_des3_key(tfm, key); 759 if (ret) 760 return ret; 761 762 switch (keylen) { 763 case SEC_DES3_2KEY_SIZE: 764 c_ctx->c_key_len = SEC_CKEY_3DES_2KEY; 765 break; 766 case SEC_DES3_3KEY_SIZE: 767 c_ctx->c_key_len = SEC_CKEY_3DES_3KEY; 768 break; 769 default: 770 return -EINVAL; 771 } 772 773 return 0; 774 } 775 776 static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx, 777 const u32 keylen, 778 const enum sec_cmode c_mode) 779 { 780 if (c_mode == SEC_CMODE_XTS) { 781 switch (keylen) { 782 case SEC_XTS_MIN_KEY_SIZE: 783 c_ctx->c_key_len = SEC_CKEY_128BIT; 784 break; 785 case SEC_XTS_MID_KEY_SIZE: 786 c_ctx->fallback = true; 787 break; 788 case SEC_XTS_MAX_KEY_SIZE: 789 c_ctx->c_key_len = SEC_CKEY_256BIT; 790 break; 791 default: 792 pr_err("hisi_sec2: xts mode key error!\n"); 793 return -EINVAL; 794 } 795 } else { 796 if (c_ctx->c_alg == SEC_CALG_SM4 && 797 keylen != AES_KEYSIZE_128) { 798 pr_err("hisi_sec2: sm4 key error!\n"); 799 return -EINVAL; 800 } else { 801 switch (keylen) { 802 case AES_KEYSIZE_128: 803 c_ctx->c_key_len = SEC_CKEY_128BIT; 804 break; 805 case AES_KEYSIZE_192: 806 c_ctx->c_key_len = SEC_CKEY_192BIT; 807 break; 808 case AES_KEYSIZE_256: 809 c_ctx->c_key_len = SEC_CKEY_256BIT; 810 break; 811 default: 812 pr_err("hisi_sec2: aes key error!\n"); 813 return -EINVAL; 814 } 815 } 816 } 817 818 return 0; 819 } 820 821 static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, 822 const u32 keylen, const enum sec_calg c_alg, 823 const enum sec_cmode c_mode) 824 { 825 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 826 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 827 struct device *dev = ctx->dev; 828 int ret; 829 830 if (c_mode == SEC_CMODE_XTS) { 831 ret = xts_verify_key(tfm, key, keylen); 832 if (ret) { 833 dev_err(dev, "xts mode key err!\n"); 834 return ret; 835 } 836 } 837 838 c_ctx->c_alg = c_alg; 839 c_ctx->c_mode = c_mode; 840 841 switch (c_alg) { 842 case SEC_CALG_3DES: 843 ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode); 844 break; 845 case SEC_CALG_AES: 846 case SEC_CALG_SM4: 847 ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); 848 break; 849 default: 850 return -EINVAL; 851 } 852 853 if (ret) { 854 dev_err(dev, "set sec key err!\n"); 855 return ret; 856 } 857 858 memcpy(c_ctx->c_key, key, keylen); 859 if (c_ctx->fallback && c_ctx->fbtfm) { 860 ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); 861 if (ret) { 862 dev_err(dev, "failed to set fallback skcipher key!\n"); 863 return ret; 864 } 865 } 866 return 0; 867 } 868 869 #define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \ 870 static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\ 871 u32 keylen) \ 872 { \ 873 return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \ 874 } 875 876 GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB) 877 GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC) 878 GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS) 879 GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB) 880 GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB) 881 GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR) 882 
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB) 883 GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC) 884 GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS) 885 GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC) 886 GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB) 887 GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB) 888 GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR) 889 890 static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, 891 struct scatterlist *src) 892 { 893 struct sec_aead_req *a_req = &req->aead_req; 894 struct aead_request *aead_req = a_req->aead_req; 895 struct sec_cipher_req *c_req = &req->c_req; 896 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 897 struct device *dev = ctx->dev; 898 int copy_size, pbuf_length; 899 int req_id = req->req_id; 900 struct crypto_aead *tfm; 901 size_t authsize; 902 u8 *mac_offset; 903 904 if (ctx->alg_type == SEC_AEAD) 905 copy_size = aead_req->cryptlen + aead_req->assoclen; 906 else 907 copy_size = c_req->c_len; 908 909 pbuf_length = sg_copy_to_buffer(src, sg_nents(src), 910 qp_ctx->res[req_id].pbuf, copy_size); 911 if (unlikely(pbuf_length != copy_size)) { 912 dev_err(dev, "copy src data to pbuf error!\n"); 913 return -EINVAL; 914 } 915 if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { 916 tfm = crypto_aead_reqtfm(aead_req); 917 authsize = crypto_aead_authsize(tfm); 918 mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize; 919 memcpy(a_req->out_mac, mac_offset, authsize); 920 } 921 922 req->in_dma = qp_ctx->res[req_id].pbuf_dma; 923 c_req->c_out_dma = req->in_dma; 924 925 return 0; 926 } 927 928 static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, 929 struct scatterlist *dst) 930 { 931 struct aead_request *aead_req = req->aead_req.aead_req; 932 struct sec_cipher_req *c_req = &req->c_req; 933 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 934 int copy_size, pbuf_length; 935 int req_id = req->req_id; 936 937 if (ctx->alg_type == SEC_AEAD) 938 copy_size = c_req->c_len + aead_req->assoclen; 939 else 940 copy_size = c_req->c_len; 941 942 pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), 943 qp_ctx->res[req_id].pbuf, copy_size); 944 if (unlikely(pbuf_length != copy_size)) 945 dev_err(ctx->dev, "copy pbuf data to dst error!\n"); 946 } 947 948 static int sec_aead_mac_init(struct sec_aead_req *req) 949 { 950 struct aead_request *aead_req = req->aead_req; 951 struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); 952 size_t authsize = crypto_aead_authsize(tfm); 953 u8 *mac_out = req->out_mac; 954 struct scatterlist *sgl = aead_req->src; 955 size_t copy_size; 956 off_t skip_size; 957 958 /* Copy input mac */ 959 skip_size = aead_req->assoclen + aead_req->cryptlen - authsize; 960 copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, 961 authsize, skip_size); 962 if (unlikely(copy_size != authsize)) 963 return -EINVAL; 964 965 return 0; 966 } 967 968 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, 969 struct scatterlist *src, struct scatterlist *dst) 970 { 971 struct sec_cipher_req *c_req = &req->c_req; 972 struct sec_aead_req *a_req = &req->aead_req; 973 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 974 struct sec_alg_res *res = &qp_ctx->res[req->req_id]; 975 struct device *dev = ctx->dev; 976 int ret; 977 978 if (req->use_pbuf) { 979 c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET; 980 c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET; 981 if (ctx->alg_type == SEC_AEAD) { 982 a_req->a_ivin = res->a_ivin; 983 
a_req->a_ivin_dma = res->a_ivin_dma; 984 a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET; 985 a_req->out_mac_dma = res->pbuf_dma + 986 SEC_PBUF_MAC_OFFSET; 987 } 988 ret = sec_cipher_pbuf_map(ctx, req, src); 989 990 return ret; 991 } 992 c_req->c_ivin = res->c_ivin; 993 c_req->c_ivin_dma = res->c_ivin_dma; 994 if (ctx->alg_type == SEC_AEAD) { 995 a_req->a_ivin = res->a_ivin; 996 a_req->a_ivin_dma = res->a_ivin_dma; 997 a_req->out_mac = res->out_mac; 998 a_req->out_mac_dma = res->out_mac_dma; 999 } 1000 1001 req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, 1002 qp_ctx->c_in_pool, 1003 req->req_id, 1004 &req->in_dma); 1005 if (IS_ERR(req->in)) { 1006 dev_err(dev, "fail to dma map input sgl buffers!\n"); 1007 return PTR_ERR(req->in); 1008 } 1009 1010 if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { 1011 ret = sec_aead_mac_init(a_req); 1012 if (unlikely(ret)) { 1013 dev_err(dev, "fail to init mac data for ICV!\n"); 1014 return ret; 1015 } 1016 } 1017 1018 if (dst == src) { 1019 c_req->c_out = req->in; 1020 c_req->c_out_dma = req->in_dma; 1021 } else { 1022 c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst, 1023 qp_ctx->c_out_pool, 1024 req->req_id, 1025 &c_req->c_out_dma); 1026 1027 if (IS_ERR(c_req->c_out)) { 1028 dev_err(dev, "fail to dma map output sgl buffers!\n"); 1029 hisi_acc_sg_buf_unmap(dev, src, req->in); 1030 return PTR_ERR(c_req->c_out); 1031 } 1032 } 1033 1034 return 0; 1035 } 1036 1037 static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, 1038 struct scatterlist *src, struct scatterlist *dst) 1039 { 1040 struct sec_cipher_req *c_req = &req->c_req; 1041 struct device *dev = ctx->dev; 1042 1043 if (req->use_pbuf) { 1044 sec_cipher_pbuf_unmap(ctx, req, dst); 1045 } else { 1046 if (dst != src) 1047 hisi_acc_sg_buf_unmap(dev, src, req->in); 1048 1049 hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out); 1050 } 1051 } 1052 1053 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req) 1054 { 1055 struct skcipher_request *sq = req->c_req.sk_req; 1056 1057 return sec_cipher_map(ctx, req, sq->src, sq->dst); 1058 } 1059 1060 static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) 1061 { 1062 struct skcipher_request *sq = req->c_req.sk_req; 1063 1064 sec_cipher_unmap(ctx, req, sq->src, sq->dst); 1065 } 1066 1067 static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx, 1068 struct crypto_authenc_keys *keys) 1069 { 1070 switch (keys->enckeylen) { 1071 case AES_KEYSIZE_128: 1072 c_ctx->c_key_len = SEC_CKEY_128BIT; 1073 break; 1074 case AES_KEYSIZE_192: 1075 c_ctx->c_key_len = SEC_CKEY_192BIT; 1076 break; 1077 case AES_KEYSIZE_256: 1078 c_ctx->c_key_len = SEC_CKEY_256BIT; 1079 break; 1080 default: 1081 pr_err("hisi_sec2: aead aes key error!\n"); 1082 return -EINVAL; 1083 } 1084 memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen); 1085 1086 return 0; 1087 } 1088 1089 static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, 1090 struct crypto_authenc_keys *keys) 1091 { 1092 struct crypto_shash *hash_tfm = ctx->hash_tfm; 1093 int blocksize, digestsize, ret; 1094 1095 if (!keys->authkeylen) { 1096 pr_err("hisi_sec2: aead auth key error!\n"); 1097 return -EINVAL; 1098 } 1099 1100 blocksize = crypto_shash_blocksize(hash_tfm); 1101 digestsize = crypto_shash_digestsize(hash_tfm); 1102 if (keys->authkeylen > blocksize) { 1103 ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey, 1104 keys->authkeylen, ctx->a_key); 1105 if (ret) { 1106 pr_err("hisi_sec2: aead auth digest error!\n"); 1107 return -EINVAL; 1108 } 1109 ctx->a_key_len = 
digestsize; 1110 } else { 1111 memcpy(ctx->a_key, keys->authkey, keys->authkeylen); 1112 ctx->a_key_len = keys->authkeylen; 1113 } 1114 1115 return 0; 1116 } 1117 1118 static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize) 1119 { 1120 struct crypto_tfm *tfm = crypto_aead_tfm(aead); 1121 struct sec_ctx *ctx = crypto_tfm_ctx(tfm); 1122 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; 1123 1124 if (unlikely(a_ctx->fallback_aead_tfm)) 1125 return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize); 1126 1127 return 0; 1128 } 1129 1130 static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx, 1131 struct crypto_aead *tfm, const u8 *key, 1132 unsigned int keylen) 1133 { 1134 crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK); 1135 crypto_aead_set_flags(a_ctx->fallback_aead_tfm, 1136 crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); 1137 return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen); 1138 } 1139 1140 static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, 1141 const u32 keylen, const enum sec_hash_alg a_alg, 1142 const enum sec_calg c_alg, 1143 const enum sec_mac_len mac_len, 1144 const enum sec_cmode c_mode) 1145 { 1146 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1147 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 1148 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; 1149 struct device *dev = ctx->dev; 1150 struct crypto_authenc_keys keys; 1151 int ret; 1152 1153 ctx->a_ctx.a_alg = a_alg; 1154 ctx->c_ctx.c_alg = c_alg; 1155 ctx->a_ctx.mac_len = mac_len; 1156 c_ctx->c_mode = c_mode; 1157 1158 if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) { 1159 ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); 1160 if (ret) { 1161 dev_err(dev, "set sec aes ccm cipher key err!\n"); 1162 return ret; 1163 } 1164 memcpy(c_ctx->c_key, key, keylen); 1165 1166 if (unlikely(a_ctx->fallback_aead_tfm)) { 1167 ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); 1168 if (ret) 1169 return ret; 1170 } 1171 1172 return 0; 1173 } 1174 1175 if (crypto_authenc_extractkeys(&keys, key, keylen)) 1176 goto bad_key; 1177 1178 ret = sec_aead_aes_set_key(c_ctx, &keys); 1179 if (ret) { 1180 dev_err(dev, "set sec cipher key err!\n"); 1181 goto bad_key; 1182 } 1183 1184 ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); 1185 if (ret) { 1186 dev_err(dev, "set sec auth key err!\n"); 1187 goto bad_key; 1188 } 1189 1190 if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) || 1191 (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) { 1192 dev_err(dev, "MAC or AUTH key length error!\n"); 1193 goto bad_key; 1194 } 1195 1196 return 0; 1197 1198 bad_key: 1199 memzero_explicit(&keys, sizeof(struct crypto_authenc_keys)); 1200 return -EINVAL; 1201 } 1202 1203 1204 #define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \ 1205 static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \ 1206 u32 keylen) \ 1207 { \ 1208 return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\ 1209 } 1210 1211 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, 1212 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC) 1213 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, 1214 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC) 1215 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, 1216 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC) 1217 GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, 1218 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM) 1219 GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, 1220 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM) 1221 
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, 1222 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM) 1223 GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, 1224 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM) 1225 1226 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req) 1227 { 1228 struct aead_request *aq = req->aead_req.aead_req; 1229 1230 return sec_cipher_map(ctx, req, aq->src, aq->dst); 1231 } 1232 1233 static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) 1234 { 1235 struct aead_request *aq = req->aead_req.aead_req; 1236 1237 sec_cipher_unmap(ctx, req, aq->src, aq->dst); 1238 } 1239 1240 static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req) 1241 { 1242 int ret; 1243 1244 ret = ctx->req_op->buf_map(ctx, req); 1245 if (unlikely(ret)) 1246 return ret; 1247 1248 ctx->req_op->do_transfer(ctx, req); 1249 1250 ret = ctx->req_op->bd_fill(ctx, req); 1251 if (unlikely(ret)) 1252 goto unmap_req_buf; 1253 1254 return ret; 1255 1256 unmap_req_buf: 1257 ctx->req_op->buf_unmap(ctx, req); 1258 return ret; 1259 } 1260 1261 static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req) 1262 { 1263 ctx->req_op->buf_unmap(ctx, req); 1264 } 1265 1266 static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req) 1267 { 1268 struct skcipher_request *sk_req = req->c_req.sk_req; 1269 struct sec_cipher_req *c_req = &req->c_req; 1270 1271 memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize); 1272 } 1273 1274 static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) 1275 { 1276 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 1277 struct sec_cipher_req *c_req = &req->c_req; 1278 struct sec_sqe *sec_sqe = &req->sec_sqe; 1279 u8 scene, sa_type, da_type; 1280 u8 bd_type, cipher; 1281 u8 de = 0; 1282 1283 memset(sec_sqe, 0, sizeof(struct sec_sqe)); 1284 1285 sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); 1286 sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); 1287 sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma); 1288 sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); 1289 1290 sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) << 1291 SEC_CMODE_OFFSET); 1292 sec_sqe->type2.c_alg = c_ctx->c_alg; 1293 sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) << 1294 SEC_CKEY_OFFSET); 1295 1296 bd_type = SEC_BD_TYPE2; 1297 if (c_req->encrypt) 1298 cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET; 1299 else 1300 cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET; 1301 sec_sqe->type_cipher_auth = bd_type | cipher; 1302 1303 /* Set destination and source address type */ 1304 if (req->use_pbuf) { 1305 sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET; 1306 da_type = SEC_PBUF << SEC_DST_SGL_OFFSET; 1307 } else { 1308 sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET; 1309 da_type = SEC_SGL << SEC_DST_SGL_OFFSET; 1310 } 1311 1312 sec_sqe->sdm_addr_type |= da_type; 1313 scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET; 1314 if (req->in_dma != c_req->c_out_dma) 1315 de = 0x1 << SEC_DE_OFFSET; 1316 1317 sec_sqe->sds_sa_type = (de | scene | sa_type); 1318 1319 sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); 1320 sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id); 1321 1322 return 0; 1323 } 1324 1325 static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) 1326 { 1327 struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; 1328 struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; 1329 struct sec_cipher_req *c_req = &req->c_req; 1330 u32 bd_param = 0; 1331 u16 cipher; 1332 1333 memset(sec_sqe3, 0, sizeof(struct sec_sqe3)); 1334 
1335 sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma); 1336 sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); 1337 sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma); 1338 sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma); 1339 1340 sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) | 1341 c_ctx->c_mode; 1342 sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) << 1343 SEC_CKEY_OFFSET_V3); 1344 1345 if (c_req->encrypt) 1346 cipher = SEC_CIPHER_ENC; 1347 else 1348 cipher = SEC_CIPHER_DEC; 1349 sec_sqe3->c_icv_key |= cpu_to_le16(cipher); 1350 1351 /* Set the CTR counter mode is 128bit rollover */ 1352 sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER << 1353 SEC_CTR_CNT_OFFSET); 1354 1355 if (req->use_pbuf) { 1356 bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3; 1357 bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3; 1358 } else { 1359 bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3; 1360 bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3; 1361 } 1362 1363 bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3; 1364 if (req->in_dma != c_req->c_out_dma) 1365 bd_param |= 0x1 << SEC_DE_OFFSET_V3; 1366 1367 bd_param |= SEC_BD_TYPE3; 1368 sec_sqe3->bd_param = cpu_to_le32(bd_param); 1369 1370 sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len); 1371 sec_sqe3->tag = cpu_to_le64(req); 1372 1373 return 0; 1374 } 1375 1376 /* increment counter (128-bit int) */ 1377 static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums) 1378 { 1379 do { 1380 --bits; 1381 nums += counter[bits]; 1382 counter[bits] = nums & BITS_MASK; 1383 nums >>= BYTE_BITS; 1384 } while (bits && nums); 1385 } 1386 1387 static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) 1388 { 1389 struct aead_request *aead_req = req->aead_req.aead_req; 1390 struct skcipher_request *sk_req = req->c_req.sk_req; 1391 u32 iv_size = req->ctx->c_ctx.ivsize; 1392 struct scatterlist *sgl; 1393 unsigned int cryptlen; 1394 size_t sz; 1395 u8 *iv; 1396 1397 if (req->c_req.encrypt) 1398 sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst; 1399 else 1400 sgl = alg_type == SEC_SKCIPHER ? 
sk_req->src : aead_req->src; 1401 1402 if (alg_type == SEC_SKCIPHER) { 1403 iv = sk_req->iv; 1404 cryptlen = sk_req->cryptlen; 1405 } else { 1406 iv = aead_req->iv; 1407 cryptlen = aead_req->cryptlen; 1408 } 1409 1410 if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) { 1411 sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, 1412 cryptlen - iv_size); 1413 if (unlikely(sz != iv_size)) 1414 dev_err(req->ctx->dev, "copy output iv error!\n"); 1415 } else { 1416 sz = cryptlen / iv_size; 1417 if (cryptlen % iv_size) 1418 sz += 1; 1419 ctr_iv_inc(iv, iv_size, sz); 1420 } 1421 } 1422 1423 static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, 1424 struct sec_qp_ctx *qp_ctx) 1425 { 1426 struct sec_req *backlog_req = NULL; 1427 1428 spin_lock_bh(&qp_ctx->req_lock); 1429 if (ctx->fake_req_limit >= 1430 atomic_read(&qp_ctx->qp->qp_status.used) && 1431 !list_empty(&qp_ctx->backlog)) { 1432 backlog_req = list_first_entry(&qp_ctx->backlog, 1433 typeof(*backlog_req), backlog_head); 1434 list_del(&backlog_req->backlog_head); 1435 } 1436 spin_unlock_bh(&qp_ctx->req_lock); 1437 1438 return backlog_req; 1439 } 1440 1441 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, 1442 int err) 1443 { 1444 struct skcipher_request *sk_req = req->c_req.sk_req; 1445 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 1446 struct skcipher_request *backlog_sk_req; 1447 struct sec_req *backlog_req; 1448 1449 sec_free_req_id(req); 1450 1451 /* IV output at encrypto of CBC/CTR mode */ 1452 if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || 1453 ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt) 1454 sec_update_iv(req, SEC_SKCIPHER); 1455 1456 while (1) { 1457 backlog_req = sec_back_req_clear(ctx, qp_ctx); 1458 if (!backlog_req) 1459 break; 1460 1461 backlog_sk_req = backlog_req->c_req.sk_req; 1462 backlog_sk_req->base.complete(&backlog_sk_req->base, 1463 -EINPROGRESS); 1464 atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); 1465 } 1466 1467 sk_req->base.complete(&sk_req->base, err); 1468 } 1469 1470 static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req) 1471 { 1472 struct aead_request *aead_req = req->aead_req.aead_req; 1473 struct sec_cipher_req *c_req = &req->c_req; 1474 struct sec_aead_req *a_req = &req->aead_req; 1475 size_t authsize = ctx->a_ctx.mac_len; 1476 u32 data_size = aead_req->cryptlen; 1477 u8 flage = 0; 1478 u8 cm, cl; 1479 1480 /* the specification has been checked in aead_iv_demension_check() */ 1481 cl = c_req->c_ivin[0] + 1; 1482 c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00; 1483 memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl); 1484 c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT; 1485 1486 /* the last 3bit is L' */ 1487 flage |= c_req->c_ivin[0] & IV_CL_MASK; 1488 1489 /* the M' is bit3~bit5, the Flags is bit6 */ 1490 cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM; 1491 flage |= cm << IV_CM_OFFSET; 1492 if (aead_req->assoclen) 1493 flage |= 0x01 << IV_FLAGS_OFFSET; 1494 1495 memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize); 1496 a_req->a_ivin[0] = flage; 1497 1498 /* 1499 * the last 32bit is counter's initial number, 1500 * but the nonce uses the first 16bit 1501 * the tail 16bit fill with the cipher length 1502 */ 1503 if (!c_req->encrypt) 1504 data_size = aead_req->cryptlen - authsize; 1505 1506 a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = 1507 data_size & IV_LAST_BYTE_MASK; 1508 data_size >>= IV_BYTE_OFFSET; 1509 a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] = 1510 data_size & IV_LAST_BYTE_MASK; 1511 } 1512 1513 static void 
sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req) 1514 { 1515 struct aead_request *aead_req = req->aead_req.aead_req; 1516 struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); 1517 size_t authsize = crypto_aead_authsize(tfm); 1518 struct sec_cipher_req *c_req = &req->c_req; 1519 struct sec_aead_req *a_req = &req->aead_req; 1520 1521 memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize); 1522 1523 if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) { 1524 /* 1525 * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter}, 1526 * the counter must set to 0x01 1527 */ 1528 ctx->a_ctx.mac_len = authsize; 1529 /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */ 1530 set_aead_auth_iv(ctx, req); 1531 } 1532 1533 /* GCM 12Byte Cipher_IV == Auth_IV */ 1534 if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) { 1535 ctx->a_ctx.mac_len = authsize; 1536 memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE); 1537 } 1538 } 1539 1540 static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir, 1541 struct sec_req *req, struct sec_sqe *sec_sqe) 1542 { 1543 struct sec_aead_req *a_req = &req->aead_req; 1544 struct aead_request *aq = a_req->aead_req; 1545 1546 /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ 1547 sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len); 1548 1549 /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ 1550 sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr; 1551 sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); 1552 sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET; 1553 1554 if (dir) 1555 sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; 1556 else 1557 sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; 1558 1559 sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen); 1560 sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0); 1561 sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); 1562 1563 sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); 1564 } 1565 1566 static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir, 1567 struct sec_req *req, struct sec_sqe3 *sqe3) 1568 { 1569 struct sec_aead_req *a_req = &req->aead_req; 1570 struct aead_request *aq = a_req->aead_req; 1571 1572 /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ 1573 sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3); 1574 1575 /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ 1576 sqe3->a_key_addr = sqe3->c_key_addr; 1577 sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); 1578 sqe3->auth_mac_key |= SEC_NO_AUTH; 1579 1580 if (dir) 1581 sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; 1582 else 1583 sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; 1584 1585 sqe3->a_len_key = cpu_to_le32(aq->assoclen); 1586 sqe3->auth_src_offset = cpu_to_le16(0x0); 1587 sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); 1588 sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); 1589 } 1590 1591 static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, 1592 struct sec_req *req, struct sec_sqe *sec_sqe) 1593 { 1594 struct sec_aead_req *a_req = &req->aead_req; 1595 struct sec_cipher_req *c_req = &req->c_req; 1596 struct aead_request *aq = a_req->aead_req; 1597 1598 sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); 1599 1600 sec_sqe->type2.mac_key_alg = 1601 cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE); 1602 1603 sec_sqe->type2.mac_key_alg |= 1604 cpu_to_le32((u32)((ctx->a_key_len) / 1605 SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); 1606 1607 sec_sqe->type2.mac_key_alg |= 1608 cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); 1609 1610 if (dir) { 1611 
sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET; 1612 sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; 1613 } else { 1614 sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET; 1615 sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; 1616 } 1617 sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen); 1618 1619 sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); 1620 1621 sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); 1622 } 1623 1624 static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req) 1625 { 1626 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; 1627 struct sec_sqe *sec_sqe = &req->sec_sqe; 1628 int ret; 1629 1630 ret = sec_skcipher_bd_fill(ctx, req); 1631 if (unlikely(ret)) { 1632 dev_err(ctx->dev, "skcipher bd fill is error!\n"); 1633 return ret; 1634 } 1635 1636 if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || 1637 ctx->c_ctx.c_mode == SEC_CMODE_GCM) 1638 sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe); 1639 else 1640 sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe); 1641 1642 return 0; 1643 } 1644 1645 static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, 1646 struct sec_req *req, struct sec_sqe3 *sqe3) 1647 { 1648 struct sec_aead_req *a_req = &req->aead_req; 1649 struct sec_cipher_req *c_req = &req->c_req; 1650 struct aead_request *aq = a_req->aead_req; 1651 1652 sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); 1653 1654 sqe3->auth_mac_key |= 1655 cpu_to_le32((u32)(ctx->mac_len / 1656 SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3); 1657 1658 sqe3->auth_mac_key |= 1659 cpu_to_le32((u32)(ctx->a_key_len / 1660 SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3); 1661 1662 sqe3->auth_mac_key |= 1663 cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); 1664 1665 if (dir) { 1666 sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1); 1667 sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; 1668 } else { 1669 sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2); 1670 sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; 1671 } 1672 sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen); 1673 1674 sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); 1675 1676 sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); 1677 } 1678 1679 static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) 1680 { 1681 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; 1682 struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; 1683 int ret; 1684 1685 ret = sec_skcipher_bd_fill_v3(ctx, req); 1686 if (unlikely(ret)) { 1687 dev_err(ctx->dev, "skcipher bd3 fill is error!\n"); 1688 return ret; 1689 } 1690 1691 if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || 1692 ctx->c_ctx.c_mode == SEC_CMODE_GCM) 1693 sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt, 1694 req, sec_sqe3); 1695 else 1696 sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt, 1697 req, sec_sqe3); 1698 1699 return 0; 1700 } 1701 1702 static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) 1703 { 1704 struct aead_request *a_req = req->aead_req.aead_req; 1705 struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); 1706 struct sec_aead_req *aead_req = &req->aead_req; 1707 struct sec_cipher_req *c_req = &req->c_req; 1708 size_t authsize = crypto_aead_authsize(tfm); 1709 struct sec_qp_ctx *qp_ctx = req->qp_ctx; 1710 struct aead_request *backlog_aead_req; 1711 struct sec_req *backlog_req; 1712 size_t sz; 1713 1714 if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) 1715 sec_update_iv(req, SEC_AEAD); 1716 1717 /* Copy output mac */ 1718 if (!err && c_req->encrypt) { 
1719 struct scatterlist *sgl = a_req->dst; 1720 1721 sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), 1722 aead_req->out_mac, 1723 authsize, a_req->cryptlen + 1724 a_req->assoclen); 1725 if (unlikely(sz != authsize)) { 1726 dev_err(c->dev, "copy out mac err!\n"); 1727 err = -EINVAL; 1728 } 1729 } 1730 1731 sec_free_req_id(req); 1732 1733 while (1) { 1734 backlog_req = sec_back_req_clear(c, qp_ctx); 1735 if (!backlog_req) 1736 break; 1737 1738 backlog_aead_req = backlog_req->aead_req.aead_req; 1739 backlog_aead_req->base.complete(&backlog_aead_req->base, 1740 -EINPROGRESS); 1741 atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); 1742 } 1743 1744 a_req->base.complete(&a_req->base, err); 1745 } 1746 1747 static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) 1748 { 1749 sec_free_req_id(req); 1750 sec_free_queue_id(ctx, req); 1751 } 1752 1753 static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) 1754 { 1755 struct sec_qp_ctx *qp_ctx; 1756 int queue_id; 1757 1758 /* To load balance */ 1759 queue_id = sec_alloc_queue_id(ctx, req); 1760 qp_ctx = &ctx->qp_ctx[queue_id]; 1761 1762 req->req_id = sec_alloc_req_id(req, qp_ctx); 1763 if (unlikely(req->req_id < 0)) { 1764 sec_free_queue_id(ctx, req); 1765 return req->req_id; 1766 } 1767 1768 return 0; 1769 } 1770 1771 static int sec_process(struct sec_ctx *ctx, struct sec_req *req) 1772 { 1773 struct sec_cipher_req *c_req = &req->c_req; 1774 int ret; 1775 1776 ret = sec_request_init(ctx, req); 1777 if (unlikely(ret)) 1778 return ret; 1779 1780 ret = sec_request_transfer(ctx, req); 1781 if (unlikely(ret)) 1782 goto err_uninit_req; 1783 1784 /* Output IV as decrypto */ 1785 if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || 1786 ctx->c_ctx.c_mode == SEC_CMODE_CTR)) 1787 sec_update_iv(req, ctx->alg_type); 1788 1789 ret = ctx->req_op->bd_send(ctx, req); 1790 if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || 1791 (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { 1792 dev_err_ratelimited(ctx->dev, "send sec request failed!\n"); 1793 goto err_send_req; 1794 } 1795 1796 return ret; 1797 1798 err_send_req: 1799 /* As failing, restore the IV from user */ 1800 if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) { 1801 if (ctx->alg_type == SEC_SKCIPHER) 1802 memcpy(req->c_req.sk_req->iv, c_req->c_ivin, 1803 ctx->c_ctx.ivsize); 1804 else 1805 memcpy(req->aead_req.aead_req->iv, c_req->c_ivin, 1806 ctx->c_ctx.ivsize); 1807 } 1808 1809 sec_request_untransfer(ctx, req); 1810 err_uninit_req: 1811 sec_request_uninit(ctx, req); 1812 return ret; 1813 } 1814 1815 static const struct sec_req_op sec_skcipher_req_ops = { 1816 .buf_map = sec_skcipher_sgl_map, 1817 .buf_unmap = sec_skcipher_sgl_unmap, 1818 .do_transfer = sec_skcipher_copy_iv, 1819 .bd_fill = sec_skcipher_bd_fill, 1820 .bd_send = sec_bd_send, 1821 .callback = sec_skcipher_callback, 1822 .process = sec_process, 1823 }; 1824 1825 static const struct sec_req_op sec_aead_req_ops = { 1826 .buf_map = sec_aead_sgl_map, 1827 .buf_unmap = sec_aead_sgl_unmap, 1828 .do_transfer = sec_aead_set_iv, 1829 .bd_fill = sec_aead_bd_fill, 1830 .bd_send = sec_bd_send, 1831 .callback = sec_aead_callback, 1832 .process = sec_process, 1833 }; 1834 1835 static const struct sec_req_op sec_skcipher_req_ops_v3 = { 1836 .buf_map = sec_skcipher_sgl_map, 1837 .buf_unmap = sec_skcipher_sgl_unmap, 1838 .do_transfer = sec_skcipher_copy_iv, 1839 .bd_fill = sec_skcipher_bd_fill_v3, 1840 .bd_send = sec_bd_send, 1841 .callback = sec_skcipher_callback, 1842 .process = 
sec_process, 1843 }; 1844 1845 static const struct sec_req_op sec_aead_req_ops_v3 = { 1846 .buf_map = sec_aead_sgl_map, 1847 .buf_unmap = sec_aead_sgl_unmap, 1848 .do_transfer = sec_aead_set_iv, 1849 .bd_fill = sec_aead_bd_fill_v3, 1850 .bd_send = sec_bd_send, 1851 .callback = sec_aead_callback, 1852 .process = sec_process, 1853 }; 1854 1855 static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm) 1856 { 1857 struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); 1858 int ret; 1859 1860 ret = sec_skcipher_init(tfm); 1861 if (ret) 1862 return ret; 1863 1864 if (ctx->sec->qm.ver < QM_HW_V3) { 1865 ctx->type_supported = SEC_BD_TYPE2; 1866 ctx->req_op = &sec_skcipher_req_ops; 1867 } else { 1868 ctx->type_supported = SEC_BD_TYPE3; 1869 ctx->req_op = &sec_skcipher_req_ops_v3; 1870 } 1871 1872 return ret; 1873 } 1874 1875 static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm) 1876 { 1877 sec_skcipher_uninit(tfm); 1878 } 1879 1880 static int sec_aead_init(struct crypto_aead *tfm) 1881 { 1882 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1883 int ret; 1884 1885 crypto_aead_set_reqsize(tfm, sizeof(struct sec_req)); 1886 ctx->alg_type = SEC_AEAD; 1887 ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); 1888 if (ctx->c_ctx.ivsize < SEC_AIV_SIZE || 1889 ctx->c_ctx.ivsize > SEC_IV_SIZE) { 1890 pr_err("get error aead iv size!\n"); 1891 return -EINVAL; 1892 } 1893 1894 ret = sec_ctx_base_init(ctx); 1895 if (ret) 1896 return ret; 1897 if (ctx->sec->qm.ver < QM_HW_V3) { 1898 ctx->type_supported = SEC_BD_TYPE2; 1899 ctx->req_op = &sec_aead_req_ops; 1900 } else { 1901 ctx->type_supported = SEC_BD_TYPE3; 1902 ctx->req_op = &sec_aead_req_ops_v3; 1903 } 1904 1905 ret = sec_auth_init(ctx); 1906 if (ret) 1907 goto err_auth_init; 1908 1909 ret = sec_cipher_init(ctx); 1910 if (ret) 1911 goto err_cipher_init; 1912 1913 return ret; 1914 1915 err_cipher_init: 1916 sec_auth_uninit(ctx); 1917 err_auth_init: 1918 sec_ctx_base_uninit(ctx); 1919 return ret; 1920 } 1921 1922 static void sec_aead_exit(struct crypto_aead *tfm) 1923 { 1924 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1925 1926 sec_cipher_uninit(ctx); 1927 sec_auth_uninit(ctx); 1928 sec_ctx_base_uninit(ctx); 1929 } 1930 1931 static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) 1932 { 1933 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1934 struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; 1935 int ret; 1936 1937 ret = sec_aead_init(tfm); 1938 if (ret) { 1939 pr_err("hisi_sec2: aead init error!\n"); 1940 return ret; 1941 } 1942 1943 auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); 1944 if (IS_ERR(auth_ctx->hash_tfm)) { 1945 dev_err(ctx->dev, "aead alloc shash error!\n"); 1946 sec_aead_exit(tfm); 1947 return PTR_ERR(auth_ctx->hash_tfm); 1948 } 1949 1950 return 0; 1951 } 1952 1953 static void sec_aead_ctx_exit(struct crypto_aead *tfm) 1954 { 1955 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1956 1957 crypto_free_shash(ctx->a_ctx.hash_tfm); 1958 sec_aead_exit(tfm); 1959 } 1960 1961 static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm) 1962 { 1963 struct aead_alg *alg = crypto_aead_alg(tfm); 1964 struct sec_ctx *ctx = crypto_aead_ctx(tfm); 1965 struct sec_auth_ctx *a_ctx = &ctx->a_ctx; 1966 const char *aead_name = alg->base.cra_name; 1967 int ret; 1968 1969 ret = sec_aead_init(tfm); 1970 if (ret) { 1971 dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n"); 1972 return ret; 1973 } 1974 1975 a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0, 1976 CRYPTO_ALG_NEED_FALLBACK | 1977 CRYPTO_ALG_ASYNC); 1978 if 
static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
		return ret;
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK |
						     CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}
	a_ctx->fallback = false;

	return 0;
}

static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
				       struct sec_req *sreq)
{
	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
	struct device *dev = ctx->dev;
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret = 0;

	switch (c_mode) {
	case SEC_CMODE_XTS:
		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
			dev_err(dev, "skcipher XTS mode input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_ECB:
	case SEC_CMODE_CBC:
		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher AES input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_CFB:
	case SEC_CMODE_OFB:
	case SEC_CMODE_CTR:
		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
			dev_err(dev, "skcipher HW version error!\n");
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst ||
		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		return sec_skcipher_cryptlen_check(ctx, sreq);
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}
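/*
 * When ctx->c_ctx.fallback is set (presumably by the setkey path for key
 * sizes or modes the hardware cannot handle; that code is outside this
 * excerpt), sec_skcipher_crypto() routes the request to the synchronous
 * software tfm handled below instead of building a hardware BD.
 */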
static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
				    struct skcipher_request *sreq, bool encrypt)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
	struct device *dev = ctx->dev;
	int ret;

	if (!c_ctx->fbtfm) {
		dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
		return -EINVAL;
	}

	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);

	/* The software fallback must run in synchronous mode */
	skcipher_request_set_callback(subreq, sreq->base.flags,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
				   sreq->cryptlen, sreq->iv);
	if (encrypt)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen) {
		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
			return -EINVAL;
		return 0;
	}

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	if (unlikely(ctx->c_ctx.fallback))
		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
			     CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
}

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
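/*
 * For reference, a single table entry such as
 *   SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
 *                    AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE)
 * expands (roughly) to a struct skcipher_alg initializer of the form
 *   .base.cra_name = "cbc(aes)", .base.cra_driver_name = "hisi_sec_cbc(aes)",
 *   .init = sec_skcipher_ctx_init, .exit = sec_skcipher_ctx_exit,
 *   .setkey = sec_setkey_aes_cbc, .encrypt = sec_skcipher_encrypt,
 *   .decrypt = sec_skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE,
 *   .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE.
 */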
static struct sec_skcipher sec_skciphers[] = {
	{
		.alg_msk = BIT(0),
		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(1),
		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(2),
		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(3),
		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(4),
		.alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(5),
		.alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(12),
		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(13),
		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(14),
		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(15),
		.alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(16),
		.alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(23),
		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(24),
		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
					DES3_EDE_BLOCK_SIZE),
	},
};

static int aead_iv_dimension_check(struct aead_request *aead_req)
{
	u8 cl;

	cl = aead_req->iv[0] + 1;
	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
		return -EINVAL;

	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
		return -EOVERFLOW;

	return 0;
}

static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 c_mode = ctx->c_ctx.c_mode;
	struct device *dev = ctx->dev;
	int ret;

	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
	    req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input spec error!\n");
		return -EINVAL;
	}

	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
	    (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
	    authsize & MAC_LEN_MASK)))) {
		dev_err(dev, "aead input mac length error!\n");
		return -EINVAL;
	}

	if (c_mode == SEC_CMODE_CCM) {
		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
			dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
			return -EINVAL;
		}
		ret = aead_iv_dimension_check(req);
		if (ret) {
			dev_err(dev, "aead input iv param error!\n");
			return ret;
		}
	}

	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;
	if (c_mode == SEC_CMODE_CBC) {
		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "aead crypto length error!\n");
			return -EINVAL;
		}
	}

	return 0;
}
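/*
 * sec_aead_param_check() below rejects requests the engine cannot process.
 * Note that on QM_HW_V2 a zero-length payload, or a decrypt whose length does
 * not exceed the tag size, sets a_ctx->fallback so that sec_aead_crypto()
 * retries the request on the software fallback tfm rather than failing it.
 */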
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->sec->qm.ver == QM_HW_V2) {
		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
		    req->cryptlen <= authsize))) {
			ctx->a_ctx.fallback = true;
			return -EINVAL;
		}
	}

	/* Only AES and SM4 are supported */
	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}

	if (unlikely(sec_aead_spec_check(ctx, sreq)))
		return -EINVAL;

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	return 0;
}

static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct aead_request *subreq;
	int ret;

	/* The Kunpeng920 AEAD engine does not support zero-length input */
	if (!a_ctx->fallback_aead_tfm) {
		dev_err(dev, "aead fallback tfm is NULL!\n");
		return -EINVAL;
	}

	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
	aead_request_set_callback(subreq, aead_req->base.flags,
				  aead_req->base.complete, aead_req->base.data);
	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
			       aead_req->cryptlen, aead_req->iv);
	aead_request_set_ad(subreq, aead_req->assoclen);

	if (encrypt)
		ret = crypto_aead_encrypt(subreq);
	else
		ret = crypto_aead_decrypt(subreq);
	aead_request_free(subreq);

	return ret;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret)) {
		if (ctx->a_ctx.fallback)
			return sec_aead_soft_crypto(ctx, a_req, encrypt);
		return -EINVAL;
	}

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
		     ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
			     CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.setauthsize = sec_aead_setauthsize,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}
static struct sec_aead sec_aeads[] = {
	{
		.alg_msk = BIT(6),
		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(7),
		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(17),
		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(18),
		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(43),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(44),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(45),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
	},
};

static void sec_unregister_skcipher(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_skciphers[i].alg_msk & alg_mask)
			crypto_unregister_skcipher(&sec_skciphers[i].alg);
}

static int sec_register_skcipher(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_skciphers);

	for (i = 0; i < count; i++) {
		if (!(sec_skciphers[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_skcipher(alg_mask, i);

	return ret;
}

static void sec_unregister_aead(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_aeads[i].alg_msk & alg_mask)
			crypto_unregister_aead(&sec_aeads[i].alg);
}

static int sec_register_aead(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_aeads);

	for (i = 0; i < count; i++) {
		if (!(sec_aeads[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_aead(&sec_aeads[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_aead(alg_mask, i);

	return ret;
}

int sec_register_to_crypto(struct hisi_qm *qm)
{
	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
	int ret;

	ret = sec_register_skcipher(alg_mask);
	if (ret)
		return ret;

	ret = sec_register_aead(alg_mask);
	if (ret)
		sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));

	return ret;
}

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);

	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
}
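/*
 * Registration is gated by the per-device algorithm bitmap: only table
 * entries whose alg_msk bit is set in the mask reported for the device are
 * registered with the crypto API. On a running system the instances that
 * were actually registered can be inspected from userspace, e.g.:
 *
 *   grep -A4 'hisi_sec_' /proc/crypto
 *
 * (the driver-name prefix follows the "hisi_sec_" cra_driver_name used in
 * the tables above).
 */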