// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit-field offset and mask macros */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET		5
#define SEC_AEAD_ALG_OFFSET	11
#define SEC_AUTH_OFFSET		6

#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CTX_DEV(ctx)	(&(ctx)->sec->qm.pdev->dev)
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

#define SEC_PBUF_SZ		512
#define SEC_PBUF_IV_OFFSET	SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET	(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
			SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
			SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
			SEC_PBUF_LEFT_SZ)

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1

static atomic_t sec_active_devs;

/*
 * Cyclically pick an encrypt or decrypt queue to balance the load
 * across the TFM's queues.
 */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;
	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}
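
/*
 * The request id allocated above doubles as the hardware tag: it is written
 * into the BD (sec_sqe->type2.tag in sec_skcipher_bd_fill()) and echoed back
 * in the response, where sec_req_cb() uses it to look the request up again
 * in qp_ctx->req_list[].
 */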

static int sec_aead_verify(struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->aead_req.out_mac;
	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
	struct scatterlist *sgl = aead_req->src;
	size_t sz;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
				aead_req->cryptlen + aead_req->assoclen -
				authsize);
	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
		dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n");
		return -EBADMSG;
	}

	return 0;
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_sqe *bd = resp;
	struct sec_ctx *ctx;
	struct sec_req *req;
	u16 done, flag;
	int err = 0;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (unlikely(type != SEC_BD_TYPE2)) {
		pr_err("err bd type [%d]\n", type);
		return;
	}

	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
	req->err_type = bd->type2.error_type;
	ctx = req->ctx;
	done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	flag = (le16_to_cpu(bd->type2.done_flag) &
		SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	if (unlikely(req->err_type || done != SEC_SQE_DONE ||
	    (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
	    (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
		dev_err(SEC_CTX_DEV(ctx),
			"err_type[%d],done[%d],flag[%d]\n",
			req->err_type, done, flag);
		err = -EIO;
	}

	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
		err = sec_aead_verify(req);

	atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	mutex_unlock(&qp_ctx->req_lock);
	atomic64_inc(&ctx->sec->debug.dfx.send_cnt);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (!ret) {
		if (req->fake_busy)
			ret = -EBUSY;
		else
			ret = -EINPROGRESS;
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}
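
/*
 * Each per-request MAC slot is 2 * SEC_MAX_MAC_LEN bytes: the first half
 * receives the MAC produced by the hardware, and sec_aead_verify() copies
 * the MAC found in the source scatterlist into the second half
 * (mac_out + SEC_MAX_MAC_LEN) so the two can be compared in place.
 */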

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pre-mapped pbuf is used for small packets
 * (< 512 bytes), which avoids per-request IOMMU mapping and unmapping.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				       &res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * Each SEC_PBUF_PKG holds the data pbuf, the IV and the out_mac:
	 * <SEC_PBUF|SEC_IV|SEC_MAC>
	 * Every page holds SEC_PBUF_NUM packages, and each sec_qp_ctx
	 * needs QM_Q_DEPTH of them, so SEC_PBUF_PAGE_NUM full pages plus
	 * SEC_PBUF_LEFT_SZ make up SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}
	return 0;
}
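
/*
 * For illustration, assuming 4 KiB pages and the values SEC_IV_SIZE == 24
 * and QM_Q_DEPTH == 1024 from sec.h/qm.h: SEC_PBUF_PKG is
 * 512 + 24 + 2 * 64 = 664 bytes, so SEC_PBUF_NUM = 4096 / 664 = 6 packages
 * fit in a page, SEC_PBUF_PAGE_NUM = 1024 / 6 = 170 full pages are needed,
 * and the remaining 1024 - 170 * 6 = 4 packages (SEC_PBUF_LEFT_SZ) spill
 * into the extra partial page covered by the "i <= SEC_PBUF_PAGE_NUM" loop.
 */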

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_alg_res *res = qp_ctx->res;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_fail;
		}
	}

	return 0;
alloc_fail:
	sec_free_civ_resource(dev, res);

	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	atomic_set(&qp_ctx->pending_reqs, 0);
	idr_init(&qp_ctx->req_idr);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);

	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of the queue depth is used as the fake-busy request limit. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx)
		return -ENOMEM;

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;
err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	kfree(ctx->qp_ctx);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}
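
/*
 * Cipher and auth keys live in DMA-coherent buffers so the hardware can
 * read them directly through the BD key address fields; both uninit paths
 * above zeroize the key material with memzero_explicit() before handing
 * the memory back.
 */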

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;
err_cipher_init:
	sec_ctx_base_uninit(ctx);

	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
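
/*
 * For example, GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
 * expands to a sec_setkey_aes_ecb() wrapper that simply calls
 * sec_skcipher_setkey(tfm, key, keylen, SEC_CALG_AES, SEC_CMODE_ECB);
 * the wrappers are wired into the sec_skciphers[] template table below.
 */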

static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			       struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = SEC_CTX_DEV(ctx);
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
					qp_ctx->res[req_id].pbuf,
					copy_size);

	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}

	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;

	if (!c_req->c_in_dma) {
		dev_err(dev, "fail to set pbuffer address!\n");
		return -ENOMEM;
	}

	c_req->c_out_dma = c_req->c_in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
				  struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = SEC_CTX_DEV(ctx);
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
					  qp_ctx->res[req_id].pbuf,
					  copy_size);

	if (unlikely(pbuf_length != copy_size))
		dev_err(dev, "copy pbuf data to dst error!\n");
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = SEC_CTX_DEV(ctx);
	int ret;

	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}
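
/*
 * Two data paths are set up above: small requests marked use_pbuf are
 * copied into the pre-mapped pbuf package (data, IV and MAC share one DMA
 * region), while larger requests are mapped through the hardware SGL
 * pools; for in-place operation (dst == src) the output simply reuses the
 * input mapping.
 */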

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = SEC_CTX_DEV(ctx);

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	SHASH_DESC_ON_STACK(shash, hash_tfm);
	int blocksize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_digest(shash, keys->authkey,
					  keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = blocksize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n");
		goto bad_key;
	}

	return 0;
bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));

	return -EINVAL;
}

#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
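
/*
 * The key blob passed to these setkey helpers follows the authenc()
 * convention: crypto_authenc_extractkeys() splits it into the HMAC key and
 * the AES key, and HMAC keys longer than the hash block size are first
 * digested down to the block size in sec_aead_auth_set_key(), as HMAC
 * requires.
 */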

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);

	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	if (req->use_pbuf)
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
	else
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	if (req->use_pbuf)
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	else
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}
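
/*
 * The bit fields packed above correspond to the SEC_*_OFFSET macros at the
 * top of this file: for example the cipher mode lands at bit 12 and the key
 * length at bit 9 of icvw_kmode, while sds_sa_type combines the DE bit
 * (distinct source/destination), the scene and the source address type.
 */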

static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
				cryptlen - iv_size);
	if (unlikely(sz != iv_size))
		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	atomic_dec(&qp_ctx->pending_reqs);
	sec_free_req_id(req);

	/* The output IV is only updated after CBC-mode encryption */
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	if (req->fake_busy)
		sk_req->base.complete(&sk_req->base, -EINPROGRESS);

	sk_req->base.complete(&sk_req->base, err);
}

static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}

static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n");
		return ret;
	}

	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}
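
/*
 * An AEAD BD is a cipher BD with the auth fields filled in on top: the
 * direction selects cipher-then-auth order for encryption (sds_sa_type
 * masked with SEC_CIPHER_AUTH) and auth-then-cipher for decryption (OR'd
 * with SEC_AUTH_CIPHER), which is consistent with the encrypt-then-MAC
 * construction that authenc() implements.
 */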

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	size_t sz;

	atomic_dec(&qp_ctx->pending_reqs);

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);

		if (unlikely(sz != authsize)) {
			dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	if (req->fake_busy)
		a_req->base.complete(&a_req->base, -EINPROGRESS);

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	atomic_dec(&qp_ctx->pending_reqs);
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* Pick a queue to balance the load */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
		req->fake_busy = true;
	else
		req->fake_busy = false;

	return 0;
}

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* For CBC decryption, save the output IV before the data is transformed */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
		dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the original IV from the user request */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);

	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_copy_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};
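
/*
 * These operation tables drive the common path: process() initializes the
 * request, transfers it (buf_map, do_transfer, bd_fill) and sends the BD;
 * on completion the hardware callback sec_req_cb() unmaps the buffers and
 * invokes the per-algorithm callback() to finish the crypto request.
 */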

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n");
		return -EINVAL;
	}

	ctx->req_op = &sec_aead_req_ops;
	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);

	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = SEC_CTX_DEV(ctx);
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}

	dev_err(dev, "skcipher algorithm error!\n");
	return -EINVAL;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};
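
/*
 * Each entry registers under the "hisi_sec_<name>" driver name with
 * priority 4001 (SEC_PRIORITY), so the crypto API prefers this hardware
 * offload over typically lower-priority software implementations of the
 * same algorithms.
 */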

static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	u8 c_alg = ctx->c_ctx.c_alg;
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);

	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
		     req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
	    SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	/* Support AES only */
	if (unlikely(c_alg != SEC_CALG_AES)) {
		dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
		return -EINVAL;
	}
	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;

	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
		return -EINVAL;
	}

	return 0;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
			 sec_aead_ctx_exit, blksize, ivsize, authsize)

static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};

int sec_register_to_crypto(void)
{
	int ret = 0;

	/* Register the algorithms only once, for the first active device */
	if (atomic_add_return(1, &sec_active_devs) == 1) {
		ret = crypto_register_skciphers(sec_skciphers,
						ARRAY_SIZE(sec_skciphers));
		if (ret)
			return ret;

		ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
		if (ret)
			goto reg_aead_fail;
	}

	return ret;

reg_aead_fail:
	crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers));

	return ret;
}

void sec_unregister_from_crypto(void)
{
	if (atomic_sub_return(1, &sec_active_devs) == 0) {
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));
		crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	}
}