// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define CPT_MAX_VF_NUM	64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE	4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE		8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE	16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET	8
#define CONTROL_WORD_LEN	8
#define KEY2_OFFSET		48
#define DMA_MODE_FLAG(dma_mode) \
	(((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE		12
#define SHA256_TRUNC_DIGEST_SIZE	16
#define SHA384_TRUNC_DIGEST_SIZE	24
#define SHA512_TRUNC_DIGEST_SIZE	32

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
	enum otx_cptpf_type pf_type;
	struct pci_dev *dev;
	int num_queues;
};

struct cpt_device_table {
	atomic_t count;
	struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};

static struct cpt_device_table se_devices = {
	.count = ATOMIC_INIT(0)
};

static struct cpt_device_table ae_devices = {
	.count = ATOMIC_INIT(0)
};
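
/*
 * Map the calling CPU to one of the registered SE (symmetric engine)
 * devices. Each VF exposes one CPT instruction queue, so requests are
 * spread across queues by CPU number; when there are fewer queues than
 * CPUs the mapping wraps around with a modulo.
 */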
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
	int count, ret = 0;

	count = atomic_read(&se_devices.count);
	if (count < 1)
		return -ENODEV;

	*cpu_num = get_cpu();

	if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
		/*
		 * On OcteonTX platform there is one CPT instruction queue
		 * bound to each VF. We get maximum performance if one CPT
		 * queue is available for each cpu otherwise CPT queues need
		 * to be shared between cpus.
		 */
		if (*cpu_num >= count)
			*cpu_num %= count;
		*pdev = se_devices.desc[*cpu_num].dev;
	} else {
		pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
		ret = -EINVAL;
	}
	put_cpu();

	return ret;
}

static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
	struct otx_cpt_req_ctx *rctx;
	struct aead_request *req;
	struct crypto_aead *tfm;

	req = container_of(cpt_req->areq, struct aead_request, base);
	tfm = crypto_aead_reqtfm(req);
	rctx = aead_request_ctx(req);
	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
		   rctx->fctx.hmac.s.hmac_recv,
		   crypto_aead_authsize(tfm)) != 0)
		return -EBADMSG;

	return 0;
}

static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct otx_cpt_req_info *cpt_req;
	struct pci_dev *pdev;

	if (!cpt_info)
		goto complete;

	cpt_req = cpt_info->req;
	if (!status) {
		/*
		 * When selected cipher is NULL we need to manually
		 * verify whether calculated hmac value matches
		 * received hmac value
		 */
		if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
		    !cpt_req->is_enc)
			status = validate_hmac_cipher_null(cpt_req);
	}
	pdev = cpt_info->pdev;
	do_request_cleanup(pdev, cpt_info);

complete:
	if (areq)
		areq->complete(areq, status);
}

static void output_iv_copyback(struct crypto_async_request *areq)
{
	struct otx_cpt_req_info *req_info;
	struct skcipher_request *sreq;
	struct crypto_skcipher *stfm;
	struct otx_cpt_req_ctx *rctx;
	struct otx_cpt_enc_ctx *ctx;
	u32 start, ivsize;

	sreq = container_of(areq, struct skcipher_request, base);
	stfm = crypto_skcipher_reqtfm(sreq);
	ctx = crypto_skcipher_ctx(stfm);
	if (ctx->cipher_type == OTX_CPT_AES_CBC ||
	    ctx->cipher_type == OTX_CPT_DES3_CBC) {
		rctx = skcipher_request_ctx(sreq);
		req_info = &rctx->cpt_req;
		ivsize = crypto_skcipher_ivsize(stfm);
		start = sreq->cryptlen - ivsize;

		if (req_info->is_enc) {
			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
						 ivsize, 0);
		} else {
			if (sreq->src != sreq->dst) {
				scatterwalk_map_and_copy(sreq->iv, sreq->src,
							 start, ivsize, 0);
			} else {
				memcpy(sreq->iv, req_info->iv_out, ivsize);
				kfree(req_info->iv_out);
			}
		}
	}
}

static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct pci_dev *pdev;

	if (areq) {
		if (!status)
			output_iv_copyback(areq);
		if (cpt_info) {
			pdev = cpt_info->pdev;
			do_request_cleanup(pdev, cpt_info);
		}
		areq->complete(areq, status);
	}
}

static inline void update_input_data(struct otx_cpt_req_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, inp_sg->length);
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		inp_sg = sg_next(inp_sg);
	}
}

static inline void update_output_data(struct otx_cpt_req_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 offset, u32 nbytes, u32 *argcnt)
{
	req_info->rlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, outp_sg->length - offset);
		u8 *ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		offset = 0;
		outp_sg = sg_next(outp_sg);
	}
}
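
/*
 * Build the FlexiCrypto (FC) context header for an skcipher request.
 * The gather list sent to the engine starts with the 8 byte control
 * word, followed by the FC context (cipher type, key and IV) and then
 * the input data. Minor opcode 2 selects encryption, 3 decryption.
 * For in-place CBC decryption the last ciphertext block is saved in
 * iv_out before the engine overwrites it, so output_iv_copyback() can
 * restore it as the chaining IV on completion.
 */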
static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int ivsize = crypto_skcipher_ivsize(stfm);
	u32 start = req->cryptlen - ivsize;
	u64 *ctrl_flags = NULL;
	gfp_t flags;

	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;

	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
	} else {
		req_info->req.opcode.s.minor = 3;
		if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
		     ctx->cipher_type == OTX_CPT_DES3_CBC) &&
		    req->src == req->dst) {
			req_info->iv_out = kmalloc(ivsize, flags);
			if (!req_info->iv_out)
				return -ENOMEM;

			scatterwalk_map_and_copy(req_info->iv_out, req->src,
						 start, ivsize, 0);
		}
	}
	/* Encryption data length */
	req_info->req.param1 = req->cryptlen;
	/* Authentication data length */
	req_info->req.param2 = 0;

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;

	if (ctx->cipher_type == OTX_CPT_AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

	ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = cpu_to_be64(*ctrl_flags);

	/*
	 * Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);

	++(*argcnt);

	return 0;
}

static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
				    u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;
	int ret;

	ret = create_ctx_hdr(req, enc, &argcnt);
	if (ret)
		return ret;

	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline void create_output_list(struct skcipher_request *req,
				      u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/*
	 * OUTPUT Buffer Processing
	 * AES encryption/decryption output would be
	 * received in the following format
	 *
	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
	 * [ 16 Bytes/     [   Request Enc/Dec/ DATA Len AES CBC ]
	 */
	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
	req_info->outcnt = argcnt;
}
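
/*
 * Common skcipher submit path: build the gather (input) and scatter
 * (output) lists for the request, pick a SE device based on the
 * current CPU and queue the instruction. The request completes
 * asynchronously through otx_cpt_skcipher_callback().
 */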
static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	status = create_input_list(req, enc, enc_iv_len);
	if (status)
		return status;
	create_output_list(req, enc_iv_len);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->callback = (void *)otx_cpt_skcipher_callback;
	req_info->areq = &req->base;
	req_info->req_type = OTX_CPT_ENC_DEC_REQ;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;
	req_info->ctrl.s.grp = 0;

	/*
	 * The request is sent asynchronously; once it completes the
	 * driver notifies the caller through the registered callback.
	 */
	status = otx_cpt_do_request(pdev, req_info, cpu_num);

	return status;
}

static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, true);
}

static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, false);
}

static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
					const u8 *key, u32 keylen)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	const u8 *key2 = key + (keylen / 2);
	const u8 *key1 = key;
	int ret;

	ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
	if (ret)
		return ret;
	ctx->key_len = keylen;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
	ctx->cipher_type = OTX_CPT_AES_XTS;
	switch (ctx->key_len) {
	case 2 * AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case 2 * AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}
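
/*
 * The mode-specific setkey handlers below only record the cipher type
 * and key material in the tfm context; the key is copied into the
 * per-request FC context by create_ctx_hdr().
 */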
static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}

static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}

static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
}

static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}

static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}

static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	/*
	 * Additional memory for skcipher_request is
	 * allocated since the cryptd daemon uses
	 * this memory for request_ctx information
	 */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx) +
					sizeof(struct skcipher_request));

	return 0;
}

static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->cipher_type = cipher_type;
	ctx->mac_type = mac_type;

	/*
	 * When selected cipher is NULL we use HMAC opcode instead of
	 * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
	 * for calculating ipad and opad
	 */
	if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
		switch (ctx->mac_type) {
		case OTX_CPT_SHA1:
			ctx->hashalg = crypto_alloc_shash("sha1", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA256:
			ctx->hashalg = crypto_alloc_shash("sha256", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA384:
			ctx->hashalg = crypto_alloc_shash("sha384", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA512:
			ctx->hashalg = crypto_alloc_shash("sha512", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;
		}
	}

	crypto_aead_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx));

	return 0;
}

static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}

static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}

static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}

static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}

static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}

static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}

static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}

static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}
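
/*
 * rfc4106(gcm(aes)) uses OTX_CPT_MAC_NULL: cpt_aead_init() then leaves
 * hashalg NULL because the GCM operation produces the authentication
 * tag itself, so no software shash or ipad/opad precomputation is
 * needed.
 */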
static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}

static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	kfree(ctx->ipad);
	kfree(ctx->opad);
	if (ctx->hashalg)
		crypto_free_shash(ctx->hashalg);
	kfree(ctx->sdesc);
}

/*
 * This is the Integrity Check Value validation (aka the authentication tag
 * length)
 */
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	switch (ctx->mac_type) {
	case OTX_CPT_SHA1:
		if (authsize != SHA1_DIGEST_SIZE &&
		    authsize != SHA1_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA1_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA256:
		if (authsize != SHA256_DIGEST_SIZE &&
		    authsize != SHA256_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA256_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA384:
		if (authsize != SHA384_DIGEST_SIZE &&
		    authsize != SHA384_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA384_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA512:
		if (authsize != SHA512_DIGEST_SIZE &&
		    authsize != SHA512_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA512_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_MAC_NULL:
		if (ctx->cipher_type == OTX_CPT_AES_GCM) {
			if (authsize != AES_GCM_ICV_SIZE)
				return -EINVAL;
		} else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	tfm->authsize = authsize;
	return 0;
}

static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
	struct otx_cpt_sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return NULL;

	sdesc->shash.tfm = alg;

	return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
	u32 *store = (u32 *) buf;
	int i = 0;

	for (i = 0 ; i < len/sizeof(u32); i++, store++)
		*store = cpu_to_be32(*store);
}

static inline void swap_data64(void *buf, u32 len)
{
	u64 *store = (u64 *) buf;
	int i = 0;

	for (i = 0 ; i < len/sizeof(u64); i++, store++)
		*store = cpu_to_be64(*store);
}

static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
	struct sha512_state *sha512;
	struct sha256_state *sha256;
	struct sha1_state *sha1;

	switch (mac_type) {
	case OTX_CPT_SHA1:
		sha1 = (struct sha1_state *) in_pad;
		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
		memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA256:
		sha256 = (struct sha256_state *) in_pad;
		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
		memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA384:
	case OTX_CPT_SHA512:
		sha512 = (struct sha512_state *) in_pad;
		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
		memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
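
/*
 * Precompute the HMAC inner and outer pads (RFC 2104): the key is
 * hashed down if longer than a block, zero padded and XORed with
 * 0x36/0x5c, then one block of each pad is run through the software
 * shash. The exported partial hash state is converted to big endian by
 * copy_pad() so the hardware can continue the HMAC from that state.
 */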
static int aead_hmac_init(struct crypto_aead *cipher)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	int state_size = crypto_shash_statesize(ctx->hashalg);
	int ds = crypto_shash_digestsize(ctx->hashalg);
	int bs = crypto_shash_blocksize(ctx->hashalg);
	int authkeylen = ctx->auth_key_len;
	u8 *ipad = NULL, *opad = NULL;
	int ret = 0, icount = 0;

	ctx->sdesc = alloc_sdesc(ctx->hashalg);
	if (!ctx->sdesc)
		return -ENOMEM;

	ctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ipad = kzalloc(state_size, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	opad = kzalloc(state_size, GFP_KERNEL);
	if (!opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	if (authkeylen > bs) {
		ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
					  authkeylen, ipad);
		if (ret)
			goto calc_fail;

		authkeylen = ds;
	} else {
		memcpy(ipad, ctx->key, authkeylen);
	}

	memset(ipad + authkeylen, 0, bs - authkeylen);
	memcpy(opad, ipad, bs);

	for (icount = 0; icount < bs; icount++) {
		ipad[icount] ^= 0x36;
		opad[icount] ^= 0x5c;
	}

	/*
	 * Partial Hash calculated from the software
	 * algorithm is retrieved for IPAD & OPAD
	 */

	/* IPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
	crypto_shash_export(&ctx->sdesc->shash, ipad);
	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
	if (ret)
		goto calc_fail;

	/* OPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
	crypto_shash_export(&ctx->sdesc->shash, opad);
	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
	if (ret)
		goto calc_fail;

	kfree(ipad);
	kfree(opad);

	return 0;

calc_fail:
	kfree(ctx->ipad);
	ctx->ipad = NULL;
	kfree(ctx->opad);
	ctx->opad = NULL;
	kfree(ipad);
	kfree(opad);
	kfree(ctx->sdesc);
	ctx->sdesc = NULL;

	return ret;
}

static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
					   const unsigned char *key,
					   unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	struct crypto_authenc_key_param *param;
	int enckeylen = 0, authkeylen = 0;
	struct rtattr *rta = (void *)key;
	int status = -EINVAL;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (keylen < enckeylen)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	authkeylen = keylen - enckeylen;
	memcpy(ctx->key, key, keylen);

	switch (enckeylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		/* Invalid key length */
		goto badkey;
	}

	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = authkeylen;

	status = aead_hmac_init(cipher);
	if (status)
		goto badkey;

	return 0;
badkey:
	return status;
}
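
/*
 * The cipher_null variants take the same authenc() key blob (an rtattr
 * carrying enckeylen followed by the keys), but since the cipher is
 * NULL the encryption key length must be zero and the remaining bytes
 * are all authentication key.
 */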
static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
					    const unsigned char *key,
					    unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	struct crypto_authenc_key_param *param;
	struct rtattr *rta = (void *)key;
	int enckeylen = 0;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (enckeylen != 0)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, key, keylen);
	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = keylen;
	return 0;
badkey:
	return -EINVAL;
}

static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
					const unsigned char *key,
					unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);

	/*
	 * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
	 * and salt (4 bytes)
	 */
	switch (keylen) {
	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		ctx->enc_key_len = AES_KEYSIZE_128;
		break;
	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		ctx->enc_key_len = AES_KEYSIZE_192;
		break;
	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		ctx->enc_key_len = AES_KEYSIZE_256;
		break;
	default:
		/* Invalid key and salt length */
		return -EINVAL;
	}

	/* Store encryption key and salt */
	memcpy(ctx->key, key, keylen);

	return 0;
}

static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
				      u32 *argcnt)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int mac_len = crypto_aead_authsize(tfm);
	int ds;

	rctx->ctrl_word.e.enc_data_offset = req->assoclen;

	switch (ctx->cipher_type) {
	case OTX_CPT_AES_CBC:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
		       ctx->enc_key_len);
		/* Copy IV to context */
		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

		ds = crypto_shash_digestsize(ctx->hashalg);
		if (ctx->mac_type == OTX_CPT_SHA384)
			ds = SHA512_DIGEST_SIZE;
		if (ctx->ipad)
			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
		if (ctx->opad)
			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
		break;

	case OTX_CPT_AES_GCM:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
		/* Copy salt to context */
		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
		       AES_GCM_SALT_SIZE);

		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
		break;

	default:
		/* Unknown cipher type */
		return -EINVAL;
	}
	rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.flags);

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
		req_info->req.param1 = req->cryptlen;
		req_info->req.param2 = req->cryptlen + req->assoclen;
	} else {
		req_info->req.opcode.s.minor = 3;
		req_info->req.param1 = req->cryptlen - mac_len;
		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
	}

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
	fctx->enc.enc_ctrl.e.mac_len = mac_len;
	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);

	/*
	 * Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
	++(*argcnt);

	return 0;
}

static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
				      u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	req_info->is_trunc_hmac = ctx->is_trunc_hmac;

	req_info->req.opcode.s.minor = 0;
	req_info->req.param1 = ctx->auth_key_len;
	req_info->req.param2 = ctx->mac_type << 8;

	/* Add authentication key */
	req_info->in[*argcnt].vptr = ctx->key;
	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
	++(*argcnt);

	return 0;
}

static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen = req->cryptlen + req->assoclen;
	u32 status, argcnt = 0;

	status = create_aead_ctx_hdr(req, enc, &argcnt);
	if (status)
		return status;
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
					  u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0, outputlen = 0;

	if (enc)
		outputlen = req->cryptlen + req->assoclen + mac_len;
	else
		outputlen = req->cryptlen + req->assoclen - mac_len;

	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
	req_info->outcnt = argcnt;

	return 0;
}
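
/*
 * NULL cipher (HMAC only) request lists. On encryption the computed
 * HMAC is scattered directly after the payload in the destination; on
 * decryption the received HMAC is copied aside into hmac_recv and the
 * engine writes the calculated value into hmac_calc, the two being
 * compared in the completion path by validate_hmac_cipher_null().
 */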
static inline u32 create_aead_null_input_list(struct aead_request *req,
					      u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen, argcnt = 0;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	create_hmac_ctx_hdr(req, &argcnt, enc);
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline u32 create_aead_null_output_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct scatterlist *dst;
	u8 *ptr = NULL;
	int argcnt = 0, status, offset;
	u32 inputlen;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	/*
	 * If source and destination are different
	 * then copy payload to destination
	 */
	if (req->src != req->dst) {

		ptr = kmalloc(inputlen, (req_info->areq->flags &
					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
					 GFP_KERNEL : GFP_ATOMIC);
		if (!ptr) {
			status = -ENOMEM;
			goto error;
		}

		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
					   inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
					     inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		kfree(ptr);
	}

	if (enc) {
		/*
		 * In an encryption scenario hmac needs
		 * to be appended after payload
		 */
		dst = req->dst;
		offset = inputlen;
		while (offset >= dst->length) {
			offset -= dst->length;
			dst = sg_next(dst);
			if (!dst) {
				status = -ENOENT;
				goto error;
			}
		}

		update_output_data(req_info, dst, offset, mac_len, &argcnt);
	} else {
		/*
		 * In a decryption scenario the hmac calculated for the
		 * received payload needs to be compared with the hmac
		 * received
		 */
		status = sg_copy_buffer(req->src, sg_nents(req->src),
					rctx->fctx.hmac.s.hmac_recv, mac_len,
					inputlen, true);
		if (status != mac_len) {
			status = -EINVAL;
			goto error;
		}

		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
		req_info->out[argcnt].size = mac_len;
		argcnt++;
	}

	req_info->outcnt = argcnt;
	return 0;

error_free:
	kfree(ptr);
error:
	return status;
}
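
/*
 * Common AEAD submit path. Depending on reg_type either the
 * FlexiCrypto (cipher + hash) or the HMAC-only request lists are
 * built, then the request is queued on a SE device for the current
 * CPU and completed asynchronously via otx_cpt_aead_callback().
 */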
static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct pci_dev *pdev;
	u32 status, cpu_num;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	req_info->callback = otx_cpt_aead_callback;
	req_info->areq = &req->base;
	req_info->req_type = reg_type;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;

	switch (reg_type) {
	case OTX_CPT_AEAD_ENC_DEC_REQ:
		status = create_aead_input_list(req, enc);
		if (status)
			return status;
		status = create_aead_output_list(req, enc,
						 crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
		status = create_aead_null_input_list(req, enc,
						     crypto_aead_authsize(tfm));
		if (status)
			return status;
		status = create_aead_null_output_list(req, enc,
						      crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	default:
		return -EINVAL;
	}

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
	    req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->ctrl.s.grp = 0;

	status = otx_cpt_do_request(pdev, req_info, cpu_num);
	/*
	 * The request is sent asynchronously; once it completes the
	 * driver notifies the caller through the registered callback.
	 */
	return status;
}

static int otx_cpt_aead_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx_cpt_aead_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

static struct skcipher_alg otx_cpt_skciphers[] = { {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cpt_xts_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_xts_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cpt_cbc_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_cbc_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cpt_ecb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = 0,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_ecb_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cfb(aes)",
	.base.cra_driver_name = "cpt_cfb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_cfb_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cpt_cbc_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = otx_cpt_skcipher_cbc_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cpt_ecb_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = 0,
	.setkey = otx_cpt_skcipher_ecb_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
} };

static struct aead_alg otx_cpt_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "cpt_rfc4106_gcm_aes",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_gcm_aes_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_gcm_aes_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_GCM_IV_SIZE,
	.maxauthsize = AES_GCM_ICV_SIZE,
} };
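
/*
 * Both algorithm tables use cra_priority 4001 so the hardware offload
 * is preferred over the generic software implementations. Algorithms
 * are registered once, when the expected number of SE devices has
 * probed, and are unregistered only when the last SE device goes away
 * and no transform still holds a reference (cra_refcnt > 1).
 */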
static inline int is_any_alg_used(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
		if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
			return true;
	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
			return true;
	return false;
}

static inline int cpt_register_algs(void)
{
	int i, err = 0;

	if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
		for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
			otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

		err = crypto_register_skciphers(otx_cpt_skciphers,
						ARRAY_SIZE(otx_cpt_skciphers));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
	if (err) {
		crypto_unregister_skciphers(otx_cpt_skciphers,
					    ARRAY_SIZE(otx_cpt_skciphers));
		return err;
	}

	return 0;
}

static inline void cpt_unregister_algs(void)
{
	crypto_unregister_skciphers(otx_cpt_skciphers,
				    ARRAY_SIZE(otx_cpt_skciphers));
	crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}

static int compare_func(const void *lptr, const void *rptr)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	if (ldesc->dev->devfn < rdesc->dev->devfn)
		return -1;
	if (ldesc->dev->devfn > rdesc->dev->devfn)
		return 1;
	return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
	struct cpt_device_desc desc;

	desc = *ldesc;
	*ldesc = *rdesc;
	*rdesc = desc;
}
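
/*
 * Called from VF probe: record the device in the SE or AE table and,
 * for SE engines, register the crypto algorithms once all expected
 * devices are present. The table is kept sorted by PCI devfn so the
 * CPU-to-queue mapping in get_se_device() is deterministic regardless
 * of probe order.
 */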
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
			enum otx_cptpf_type pf_type,
			enum otx_cptvf_type engine_type,
			int num_queues, int num_devices)
{
	int ret = 0;
	int count;

	mutex_lock(&mutex);
	switch (engine_type) {
	case OTX_CPT_SE_TYPES:
		count = atomic_read(&se_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device\n");
			ret = -ENOSPC;
			goto err;
		}
		se_devices.desc[count].pf_type = pf_type;
		se_devices.desc[count].num_queues = num_queues;
		se_devices.desc[count++].dev = pdev;
		atomic_inc(&se_devices.count);

		if (atomic_read(&se_devices.count) == num_devices &&
		    is_crypto_registered == false) {
			if (cpt_register_algs()) {
				dev_err(&pdev->dev,
					"Error in registering crypto algorithms\n");
				ret = -EINVAL;
				goto err;
			}
			try_module_get(mod);
			is_crypto_registered = true;
		}
		sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	case OTX_CPT_AE_TYPES:
		count = atomic_read(&ae_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device\n");
			ret = -ENOSPC;
			goto err;
		}
		ae_devices.desc[count].pf_type = pf_type;
		ae_devices.desc[count].num_queues = num_queues;
		ae_devices.desc[count++].dev = pdev;
		atomic_inc(&ae_devices.count);
		sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	default:
		dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
		ret = BAD_OTX_CPTVF_TYPE;
	}
err:
	mutex_unlock(&mutex);
	return ret;
}

void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
			 enum otx_cptvf_type engine_type)
{
	struct cpt_device_table *dev_tbl;
	bool dev_found = false;
	int i, j, count;

	mutex_lock(&mutex);

	dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
	count = atomic_read(&dev_tbl->count);
	for (i = 0; i < count; i++)
		if (pdev == dev_tbl->desc[i].dev) {
			for (j = i; j < count-1; j++)
				dev_tbl->desc[j] = dev_tbl->desc[j+1];
			dev_found = true;
			break;
		}

	if (!dev_found) {
		dev_err(&pdev->dev, "%s device not found\n", __func__);
		goto exit;
	}

	if (engine_type != OTX_CPT_AE_TYPES) {
		if (atomic_dec_and_test(&se_devices.count) &&
		    !is_any_alg_used()) {
			cpt_unregister_algs();
			module_put(mod);
			is_crypto_registered = false;
		}
	} else
		atomic_dec(&ae_devices.count);
exit:
	mutex_unlock(&mutex);
}