// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define CPT_MAX_VF_NUM	64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE	4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE		8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE	16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET	8
#define CONTROL_WORD_LEN	8
#define KEY2_OFFSET		48
#define DMA_MODE_FLAG(dma_mode) \
	(((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE		12
#define SHA256_TRUNC_DIGEST_SIZE	16
#define SHA384_TRUNC_DIGEST_SIZE	24
#define SHA512_TRUNC_DIGEST_SIZE	32

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
	enum otx_cptpf_type pf_type;
	struct pci_dev *dev;
	int num_queues;
};

struct cpt_device_table {
	atomic_t count;
	struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};

static struct cpt_device_table se_devices = {
	.count = ATOMIC_INIT(0)
};

static struct cpt_device_table ae_devices = {
	.count = ATOMIC_INIT(0)
};

static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
	int count, ret = 0;

	count = atomic_read(&se_devices.count);
	if (count < 1)
		return -ENODEV;

	*cpu_num = get_cpu();

	if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
		/*
		 * On OcteonTX platform there is one CPT instruction queue
		 * bound to each VF. We get maximum performance if one CPT
		 * queue is available for each cpu, otherwise CPT queues
		 * need to be shared between cpus.
		 */
		if (*cpu_num >= count)
			*cpu_num %= count;
		*pdev = se_devices.desc[*cpu_num].dev;
	} else {
		pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
		ret = -EINVAL;
	}
	put_cpu();

	return ret;
}

static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
	struct otx_cpt_req_ctx *rctx;
	struct aead_request *req;
	struct crypto_aead *tfm;

	req = container_of(cpt_req->areq, struct aead_request, base);
	tfm = crypto_aead_reqtfm(req);
	rctx = aead_request_ctx(req);
	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
		   rctx->fctx.hmac.s.hmac_recv,
		   crypto_aead_authsize(tfm)) != 0)
		return -EBADMSG;

	return 0;
}

static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct otx_cpt_req_info *cpt_req;
	struct pci_dev *pdev;

	cpt_req = cpt_info->req;
	if (!status) {
		/*
		 * When selected cipher is NULL we need to manually
		 * verify whether calculated hmac value matches
		 * received hmac value
		 */
		if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
		    !cpt_req->is_enc)
			status = validate_hmac_cipher_null(cpt_req);
	}
	if (cpt_info) {
		pdev = cpt_info->pdev;
		do_request_cleanup(pdev, cpt_info);
	}
	if (areq)
		areq->complete(areq, status);
}

static void output_iv_copyback(struct crypto_async_request *areq)
{
	struct otx_cpt_req_info *req_info;
	struct skcipher_request *sreq;
	struct crypto_skcipher *stfm;
	struct otx_cpt_req_ctx *rctx;
	struct otx_cpt_enc_ctx *ctx;
	u32 start, ivsize;

	sreq = container_of(areq, struct skcipher_request, base);
	stfm = crypto_skcipher_reqtfm(sreq);
	ctx = crypto_skcipher_ctx(stfm);
	if (ctx->cipher_type == OTX_CPT_AES_CBC ||
	    ctx->cipher_type == OTX_CPT_DES3_CBC) {
		rctx = skcipher_request_ctx(sreq);
		req_info = &rctx->cpt_req;
		ivsize = crypto_skcipher_ivsize(stfm);
		start = sreq->cryptlen - ivsize;

		if (req_info->is_enc) {
			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
						 ivsize, 0);
		} else {
			if (sreq->src != sreq->dst) {
				scatterwalk_map_and_copy(sreq->iv, sreq->src,
							 start, ivsize, 0);
			} else {
				memcpy(sreq->iv, req_info->iv_out, ivsize);
				kfree(req_info->iv_out);
			}
		}
	}
}

static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
	struct otx_cpt_info_buffer *cpt_info = arg2;
	struct crypto_async_request *areq = arg1;
	struct pci_dev *pdev;

	if (areq) {
		if (!status)
			output_iv_copyback(areq);
		if (cpt_info) {
			pdev = cpt_info->pdev;
			do_request_cleanup(pdev, cpt_info);
		}
		areq->complete(areq, status);
	}
}

static inline void update_input_data(struct otx_cpt_req_info *req_info,
				     struct scatterlist *inp_sg,
				     u32 nbytes, u32 *argcnt)
{
	req_info->req.dlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, inp_sg->length);
		u8 *ptr = sg_virt(inp_sg);

		req_info->in[*argcnt].vptr = (void *)ptr;
		req_info->in[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		inp_sg = sg_next(inp_sg);
	}
}

static inline void update_output_data(struct otx_cpt_req_info *req_info,
				      struct scatterlist *outp_sg,
				      u32 offset, u32 nbytes, u32 *argcnt)
{
	req_info->rlen += nbytes;

	while (nbytes) {
		u32 len = min(nbytes, outp_sg->length - offset);
		u8 *ptr = sg_virt(outp_sg);

		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
		req_info->out[*argcnt].size = len;
		nbytes -= len;
		++(*argcnt);
		offset = 0;
		outp_sg = sg_next(outp_sg);
	}
}

static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int ivsize = crypto_skcipher_ivsize(stfm);
	u32 start = req->cryptlen - ivsize;
	u64 *ctrl_flags = NULL;
	gfp_t flags;

	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;

	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
	} else {
		req_info->req.opcode.s.minor = 3;
		if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
		     ctx->cipher_type == OTX_CPT_DES3_CBC) &&
		    req->src == req->dst) {
			req_info->iv_out = kmalloc(ivsize, flags);
			if (!req_info->iv_out)
				return -ENOMEM;

			scatterwalk_map_and_copy(req_info->iv_out, req->src,
						 start, ivsize, 0);
		}
	}
	/* Encryption data length */
	req_info->req.param1 = req->cryptlen;
	/* Authentication data length */
	req_info->req.param2 = 0;

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;

	if (ctx->cipher_type == OTX_CPT_AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

	ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = cpu_to_be64(*ctrl_flags);

	/*
	 * Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);

	++(*argcnt);

	return 0;
}

static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
				    u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;
	int ret;

	ret = create_ctx_hdr(req, enc, &argcnt);
	if (ret)
		return ret;

	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline void create_output_list(struct skcipher_request *req,
				      u32 enc_iv_len)
{
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0;

	/*
	 * OUTPUT Buffer Processing
	 * AES encryption/decryption output would be
	 * received in the following format
	 *
	 * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
	 * [ 16 Bytes/     [   Request Enc/Dec/ DATA Len AES CBC ]
	 */
	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
	req_info->outcnt = argcnt;
}

static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
	struct pci_dev *pdev;
	int status, cpu_num;

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	status = create_input_list(req, enc, enc_iv_len);
	if (status)
		return status;
	create_output_list(req, enc_iv_len);

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->callback = (void *)otx_cpt_skcipher_callback;
	req_info->areq = &req->base;
	req_info->req_type = OTX_CPT_ENC_DEC_REQ;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;
	req_info->ctrl.s.grp = 0;

	/*
	 * We perform an asynchronous send and once
	 * the request is completed the driver notifies
	 * the caller through the registered callback functions
	 */
	status = otx_cpt_do_request(pdev, req_info, cpu_num);

	return status;
}

static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, true);
}

static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
	return cpt_enc_dec(req, false);
}

static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, u32 keylen)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	const u8 *key2 = key + (keylen / 2);
	const u8 *key1 = key;
	int ret;

	ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
	if (ret)
		return ret;
	ctx->key_len = keylen;
	memcpy(ctx->enc_key, key1, keylen / 2);
	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
	ctx->cipher_type = OTX_CPT_AES_XTS;
	switch (ctx->key_len) {
	case 2 * AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case 2 * AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  u32 keylen, u8 cipher_type)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		return -EINVAL;
	}
	ctx->key_len = keylen;
	ctx->cipher_type = cipher_type;

	memcpy(ctx->enc_key, key, keylen);

	return 0;
}

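/*
 * Thin per-mode setkey wrappers: each one only records which CPT cipher
 * type the key is meant for before delegating to the common AES/3DES
 * helpers above.
 */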
static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}

static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}

static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
					   const u8 *key, u32 keylen)
{
	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
}

static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}

static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
					    const u8 *key, u32 keylen)
{
	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}

static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	/*
	 * Additional memory for skcipher_request is
	 * allocated since the cryptd daemon uses
	 * this memory for request_ctx information
	 */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx) +
					sizeof(struct skcipher_request));

	return 0;
}

static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->cipher_type = cipher_type;
	ctx->mac_type = mac_type;

	/*
	 * When selected cipher is NULL we use HMAC opcode instead of
	 * FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
	 * for calculating ipad and opad
	 */
	if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
		switch (ctx->mac_type) {
		case OTX_CPT_SHA1:
			ctx->hashalg = crypto_alloc_shash("sha1", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA256:
			ctx->hashalg = crypto_alloc_shash("sha256", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA384:
			ctx->hashalg = crypto_alloc_shash("sha384", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;

		case OTX_CPT_SHA512:
			ctx->hashalg = crypto_alloc_shash("sha512", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(ctx->hashalg))
				return PTR_ERR(ctx->hashalg);
			break;
		}
	}

	crypto_aead_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx));

	return 0;
}

static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}

static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}

static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}

static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}

static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}

static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}

static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}

static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}

static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
	return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}

static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	kfree(ctx->ipad);
	kfree(ctx->opad);
	if (ctx->hashalg)
		crypto_free_shash(ctx->hashalg);
	kfree(ctx->sdesc);
}

/*
 * This is the Integrity Check Value validation (aka the authentication tag
 * length)
 */
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	switch (ctx->mac_type) {
	case OTX_CPT_SHA1:
		if (authsize != SHA1_DIGEST_SIZE &&
		    authsize != SHA1_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA1_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA256:
		if (authsize != SHA256_DIGEST_SIZE &&
		    authsize != SHA256_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA256_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA384:
		if (authsize != SHA384_DIGEST_SIZE &&
		    authsize != SHA384_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA384_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_SHA512:
		if (authsize != SHA512_DIGEST_SIZE &&
		    authsize != SHA512_TRUNC_DIGEST_SIZE)
			return -EINVAL;

		if (authsize == SHA512_TRUNC_DIGEST_SIZE)
			ctx->is_trunc_hmac = true;
		break;

	case OTX_CPT_MAC_NULL:
		if (ctx->cipher_type == OTX_CPT_AES_GCM) {
			if (authsize != AES_GCM_ICV_SIZE)
				return -EINVAL;
		} else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	tfm->authsize = authsize;
	return 0;
}

static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
	struct otx_cpt_sdesc *sdesc;
	int size;

	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc)
		return NULL;

	sdesc->shash.tfm = alg;

	return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
	u32 *store = (u32 *) buf;
	int i = 0;

	for (i = 0 ; i < len/sizeof(u32); i++, store++)
		*store = cpu_to_be32(*store);
}

static inline void swap_data64(void *buf, u32 len)
{
	u64 *store = (u64 *) buf;
	int i = 0;

	for (i = 0 ; i < len/sizeof(u64); i++, store++)
		*store = cpu_to_be64(*store);
}

static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
{
	struct sha512_state *sha512;
	struct sha256_state *sha256;
	struct sha1_state *sha1;

	switch (mac_type) {
	case OTX_CPT_SHA1:
		sha1 = (struct sha1_state *) in_pad;
		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
		memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA256:
		sha256 = (struct sha256_state *) in_pad;
		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
		memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
		break;

	case OTX_CPT_SHA384:
	case OTX_CPT_SHA512:
		sha512 = (struct sha512_state *) in_pad;
		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
		memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int aead_hmac_init(struct crypto_aead *cipher)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	int state_size = crypto_shash_statesize(ctx->hashalg);
	int ds = crypto_shash_digestsize(ctx->hashalg);
	int bs = crypto_shash_blocksize(ctx->hashalg);
	int authkeylen = ctx->auth_key_len;
	u8 *ipad = NULL, *opad = NULL;
	int ret = 0, icount = 0;

	ctx->sdesc = alloc_sdesc(ctx->hashalg);
	if (!ctx->sdesc)
		return -ENOMEM;

	ctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!ctx->opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	ipad = kzalloc(state_size, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	opad = kzalloc(state_size, GFP_KERNEL);
	if (!opad) {
		ret = -ENOMEM;
		goto calc_fail;
	}

	if (authkeylen > bs) {
		ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
					  authkeylen, ipad);
		if (ret)
			goto calc_fail;

		authkeylen = ds;
	} else {
		memcpy(ipad, ctx->key, authkeylen);
	}

	memset(ipad + authkeylen, 0, bs - authkeylen);
	memcpy(opad, ipad, bs);

	for (icount = 0; icount < bs; icount++) {
		ipad[icount] ^= 0x36;
		opad[icount] ^= 0x5c;
	}

	/*
	 * Partial Hash calculated from the software
	 * algorithm is retrieved for IPAD & OPAD
	 */

	/* IPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
	crypto_shash_export(&ctx->sdesc->shash, ipad);
	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
	if (ret)
		goto calc_fail;

	/* OPAD Calculation */
	crypto_shash_init(&ctx->sdesc->shash);
	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
	crypto_shash_export(&ctx->sdesc->shash, opad);
	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
	if (ret)
		goto calc_fail;

	kfree(ipad);
	kfree(opad);

	return 0;

calc_fail:
	kfree(ctx->ipad);
	ctx->ipad = NULL;
	kfree(ctx->opad);
	ctx->opad = NULL;
	kfree(ipad);
	kfree(opad);
	kfree(ctx->sdesc);
	ctx->sdesc = NULL;

	return ret;
}

static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
					   const unsigned char *key,
					   unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	struct crypto_authenc_key_param *param;
	int enckeylen = 0, authkeylen = 0;
	struct rtattr *rta = (void *)key;
	int status = -EINVAL;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (keylen < enckeylen)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	authkeylen = keylen - enckeylen;
	memcpy(ctx->key, key, keylen);

	switch (enckeylen) {
	case AES_KEYSIZE_128:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		break;
	case AES_KEYSIZE_192:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		break;
	case AES_KEYSIZE_256:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		break;
	default:
		/* Invalid key length */
		goto badkey;
	}

	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = authkeylen;

	status = aead_hmac_init(cipher);
	if (status)
		goto badkey;

	return 0;
badkey:
	return status;
}

static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
					    const unsigned char *key,
					    unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
	struct crypto_authenc_key_param *param;
	struct rtattr *rta = (void *)key;
	int enckeylen = 0;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);
	if (enckeylen != 0)
		goto badkey;

	if (keylen > OTX_CPT_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, key, keylen);
	ctx->enc_key_len = enckeylen;
	ctx->auth_key_len = keylen;
	return 0;
badkey:
	return -EINVAL;
}

static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
				       const unsigned char *key,
				       unsigned int keylen)
{
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);

	/*
	 * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
	 * and salt (4 bytes)
	 */
	switch (keylen) {
	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_128_BIT;
		ctx->enc_key_len = AES_KEYSIZE_128;
		break;
	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_192_BIT;
		ctx->enc_key_len = AES_KEYSIZE_192;
		break;
	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
		ctx->key_type = OTX_CPT_AES_256_BIT;
		ctx->enc_key_len = AES_KEYSIZE_256;
		break;
	default:
		/* Invalid key and salt length */
		return -EINVAL;
	}

	/* Store encryption key and salt */
	memcpy(ctx->key, key, keylen);

	return 0;
}

static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
				      u32 *argcnt)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
	int mac_len = crypto_aead_authsize(tfm);
	int ds;

	rctx->ctrl_word.e.enc_data_offset = req->assoclen;

	switch (ctx->cipher_type) {
	case OTX_CPT_AES_CBC:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
		       ctx->enc_key_len);
		/* Copy IV to context */
		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

		ds = crypto_shash_digestsize(ctx->hashalg);
		if (ctx->mac_type == OTX_CPT_SHA384)
			ds = SHA512_DIGEST_SIZE;
		if (ctx->ipad)
			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
		if (ctx->opad)
			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
		break;

	case OTX_CPT_AES_GCM:
		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
		/* Copy encryption key to context */
		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
		/* Copy salt to context */
		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
		       AES_GCM_SALT_SIZE);

		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
		break;

	default:
		/* Unknown cipher type */
		return -EINVAL;
	}
	rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.flags);

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	if (enc) {
		req_info->req.opcode.s.minor = 2;
		req_info->req.param1 = req->cryptlen;
		req_info->req.param2 = req->cryptlen + req->assoclen;
	} else {
		req_info->req.opcode.s.minor = 3;
		req_info->req.param1 = req->cryptlen - mac_len;
		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
	}

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
	fctx->enc.enc_ctrl.e.mac_len = mac_len;
	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);

	/*
	 * Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
	++(*argcnt);

	return 0;
}

static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
				      u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;

	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
	req_info->is_trunc_hmac = ctx->is_trunc_hmac;

	req_info->req.opcode.s.minor = 0;
	req_info->req.param1 = ctx->auth_key_len;
	req_info->req.param2 = ctx->mac_type << 8;

	/* Add authentication key */
	req_info->in[*argcnt].vptr = ctx->key;
	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
	++(*argcnt);

	return 0;
}

static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen = req->cryptlen + req->assoclen;
	u32 status, argcnt = 0;

	status = create_aead_ctx_hdr(req, enc, &argcnt);
	if (status)
		return status;
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
					  u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 argcnt = 0, outputlen = 0;

	if (enc)
		outputlen = req->cryptlen + req->assoclen + mac_len;
	else
		outputlen = req->cryptlen + req->assoclen - mac_len;

	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
	req_info->outcnt = argcnt;

	return 0;
}

static inline u32 create_aead_null_input_list(struct aead_request *req,
					      u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	u32 inputlen, argcnt = 0;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	create_hmac_ctx_hdr(req, &argcnt, enc);
	update_input_data(req_info, req->src, inputlen, &argcnt);
	req_info->incnt = argcnt;

	return 0;
}

static inline u32 create_aead_null_output_list(struct aead_request *req,
					       u32 enc, u32 mac_len)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct scatterlist *dst;
	u8 *ptr = NULL;
	int argcnt = 0, status, offset;
	u32 inputlen;

	if (enc)
		inputlen = req->cryptlen + req->assoclen;
	else
		inputlen = req->cryptlen + req->assoclen - mac_len;

	/*
	 * If source and destination are different
	 * then copy payload to destination
	 */
	if (req->src != req->dst) {

		ptr = kmalloc(inputlen, (req_info->areq->flags &
					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
					 GFP_KERNEL : GFP_ATOMIC);
		if (!ptr) {
			status = -ENOMEM;
			goto error;
		}

		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
					   inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
					     inputlen);
		if (status != inputlen) {
			status = -EINVAL;
			goto error_free;
		}
		kfree(ptr);
	}

	if (enc) {
		/*
		 * In an encryption scenario hmac needs
		 * to be appended after payload
		 */
		dst = req->dst;
		offset = inputlen;
		while (offset >= dst->length) {
			offset -= dst->length;
			dst = sg_next(dst);
			if (!dst) {
				status = -ENOENT;
				goto error;
			}
		}

		update_output_data(req_info, dst, offset, mac_len, &argcnt);
	} else {
		/*
		 * In a decryption scenario the hmac calculated for the
		 * received payload needs to be compared with the received hmac
		 */
		status = sg_copy_buffer(req->src, sg_nents(req->src),
					rctx->fctx.hmac.s.hmac_recv, mac_len,
					inputlen, true);
		if (status != mac_len) {
			status = -EINVAL;
			goto error;
		}

		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
		req_info->out[argcnt].size = mac_len;
		argcnt++;
	}

	req_info->outcnt = argcnt;
	return 0;

error_free:
	kfree(ptr);
error:
	return status;
}

static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct pci_dev *pdev;
	u32 status, cpu_num;

	/* Clear control words */
	rctx->ctrl_word.flags = 0;
	rctx->fctx.enc.enc_ctrl.flags = 0;

	req_info->callback = otx_cpt_aead_callback;
	req_info->areq = &req->base;
	req_info->req_type = reg_type;
	req_info->is_enc = enc;
	req_info->is_trunc_hmac = false;

	switch (reg_type) {
	case OTX_CPT_AEAD_ENC_DEC_REQ:
		status = create_aead_input_list(req, enc);
		if (status)
			return status;
		status = create_aead_output_list(req, enc,
						 crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
		status = create_aead_null_input_list(req, enc,
						     crypto_aead_authsize(tfm));
		if (status)
			return status;
		status = create_aead_null_output_list(req, enc,
						      crypto_aead_authsize(tfm));
		if (status)
			return status;
		break;

	default:
		return -EINVAL;
	}

	/* Validate that request doesn't exceed maximum CPT supported size */
	if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
	    req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
		return -E2BIG;

	status = get_se_device(&pdev, &cpu_num);
	if (status)
		return status;

	req_info->ctrl.s.grp = 0;

	status = otx_cpt_do_request(pdev, req_info, cpu_num);
	/*
	 * We perform an asynchronous send and once
	 * the request is completed the driver notifies
	 * the caller through the registered callback functions
	 */
	return status;
}

static int otx_cpt_aead_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx_cpt_aead_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}

static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

static struct skcipher_alg otx_cpt_skciphers[] = { {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cpt_xts_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_xts_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cpt_cbc_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_cbc_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cpt_ecb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = 0,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_ecb_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cfb(aes)",
	.base.cra_driver_name = "cpt_cfb_aes",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = otx_cpt_skcipher_cfb_aes_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cpt_cbc_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = otx_cpt_skcipher_cbc_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
}, {
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cpt_ecb_des3_ede",
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_module = THIS_MODULE,

	.init = otx_cpt_enc_dec_init,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = 0,
	.setkey = otx_cpt_skcipher_ecb_des3_setkey,
	.encrypt = otx_cpt_skcipher_encrypt,
	.decrypt = otx_cpt_skcipher_decrypt,
} };

static struct aead_alg otx_cpt_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_cbc_aes_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha1_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha256_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha384_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA384_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_ecb_null_sha512_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_null_encrypt,
	.decrypt = otx_cpt_aead_null_decrypt,
	.ivsize = 0,
	.maxauthsize = SHA512_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "rfc4106(gcm(aes))",
		.cra_driver_name = "cpt_rfc4106_gcm_aes",
		.cra_blocksize = 1,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
		.cra_priority = 4001,
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
	.init = otx_cpt_aead_gcm_aes_init,
	.exit = otx_cpt_aead_exit,
	.setkey = otx_cpt_aead_gcm_aes_setkey,
	.setauthsize = otx_cpt_aead_set_authsize,
	.encrypt = otx_cpt_aead_encrypt,
	.decrypt = otx_cpt_aead_decrypt,
	.ivsize = AES_GCM_IV_SIZE,
	.maxauthsize = AES_GCM_ICV_SIZE,
} };

static inline int is_any_alg_used(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
		if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
			return true;
	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
			return true;
	return false;
}

static inline int cpt_register_algs(void)
{
	int i, err = 0;

	if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
		for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
			otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

		err = crypto_register_skciphers(otx_cpt_skciphers,
						ARRAY_SIZE(otx_cpt_skciphers));
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
		otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

	err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
	if (err) {
		crypto_unregister_skciphers(otx_cpt_skciphers,
					    ARRAY_SIZE(otx_cpt_skciphers));
		return err;
	}

	return 0;
}

static inline void cpt_unregister_algs(void)
{
	crypto_unregister_skciphers(otx_cpt_skciphers,
				    ARRAY_SIZE(otx_cpt_skciphers));
	crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}

static int compare_func(const void *lptr, const void *rptr)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

	if (ldesc->dev->devfn < rdesc->dev->devfn)
		return -1;
	if (ldesc->dev->devfn > rdesc->dev->devfn)
		return 1;
	return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
	struct cpt_device_desc desc;

	desc = *ldesc;
	*ldesc = *rdesc;
	*rdesc = desc;
}

int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
			enum otx_cptpf_type pf_type,
			enum otx_cptvf_type engine_type,
			int num_queues, int num_devices)
{
	int ret = 0;
	int count;

	mutex_lock(&mutex);
	switch (engine_type) {
	case OTX_CPT_SE_TYPES:
		count = atomic_read(&se_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device");
			ret = -ENOSPC;
			goto err;
		}
		se_devices.desc[count].pf_type = pf_type;
		se_devices.desc[count].num_queues = num_queues;
		se_devices.desc[count++].dev = pdev;
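		/*
		 * Make the new device visible to get_se_device() before
		 * algorithms may get registered below.
		 */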
		atomic_inc(&se_devices.count);

		if (atomic_read(&se_devices.count) == num_devices &&
		    is_crypto_registered == false) {
			if (cpt_register_algs()) {
				dev_err(&pdev->dev,
					"Error in registering crypto algorithms\n");
				ret = -EINVAL;
				goto err;
			}
			try_module_get(mod);
			is_crypto_registered = true;
		}
		sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	case OTX_CPT_AE_TYPES:
		count = atomic_read(&ae_devices.count);
		if (count >= CPT_MAX_VF_NUM) {
			dev_err(&pdev->dev, "No space to add a new device");
			ret = -ENOSPC;
			goto err;
		}
		ae_devices.desc[count].pf_type = pf_type;
		ae_devices.desc[count].num_queues = num_queues;
		ae_devices.desc[count++].dev = pdev;
		atomic_inc(&ae_devices.count);
		sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
		     compare_func, swap_func);
		break;

	default:
		dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
		ret = BAD_OTX_CPTVF_TYPE;
	}
err:
	mutex_unlock(&mutex);
	return ret;
}

void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
			 enum otx_cptvf_type engine_type)
{
	struct cpt_device_table *dev_tbl;
	bool dev_found = false;
	int i, j, count;

	mutex_lock(&mutex);

	dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
	count = atomic_read(&dev_tbl->count);
	for (i = 0; i < count; i++)
		if (pdev == dev_tbl->desc[i].dev) {
			for (j = i; j < count-1; j++)
				dev_tbl->desc[j] = dev_tbl->desc[j+1];
			dev_found = true;
			break;
		}

	if (!dev_found) {
		dev_err(&pdev->dev, "%s device not found", __func__);
		goto exit;
	}

	if (engine_type != OTX_CPT_AE_TYPES) {
		if (atomic_dec_and_test(&se_devices.count) &&
		    !is_any_alg_used()) {
			cpt_unregister_algs();
			module_put(mod);
			is_crypto_registered = false;
		}
	} else
		atomic_dec(&ae_devices.count);
exit:
	mutex_unlock(&mutex);
}
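
/*
 * Usage sketch (illustrative only, not part of this file): the VF PCI
 * driver is expected to call otx_cpt_crypto_init() once its instruction
 * queues are ready, and otx_cpt_crypto_exit() on removal, roughly:
 *
 *	ret = otx_cpt_crypto_init(pdev, THIS_MODULE, pf_type,
 *				  OTX_CPT_SE_TYPES, num_queues, num_devices);
 *	...
 *	otx_cpt_crypto_exit(pdev, THIS_MODULE, OTX_CPT_SE_TYPES);
 *
 * pf_type, num_queues and num_devices above are placeholders; the actual
 * call sites live in the VF driver (otx_cptvf_main.c in the upstream tree).
 */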