// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/des.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define CPT_MAX_VF_NUM 64
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
/* Size of IV in AES GCM mode */
#define AES_GCM_IV_SIZE 8
/* Size of ICV (Integrity Check Value) in AES GCM mode */
#define AES_GCM_ICV_SIZE 16
/* Offset of IV in AES GCM mode */
#define AES_GCM_IV_OFFSET 8
#define CONTROL_WORD_LEN 8
#define KEY2_OFFSET 48
#define DMA_MODE_FLAG(dma_mode) \
        (((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)

/* Truncated SHA digest size */
#define SHA1_TRUNC_DIGEST_SIZE 12
#define SHA256_TRUNC_DIGEST_SIZE 16
#define SHA384_TRUNC_DIGEST_SIZE 24
#define SHA512_TRUNC_DIGEST_SIZE 32

static DEFINE_MUTEX(mutex);
static int is_crypto_registered;

struct cpt_device_desc {
        enum otx_cptpf_type pf_type;
        struct pci_dev *dev;
        int num_queues;
};

struct cpt_device_table {
        atomic_t count;
        struct cpt_device_desc desc[CPT_MAX_VF_NUM];
};

static struct cpt_device_table se_devices = {
        .count = ATOMIC_INIT(0)
};

static struct cpt_device_table ae_devices = {
        .count = ATOMIC_INIT(0)
};

static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);

static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
        int count, ret = 0;

        count = atomic_read(&se_devices.count);
        if (count < 1)
                return -ENODEV;

        *cpu_num = get_cpu();

        if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
                /*
                 * On the OcteonTX platform there is one CPT instruction queue
                 * bound to each VF. We get maximum performance if one CPT
                 * queue is available for each cpu, otherwise CPT queues need
                 * to be shared between cpus.
                 */
                if (*cpu_num >= count)
                        *cpu_num %= count;
                *pdev = se_devices.desc[*cpu_num].dev;
        } else {
                pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
                ret = -EINVAL;
        }
        put_cpu();

        return ret;
}

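/*
 * Used on the null-cipher AEAD decrypt path: the hmac received with the
 * request is stashed in the request context and the engine writes the
 * hmac it computes next to it, so the two can be compared once the
 * request completes.
 */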
static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
{
        struct otx_cpt_req_ctx *rctx;
        struct aead_request *req;
        struct crypto_aead *tfm;

        req = container_of(cpt_req->areq, struct aead_request, base);
        tfm = crypto_aead_reqtfm(req);
        rctx = aead_request_ctx_dma(req);
        if (memcmp(rctx->fctx.hmac.s.hmac_calc,
                   rctx->fctx.hmac.s.hmac_recv,
                   crypto_aead_authsize(tfm)) != 0)
                return -EBADMSG;

        return 0;
}

static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
{
        struct otx_cpt_info_buffer *cpt_info = arg2;
        struct crypto_async_request *areq = arg1;
        struct otx_cpt_req_info *cpt_req;
        struct pci_dev *pdev;

        if (!cpt_info)
                goto complete;

        cpt_req = cpt_info->req;
        if (!status) {
                /*
                 * When the selected cipher is NULL we need to manually
                 * verify whether the calculated hmac value matches the
                 * received hmac value
                 */
                if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
                    !cpt_req->is_enc)
                        status = validate_hmac_cipher_null(cpt_req);
        }
        pdev = cpt_info->pdev;
        do_request_cleanup(pdev, cpt_info);

complete:
        if (areq)
                crypto_request_complete(areq, status);
}

static void output_iv_copyback(struct crypto_async_request *areq)
{
        struct otx_cpt_req_info *req_info;
        struct skcipher_request *sreq;
        struct crypto_skcipher *stfm;
        struct otx_cpt_req_ctx *rctx;
        struct otx_cpt_enc_ctx *ctx;
        u32 start, ivsize;

        sreq = container_of(areq, struct skcipher_request, base);
        stfm = crypto_skcipher_reqtfm(sreq);
        ctx = crypto_skcipher_ctx(stfm);
        if (ctx->cipher_type == OTX_CPT_AES_CBC ||
            ctx->cipher_type == OTX_CPT_DES3_CBC) {
                rctx = skcipher_request_ctx_dma(sreq);
                req_info = &rctx->cpt_req;
                ivsize = crypto_skcipher_ivsize(stfm);
                start = sreq->cryptlen - ivsize;

                if (req_info->is_enc) {
                        scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
                                                 ivsize, 0);
                } else {
                        if (sreq->src != sreq->dst) {
                                scatterwalk_map_and_copy(sreq->iv, sreq->src,
                                                         start, ivsize, 0);
                        } else {
                                memcpy(sreq->iv, req_info->iv_out, ivsize);
                                kfree(req_info->iv_out);
                        }
                }
        }
}

static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
{
        struct otx_cpt_info_buffer *cpt_info = arg2;
        struct crypto_async_request *areq = arg1;
        struct pci_dev *pdev;

        if (areq) {
                if (!status)
                        output_iv_copyback(areq);
                if (cpt_info) {
                        pdev = cpt_info->pdev;
                        do_request_cleanup(pdev, cpt_info);
                }
                crypto_request_complete(areq, status);
        }
}

static inline void update_input_data(struct otx_cpt_req_info *req_info,
                                     struct scatterlist *inp_sg,
                                     u32 nbytes, u32 *argcnt)
{
        req_info->req.dlen += nbytes;

        while (nbytes) {
                u32 len = min(nbytes, inp_sg->length);
                u8 *ptr = sg_virt(inp_sg);

                req_info->in[*argcnt].vptr = (void *)ptr;
                req_info->in[*argcnt].size = len;
                nbytes -= len;
                ++(*argcnt);
                inp_sg = sg_next(inp_sg);
        }
}

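/*
 * Append the destination scatterlist, starting at @offset, to the
 * request's scatter (output) component list; rlen accumulates the
 * number of bytes the engine is expected to write back.
 */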
static inline void update_output_data(struct otx_cpt_req_info *req_info,
                                      struct scatterlist *outp_sg,
                                      u32 offset, u32 nbytes, u32 *argcnt)
{
        req_info->rlen += nbytes;

        while (nbytes) {
                u32 len = min(nbytes, outp_sg->length - offset);
                u8 *ptr = sg_virt(outp_sg);

                req_info->out[*argcnt].vptr = (void *) (ptr + offset);
                req_info->out[*argcnt].size = len;
                nbytes -= len;
                ++(*argcnt);
                offset = 0;
                outp_sg = sg_next(outp_sg);
        }
}

static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
                                 u32 *argcnt)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
        struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
        struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
        int ivsize = crypto_skcipher_ivsize(stfm);
        u32 start = req->cryptlen - ivsize;
        gfp_t flags;

        flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC;
        req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;

        req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
                        DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
        if (enc) {
                req_info->req.opcode.s.minor = 2;
        } else {
                req_info->req.opcode.s.minor = 3;
                if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
                     ctx->cipher_type == OTX_CPT_DES3_CBC) &&
                    req->src == req->dst) {
                        req_info->iv_out = kmalloc(ivsize, flags);
                        if (!req_info->iv_out)
                                return -ENOMEM;

                        scatterwalk_map_and_copy(req_info->iv_out, req->src,
                                                 start, ivsize, 0);
                }
        }
        /* Encryption data length */
        req_info->req.param1 = req->cryptlen;
        /* Authentication data length */
        req_info->req.param2 = 0;

        fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
        fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
        fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;

        if (ctx->cipher_type == OTX_CPT_AES_XTS)
                memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
        else
                memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);

        memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));

        fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

        /*
         * Store packet data information in the gather list:
         * the control word occupies the first 8 bytes
         */
        req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
        req_info->in[*argcnt].size = CONTROL_WORD_LEN;
        req_info->req.dlen += CONTROL_WORD_LEN;
        ++(*argcnt);

        req_info->in[*argcnt].vptr = (u8 *)fctx;
        req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
        req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);

        ++(*argcnt);

        return 0;
}

static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
                                    u32 enc_iv_len)
{
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0;
        int ret;

        ret = create_ctx_hdr(req, enc, &argcnt);
        if (ret)
                return ret;

        update_input_data(req_info, req->src, req->cryptlen, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

static inline void create_output_list(struct skcipher_request *req,
                                      u32 enc_iv_len)
{
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0;

        /*
         * OUTPUT Buffer Processing
         * AES encryption/decryption output would be
         * received in the following format
         *
         * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
         * [ 16 Bytes/     [   Request Enc/Dec/ DATA Len AES CBC ]
         */
        update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
        req_info->outcnt = argcnt;
}

static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
{
        struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
        struct otx_cpt_req_ctx *rctx = skcipher_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
        struct pci_dev *pdev;
        int status, cpu_num;

        /* Validate that request doesn't exceed maximum CPT supported size */
        if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
                return -E2BIG;

        /* Clear control words */
        rctx->ctrl_word.flags = 0;
        rctx->fctx.enc.enc_ctrl.flags = 0;

        status = create_input_list(req, enc, enc_iv_len);
        if (status)
                return status;
        create_output_list(req, enc_iv_len);

        status = get_se_device(&pdev, &cpu_num);
        if (status)
                return status;

        req_info->callback = (void *)otx_cpt_skcipher_callback;
        req_info->areq = &req->base;
        req_info->req_type = OTX_CPT_ENC_DEC_REQ;
        req_info->is_enc = enc;
        req_info->is_trunc_hmac = false;
        req_info->ctrl.s.grp = 0;

        /*
         * We perform an asynchronous send and once
         * the request is completed the driver notifies
         * the caller through the registered callback functions
         */
        status = otx_cpt_do_request(pdev, req_info, cpu_num);

        return status;
}

static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
{
        return cpt_enc_dec(req, true);
}

static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
{
        return cpt_enc_dec(req, false);
}

static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
                                       const u8 *key, u32 keylen)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
        const u8 *key2 = key + (keylen / 2);
        const u8 *key1 = key;
        int ret;

        ret = xts_verify_key(tfm, key, keylen);
        if (ret)
                return ret;
        ctx->key_len = keylen;
        memcpy(ctx->enc_key, key1, keylen / 2);
        memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
        ctx->cipher_type = OTX_CPT_AES_XTS;
        switch (ctx->key_len) {
        case 2 * AES_KEYSIZE_128:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                break;
        case 2 * AES_KEYSIZE_256:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
                          u32 keylen, u8 cipher_type)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

        if (keylen != DES3_EDE_KEY_SIZE)
                return -EINVAL;

        ctx->key_len = keylen;
        ctx->cipher_type = cipher_type;

        memcpy(ctx->enc_key, key, keylen);

        return 0;
}

static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                          u32 keylen, u8 cipher_type)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

        switch (keylen) {
        case AES_KEYSIZE_128:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                break;
        case AES_KEYSIZE_192:
                ctx->key_type = OTX_CPT_AES_192_BIT;
                break;
        case AES_KEYSIZE_256:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                break;
        default:
                return -EINVAL;
        }
        ctx->key_len = keylen;
        ctx->cipher_type = cipher_type;

        memcpy(ctx->enc_key, key, keylen);

        return 0;
}

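/*
 * The per-mode setkey wrappers below only differ in the cipher type that
 * is recorded in the transform context along with the key.
 */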
static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
                                           const u8 *key, u32 keylen)
{
        return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
}

static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
                                           const u8 *key, u32 keylen)
{
        return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}

static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
                                           const u8 *key, u32 keylen)
{
        return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
}

static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
                                            const u8 *key, u32 keylen)
{
        return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
}

static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
                                            const u8 *key, u32 keylen)
{
        return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
}

static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
{
        struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);

        memset(ctx, 0, sizeof(*ctx));
        /*
         * Additional memory for skcipher_request is
         * allocated since the cryptd daemon uses
         * this memory for request_ctx information
         */
        crypto_skcipher_set_reqsize_dma(
                tfm, sizeof(struct otx_cpt_req_ctx) +
                     sizeof(struct skcipher_request));

        return 0;
}

static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

        ctx->cipher_type = cipher_type;
        ctx->mac_type = mac_type;

        switch (ctx->mac_type) {
        case OTX_CPT_SHA1:
                ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
                break;

        case OTX_CPT_SHA256:
                ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
                break;

        case OTX_CPT_SHA384:
                ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
                break;

        case OTX_CPT_SHA512:
                ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        if (IS_ERR(ctx->hashalg))
                return PTR_ERR(ctx->hashalg);

        crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));

        if (!ctx->hashalg)
                return 0;

        /*
         * When the selected cipher is NULL we use the HMAC opcode instead of
         * the FLEXICRYPTO opcode, therefore we don't need the HASH algorithms
         * for calculating ipad and opad
         */
        if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
                int ss = crypto_shash_statesize(ctx->hashalg);

                ctx->ipad = kzalloc(ss, GFP_KERNEL);
                if (!ctx->ipad) {
                        crypto_free_shash(ctx->hashalg);
                        return -ENOMEM;
                }

                ctx->opad = kzalloc(ss, GFP_KERNEL);
                if (!ctx->opad) {
                        kfree(ctx->ipad);
                        crypto_free_shash(ctx->hashalg);
                        return -ENOMEM;
                }
        }

        ctx->sdesc = alloc_sdesc(ctx->hashalg);
        if (!ctx->sdesc) {
                kfree(ctx->opad);
                kfree(ctx->ipad);
                crypto_free_shash(ctx->hashalg);
                return -ENOMEM;
        }

        return 0;
}

static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
}

static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
}

static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
}

static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
}

static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
}

static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
}

static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
}

static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
}

static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
{
        return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
}

static void otx_cpt_aead_exit(struct crypto_aead *tfm)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

        kfree(ctx->ipad);
        kfree(ctx->opad);
        crypto_free_shash(ctx->hashalg);
        kfree(ctx->sdesc);
}

/*
 * This is the Integrity Check Value (authentication tag) length
 * validation.
 */
static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
                                     unsigned int authsize)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);

        switch (ctx->mac_type) {
        case OTX_CPT_SHA1:
                if (authsize != SHA1_DIGEST_SIZE &&
                    authsize != SHA1_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA1_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;

        case OTX_CPT_SHA256:
                if (authsize != SHA256_DIGEST_SIZE &&
                    authsize != SHA256_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA256_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;

        case OTX_CPT_SHA384:
                if (authsize != SHA384_DIGEST_SIZE &&
                    authsize != SHA384_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA384_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;

        case OTX_CPT_SHA512:
                if (authsize != SHA512_DIGEST_SIZE &&
                    authsize != SHA512_TRUNC_DIGEST_SIZE)
                        return -EINVAL;

                if (authsize == SHA512_TRUNC_DIGEST_SIZE)
                        ctx->is_trunc_hmac = true;
                break;

        case OTX_CPT_MAC_NULL:
                if (ctx->cipher_type == OTX_CPT_AES_GCM) {
                        if (authsize != AES_GCM_ICV_SIZE)
                                return -EINVAL;
                } else
                        return -EINVAL;
                break;

        default:
                return -EINVAL;
        }

        tfm->authsize = authsize;
        return 0;
}

static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
{
        struct otx_cpt_sdesc *sdesc;
        int size;

        size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
        sdesc = kmalloc(size, GFP_KERNEL);
        if (!sdesc)
                return NULL;

        sdesc->shash.tfm = alg;

        return sdesc;
}

static inline void swap_data32(void *buf, u32 len)
{
        cpu_to_be32_array(buf, buf, len / 4);
}

static inline void swap_data64(void *buf, u32 len)
{
        __be64 *dst = buf;
        u64 *src = buf;
        int i = 0;

        for (i = 0 ; i < len / 8; i++, src++, dst++)
                *dst = cpu_to_be64p(src);
}

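/*
 * crypto_shash_export() hands back the partial hash state in cpu byte
 * order; convert the state words to big-endian before they are copied
 * into the ipad/opad fields of the flexicrypto context.
 */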
static int swap_pad(u8 mac_type, u8 *pad)
{
        struct sha512_state *sha512;
        struct sha256_state *sha256;
        struct sha1_state *sha1;

        switch (mac_type) {
        case OTX_CPT_SHA1:
                sha1 = (struct sha1_state *)pad;
                swap_data32(sha1->state, SHA1_DIGEST_SIZE);
                break;

        case OTX_CPT_SHA256:
                sha256 = (struct sha256_state *)pad;
                swap_data32(sha256->state, SHA256_DIGEST_SIZE);
                break;

        case OTX_CPT_SHA384:
        case OTX_CPT_SHA512:
                sha512 = (struct sha512_state *)pad;
                swap_data64(sha512->state, SHA512_DIGEST_SIZE);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static int aead_hmac_init(struct crypto_aead *cipher,
                          struct crypto_authenc_keys *keys)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
        int ds = crypto_shash_digestsize(ctx->hashalg);
        int bs = crypto_shash_blocksize(ctx->hashalg);
        int authkeylen = keys->authkeylen;
        u8 *ipad = NULL, *opad = NULL;
        int icount = 0;
        int ret;

        if (authkeylen > bs) {
                ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
                                          authkeylen, ctx->key);
                if (ret)
                        return ret;
                authkeylen = ds;
        } else
                memcpy(ctx->key, keys->authkey, authkeylen);

        ctx->enc_key_len = keys->enckeylen;
        ctx->auth_key_len = authkeylen;

        if (ctx->cipher_type == OTX_CPT_CIPHER_NULL)
                return keys->enckeylen ? -EINVAL : 0;

        switch (keys->enckeylen) {
        case AES_KEYSIZE_128:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                break;
        case AES_KEYSIZE_192:
                ctx->key_type = OTX_CPT_AES_192_BIT;
                break;
        case AES_KEYSIZE_256:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                break;
        default:
                /* Invalid key length */
                return -EINVAL;
        }

        memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);

        ipad = ctx->ipad;
        opad = ctx->opad;

        memcpy(ipad, ctx->key, authkeylen);
        memset(ipad + authkeylen, 0, bs - authkeylen);
        memcpy(opad, ipad, bs);

        for (icount = 0; icount < bs; icount++) {
                ipad[icount] ^= 0x36;
                opad[icount] ^= 0x5c;
        }

        /*
         * The partial hash calculated by the software algorithm is
         * retrieved for IPAD & OPAD
         */

        /* IPAD Calculation */
        crypto_shash_init(&ctx->sdesc->shash);
        crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
        crypto_shash_export(&ctx->sdesc->shash, ipad);
        ret = swap_pad(ctx->mac_type, ipad);
        if (ret)
                goto calc_fail;

        /* OPAD Calculation */
        crypto_shash_init(&ctx->sdesc->shash);
        crypto_shash_update(&ctx->sdesc->shash, opad, bs);
        crypto_shash_export(&ctx->sdesc->shash, opad);
        ret = swap_pad(ctx->mac_type, opad);

calc_fail:
        return ret;
}

static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
                                           const unsigned char *key,
                                           unsigned int keylen)
{
        struct crypto_authenc_keys authenc_keys;
        int status;

        status = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
        if (status)
                goto badkey;

        status = aead_hmac_init(cipher, &authenc_keys);

badkey:
        return status;
}

static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
                                            const unsigned char *key,
                                            unsigned int keylen)
{
        return otx_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
}

static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
                                       const unsigned char *key,
                                       unsigned int keylen)
{
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);

        /*
         * For aes gcm we expect to get encryption key (16, 24, 32 bytes)
         * and salt (4 bytes)
         */
        switch (keylen) {
        case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
                ctx->key_type = OTX_CPT_AES_128_BIT;
                ctx->enc_key_len = AES_KEYSIZE_128;
                break;
        case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
                ctx->key_type = OTX_CPT_AES_192_BIT;
                ctx->enc_key_len = AES_KEYSIZE_192;
                break;
        case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
                ctx->key_type = OTX_CPT_AES_256_BIT;
                ctx->enc_key_len = AES_KEYSIZE_256;
                break;
        default:
                /* Invalid key and salt length */
                return -EINVAL;
        }

        /* Store encryption key and salt */
        memcpy(ctx->key, key, keylen);

        return 0;
}

static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
                                      u32 *argcnt)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
        int mac_len = crypto_aead_authsize(tfm);
        int ds;

        rctx->ctrl_word.e.enc_data_offset = req->assoclen;

        switch (ctx->cipher_type) {
        case OTX_CPT_AES_CBC:
                fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
                /* Copy encryption key to context */
                memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
                       ctx->enc_key_len);
                /* Copy IV to context */
                memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));

                ds = crypto_shash_digestsize(ctx->hashalg);
                if (ctx->mac_type == OTX_CPT_SHA384)
                        ds = SHA512_DIGEST_SIZE;
                if (ctx->ipad)
                        memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
                if (ctx->opad)
                        memcpy(fctx->hmac.e.opad, ctx->opad, ds);
                break;

        case OTX_CPT_AES_GCM:
                fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
                /* Copy encryption key to context */
                memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
                /* Copy salt to context */
                memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
                       AES_GCM_SALT_SIZE);

                rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
                break;

        default:
                /* Unknown cipher type */
                return -EINVAL;
        }
        rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);

        req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
        req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
                        DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
        if (enc) {
                req_info->req.opcode.s.minor = 2;
                req_info->req.param1 = req->cryptlen;
                req_info->req.param2 = req->cryptlen + req->assoclen;
        } else {
                req_info->req.opcode.s.minor = 3;
                req_info->req.param1 = req->cryptlen - mac_len;
                req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
        }

        fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
        fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
        fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
        fctx->enc.enc_ctrl.e.mac_len = mac_len;
        fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);

        /*
         * Store packet data information in the gather list:
         * the control word occupies the first 8 bytes
         */
        req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
        req_info->in[*argcnt].size = CONTROL_WORD_LEN;
        req_info->req.dlen += CONTROL_WORD_LEN;
        ++(*argcnt);

        req_info->in[*argcnt].vptr = (u8 *)fctx;
        req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
        req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
        ++(*argcnt);

        return 0;
}

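/*
 * For the null cipher the plain HMAC opcode is used instead of the
 * flexicrypto opcode: the (padded) authentication key is passed as the
 * first gather entry, param1 carries the key length and param2 selects
 * the hash algorithm.
 */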
static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
                                      u32 enc)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(tfm);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;

        req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
        req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
                        DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
        req_info->is_trunc_hmac = ctx->is_trunc_hmac;

        req_info->req.opcode.s.minor = 0;
        req_info->req.param1 = ctx->auth_key_len;
        req_info->req.param2 = ctx->mac_type << 8;

        /* Add authentication key */
        req_info->in[*argcnt].vptr = ctx->key;
        req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
        req_info->req.dlen += round_up(ctx->auth_key_len, 8);
        ++(*argcnt);

        return 0;
}

static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 inputlen = req->cryptlen + req->assoclen;
        u32 status, argcnt = 0;

        status = create_aead_ctx_hdr(req, enc, &argcnt);
        if (status)
                return status;
        update_input_data(req_info, req->src, inputlen, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
                                          u32 mac_len)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0, outputlen = 0;

        if (enc)
                outputlen = req->cryptlen + req->assoclen + mac_len;
        else
                outputlen = req->cryptlen + req->assoclen - mac_len;

        update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
        req_info->outcnt = argcnt;

        return 0;
}

static inline u32 create_aead_null_input_list(struct aead_request *req,
                                              u32 enc, u32 mac_len)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        u32 inputlen, argcnt = 0;

        if (enc)
                inputlen = req->cryptlen + req->assoclen;
        else
                inputlen = req->cryptlen + req->assoclen - mac_len;

        create_hmac_ctx_hdr(req, &argcnt, enc);
        update_input_data(req_info, req->src, inputlen, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

/*
 * Null-cipher output list: on encryption the engine appends the computed
 * hmac right after the payload in the destination; on decryption the
 * received hmac is saved in the request context and the engine writes
 * the hmac it computes for comparison in the completion callback.
 */
static inline u32 create_aead_null_output_list(struct aead_request *req,
                                               u32 enc, u32 mac_len)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct scatterlist *dst;
        u8 *ptr = NULL;
        int argcnt = 0, status, offset;
        u32 inputlen;

        if (enc)
                inputlen = req->cryptlen + req->assoclen;
        else
                inputlen = req->cryptlen + req->assoclen - mac_len;

        /*
         * If source and destination are different
         * then copy payload to destination
         */
        if (req->src != req->dst) {

                ptr = kmalloc(inputlen, (req_info->areq->flags &
                                         CRYPTO_TFM_REQ_MAY_SLEEP) ?
                                         GFP_KERNEL : GFP_ATOMIC);
                if (!ptr) {
                        status = -ENOMEM;
                        goto error;
                }

                status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
                                           inputlen);
                if (status != inputlen) {
                        status = -EINVAL;
                        goto error_free;
                }
                status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
                                             inputlen);
                if (status != inputlen) {
                        status = -EINVAL;
                        goto error_free;
                }
                kfree(ptr);
        }

        if (enc) {
                /*
                 * In an encryption scenario the hmac needs
                 * to be appended after the payload
                 */
                dst = req->dst;
                offset = inputlen;
                while (offset >= dst->length) {
                        offset -= dst->length;
                        dst = sg_next(dst);
                        if (!dst) {
                                status = -ENOENT;
                                goto error;
                        }
                }

                update_output_data(req_info, dst, offset, mac_len, &argcnt);
        } else {
                /*
                 * In a decryption scenario the hmac calculated for the
                 * received payload needs to be compared with the hmac
                 * received
                 */
                status = sg_copy_buffer(req->src, sg_nents(req->src),
                                        rctx->fctx.hmac.s.hmac_recv, mac_len,
                                        inputlen, true);
                if (status != mac_len) {
                        status = -EINVAL;
                        goto error;
                }

                req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
                req_info->out[argcnt].size = mac_len;
                argcnt++;
        }

        req_info->outcnt = argcnt;
        return 0;

error_free:
        kfree(ptr);
error:
        return status;
}

static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
{
        struct otx_cpt_req_ctx *rctx = aead_request_ctx_dma(req);
        struct otx_cpt_req_info *req_info = &rctx->cpt_req;
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct pci_dev *pdev;
        u32 status, cpu_num;

        /* Clear control words */
        rctx->ctrl_word.flags = 0;
        rctx->fctx.enc.enc_ctrl.flags = 0;

        req_info->callback = otx_cpt_aead_callback;
        req_info->areq = &req->base;
        req_info->req_type = reg_type;
        req_info->is_enc = enc;
        req_info->is_trunc_hmac = false;

        switch (reg_type) {
        case OTX_CPT_AEAD_ENC_DEC_REQ:
                status = create_aead_input_list(req, enc);
                if (status)
                        return status;
                status = create_aead_output_list(req, enc,
                                                 crypto_aead_authsize(tfm));
                if (status)
                        return status;
                break;

        case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
                status = create_aead_null_input_list(req, enc,
                                                     crypto_aead_authsize(tfm));
                if (status)
                        return status;
                status = create_aead_null_output_list(req, enc,
                                                      crypto_aead_authsize(tfm));
                if (status)
                        return status;
                break;

        default:
                return -EINVAL;
        }

        /* Validate that request doesn't exceed maximum CPT supported size */
        if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
            req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
                return -E2BIG;

        status = get_se_device(&pdev, &cpu_num);
        if (status)
                return status;

        req_info->ctrl.s.grp = 0;

        status = otx_cpt_do_request(pdev, req_info, cpu_num);
        /*
         * We perform an asynchronous send and once
         * the request is completed the driver notifies
         * the caller through the registered callback functions
         */
        return status;
}

static int otx_cpt_aead_encrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
}

static int otx_cpt_aead_decrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
}

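/*
 * Entry points for the authenc(hmac(...),ecb(cipher_null)) transforms:
 * only the hmac is computed or verified, the payload itself is passed
 * through unchanged.
 */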
static int otx_cpt_aead_null_encrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
}

static int otx_cpt_aead_null_decrypt(struct aead_request *req)
{
        return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
}

static struct skcipher_alg otx_cpt_skciphers[] = { {
        .base.cra_name = "xts(aes)",
        .base.cra_driver_name = "cpt_xts_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = 2 * AES_MIN_KEY_SIZE,
        .max_keysize = 2 * AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_xts_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "cbc(aes)",
        .base.cra_driver_name = "cpt_cbc_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_cbc_aes_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "ecb(aes)",
        .base.cra_driver_name = "cpt_ecb_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .ivsize = 0,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_ecb_aes_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "cfb(aes)",
        .base.cra_driver_name = "cpt_cfb_aes",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = AES_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .ivsize = AES_BLOCK_SIZE,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = otx_cpt_skcipher_cfb_aes_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "cbc(des3_ede)",
        .base.cra_driver_name = "cpt_cbc_des3_ede",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
        .setkey = otx_cpt_skcipher_cbc_des3_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
}, {
        .base.cra_name = "ecb(des3_ede)",
        .base.cra_driver_name = "cpt_ecb_des3_ede",
        .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
        .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
        .base.cra_alignmask = 7,
        .base.cra_priority = 4001,
        .base.cra_module = THIS_MODULE,

        .init = otx_cpt_enc_dec_init,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = 0,
        .setkey = otx_cpt_skcipher_ecb_des3_setkey,
        .encrypt = otx_cpt_skcipher_encrypt,
        .decrypt = otx_cpt_skcipher_decrypt,
} };

static struct aead_alg otx_cpt_aeads[] = { {
        .base = {
                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha1_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA1_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha256_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA256_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha384),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha384_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA384_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_cbc_aes_sha512_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA512_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha1_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha1_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .ivsize = 0,
        .maxauthsize = SHA1_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha256_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha256_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .ivsize = 0,
        .maxauthsize = SHA256_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha384_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha384_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .ivsize = 0,
        .maxauthsize = SHA384_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
                .cra_driver_name = "cpt_hmac_sha512_ecb_null",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_ecb_null_sha512_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_ecb_null_sha_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_null_encrypt,
        .decrypt = otx_cpt_aead_null_decrypt,
        .ivsize = 0,
        .maxauthsize = SHA512_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "rfc4106(gcm(aes))",
                .cra_driver_name = "cpt_rfc4106_gcm_aes",
                .cra_blocksize = 1,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx) + CRYPTO_DMA_PADDING,
                .cra_priority = 4001,
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
        },
        .init = otx_cpt_aead_gcm_aes_init,
        .exit = otx_cpt_aead_exit,
        .setkey = otx_cpt_aead_gcm_aes_setkey,
        .setauthsize = otx_cpt_aead_set_authsize,
        .encrypt = otx_cpt_aead_encrypt,
        .decrypt = otx_cpt_aead_decrypt,
        .ivsize = AES_GCM_IV_SIZE,
        .maxauthsize = AES_GCM_ICV_SIZE,
} };

static inline int is_any_alg_used(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
                if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
                        return true;
        for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
                if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
                        return true;
        return false;
}

static inline int cpt_register_algs(void)
{
        int i, err = 0;

        if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
                for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
                        otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

                err = crypto_register_skciphers(otx_cpt_skciphers,
                                                ARRAY_SIZE(otx_cpt_skciphers));
                if (err)
                        return err;
        }

        for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
                otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;

        err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
        if (err) {
                crypto_unregister_skciphers(otx_cpt_skciphers,
                                            ARRAY_SIZE(otx_cpt_skciphers));
                return err;
        }

        return 0;
}

static inline void cpt_unregister_algs(void)
{
        crypto_unregister_skciphers(otx_cpt_skciphers,
                                    ARRAY_SIZE(otx_cpt_skciphers));
        crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
}

static int compare_func(const void *lptr, const void *rptr)
{
        struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
        struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

        if (ldesc->dev->devfn < rdesc->dev->devfn)
                return -1;
        if (ldesc->dev->devfn > rdesc->dev->devfn)
                return 1;
        return 0;
}

static void swap_func(void *lptr, void *rptr, int size)
{
        struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
        struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;

        swap(*ldesc, *rdesc);
}

int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
                        enum otx_cptpf_type pf_type,
                        enum otx_cptvf_type engine_type,
                        int num_queues, int num_devices)
{
        int ret = 0;
        int count;

        mutex_lock(&mutex);
        switch (engine_type) {
        case OTX_CPT_SE_TYPES:
                count = atomic_read(&se_devices.count);
                if (count >= CPT_MAX_VF_NUM) {
                        dev_err(&pdev->dev, "No space to add a new device\n");
                        ret = -ENOSPC;
                        goto err;
                }
                se_devices.desc[count].pf_type = pf_type;
                se_devices.desc[count].num_queues = num_queues;
                se_devices.desc[count++].dev = pdev;
                atomic_inc(&se_devices.count);

                if (atomic_read(&se_devices.count) == num_devices &&
                    is_crypto_registered == false) {
                        if (cpt_register_algs()) {
                                dev_err(&pdev->dev,
                                        "Error in registering crypto algorithms\n");
                                ret = -EINVAL;
                                goto err;
                        }
                        try_module_get(mod);
                        is_crypto_registered = true;
                }
                sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
                     compare_func, swap_func);
                break;

        case OTX_CPT_AE_TYPES:
                count = atomic_read(&ae_devices.count);
                if (count >= CPT_MAX_VF_NUM) {
                        dev_err(&pdev->dev, "No space to add a new device\n");
                        ret = -ENOSPC;
                        goto err;
                }
                ae_devices.desc[count].pf_type = pf_type;
                ae_devices.desc[count].num_queues = num_queues;
                ae_devices.desc[count++].dev = pdev;
                atomic_inc(&ae_devices.count);
                sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
                     compare_func, swap_func);
                break;

        default:
                dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
                ret = BAD_OTX_CPTVF_TYPE;
        }
err:
        mutex_unlock(&mutex);
        return ret;
}

void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
                         enum otx_cptvf_type engine_type)
{
        struct cpt_device_table *dev_tbl;
        bool dev_found = false;
        int i, j, count;

        mutex_lock(&mutex);

        dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
        count = atomic_read(&dev_tbl->count);
        for (i = 0; i < count; i++)
                if (pdev == dev_tbl->desc[i].dev) {
                        for (j = i; j < count-1; j++)
                                dev_tbl->desc[j] = dev_tbl->desc[j+1];
                        dev_found = true;
                        break;
                }

        if (!dev_found) {
                dev_err(&pdev->dev, "%s device not found\n", __func__);
                goto exit;
        }

        if (engine_type != OTX_CPT_AE_TYPES) {
                if (atomic_dec_and_test(&se_devices.count) &&
                    !is_any_alg_used()) {
                        cpt_unregister_algs();
                        module_put(mod);
                        is_crypto_registered = false;
                }
        } else
                atomic_dec(&ae_devices.count);
exit:
        mutex_unlock(&mutex);
}