1 /* 2 * This file is part of the Chelsio T6 Crypto driver for Linux. 3 * 4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 * 34 * Written and Maintained by: 35 * Manoj Malviya (manojmalviya@chelsio.com) 36 * Atul Gupta (atul.gupta@chelsio.com) 37 * Jitendra Lulla (jlulla@chelsio.com) 38 * Yeshaswi M R Gowda (yeshaswi@chelsio.com) 39 * Harsh Jain (harsh@chelsio.com) 40 */ 41 42 #define pr_fmt(fmt) "chcr:" fmt 43 44 #include <linux/kernel.h> 45 #include <linux/module.h> 46 #include <linux/crypto.h> 47 #include <linux/skbuff.h> 48 #include <linux/rtnetlink.h> 49 #include <linux/highmem.h> 50 #include <linux/scatterlist.h> 51 52 #include <crypto/aes.h> 53 #include <crypto/algapi.h> 54 #include <crypto/hash.h> 55 #include <crypto/gcm.h> 56 #include <crypto/sha.h> 57 #include <crypto/authenc.h> 58 #include <crypto/ctr.h> 59 #include <crypto/gf128mul.h> 60 #include <crypto/internal/aead.h> 61 #include <crypto/null.h> 62 #include <crypto/internal/skcipher.h> 63 #include <crypto/aead.h> 64 #include <crypto/scatterwalk.h> 65 #include <crypto/internal/hash.h> 66 67 #include "t4fw_api.h" 68 #include "t4_msg.h" 69 #include "chcr_core.h" 70 #include "chcr_algo.h" 71 #include "chcr_crypto.h" 72 73 #define IV AES_BLOCK_SIZE 74 75 static unsigned int sgl_ent_len[] = { 76 0, 0, 16, 24, 40, 48, 64, 72, 88, 77 96, 112, 120, 136, 144, 160, 168, 184, 78 192, 208, 216, 232, 240, 256, 264, 280, 79 288, 304, 312, 328, 336, 352, 360, 376 80 }; 81 82 static unsigned int dsgl_ent_len[] = { 83 0, 32, 32, 48, 48, 64, 64, 80, 80, 84 112, 112, 128, 128, 144, 144, 160, 160, 85 192, 192, 208, 208, 224, 224, 240, 240, 86 272, 272, 288, 288, 304, 304, 320, 320 87 }; 88 89 static u32 round_constant[11] = { 90 0x01000000, 0x02000000, 0x04000000, 0x08000000, 91 0x10000000, 0x20000000, 0x40000000, 0x80000000, 92 0x1B000000, 0x36000000, 0x6C000000 93 }; 94 95 static int chcr_handle_cipher_resp(struct skcipher_request *req, 96 unsigned char *input, int err); 97 98 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) 99 { 100 return ctx->crypto_ctx->aeadctx; 101 } 102 103 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) 104 { 105 return 
ctx->crypto_ctx->ablkctx; 106 } 107 108 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx) 109 { 110 return ctx->crypto_ctx->hmacctx; 111 } 112 113 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx) 114 { 115 return gctx->ctx->gcm; 116 } 117 118 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx) 119 { 120 return gctx->ctx->authenc; 121 } 122 123 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) 124 { 125 return container_of(ctx->dev, struct uld_ctx, dev); 126 } 127 128 static inline int is_ofld_imm(const struct sk_buff *skb) 129 { 130 return (skb->len <= SGE_MAX_WR_LEN); 131 } 132 133 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx) 134 { 135 memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr)); 136 } 137 138 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, 139 unsigned int entlen, 140 unsigned int skip) 141 { 142 int nents = 0; 143 unsigned int less; 144 unsigned int skip_len = 0; 145 146 while (sg && skip) { 147 if (sg_dma_len(sg) <= skip) { 148 skip -= sg_dma_len(sg); 149 skip_len = 0; 150 sg = sg_next(sg); 151 } else { 152 skip_len = skip; 153 skip = 0; 154 } 155 } 156 157 while (sg && reqlen) { 158 less = min(reqlen, sg_dma_len(sg) - skip_len); 159 nents += DIV_ROUND_UP(less, entlen); 160 reqlen -= less; 161 skip_len = 0; 162 sg = sg_next(sg); 163 } 164 return nents; 165 } 166 167 static inline int get_aead_subtype(struct crypto_aead *aead) 168 { 169 struct aead_alg *alg = crypto_aead_alg(aead); 170 struct chcr_alg_template *chcr_crypto_alg = 171 container_of(alg, struct chcr_alg_template, alg.aead); 172 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; 173 } 174 175 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) 176 { 177 u8 temp[SHA512_DIGEST_SIZE]; 178 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 179 int authsize = crypto_aead_authsize(tfm); 180 struct cpl_fw6_pld *fw6_pld; 181 int cmp = 0; 182 183 fw6_pld = (struct cpl_fw6_pld *)input; 184 if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) || 185 (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) { 186 cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize); 187 } else { 188 189 sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp, 190 authsize, req->assoclen + 191 req->cryptlen - authsize); 192 cmp = crypto_memneq(temp, (fw6_pld + 1), authsize); 193 } 194 if (cmp) 195 *err = -EBADMSG; 196 else 197 *err = 0; 198 } 199 200 static int chcr_inc_wrcount(struct chcr_dev *dev) 201 { 202 if (dev->state == CHCR_DETACH) 203 return 1; 204 atomic_inc(&dev->inflight); 205 return 0; 206 } 207 208 static inline void chcr_dec_wrcount(struct chcr_dev *dev) 209 { 210 atomic_dec(&dev->inflight); 211 } 212 213 static inline int chcr_handle_aead_resp(struct aead_request *req, 214 unsigned char *input, 215 int err) 216 { 217 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 218 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 219 struct chcr_dev *dev = a_ctx(tfm)->dev; 220 221 chcr_aead_common_exit(req); 222 if (reqctx->verify == VERIFY_SW) { 223 chcr_verify_tag(req, input, &err); 224 reqctx->verify = VERIFY_HW; 225 } 226 chcr_dec_wrcount(dev); 227 req->base.complete(&req->base, err); 228 229 return err; 230 } 231 232 static void get_aes_decrypt_key(unsigned char *dec_key, 233 const unsigned char *key, 234 unsigned int keylength) 235 { 236 u32 temp; 237 u32 w_ring[MAX_NK]; 238 int i, j, k; 239 u8 nr, nk; 240 241 switch (keylength) { 242 case 
AES_KEYLENGTH_128BIT: 243 nk = KEYLENGTH_4BYTES; 244 nr = NUMBER_OF_ROUNDS_10; 245 break; 246 case AES_KEYLENGTH_192BIT: 247 nk = KEYLENGTH_6BYTES; 248 nr = NUMBER_OF_ROUNDS_12; 249 break; 250 case AES_KEYLENGTH_256BIT: 251 nk = KEYLENGTH_8BYTES; 252 nr = NUMBER_OF_ROUNDS_14; 253 break; 254 default: 255 return; 256 } 257 for (i = 0; i < nk; i++) 258 w_ring[i] = get_unaligned_be32(&key[i * 4]); 259 260 i = 0; 261 temp = w_ring[nk - 1]; 262 while (i + nk < (nr + 1) * 4) { 263 if (!(i % nk)) { 264 /* RotWord(temp) */ 265 temp = (temp << 8) | (temp >> 24); 266 temp = aes_ks_subword(temp); 267 temp ^= round_constant[i / nk]; 268 } else if (nk == 8 && (i % 4 == 0)) { 269 temp = aes_ks_subword(temp); 270 } 271 w_ring[i % nk] ^= temp; 272 temp = w_ring[i % nk]; 273 i++; 274 } 275 i--; 276 for (k = 0, j = i % nk; k < nk; k++) { 277 put_unaligned_be32(w_ring[j], &dec_key[k * 4]); 278 j--; 279 if (j < 0) 280 j += nk; 281 } 282 } 283 284 static struct crypto_shash *chcr_alloc_shash(unsigned int ds) 285 { 286 struct crypto_shash *base_hash = ERR_PTR(-EINVAL); 287 288 switch (ds) { 289 case SHA1_DIGEST_SIZE: 290 base_hash = crypto_alloc_shash("sha1", 0, 0); 291 break; 292 case SHA224_DIGEST_SIZE: 293 base_hash = crypto_alloc_shash("sha224", 0, 0); 294 break; 295 case SHA256_DIGEST_SIZE: 296 base_hash = crypto_alloc_shash("sha256", 0, 0); 297 break; 298 case SHA384_DIGEST_SIZE: 299 base_hash = crypto_alloc_shash("sha384", 0, 0); 300 break; 301 case SHA512_DIGEST_SIZE: 302 base_hash = crypto_alloc_shash("sha512", 0, 0); 303 break; 304 } 305 306 return base_hash; 307 } 308 309 static int chcr_compute_partial_hash(struct shash_desc *desc, 310 char *iopad, char *result_hash, 311 int digest_size) 312 { 313 struct sha1_state sha1_st; 314 struct sha256_state sha256_st; 315 struct sha512_state sha512_st; 316 int error; 317 318 if (digest_size == SHA1_DIGEST_SIZE) { 319 error = crypto_shash_init(desc) ?: 320 crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?: 321 crypto_shash_export(desc, (void *)&sha1_st); 322 memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE); 323 } else if (digest_size == SHA224_DIGEST_SIZE) { 324 error = crypto_shash_init(desc) ?: 325 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: 326 crypto_shash_export(desc, (void *)&sha256_st); 327 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); 328 329 } else if (digest_size == SHA256_DIGEST_SIZE) { 330 error = crypto_shash_init(desc) ?: 331 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: 332 crypto_shash_export(desc, (void *)&sha256_st); 333 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); 334 335 } else if (digest_size == SHA384_DIGEST_SIZE) { 336 error = crypto_shash_init(desc) ?: 337 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: 338 crypto_shash_export(desc, (void *)&sha512_st); 339 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); 340 341 } else if (digest_size == SHA512_DIGEST_SIZE) { 342 error = crypto_shash_init(desc) ?: 343 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: 344 crypto_shash_export(desc, (void *)&sha512_st); 345 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); 346 } else { 347 error = -EINVAL; 348 pr_err("Unknown digest size %d\n", digest_size); 349 } 350 return error; 351 } 352 353 static void chcr_change_order(char *buf, int ds) 354 { 355 int i; 356 357 if (ds == SHA512_DIGEST_SIZE) { 358 for (i = 0; i < (ds / sizeof(u64)); i++) 359 *((__be64 *)buf + i) = 360 cpu_to_be64(*((u64 *)buf + i)); 361 } else { 362 for (i = 0; i < (ds / sizeof(u32)); i++) 363 *((__be32 
*)buf + i) = 364 cpu_to_be32(*((u32 *)buf + i)); 365 } 366 } 367 368 static inline int is_hmac(struct crypto_tfm *tfm) 369 { 370 struct crypto_alg *alg = tfm->__crt_alg; 371 struct chcr_alg_template *chcr_crypto_alg = 372 container_of(__crypto_ahash_alg(alg), struct chcr_alg_template, 373 alg.hash); 374 if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC) 375 return 1; 376 return 0; 377 } 378 379 static inline void dsgl_walk_init(struct dsgl_walk *walk, 380 struct cpl_rx_phys_dsgl *dsgl) 381 { 382 walk->dsgl = dsgl; 383 walk->nents = 0; 384 walk->to = (struct phys_sge_pairs *)(dsgl + 1); 385 } 386 387 static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid, 388 int pci_chan_id) 389 { 390 struct cpl_rx_phys_dsgl *phys_cpl; 391 392 phys_cpl = walk->dsgl; 393 394 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) 395 | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); 396 phys_cpl->pcirlxorder_to_noofsgentr = 397 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) | 398 CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) | 399 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | 400 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | 401 CPL_RX_PHYS_DSGL_DCAID_V(0) | 402 CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents)); 403 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; 404 phys_cpl->rss_hdr_int.qid = htons(qid); 405 phys_cpl->rss_hdr_int.hash_val = 0; 406 phys_cpl->rss_hdr_int.channel = pci_chan_id; 407 } 408 409 static inline void dsgl_walk_add_page(struct dsgl_walk *walk, 410 size_t size, 411 dma_addr_t addr) 412 { 413 int j; 414 415 if (!size) 416 return; 417 j = walk->nents; 418 walk->to->len[j % 8] = htons(size); 419 walk->to->addr[j % 8] = cpu_to_be64(addr); 420 j++; 421 if ((j % 8) == 0) 422 walk->to++; 423 walk->nents = j; 424 } 425 426 static void dsgl_walk_add_sg(struct dsgl_walk *walk, 427 struct scatterlist *sg, 428 unsigned int slen, 429 unsigned int skip) 430 { 431 int skip_len = 0; 432 unsigned int left_size = slen, len = 0; 433 unsigned int j = walk->nents; 434 int offset, ent_len; 435 436 if (!slen) 437 return; 438 while (sg && skip) { 439 if (sg_dma_len(sg) <= skip) { 440 skip -= sg_dma_len(sg); 441 skip_len = 0; 442 sg = sg_next(sg); 443 } else { 444 skip_len = skip; 445 skip = 0; 446 } 447 } 448 449 while (left_size && sg) { 450 len = min_t(u32, left_size, sg_dma_len(sg) - skip_len); 451 offset = 0; 452 while (len) { 453 ent_len = min_t(u32, len, CHCR_DST_SG_SIZE); 454 walk->to->len[j % 8] = htons(ent_len); 455 walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) + 456 offset + skip_len); 457 offset += ent_len; 458 len -= ent_len; 459 j++; 460 if ((j % 8) == 0) 461 walk->to++; 462 } 463 walk->last_sg = sg; 464 walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) - 465 skip_len) + skip_len; 466 left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len); 467 skip_len = 0; 468 sg = sg_next(sg); 469 } 470 walk->nents = j; 471 } 472 473 static inline void ulptx_walk_init(struct ulptx_walk *walk, 474 struct ulptx_sgl *ulp) 475 { 476 walk->sgl = ulp; 477 walk->nents = 0; 478 walk->pair_idx = 0; 479 walk->pair = ulp->sge; 480 walk->last_sg = NULL; 481 walk->last_sg_len = 0; 482 } 483 484 static inline void ulptx_walk_end(struct ulptx_walk *walk) 485 { 486 walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 487 ULPTX_NSGE_V(walk->nents)); 488 } 489 490 491 static inline void ulptx_walk_add_page(struct ulptx_walk *walk, 492 size_t size, 493 dma_addr_t addr) 494 { 495 if (!size) 496 return; 497 498 if (walk->nents == 0) { 499 walk->sgl->len0 = cpu_to_be32(size); 500 walk->sgl->addr0 = cpu_to_be64(addr); 501 } else { 
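		/*
		 * Entries after the first are packed two per SGE pair:
		 * pair_idx selects the slot inside the current pair and the
		 * pair pointer only advances once both slots are filled.
		 */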
502 walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr); 503 walk->pair->len[walk->pair_idx] = cpu_to_be32(size); 504 walk->pair_idx = !walk->pair_idx; 505 if (!walk->pair_idx) 506 walk->pair++; 507 } 508 walk->nents++; 509 } 510 511 static void ulptx_walk_add_sg(struct ulptx_walk *walk, 512 struct scatterlist *sg, 513 unsigned int len, 514 unsigned int skip) 515 { 516 int small; 517 int skip_len = 0; 518 unsigned int sgmin; 519 520 if (!len) 521 return; 522 while (sg && skip) { 523 if (sg_dma_len(sg) <= skip) { 524 skip -= sg_dma_len(sg); 525 skip_len = 0; 526 sg = sg_next(sg); 527 } else { 528 skip_len = skip; 529 skip = 0; 530 } 531 } 532 WARN(!sg, "SG should not be null here\n"); 533 if (sg && (walk->nents == 0)) { 534 small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len); 535 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); 536 walk->sgl->len0 = cpu_to_be32(sgmin); 537 walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len); 538 walk->nents++; 539 len -= sgmin; 540 walk->last_sg = sg; 541 walk->last_sg_len = sgmin + skip_len; 542 skip_len += sgmin; 543 if (sg_dma_len(sg) == skip_len) { 544 sg = sg_next(sg); 545 skip_len = 0; 546 } 547 } 548 549 while (sg && len) { 550 small = min(sg_dma_len(sg) - skip_len, len); 551 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); 552 walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin); 553 walk->pair->addr[walk->pair_idx] = 554 cpu_to_be64(sg_dma_address(sg) + skip_len); 555 walk->pair_idx = !walk->pair_idx; 556 walk->nents++; 557 if (!walk->pair_idx) 558 walk->pair++; 559 len -= sgmin; 560 skip_len += sgmin; 561 walk->last_sg = sg; 562 walk->last_sg_len = skip_len; 563 if (sg_dma_len(sg) == skip_len) { 564 sg = sg_next(sg); 565 skip_len = 0; 566 } 567 } 568 } 569 570 static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm) 571 { 572 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 573 struct chcr_alg_template *chcr_crypto_alg = 574 container_of(alg, struct chcr_alg_template, alg.skcipher); 575 576 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; 577 } 578 579 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) 580 { 581 struct adapter *adap = netdev2adap(dev); 582 struct sge_uld_txq_info *txq_info = 583 adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; 584 struct sge_uld_txq *txq; 585 int ret = 0; 586 587 local_bh_disable(); 588 txq = &txq_info->uldtxq[idx]; 589 spin_lock(&txq->sendq.lock); 590 if (txq->full) 591 ret = -1; 592 spin_unlock(&txq->sendq.lock); 593 local_bh_enable(); 594 return ret; 595 } 596 597 static int generate_copy_rrkey(struct ablk_ctx *ablkctx, 598 struct _key_ctx *key_ctx) 599 { 600 if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { 601 memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len); 602 } else { 603 memcpy(key_ctx->key, 604 ablkctx->key + (ablkctx->enckey_len >> 1), 605 ablkctx->enckey_len >> 1); 606 memcpy(key_ctx->key + (ablkctx->enckey_len >> 1), 607 ablkctx->rrkey, ablkctx->enckey_len >> 1); 608 } 609 return 0; 610 } 611 612 static int chcr_hash_ent_in_wr(struct scatterlist *src, 613 unsigned int minsg, 614 unsigned int space, 615 unsigned int srcskip) 616 { 617 int srclen = 0; 618 int srcsg = minsg; 619 int soffset = 0, sless; 620 621 if (sg_dma_len(src) == srcskip) { 622 src = sg_next(src); 623 srcskip = 0; 624 } 625 while (src && space > (sgl_ent_len[srcsg + 1])) { 626 sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip, 627 CHCR_SRC_SG_SIZE); 628 srclen += sless; 629 soffset += sless; 630 srcsg++; 631 if (sg_dma_len(src) == 
(soffset + srcskip)) { 632 src = sg_next(src); 633 soffset = 0; 634 srcskip = 0; 635 } 636 } 637 return srclen; 638 } 639 640 static int chcr_sg_ent_in_wr(struct scatterlist *src, 641 struct scatterlist *dst, 642 unsigned int minsg, 643 unsigned int space, 644 unsigned int srcskip, 645 unsigned int dstskip) 646 { 647 int srclen = 0, dstlen = 0; 648 int srcsg = minsg, dstsg = minsg; 649 int offset = 0, soffset = 0, less, sless = 0; 650 651 if (sg_dma_len(src) == srcskip) { 652 src = sg_next(src); 653 srcskip = 0; 654 } 655 if (sg_dma_len(dst) == dstskip) { 656 dst = sg_next(dst); 657 dstskip = 0; 658 } 659 660 while (src && dst && 661 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { 662 sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset, 663 CHCR_SRC_SG_SIZE); 664 srclen += sless; 665 srcsg++; 666 offset = 0; 667 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && 668 space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { 669 if (srclen <= dstlen) 670 break; 671 less = min_t(unsigned int, sg_dma_len(dst) - offset - 672 dstskip, CHCR_DST_SG_SIZE); 673 dstlen += less; 674 offset += less; 675 if ((offset + dstskip) == sg_dma_len(dst)) { 676 dst = sg_next(dst); 677 offset = 0; 678 } 679 dstsg++; 680 dstskip = 0; 681 } 682 soffset += sless; 683 if ((soffset + srcskip) == sg_dma_len(src)) { 684 src = sg_next(src); 685 srcskip = 0; 686 soffset = 0; 687 } 688 689 } 690 return min(srclen, dstlen); 691 } 692 693 static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher, 694 u32 flags, 695 struct scatterlist *src, 696 struct scatterlist *dst, 697 unsigned int nbytes, 698 u8 *iv, 699 unsigned short op_type) 700 { 701 int err; 702 703 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher); 704 705 skcipher_request_set_sync_tfm(subreq, cipher); 706 skcipher_request_set_callback(subreq, flags, NULL, NULL); 707 skcipher_request_set_crypt(subreq, src, dst, 708 nbytes, iv); 709 710 err = op_type ? 
crypto_skcipher_decrypt(subreq) : 711 crypto_skcipher_encrypt(subreq); 712 skcipher_request_zero(subreq); 713 714 return err; 715 716 } 717 718 static inline int get_qidxs(struct crypto_async_request *req, 719 unsigned int *txqidx, unsigned int *rxqidx) 720 { 721 struct crypto_tfm *tfm = req->tfm; 722 int ret = 0; 723 724 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 725 case CRYPTO_ALG_TYPE_AEAD: 726 { 727 struct aead_request *aead_req = 728 container_of(req, struct aead_request, base); 729 struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req); 730 *txqidx = reqctx->txqidx; 731 *rxqidx = reqctx->rxqidx; 732 break; 733 } 734 case CRYPTO_ALG_TYPE_SKCIPHER: 735 { 736 struct skcipher_request *sk_req = 737 container_of(req, struct skcipher_request, base); 738 struct chcr_skcipher_req_ctx *reqctx = 739 skcipher_request_ctx(sk_req); 740 *txqidx = reqctx->txqidx; 741 *rxqidx = reqctx->rxqidx; 742 break; 743 } 744 case CRYPTO_ALG_TYPE_AHASH: 745 { 746 struct ahash_request *ahash_req = 747 container_of(req, struct ahash_request, base); 748 struct chcr_ahash_req_ctx *reqctx = 749 ahash_request_ctx(ahash_req); 750 *txqidx = reqctx->txqidx; 751 *rxqidx = reqctx->rxqidx; 752 break; 753 } 754 default: 755 ret = -EINVAL; 756 /* should never get here */ 757 BUG(); 758 break; 759 } 760 return ret; 761 } 762 763 static inline void create_wreq(struct chcr_context *ctx, 764 struct chcr_wr *chcr_req, 765 struct crypto_async_request *req, 766 unsigned int imm, 767 int hash_sz, 768 unsigned int len16, 769 unsigned int sc_len, 770 unsigned int lcb) 771 { 772 struct uld_ctx *u_ctx = ULD_CTX(ctx); 773 unsigned int tx_channel_id, rx_channel_id; 774 unsigned int txqidx = 0, rxqidx = 0; 775 unsigned int qid, fid; 776 777 get_qidxs(req, &txqidx, &rxqidx); 778 qid = u_ctx->lldi.rxq_ids[rxqidx]; 779 fid = u_ctx->lldi.rxq_ids[0]; 780 tx_channel_id = txqidx / ctx->txq_perchan; 781 rx_channel_id = rxqidx / ctx->rxq_perchan; 782 783 784 chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE; 785 chcr_req->wreq.pld_size_hash_size = 786 htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); 787 chcr_req->wreq.len16_pkd = 788 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16))); 789 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); 790 chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid, 791 !!lcb, txqidx); 792 793 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid); 794 chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) - 795 ((sizeof(chcr_req->wreq)) >> 4))); 796 chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm); 797 chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + 798 sizeof(chcr_req->key_ctx) + sc_len); 799 } 800 801 /** 802 * create_cipher_wr - form the WR for cipher operations 803 * @req: cipher req. 804 * @ctx: crypto driver context of the request. 805 * @qid: ingress qid where response of this WR should be received. 
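 * (these values reach the function packed in struct cipher_wr_param and
 *  the request context rather than as separate arguments)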
806 * @op_type: encryption or decryption 807 */ 808 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) 809 { 810 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req); 811 struct chcr_context *ctx = c_ctx(tfm); 812 struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 813 struct sk_buff *skb = NULL; 814 struct chcr_wr *chcr_req; 815 struct cpl_rx_phys_dsgl *phys_cpl; 816 struct ulptx_sgl *ulptx; 817 struct chcr_skcipher_req_ctx *reqctx = 818 skcipher_request_ctx(wrparam->req); 819 unsigned int temp = 0, transhdr_len, dst_size; 820 int error; 821 int nents; 822 unsigned int kctx_len; 823 gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 824 GFP_KERNEL : GFP_ATOMIC; 825 struct adapter *adap = padap(ctx->dev); 826 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; 827 828 nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, 829 reqctx->dst_ofst); 830 dst_size = get_space_for_phys_dsgl(nents); 831 kctx_len = roundup(ablkctx->enckey_len, 16); 832 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 833 nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes, 834 CHCR_SRC_SG_SIZE, reqctx->src_ofst); 835 temp = reqctx->imm ? roundup(wrparam->bytes, 16) : 836 (sgl_len(nents) * 8); 837 transhdr_len += temp; 838 transhdr_len = roundup(transhdr_len, 16); 839 skb = alloc_skb(SGE_MAX_WR_LEN, flags); 840 if (!skb) { 841 error = -ENOMEM; 842 goto err; 843 } 844 chcr_req = __skb_put_zero(skb, transhdr_len); 845 chcr_req->sec_cpl.op_ivinsrtofst = 846 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); 847 848 chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes); 849 chcr_req->sec_cpl.aadstart_cipherstop_hi = 850 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0); 851 852 chcr_req->sec_cpl.cipherstop_lo_authinsert = 853 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); 854 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0, 855 ablkctx->ciph_mode, 856 0, 0, IV >> 1); 857 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, 858 0, 1, dst_size); 859 860 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; 861 if ((reqctx->op == CHCR_DECRYPT_OP) && 862 (!(get_cryptoalg_subtype(tfm) == 863 CRYPTO_ALG_SUB_TYPE_CTR)) && 864 (!(get_cryptoalg_subtype(tfm) == 865 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) { 866 generate_copy_rrkey(ablkctx, &chcr_req->key_ctx); 867 } else { 868 if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) || 869 (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) { 870 memcpy(chcr_req->key_ctx.key, ablkctx->key, 871 ablkctx->enckey_len); 872 } else { 873 memcpy(chcr_req->key_ctx.key, ablkctx->key + 874 (ablkctx->enckey_len >> 1), 875 ablkctx->enckey_len >> 1); 876 memcpy(chcr_req->key_ctx.key + 877 (ablkctx->enckey_len >> 1), 878 ablkctx->key, 879 ablkctx->enckey_len >> 1); 880 } 881 } 882 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 883 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); 884 chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam); 885 chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid); 886 887 atomic_inc(&adap->chcr_stats.cipher_rqst); 888 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV 889 + (reqctx->imm ? 
(wrparam->bytes) : 0); 890 create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0, 891 transhdr_len, temp, 892 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); 893 reqctx->skb = skb; 894 895 if (reqctx->op && (ablkctx->ciph_mode == 896 CHCR_SCMD_CIPHER_MODE_AES_CBC)) 897 sg_pcopy_to_buffer(wrparam->req->src, 898 sg_nents(wrparam->req->src), wrparam->req->iv, 16, 899 reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE); 900 901 return skb; 902 err: 903 return ERR_PTR(error); 904 } 905 906 static inline int chcr_keyctx_ck_size(unsigned int keylen) 907 { 908 int ck_size = 0; 909 910 if (keylen == AES_KEYSIZE_128) 911 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 912 else if (keylen == AES_KEYSIZE_192) 913 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 914 else if (keylen == AES_KEYSIZE_256) 915 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 916 else 917 ck_size = 0; 918 919 return ck_size; 920 } 921 static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher, 922 const u8 *key, 923 unsigned int keylen) 924 { 925 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 926 927 crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher, 928 CRYPTO_TFM_REQ_MASK); 929 crypto_sync_skcipher_set_flags(ablkctx->sw_cipher, 930 cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); 931 return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen); 932 } 933 934 static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, 935 const u8 *key, 936 unsigned int keylen) 937 { 938 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 939 unsigned int ck_size, context_size; 940 u16 alignment = 0; 941 int err; 942 943 err = chcr_cipher_fallback_setkey(cipher, key, keylen); 944 if (err) 945 goto badkey_err; 946 947 ck_size = chcr_keyctx_ck_size(keylen); 948 alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0; 949 memcpy(ablkctx->key, key, keylen); 950 ablkctx->enckey_len = keylen; 951 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3); 952 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + 953 keylen + alignment) >> 4; 954 955 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 956 0, 0, context_size); 957 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; 958 return 0; 959 badkey_err: 960 ablkctx->enckey_len = 0; 961 962 return err; 963 } 964 965 static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher, 966 const u8 *key, 967 unsigned int keylen) 968 { 969 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 970 unsigned int ck_size, context_size; 971 u16 alignment = 0; 972 int err; 973 974 err = chcr_cipher_fallback_setkey(cipher, key, keylen); 975 if (err) 976 goto badkey_err; 977 ck_size = chcr_keyctx_ck_size(keylen); 978 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 
8 : 0; 979 memcpy(ablkctx->key, key, keylen); 980 ablkctx->enckey_len = keylen; 981 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + 982 keylen + alignment) >> 4; 983 984 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 985 0, 0, context_size); 986 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; 987 988 return 0; 989 badkey_err: 990 ablkctx->enckey_len = 0; 991 992 return err; 993 } 994 995 static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher, 996 const u8 *key, 997 unsigned int keylen) 998 { 999 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 1000 unsigned int ck_size, context_size; 1001 u16 alignment = 0; 1002 int err; 1003 1004 if (keylen < CTR_RFC3686_NONCE_SIZE) 1005 return -EINVAL; 1006 memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), 1007 CTR_RFC3686_NONCE_SIZE); 1008 1009 keylen -= CTR_RFC3686_NONCE_SIZE; 1010 err = chcr_cipher_fallback_setkey(cipher, key, keylen); 1011 if (err) 1012 goto badkey_err; 1013 1014 ck_size = chcr_keyctx_ck_size(keylen); 1015 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0; 1016 memcpy(ablkctx->key, key, keylen); 1017 ablkctx->enckey_len = keylen; 1018 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + 1019 keylen + alignment) >> 4; 1020 1021 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 1022 0, 0, context_size); 1023 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; 1024 1025 return 0; 1026 badkey_err: 1027 ablkctx->enckey_len = 0; 1028 1029 return err; 1030 } 1031 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add) 1032 { 1033 unsigned int size = AES_BLOCK_SIZE; 1034 __be32 *b = (__be32 *)(dstiv + size); 1035 u32 c, prev; 1036 1037 memcpy(dstiv, srciv, AES_BLOCK_SIZE); 1038 for (; size >= 4; size -= 4) { 1039 prev = be32_to_cpu(*--b); 1040 c = prev + add; 1041 *b = cpu_to_be32(c); 1042 if (prev < c) 1043 break; 1044 add = 1; 1045 } 1046 1047 } 1048 1049 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes) 1050 { 1051 __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE); 1052 u64 c; 1053 u32 temp = be32_to_cpu(*--b); 1054 1055 temp = ~temp; 1056 c = (u64)temp + 1; // No of block can processed without overflow 1057 if ((bytes / AES_BLOCK_SIZE) >= c) 1058 bytes = c * AES_BLOCK_SIZE; 1059 return bytes; 1060 } 1061 1062 static int chcr_update_tweak(struct skcipher_request *req, u8 *iv, 1063 u32 isfinal) 1064 { 1065 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 1066 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); 1067 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); 1068 struct crypto_aes_ctx aes; 1069 int ret, i; 1070 u8 *key; 1071 unsigned int keylen; 1072 int round = reqctx->last_req_len / AES_BLOCK_SIZE; 1073 int round8 = round / 8; 1074 1075 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); 1076 1077 keylen = ablkctx->enckey_len / 2; 1078 key = ablkctx->key + keylen; 1079 /* For a 192 bit key remove the padded zeroes which was 1080 * added in chcr_xts_setkey 1081 */ 1082 if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr)) 1083 == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) 1084 ret = aes_expandkey(&aes, key, keylen - 8); 1085 else 1086 ret = aes_expandkey(&aes, key, keylen); 1087 if (ret) 1088 return ret; 1089 aes_encrypt(&aes, iv, iv); 1090 for (i = 0; i < round8; i++) 1091 gf128mul_x8_ble((le128 *)iv, (le128 *)iv); 1092 1093 for (i = 0; i < (round % 8); i++) 1094 gf128mul_x_ble((le128 *)iv, (le128 *)iv); 1095 1096 if (!isfinal) 1097 aes_decrypt(&aes, iv, iv); 1098 1099 memzero_explicit(&aes, sizeof(aes)); 1100 return 0; 1101 } 1102 1103 static int 
chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending the last WR */
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}

/* A separate function is needed for the final IV because in RFC3686 the
 * initial counter starts at 1 and the IV buffer is only 8 bytes, which
 * stays constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct chcr_context *ctx = c_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);
	struct cipher_wr_param wrparam;
	struct sk_buff *skb;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
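		/*
		 * No room to make further progress in hardware: redo the
		 * whole request through the software fallback cipher,
		 * starting again from the original IV.
		 */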
memcpy(req->iv, reqctx->init_iv, IV); 1208 atomic_inc(&adap->chcr_stats.fallback); 1209 err = chcr_cipher_fallback(ablkctx->sw_cipher, 1210 req->base.flags, 1211 req->src, 1212 req->dst, 1213 req->cryptlen, 1214 req->iv, 1215 reqctx->op); 1216 goto complete; 1217 } 1218 1219 if (get_cryptoalg_subtype(tfm) == 1220 CRYPTO_ALG_SUB_TYPE_CTR) 1221 bytes = adjust_ctr_overflow(reqctx->iv, bytes); 1222 wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx]; 1223 wrparam.req = req; 1224 wrparam.bytes = bytes; 1225 skb = create_cipher_wr(&wrparam); 1226 if (IS_ERR(skb)) { 1227 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); 1228 err = PTR_ERR(skb); 1229 goto unmap; 1230 } 1231 skb->dev = u_ctx->lldi.ports[0]; 1232 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); 1233 chcr_send_wr(skb); 1234 reqctx->last_req_len = bytes; 1235 reqctx->processed += bytes; 1236 if (get_cryptoalg_subtype(tfm) == 1237 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags == 1238 CRYPTO_TFM_REQ_MAY_SLEEP ) { 1239 complete(&ctx->cbc_aes_aio_done); 1240 } 1241 return 0; 1242 unmap: 1243 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); 1244 complete: 1245 if (get_cryptoalg_subtype(tfm) == 1246 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags == 1247 CRYPTO_TFM_REQ_MAY_SLEEP ) { 1248 complete(&ctx->cbc_aes_aio_done); 1249 } 1250 chcr_dec_wrcount(dev); 1251 req->base.complete(&req->base, err); 1252 return err; 1253 } 1254 1255 static int process_cipher(struct skcipher_request *req, 1256 unsigned short qid, 1257 struct sk_buff **skb, 1258 unsigned short op_type) 1259 { 1260 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); 1261 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 1262 unsigned int ivsize = crypto_skcipher_ivsize(tfm); 1263 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); 1264 struct adapter *adap = padap(c_ctx(tfm)->dev); 1265 struct cipher_wr_param wrparam; 1266 int bytes, err = -EINVAL; 1267 int subtype; 1268 1269 reqctx->processed = 0; 1270 reqctx->partial_req = 0; 1271 if (!req->iv) 1272 goto error; 1273 subtype = get_cryptoalg_subtype(tfm); 1274 if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || 1275 (req->cryptlen == 0) || 1276 (req->cryptlen % crypto_skcipher_blocksize(tfm))) { 1277 if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS) 1278 goto fallback; 1279 else if (req->cryptlen % crypto_skcipher_blocksize(tfm) && 1280 subtype == CRYPTO_ALG_SUB_TYPE_XTS) 1281 goto fallback; 1282 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n", 1283 ablkctx->enckey_len, req->cryptlen, ivsize); 1284 goto error; 1285 } 1286 1287 err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); 1288 if (err) 1289 goto error; 1290 if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) + 1291 AES_MIN_KEY_SIZE + 1292 sizeof(struct cpl_rx_phys_dsgl) + 1293 /*Min dsgl size*/ 1294 32))) { 1295 /* Can be sent as Imm*/ 1296 unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len; 1297 1298 dnents = sg_nents_xlen(req->dst, req->cryptlen, 1299 CHCR_DST_SG_SIZE, 0); 1300 phys_dsgl = get_space_for_phys_dsgl(dnents); 1301 kctx_len = roundup(ablkctx->enckey_len, 16); 1302 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); 1303 reqctx->imm = (transhdr_len + IV + req->cryptlen) <= 1304 SGE_MAX_WR_LEN; 1305 bytes = IV + req->cryptlen; 1306 1307 } else { 1308 reqctx->imm = 0; 1309 } 1310 1311 if (!reqctx->imm) { 1312 bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0, 1313 CIP_SPACE_LEFT(ablkctx->enckey_len), 1314 0, 0); 1315 if ((bytes + 
reqctx->processed) >= req->cryptlen) 1316 bytes = req->cryptlen - reqctx->processed; 1317 else 1318 bytes = rounddown(bytes, 16); 1319 } else { 1320 bytes = req->cryptlen; 1321 } 1322 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) { 1323 bytes = adjust_ctr_overflow(req->iv, bytes); 1324 } 1325 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) { 1326 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE); 1327 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv, 1328 CTR_RFC3686_IV_SIZE); 1329 1330 /* initialize counter portion of counter block */ 1331 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + 1332 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); 1333 memcpy(reqctx->init_iv, reqctx->iv, IV); 1334 1335 } else { 1336 1337 memcpy(reqctx->iv, req->iv, IV); 1338 memcpy(reqctx->init_iv, req->iv, IV); 1339 } 1340 if (unlikely(bytes == 0)) { 1341 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, 1342 req); 1343 fallback: atomic_inc(&adap->chcr_stats.fallback); 1344 err = chcr_cipher_fallback(ablkctx->sw_cipher, 1345 req->base.flags, 1346 req->src, 1347 req->dst, 1348 req->cryptlen, 1349 subtype == 1350 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ? 1351 reqctx->iv : req->iv, 1352 op_type); 1353 goto error; 1354 } 1355 reqctx->op = op_type; 1356 reqctx->srcsg = req->src; 1357 reqctx->dstsg = req->dst; 1358 reqctx->src_ofst = 0; 1359 reqctx->dst_ofst = 0; 1360 wrparam.qid = qid; 1361 wrparam.req = req; 1362 wrparam.bytes = bytes; 1363 *skb = create_cipher_wr(&wrparam); 1364 if (IS_ERR(*skb)) { 1365 err = PTR_ERR(*skb); 1366 goto unmap; 1367 } 1368 reqctx->processed = bytes; 1369 reqctx->last_req_len = bytes; 1370 reqctx->partial_req = !!(req->cryptlen - reqctx->processed); 1371 1372 return 0; 1373 unmap: 1374 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); 1375 error: 1376 return err; 1377 } 1378 1379 static int chcr_aes_encrypt(struct skcipher_request *req) 1380 { 1381 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 1382 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); 1383 struct chcr_dev *dev = c_ctx(tfm)->dev; 1384 struct sk_buff *skb = NULL; 1385 int err; 1386 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); 1387 struct chcr_context *ctx = c_ctx(tfm); 1388 unsigned int cpu; 1389 1390 cpu = get_cpu(); 1391 reqctx->txqidx = cpu % ctx->ntxq; 1392 reqctx->rxqidx = cpu % ctx->nrxq; 1393 put_cpu(); 1394 1395 err = chcr_inc_wrcount(dev); 1396 if (err) 1397 return -ENXIO; 1398 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1399 reqctx->txqidx) && 1400 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { 1401 err = -ENOSPC; 1402 goto error; 1403 } 1404 1405 err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], 1406 &skb, CHCR_ENCRYPT_OP); 1407 if (err || !skb) 1408 return err; 1409 skb->dev = u_ctx->lldi.ports[0]; 1410 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); 1411 chcr_send_wr(skb); 1412 if (get_cryptoalg_subtype(tfm) == 1413 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags == 1414 CRYPTO_TFM_REQ_MAY_SLEEP ) { 1415 reqctx->partial_req = 1; 1416 wait_for_completion(&ctx->cbc_aes_aio_done); 1417 } 1418 return -EINPROGRESS; 1419 error: 1420 chcr_dec_wrcount(dev); 1421 return err; 1422 } 1423 1424 static int chcr_aes_decrypt(struct skcipher_request *req) 1425 { 1426 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 1427 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); 1428 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); 1429 struct chcr_dev *dev = c_ctx(tfm)->dev; 1430 struct sk_buff *skb = NULL; 1431 int err; 1432 struct 
chcr_context *ctx = c_ctx(tfm); 1433 unsigned int cpu; 1434 1435 cpu = get_cpu(); 1436 reqctx->txqidx = cpu % ctx->ntxq; 1437 reqctx->rxqidx = cpu % ctx->nrxq; 1438 put_cpu(); 1439 1440 err = chcr_inc_wrcount(dev); 1441 if (err) 1442 return -ENXIO; 1443 1444 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1445 reqctx->txqidx) && 1446 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) 1447 return -ENOSPC; 1448 err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], 1449 &skb, CHCR_DECRYPT_OP); 1450 if (err || !skb) 1451 return err; 1452 skb->dev = u_ctx->lldi.ports[0]; 1453 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); 1454 chcr_send_wr(skb); 1455 return -EINPROGRESS; 1456 } 1457 static int chcr_device_init(struct chcr_context *ctx) 1458 { 1459 struct uld_ctx *u_ctx = NULL; 1460 int txq_perchan, ntxq; 1461 int err = 0, rxq_perchan; 1462 1463 if (!ctx->dev) { 1464 u_ctx = assign_chcr_device(); 1465 if (!u_ctx) { 1466 err = -ENXIO; 1467 pr_err("chcr device assignment fails\n"); 1468 goto out; 1469 } 1470 ctx->dev = &u_ctx->dev; 1471 ntxq = u_ctx->lldi.ntxq; 1472 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; 1473 txq_perchan = ntxq / u_ctx->lldi.nchan; 1474 ctx->ntxq = ntxq; 1475 ctx->nrxq = u_ctx->lldi.nrxq; 1476 ctx->rxq_perchan = rxq_perchan; 1477 ctx->txq_perchan = txq_perchan; 1478 } 1479 out: 1480 return err; 1481 } 1482 1483 static int chcr_init_tfm(struct crypto_skcipher *tfm) 1484 { 1485 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 1486 struct chcr_context *ctx = crypto_skcipher_ctx(tfm); 1487 struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 1488 1489 ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0, 1490 CRYPTO_ALG_NEED_FALLBACK); 1491 if (IS_ERR(ablkctx->sw_cipher)) { 1492 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); 1493 return PTR_ERR(ablkctx->sw_cipher); 1494 } 1495 init_completion(&ctx->cbc_aes_aio_done); 1496 crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx)); 1497 1498 return chcr_device_init(ctx); 1499 } 1500 1501 static int chcr_rfc3686_init(struct crypto_skcipher *tfm) 1502 { 1503 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 1504 struct chcr_context *ctx = crypto_skcipher_ctx(tfm); 1505 struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 1506 1507 /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes)) 1508 * cannot be used as fallback in chcr_handle_cipher_response 1509 */ 1510 ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0, 1511 CRYPTO_ALG_NEED_FALLBACK); 1512 if (IS_ERR(ablkctx->sw_cipher)) { 1513 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); 1514 return PTR_ERR(ablkctx->sw_cipher); 1515 } 1516 crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx)); 1517 return chcr_device_init(ctx); 1518 } 1519 1520 1521 static void chcr_exit_tfm(struct crypto_skcipher *tfm) 1522 { 1523 struct chcr_context *ctx = crypto_skcipher_ctx(tfm); 1524 struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 1525 1526 crypto_free_sync_skcipher(ablkctx->sw_cipher); 1527 } 1528 1529 static int get_alg_config(struct algo_param *params, 1530 unsigned int auth_size) 1531 { 1532 switch (auth_size) { 1533 case SHA1_DIGEST_SIZE: 1534 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; 1535 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1; 1536 params->result_size = SHA1_DIGEST_SIZE; 1537 break; 1538 case SHA224_DIGEST_SIZE: 1539 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 1540 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224; 1541 params->result_size = SHA256_DIGEST_SIZE; 
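		/* SHA-224 is truncated SHA-256, so the partial-hash state
		 * carried between WRs is the full SHA-256 digest size.
		 */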
1542 break; 1543 case SHA256_DIGEST_SIZE: 1544 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 1545 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256; 1546 params->result_size = SHA256_DIGEST_SIZE; 1547 break; 1548 case SHA384_DIGEST_SIZE: 1549 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 1550 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384; 1551 params->result_size = SHA512_DIGEST_SIZE; 1552 break; 1553 case SHA512_DIGEST_SIZE: 1554 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; 1555 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512; 1556 params->result_size = SHA512_DIGEST_SIZE; 1557 break; 1558 default: 1559 pr_err("chcr : ERROR, unsupported digest size\n"); 1560 return -EINVAL; 1561 } 1562 return 0; 1563 } 1564 1565 static inline void chcr_free_shash(struct crypto_shash *base_hash) 1566 { 1567 crypto_free_shash(base_hash); 1568 } 1569 1570 /** 1571 * create_hash_wr - Create hash work request 1572 * @req - Cipher req base 1573 */ 1574 static struct sk_buff *create_hash_wr(struct ahash_request *req, 1575 struct hash_wr_param *param) 1576 { 1577 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 1578 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1579 struct chcr_context *ctx = h_ctx(tfm); 1580 struct hmac_ctx *hmacctx = HMAC_CTX(ctx); 1581 struct sk_buff *skb = NULL; 1582 struct uld_ctx *u_ctx = ULD_CTX(ctx); 1583 struct chcr_wr *chcr_req; 1584 struct ulptx_sgl *ulptx; 1585 unsigned int nents = 0, transhdr_len; 1586 unsigned int temp = 0; 1587 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 1588 GFP_ATOMIC; 1589 struct adapter *adap = padap(h_ctx(tfm)->dev); 1590 int error = 0; 1591 unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan; 1592 1593 transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len); 1594 req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len + 1595 param->sg_len) <= SGE_MAX_WR_LEN; 1596 nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len, 1597 CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst); 1598 nents += param->bfr_len ? 1 : 0; 1599 transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len + 1600 param->sg_len, 16) : (sgl_len(nents) * 8); 1601 transhdr_len = roundup(transhdr_len, 16); 1602 1603 skb = alloc_skb(transhdr_len, flags); 1604 if (!skb) 1605 return ERR_PTR(-ENOMEM); 1606 chcr_req = __skb_put_zero(skb, transhdr_len); 1607 1608 chcr_req->sec_cpl.op_ivinsrtofst = 1609 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0); 1610 1611 chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); 1612 1613 chcr_req->sec_cpl.aadstart_cipherstop_hi = 1614 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0); 1615 chcr_req->sec_cpl.cipherstop_lo_authinsert = 1616 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0); 1617 chcr_req->sec_cpl.seqno_numivs = 1618 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode, 1619 param->opad_needed, 0); 1620 1621 chcr_req->sec_cpl.ivgen_hdrlen = 1622 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0); 1623 1624 memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash, 1625 param->alg_prm.result_size); 1626 1627 if (param->opad_needed) 1628 memcpy(chcr_req->key_ctx.key + 1629 ((param->alg_prm.result_size <= 32) ? 
32 :
				CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
						     param->alg_prm.mk_size, 0,
						     param->opad_needed,
						     ((param->kctx_len +
						      sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the maximum WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}

static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* Detach state for CHCR means lldi or padap is freed.
Increasing 1703 * inflight count for dev guarantees that lldi and padap is valid 1704 */ 1705 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1706 req_ctx->txqidx) && 1707 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { 1708 error = -ENOSPC; 1709 goto err; 1710 } 1711 1712 chcr_init_hctx_per_wr(req_ctx); 1713 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); 1714 if (error) { 1715 error = -ENOMEM; 1716 goto err; 1717 } 1718 get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); 1719 params.kctx_len = roundup(params.alg_prm.result_size, 16); 1720 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, 1721 HASH_SPACE_LEFT(params.kctx_len), 0); 1722 if (params.sg_len > req->nbytes) 1723 params.sg_len = req->nbytes; 1724 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) - 1725 req_ctx->reqlen; 1726 params.opad_needed = 0; 1727 params.more = 1; 1728 params.last = 0; 1729 params.bfr_len = req_ctx->reqlen; 1730 params.scmd1 = 0; 1731 req_ctx->hctx_wr.srcsg = req->src; 1732 1733 params.hash_size = params.alg_prm.result_size; 1734 req_ctx->data_len += params.sg_len + params.bfr_len; 1735 skb = create_hash_wr(req, ¶ms); 1736 if (IS_ERR(skb)) { 1737 error = PTR_ERR(skb); 1738 goto unmap; 1739 } 1740 1741 req_ctx->hctx_wr.processed += params.sg_len; 1742 if (remainder) { 1743 /* Swap buffers */ 1744 swap(req_ctx->reqbfr, req_ctx->skbfr); 1745 sg_pcopy_to_buffer(req->src, sg_nents(req->src), 1746 req_ctx->reqbfr, remainder, req->nbytes - 1747 remainder); 1748 } 1749 req_ctx->reqlen = remainder; 1750 skb->dev = u_ctx->lldi.ports[0]; 1751 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); 1752 chcr_send_wr(skb); 1753 return -EINPROGRESS; 1754 unmap: 1755 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); 1756 err: 1757 chcr_dec_wrcount(dev); 1758 return error; 1759 } 1760 1761 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) 1762 { 1763 memset(bfr_ptr, 0, bs); 1764 *bfr_ptr = 0x80; 1765 if (bs == 64) 1766 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3); 1767 else 1768 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3); 1769 } 1770 1771 static int chcr_ahash_final(struct ahash_request *req) 1772 { 1773 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 1774 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); 1775 struct chcr_dev *dev = h_ctx(rtfm)->dev; 1776 struct hash_wr_param params; 1777 struct sk_buff *skb; 1778 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm)); 1779 struct chcr_context *ctx = h_ctx(rtfm); 1780 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); 1781 int error; 1782 unsigned int cpu; 1783 1784 cpu = get_cpu(); 1785 req_ctx->txqidx = cpu % ctx->ntxq; 1786 req_ctx->rxqidx = cpu % ctx->nrxq; 1787 put_cpu(); 1788 1789 error = chcr_inc_wrcount(dev); 1790 if (error) 1791 return -ENXIO; 1792 1793 chcr_init_hctx_per_wr(req_ctx); 1794 if (is_hmac(crypto_ahash_tfm(rtfm))) 1795 params.opad_needed = 1; 1796 else 1797 params.opad_needed = 0; 1798 params.sg_len = 0; 1799 req_ctx->hctx_wr.isfinal = 1; 1800 get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); 1801 params.kctx_len = roundup(params.alg_prm.result_size, 16); 1802 if (is_hmac(crypto_ahash_tfm(rtfm))) { 1803 params.opad_needed = 1; 1804 params.kctx_len *= 2; 1805 } else { 1806 params.opad_needed = 0; 1807 } 1808 1809 req_ctx->hctx_wr.result = 1; 1810 params.bfr_len = req_ctx->reqlen; 1811 req_ctx->data_len += params.bfr_len + params.sg_len; 1812 req_ctx->hctx_wr.srcsg = req->src; 1813 if (req_ctx->reqlen == 0) { 1814 
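		/* Nothing is buffered: feed the hardware one block holding
		 * only the 0x80/length padding.
		 */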
create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); 1815 params.last = 0; 1816 params.more = 1; 1817 params.scmd1 = 0; 1818 params.bfr_len = bs; 1819 1820 } else { 1821 params.scmd1 = req_ctx->data_len; 1822 params.last = 1; 1823 params.more = 0; 1824 } 1825 params.hash_size = crypto_ahash_digestsize(rtfm); 1826 skb = create_hash_wr(req, ¶ms); 1827 if (IS_ERR(skb)) { 1828 error = PTR_ERR(skb); 1829 goto err; 1830 } 1831 req_ctx->reqlen = 0; 1832 skb->dev = u_ctx->lldi.ports[0]; 1833 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); 1834 chcr_send_wr(skb); 1835 return -EINPROGRESS; 1836 err: 1837 chcr_dec_wrcount(dev); 1838 return error; 1839 } 1840 1841 static int chcr_ahash_finup(struct ahash_request *req) 1842 { 1843 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 1844 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); 1845 struct chcr_dev *dev = h_ctx(rtfm)->dev; 1846 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm)); 1847 struct chcr_context *ctx = h_ctx(rtfm); 1848 struct sk_buff *skb; 1849 struct hash_wr_param params; 1850 u8 bs; 1851 int error; 1852 unsigned int cpu; 1853 1854 cpu = get_cpu(); 1855 req_ctx->txqidx = cpu % ctx->ntxq; 1856 req_ctx->rxqidx = cpu % ctx->nrxq; 1857 put_cpu(); 1858 1859 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); 1860 error = chcr_inc_wrcount(dev); 1861 if (error) 1862 return -ENXIO; 1863 1864 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1865 req_ctx->txqidx) && 1866 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { 1867 error = -ENOSPC; 1868 goto err; 1869 } 1870 chcr_init_hctx_per_wr(req_ctx); 1871 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); 1872 if (error) { 1873 error = -ENOMEM; 1874 goto err; 1875 } 1876 1877 get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); 1878 params.kctx_len = roundup(params.alg_prm.result_size, 16); 1879 if (is_hmac(crypto_ahash_tfm(rtfm))) { 1880 params.kctx_len *= 2; 1881 params.opad_needed = 1; 1882 } else { 1883 params.opad_needed = 0; 1884 } 1885 1886 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, 1887 HASH_SPACE_LEFT(params.kctx_len), 0); 1888 if (params.sg_len < req->nbytes) { 1889 if (is_hmac(crypto_ahash_tfm(rtfm))) { 1890 params.kctx_len /= 2; 1891 params.opad_needed = 0; 1892 } 1893 params.last = 0; 1894 params.more = 1; 1895 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) 1896 - req_ctx->reqlen; 1897 params.hash_size = params.alg_prm.result_size; 1898 params.scmd1 = 0; 1899 } else { 1900 params.last = 1; 1901 params.more = 0; 1902 params.sg_len = req->nbytes; 1903 params.hash_size = crypto_ahash_digestsize(rtfm); 1904 params.scmd1 = req_ctx->data_len + req_ctx->reqlen + 1905 params.sg_len; 1906 } 1907 params.bfr_len = req_ctx->reqlen; 1908 req_ctx->data_len += params.bfr_len + params.sg_len; 1909 req_ctx->hctx_wr.result = 1; 1910 req_ctx->hctx_wr.srcsg = req->src; 1911 if ((req_ctx->reqlen + req->nbytes) == 0) { 1912 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); 1913 params.last = 0; 1914 params.more = 1; 1915 params.scmd1 = 0; 1916 params.bfr_len = bs; 1917 } 1918 skb = create_hash_wr(req, ¶ms); 1919 if (IS_ERR(skb)) { 1920 error = PTR_ERR(skb); 1921 goto unmap; 1922 } 1923 req_ctx->reqlen = 0; 1924 req_ctx->hctx_wr.processed += params.sg_len; 1925 skb->dev = u_ctx->lldi.ports[0]; 1926 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); 1927 chcr_send_wr(skb); 1928 return -EINPROGRESS; 1929 unmap: 1930 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); 1931 err: 1932 chcr_dec_wrcount(dev); 
	return error;
}

static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;

	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}

static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(rtfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len =
		roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}

static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_dev *dev = h_ctx(tfm)->dev;

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);

		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
}

/*
 * chcr_handle_resp - Unmap the DMA buffers associated with the request
 * @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		err =
chcr_handle_aead_resp(aead_request_cast(req), input, err); 2165 break; 2166 2167 case CRYPTO_ALG_TYPE_SKCIPHER: 2168 chcr_handle_cipher_resp(skcipher_request_cast(req), 2169 input, err); 2170 break; 2171 case CRYPTO_ALG_TYPE_AHASH: 2172 chcr_handle_ahash_resp(ahash_request_cast(req), input, err); 2173 } 2174 atomic_inc(&adap->chcr_stats.complete); 2175 return err; 2176 } 2177 static int chcr_ahash_export(struct ahash_request *areq, void *out) 2178 { 2179 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2180 struct chcr_ahash_req_ctx *state = out; 2181 2182 state->reqlen = req_ctx->reqlen; 2183 state->data_len = req_ctx->data_len; 2184 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); 2185 memcpy(state->partial_hash, req_ctx->partial_hash, 2186 CHCR_HASH_MAX_DIGEST_SIZE); 2187 chcr_init_hctx_per_wr(state); 2188 return 0; 2189 } 2190 2191 static int chcr_ahash_import(struct ahash_request *areq, const void *in) 2192 { 2193 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2194 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in; 2195 2196 req_ctx->reqlen = state->reqlen; 2197 req_ctx->data_len = state->data_len; 2198 req_ctx->reqbfr = req_ctx->bfr1; 2199 req_ctx->skbfr = req_ctx->bfr2; 2200 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); 2201 memcpy(req_ctx->partial_hash, state->partial_hash, 2202 CHCR_HASH_MAX_DIGEST_SIZE); 2203 chcr_init_hctx_per_wr(req_ctx); 2204 return 0; 2205 } 2206 2207 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, 2208 unsigned int keylen) 2209 { 2210 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm)); 2211 unsigned int digestsize = crypto_ahash_digestsize(tfm); 2212 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2213 unsigned int i, err = 0, updated_digestsize; 2214 2215 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash); 2216 2217 /* use the key to calculate the ipad and opad. ipad will sent with the 2218 * first request's data. 
opad will be sent with the final hash result 2219 * ipad in hmacctx->ipad and opad in hmacctx->opad location 2220 */ 2221 shash->tfm = hmacctx->base_hash; 2222 if (keylen > bs) { 2223 err = crypto_shash_digest(shash, key, keylen, 2224 hmacctx->ipad); 2225 if (err) 2226 goto out; 2227 keylen = digestsize; 2228 } else { 2229 memcpy(hmacctx->ipad, key, keylen); 2230 } 2231 memset(hmacctx->ipad + keylen, 0, bs - keylen); 2232 memcpy(hmacctx->opad, hmacctx->ipad, bs); 2233 2234 for (i = 0; i < bs / sizeof(int); i++) { 2235 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA; 2236 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA; 2237 } 2238 2239 updated_digestsize = digestsize; 2240 if (digestsize == SHA224_DIGEST_SIZE) 2241 updated_digestsize = SHA256_DIGEST_SIZE; 2242 else if (digestsize == SHA384_DIGEST_SIZE) 2243 updated_digestsize = SHA512_DIGEST_SIZE; 2244 err = chcr_compute_partial_hash(shash, hmacctx->ipad, 2245 hmacctx->ipad, digestsize); 2246 if (err) 2247 goto out; 2248 chcr_change_order(hmacctx->ipad, updated_digestsize); 2249 2250 err = chcr_compute_partial_hash(shash, hmacctx->opad, 2251 hmacctx->opad, digestsize); 2252 if (err) 2253 goto out; 2254 chcr_change_order(hmacctx->opad, updated_digestsize); 2255 out: 2256 return err; 2257 } 2258 2259 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, 2260 unsigned int key_len) 2261 { 2262 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 2263 unsigned short context_size = 0; 2264 int err; 2265 2266 err = chcr_cipher_fallback_setkey(cipher, key, key_len); 2267 if (err) 2268 goto badkey_err; 2269 2270 memcpy(ablkctx->key, key, key_len); 2271 ablkctx->enckey_len = key_len; 2272 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2); 2273 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; 2274 /* Both keys for xts must be aligned to 16 byte boundary 2275 * by padding with zeros. So for 24 byte keys padding 8 zeroes. 2276 */ 2277 if (key_len == 48) { 2278 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len 2279 + 16) >> 4; 2280 memmove(ablkctx->key + 32, ablkctx->key + 24, 24); 2281 memset(ablkctx->key + 24, 0, 8); 2282 memset(ablkctx->key + 56, 0, 8); 2283 ablkctx->enckey_len = 64; 2284 ablkctx->key_ctx_hdr = 2285 FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192, 2286 CHCR_KEYCTX_NO_KEY, 1, 2287 0, context_size); 2288 } else { 2289 ablkctx->key_ctx_hdr = 2290 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ? 
2291 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : 2292 CHCR_KEYCTX_CIPHER_KEY_SIZE_256, 2293 CHCR_KEYCTX_NO_KEY, 1, 2294 0, context_size); 2295 } 2296 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; 2297 return 0; 2298 badkey_err: 2299 ablkctx->enckey_len = 0; 2300 2301 return err; 2302 } 2303 2304 static int chcr_sha_init(struct ahash_request *areq) 2305 { 2306 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2307 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 2308 int digestsize = crypto_ahash_digestsize(tfm); 2309 2310 req_ctx->data_len = 0; 2311 req_ctx->reqlen = 0; 2312 req_ctx->reqbfr = req_ctx->bfr1; 2313 req_ctx->skbfr = req_ctx->bfr2; 2314 copy_hash_init_values(req_ctx->partial_hash, digestsize); 2315 2316 return 0; 2317 } 2318 2319 static int chcr_sha_cra_init(struct crypto_tfm *tfm) 2320 { 2321 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 2322 sizeof(struct chcr_ahash_req_ctx)); 2323 return chcr_device_init(crypto_tfm_ctx(tfm)); 2324 } 2325 2326 static int chcr_hmac_init(struct ahash_request *areq) 2327 { 2328 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2329 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq); 2330 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm)); 2331 unsigned int digestsize = crypto_ahash_digestsize(rtfm); 2332 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); 2333 2334 chcr_sha_init(areq); 2335 req_ctx->data_len = bs; 2336 if (is_hmac(crypto_ahash_tfm(rtfm))) { 2337 if (digestsize == SHA224_DIGEST_SIZE) 2338 memcpy(req_ctx->partial_hash, hmacctx->ipad, 2339 SHA256_DIGEST_SIZE); 2340 else if (digestsize == SHA384_DIGEST_SIZE) 2341 memcpy(req_ctx->partial_hash, hmacctx->ipad, 2342 SHA512_DIGEST_SIZE); 2343 else 2344 memcpy(req_ctx->partial_hash, hmacctx->ipad, 2345 digestsize); 2346 } 2347 return 0; 2348 } 2349 2350 static int chcr_hmac_cra_init(struct crypto_tfm *tfm) 2351 { 2352 struct chcr_context *ctx = crypto_tfm_ctx(tfm); 2353 struct hmac_ctx *hmacctx = HMAC_CTX(ctx); 2354 unsigned int digestsize = 2355 crypto_ahash_digestsize(__crypto_ahash_cast(tfm)); 2356 2357 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 2358 sizeof(struct chcr_ahash_req_ctx)); 2359 hmacctx->base_hash = chcr_alloc_shash(digestsize); 2360 if (IS_ERR(hmacctx->base_hash)) 2361 return PTR_ERR(hmacctx->base_hash); 2362 return chcr_device_init(crypto_tfm_ctx(tfm)); 2363 } 2364 2365 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) 2366 { 2367 struct chcr_context *ctx = crypto_tfm_ctx(tfm); 2368 struct hmac_ctx *hmacctx = HMAC_CTX(ctx); 2369 2370 if (hmacctx->base_hash) { 2371 chcr_free_shash(hmacctx->base_hash); 2372 hmacctx->base_hash = NULL; 2373 } 2374 } 2375 2376 inline void chcr_aead_common_exit(struct aead_request *req) 2377 { 2378 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2379 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2380 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); 2381 2382 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); 2383 } 2384 2385 static int chcr_aead_common_init(struct aead_request *req) 2386 { 2387 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2388 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2389 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2390 unsigned int authsize = crypto_aead_authsize(tfm); 2391 int error = -EINVAL; 2392 2393 /* validate key size */ 2394 if (aeadctx->enckey_len == 0) 2395 goto err; 2396 if (reqctx->op && req->cryptlen < authsize) 2397 goto err; 2398 if (reqctx->b0_len) 2399 reqctx->scratch_pad = reqctx->iv + IV; 2400 
else 2401 reqctx->scratch_pad = NULL; 2402 2403 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, 2404 reqctx->op); 2405 if (error) { 2406 error = -ENOMEM; 2407 goto err; 2408 } 2409 2410 return 0; 2411 err: 2412 return error; 2413 } 2414 2415 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents, 2416 int aadmax, int wrlen, 2417 unsigned short op_type) 2418 { 2419 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); 2420 2421 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) || 2422 dst_nents > MAX_DSGL_ENT || 2423 (req->assoclen > aadmax) || 2424 (wrlen > SGE_MAX_WR_LEN)) 2425 return 1; 2426 return 0; 2427 } 2428 2429 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) 2430 { 2431 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2432 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2433 struct aead_request *subreq = aead_request_ctx(req); 2434 2435 aead_request_set_tfm(subreq, aeadctx->sw_cipher); 2436 aead_request_set_callback(subreq, req->base.flags, 2437 req->base.complete, req->base.data); 2438 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, 2439 req->iv); 2440 aead_request_set_ad(subreq, req->assoclen); 2441 return op_type ? crypto_aead_decrypt(subreq) : 2442 crypto_aead_encrypt(subreq); 2443 } 2444 2445 static struct sk_buff *create_authenc_wr(struct aead_request *req, 2446 unsigned short qid, 2447 int size) 2448 { 2449 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2450 struct chcr_context *ctx = a_ctx(tfm); 2451 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2452 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); 2453 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2454 struct sk_buff *skb = NULL; 2455 struct chcr_wr *chcr_req; 2456 struct cpl_rx_phys_dsgl *phys_cpl; 2457 struct ulptx_sgl *ulptx; 2458 unsigned int transhdr_len; 2459 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm); 2460 unsigned int kctx_len = 0, dnents, snents; 2461 unsigned int authsize = crypto_aead_authsize(tfm); 2462 int error = -EINVAL; 2463 u8 *ivptr; 2464 int null = 0; 2465 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 2466 GFP_ATOMIC; 2467 struct adapter *adap = padap(ctx->dev); 2468 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; 2469 2470 if (req->cryptlen == 0) 2471 return NULL; 2472 2473 reqctx->b0_len = 0; 2474 error = chcr_aead_common_init(req); 2475 if (error) 2476 return ERR_PTR(error); 2477 2478 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || 2479 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { 2480 null = 1; 2481 } 2482 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen + 2483 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0); 2484 dnents += MIN_AUTH_SG; // For IV 2485 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, 2486 CHCR_SRC_SG_SIZE, 0); 2487 dst_size = get_space_for_phys_dsgl(dnents); 2488 kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4) 2489 - sizeof(chcr_req->key_ctx); 2490 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 2491 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) < 2492 SGE_MAX_WR_LEN; 2493 temp = reqctx->imm ? 
roundup(req->assoclen + req->cryptlen, 16) 2494 : (sgl_len(snents) * 8); 2495 transhdr_len += temp; 2496 transhdr_len = roundup(transhdr_len, 16); 2497 2498 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, 2499 transhdr_len, reqctx->op)) { 2500 atomic_inc(&adap->chcr_stats.fallback); 2501 chcr_aead_common_exit(req); 2502 return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); 2503 } 2504 skb = alloc_skb(transhdr_len, flags); 2505 if (!skb) { 2506 error = -ENOMEM; 2507 goto err; 2508 } 2509 2510 chcr_req = __skb_put_zero(skb, transhdr_len); 2511 2512 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; 2513 2514 /* 2515 * Input order is AAD,IV and Payload. where IV should be included as 2516 * the part of authdata. All other fields should be filled according 2517 * to the hardware spec 2518 */ 2519 chcr_req->sec_cpl.op_ivinsrtofst = 2520 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); 2521 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen); 2522 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( 2523 null ? 0 : 1 + IV, 2524 null ? 0 : IV + req->assoclen, 2525 req->assoclen + IV + 1, 2526 (temp & 0x1F0) >> 4); 2527 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( 2528 temp & 0xF, 2529 null ? 0 : req->assoclen + IV + 1, 2530 temp, temp); 2531 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL || 2532 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA) 2533 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR; 2534 else 2535 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC; 2536 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 2537 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0, 2538 temp, 2539 actx->auth_mode, aeadctx->hmac_ctrl, 2540 IV >> 1); 2541 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 2542 0, 0, dst_size); 2543 2544 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; 2545 if (reqctx->op == CHCR_ENCRYPT_OP || 2546 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || 2547 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) 2548 memcpy(chcr_req->key_ctx.key, aeadctx->key, 2549 aeadctx->enckey_len); 2550 else 2551 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey, 2552 aeadctx->enckey_len); 2553 2554 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), 2555 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16)); 2556 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 2557 ivptr = (u8 *)(phys_cpl + 1) + dst_size; 2558 ulptx = (struct ulptx_sgl *)(ivptr + IV); 2559 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || 2560 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { 2561 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); 2562 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv, 2563 CTR_RFC3686_IV_SIZE); 2564 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE + 2565 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); 2566 } else { 2567 memcpy(ivptr, req->iv, IV); 2568 } 2569 chcr_add_aead_dst_ent(req, phys_cpl, qid); 2570 chcr_add_aead_src_ent(req, ulptx); 2571 atomic_inc(&adap->chcr_stats.cipher_rqst); 2572 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + 2573 kctx_len + (reqctx->imm ? 
(req->assoclen + req->cryptlen) : 0); 2574 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, 2575 transhdr_len, temp, 0); 2576 reqctx->skb = skb; 2577 2578 return skb; 2579 err: 2580 chcr_aead_common_exit(req); 2581 2582 return ERR_PTR(error); 2583 } 2584 2585 int chcr_aead_dma_map(struct device *dev, 2586 struct aead_request *req, 2587 unsigned short op_type) 2588 { 2589 int error; 2590 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2591 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2592 unsigned int authsize = crypto_aead_authsize(tfm); 2593 int src_len, dst_len; 2594 2595 /* calculate and handle src and dst sg length separately 2596 * for inplace and out-of place operations 2597 */ 2598 if (req->src == req->dst) { 2599 src_len = req->assoclen + req->cryptlen + (op_type ? 2600 0 : authsize); 2601 dst_len = src_len; 2602 } else { 2603 src_len = req->assoclen + req->cryptlen; 2604 dst_len = req->assoclen + req->cryptlen + (op_type ? 2605 -authsize : authsize); 2606 } 2607 2608 if (!req->cryptlen || !src_len || !dst_len) 2609 return 0; 2610 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len), 2611 DMA_BIDIRECTIONAL); 2612 if (dma_mapping_error(dev, reqctx->iv_dma)) 2613 return -ENOMEM; 2614 if (reqctx->b0_len) 2615 reqctx->b0_dma = reqctx->iv_dma + IV; 2616 else 2617 reqctx->b0_dma = 0; 2618 if (req->src == req->dst) { 2619 error = dma_map_sg(dev, req->src, 2620 sg_nents_for_len(req->src, src_len), 2621 DMA_BIDIRECTIONAL); 2622 if (!error) 2623 goto err; 2624 } else { 2625 error = dma_map_sg(dev, req->src, 2626 sg_nents_for_len(req->src, src_len), 2627 DMA_TO_DEVICE); 2628 if (!error) 2629 goto err; 2630 error = dma_map_sg(dev, req->dst, 2631 sg_nents_for_len(req->dst, dst_len), 2632 DMA_FROM_DEVICE); 2633 if (!error) { 2634 dma_unmap_sg(dev, req->src, 2635 sg_nents_for_len(req->src, src_len), 2636 DMA_TO_DEVICE); 2637 goto err; 2638 } 2639 } 2640 2641 return 0; 2642 err: 2643 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); 2644 return -ENOMEM; 2645 } 2646 2647 void chcr_aead_dma_unmap(struct device *dev, 2648 struct aead_request *req, 2649 unsigned short op_type) 2650 { 2651 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2652 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2653 unsigned int authsize = crypto_aead_authsize(tfm); 2654 int src_len, dst_len; 2655 2656 /* calculate and handle src and dst sg length separately 2657 * for inplace and out-of place operations 2658 */ 2659 if (req->src == req->dst) { 2660 src_len = req->assoclen + req->cryptlen + (op_type ? 2661 0 : authsize); 2662 dst_len = src_len; 2663 } else { 2664 src_len = req->assoclen + req->cryptlen; 2665 dst_len = req->assoclen + req->cryptlen + (op_type ? 
2666 -authsize : authsize); 2667 } 2668 2669 if (!req->cryptlen || !src_len || !dst_len) 2670 return; 2671 2672 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len), 2673 DMA_BIDIRECTIONAL); 2674 if (req->src == req->dst) { 2675 dma_unmap_sg(dev, req->src, 2676 sg_nents_for_len(req->src, src_len), 2677 DMA_BIDIRECTIONAL); 2678 } else { 2679 dma_unmap_sg(dev, req->src, 2680 sg_nents_for_len(req->src, src_len), 2681 DMA_TO_DEVICE); 2682 dma_unmap_sg(dev, req->dst, 2683 sg_nents_for_len(req->dst, dst_len), 2684 DMA_FROM_DEVICE); 2685 } 2686 } 2687 2688 void chcr_add_aead_src_ent(struct aead_request *req, 2689 struct ulptx_sgl *ulptx) 2690 { 2691 struct ulptx_walk ulp_walk; 2692 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2693 2694 if (reqctx->imm) { 2695 u8 *buf = (u8 *)ulptx; 2696 2697 if (reqctx->b0_len) { 2698 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len); 2699 buf += reqctx->b0_len; 2700 } 2701 sg_pcopy_to_buffer(req->src, sg_nents(req->src), 2702 buf, req->cryptlen + req->assoclen, 0); 2703 } else { 2704 ulptx_walk_init(&ulp_walk, ulptx); 2705 if (reqctx->b0_len) 2706 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len, 2707 reqctx->b0_dma); 2708 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen + 2709 req->assoclen, 0); 2710 ulptx_walk_end(&ulp_walk); 2711 } 2712 } 2713 2714 void chcr_add_aead_dst_ent(struct aead_request *req, 2715 struct cpl_rx_phys_dsgl *phys_cpl, 2716 unsigned short qid) 2717 { 2718 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2719 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2720 struct dsgl_walk dsgl_walk; 2721 unsigned int authsize = crypto_aead_authsize(tfm); 2722 struct chcr_context *ctx = a_ctx(tfm); 2723 u32 temp; 2724 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; 2725 2726 dsgl_walk_init(&dsgl_walk, phys_cpl); 2727 dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma); 2728 temp = req->assoclen + req->cryptlen + 2729 (reqctx->op ? 
-authsize : authsize); 2730 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0); 2731 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id); 2732 } 2733 2734 void chcr_add_cipher_src_ent(struct skcipher_request *req, 2735 void *ulptx, 2736 struct cipher_wr_param *wrparam) 2737 { 2738 struct ulptx_walk ulp_walk; 2739 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); 2740 u8 *buf = ulptx; 2741 2742 memcpy(buf, reqctx->iv, IV); 2743 buf += IV; 2744 if (reqctx->imm) { 2745 sg_pcopy_to_buffer(req->src, sg_nents(req->src), 2746 buf, wrparam->bytes, reqctx->processed); 2747 } else { 2748 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf); 2749 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes, 2750 reqctx->src_ofst); 2751 reqctx->srcsg = ulp_walk.last_sg; 2752 reqctx->src_ofst = ulp_walk.last_sg_len; 2753 ulptx_walk_end(&ulp_walk); 2754 } 2755 } 2756 2757 void chcr_add_cipher_dst_ent(struct skcipher_request *req, 2758 struct cpl_rx_phys_dsgl *phys_cpl, 2759 struct cipher_wr_param *wrparam, 2760 unsigned short qid) 2761 { 2762 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); 2763 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req); 2764 struct chcr_context *ctx = c_ctx(tfm); 2765 struct dsgl_walk dsgl_walk; 2766 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; 2767 2768 dsgl_walk_init(&dsgl_walk, phys_cpl); 2769 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, 2770 reqctx->dst_ofst); 2771 reqctx->dstsg = dsgl_walk.last_sg; 2772 reqctx->dst_ofst = dsgl_walk.last_sg_len; 2773 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id); 2774 } 2775 2776 void chcr_add_hash_src_ent(struct ahash_request *req, 2777 struct ulptx_sgl *ulptx, 2778 struct hash_wr_param *param) 2779 { 2780 struct ulptx_walk ulp_walk; 2781 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); 2782 2783 if (reqctx->hctx_wr.imm) { 2784 u8 *buf = (u8 *)ulptx; 2785 2786 if (param->bfr_len) { 2787 memcpy(buf, reqctx->reqbfr, param->bfr_len); 2788 buf += param->bfr_len; 2789 } 2790 2791 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg, 2792 sg_nents(reqctx->hctx_wr.srcsg), buf, 2793 param->sg_len, 0); 2794 } else { 2795 ulptx_walk_init(&ulp_walk, ulptx); 2796 if (param->bfr_len) 2797 ulptx_walk_add_page(&ulp_walk, param->bfr_len, 2798 reqctx->hctx_wr.dma_addr); 2799 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg, 2800 param->sg_len, reqctx->hctx_wr.src_ofst); 2801 reqctx->hctx_wr.srcsg = ulp_walk.last_sg; 2802 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len; 2803 ulptx_walk_end(&ulp_walk); 2804 } 2805 } 2806 2807 int chcr_hash_dma_map(struct device *dev, 2808 struct ahash_request *req) 2809 { 2810 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 2811 int error = 0; 2812 2813 if (!req->nbytes) 2814 return 0; 2815 error = dma_map_sg(dev, req->src, sg_nents(req->src), 2816 DMA_TO_DEVICE); 2817 if (!error) 2818 return -ENOMEM; 2819 req_ctx->hctx_wr.is_sg_map = 1; 2820 return 0; 2821 } 2822 2823 void chcr_hash_dma_unmap(struct device *dev, 2824 struct ahash_request *req) 2825 { 2826 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 2827 2828 if (!req->nbytes) 2829 return; 2830 2831 dma_unmap_sg(dev, req->src, sg_nents(req->src), 2832 DMA_TO_DEVICE); 2833 req_ctx->hctx_wr.is_sg_map = 0; 2834 2835 } 2836 2837 int chcr_cipher_dma_map(struct device *dev, 2838 struct skcipher_request *req) 2839 { 2840 int error; 2841 2842 if (req->src == req->dst) { 2843 error = dma_map_sg(dev, req->src, sg_nents(req->src), 2844 DMA_BIDIRECTIONAL); 2845 if (!error) 2846 goto 
err; 2847 } else { 2848 error = dma_map_sg(dev, req->src, sg_nents(req->src), 2849 DMA_TO_DEVICE); 2850 if (!error) 2851 goto err; 2852 error = dma_map_sg(dev, req->dst, sg_nents(req->dst), 2853 DMA_FROM_DEVICE); 2854 if (!error) { 2855 dma_unmap_sg(dev, req->src, sg_nents(req->src), 2856 DMA_TO_DEVICE); 2857 goto err; 2858 } 2859 } 2860 2861 return 0; 2862 err: 2863 return -ENOMEM; 2864 } 2865 2866 void chcr_cipher_dma_unmap(struct device *dev, 2867 struct skcipher_request *req) 2868 { 2869 if (req->src == req->dst) { 2870 dma_unmap_sg(dev, req->src, sg_nents(req->src), 2871 DMA_BIDIRECTIONAL); 2872 } else { 2873 dma_unmap_sg(dev, req->src, sg_nents(req->src), 2874 DMA_TO_DEVICE); 2875 dma_unmap_sg(dev, req->dst, sg_nents(req->dst), 2876 DMA_FROM_DEVICE); 2877 } 2878 } 2879 2880 static int set_msg_len(u8 *block, unsigned int msglen, int csize) 2881 { 2882 __be32 data; 2883 2884 memset(block, 0, csize); 2885 block += csize; 2886 2887 if (csize >= 4) 2888 csize = 4; 2889 else if (msglen > (unsigned int)(1 << (8 * csize))) 2890 return -EOVERFLOW; 2891 2892 data = cpu_to_be32(msglen); 2893 memcpy(block - csize, (u8 *)&data + 4 - csize, csize); 2894 2895 return 0; 2896 } 2897 2898 static int generate_b0(struct aead_request *req, u8 *ivptr, 2899 unsigned short op_type) 2900 { 2901 unsigned int l, lp, m; 2902 int rc; 2903 struct crypto_aead *aead = crypto_aead_reqtfm(req); 2904 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2905 u8 *b0 = reqctx->scratch_pad; 2906 2907 m = crypto_aead_authsize(aead); 2908 2909 memcpy(b0, ivptr, 16); 2910 2911 lp = b0[0]; 2912 l = lp + 1; 2913 2914 /* set m, bits 3-5 */ 2915 *b0 |= (8 * ((m - 2) / 2)); 2916 2917 /* set adata, bit 6, if associated data is used */ 2918 if (req->assoclen) 2919 *b0 |= 64; 2920 rc = set_msg_len(b0 + 16 - l, 2921 (op_type == CHCR_DECRYPT_OP) ? 2922 req->cryptlen - m : req->cryptlen, l); 2923 2924 return rc; 2925 } 2926 2927 static inline int crypto_ccm_check_iv(const u8 *iv) 2928 { 2929 /* 2 <= L <= 8, so 1 <= L' <= 7. 
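	 * iv[0] carries L' (that is, L - 1), so values outside 1..7 are
	 * rejected.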
*/ 2930 if (iv[0] < 1 || iv[0] > 7) 2931 return -EINVAL; 2932 2933 return 0; 2934 } 2935 2936 static int ccm_format_packet(struct aead_request *req, 2937 u8 *ivptr, 2938 unsigned int sub_type, 2939 unsigned short op_type, 2940 unsigned int assoclen) 2941 { 2942 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2943 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2944 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2945 int rc = 0; 2946 2947 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { 2948 ivptr[0] = 3; 2949 memcpy(ivptr + 1, &aeadctx->salt[0], 3); 2950 memcpy(ivptr + 4, req->iv, 8); 2951 memset(ivptr + 12, 0, 4); 2952 } else { 2953 memcpy(ivptr, req->iv, 16); 2954 } 2955 if (assoclen) 2956 put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]); 2957 2958 rc = generate_b0(req, ivptr, op_type); 2959 /* zero the ctr value */ 2960 memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1); 2961 return rc; 2962 } 2963 2964 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, 2965 unsigned int dst_size, 2966 struct aead_request *req, 2967 unsigned short op_type) 2968 { 2969 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2970 struct chcr_context *ctx = a_ctx(tfm); 2971 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2972 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2973 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; 2974 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; 2975 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; 2976 unsigned int ccm_xtra; 2977 unsigned int tag_offset = 0, auth_offset = 0; 2978 unsigned int assoclen; 2979 2980 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) 2981 assoclen = req->assoclen - 8; 2982 else 2983 assoclen = req->assoclen; 2984 ccm_xtra = CCM_B0_SIZE + 2985 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); 2986 2987 auth_offset = req->cryptlen ? 2988 (req->assoclen + IV + 1 + ccm_xtra) : 0; 2989 if (op_type == CHCR_DECRYPT_OP) { 2990 if (crypto_aead_authsize(tfm) != req->cryptlen) 2991 tag_offset = crypto_aead_authsize(tfm); 2992 else 2993 auth_offset = 0; 2994 } 2995 2996 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); 2997 sec_cpl->pldlen = 2998 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra); 2999 /* For CCM there wil be b0 always. So AAD start will be 1 always */ 3000 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( 3001 1 + IV, IV + assoclen + ccm_xtra, 3002 req->assoclen + IV + 1 + ccm_xtra, 0); 3003 3004 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 3005 auth_offset, tag_offset, 3006 (op_type == CHCR_ENCRYPT_OP) ? 0 : 3007 crypto_aead_authsize(tfm)); 3008 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 3009 (op_type == CHCR_ENCRYPT_OP) ? 
0 : 1, 3010 cipher_mode, mac_mode, 3011 aeadctx->hmac_ctrl, IV >> 1); 3012 3013 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, 3014 0, dst_size); 3015 } 3016 3017 static int aead_ccm_validate_input(unsigned short op_type, 3018 struct aead_request *req, 3019 struct chcr_aead_ctx *aeadctx, 3020 unsigned int sub_type) 3021 { 3022 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { 3023 if (crypto_ccm_check_iv(req->iv)) { 3024 pr_err("CCM: IV check fails\n"); 3025 return -EINVAL; 3026 } 3027 } else { 3028 if (req->assoclen != 16 && req->assoclen != 20) { 3029 pr_err("RFC4309: Invalid AAD length %d\n", 3030 req->assoclen); 3031 return -EINVAL; 3032 } 3033 } 3034 return 0; 3035 } 3036 3037 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, 3038 unsigned short qid, 3039 int size) 3040 { 3041 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 3042 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3043 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 3044 struct sk_buff *skb = NULL; 3045 struct chcr_wr *chcr_req; 3046 struct cpl_rx_phys_dsgl *phys_cpl; 3047 struct ulptx_sgl *ulptx; 3048 unsigned int transhdr_len; 3049 unsigned int dst_size = 0, kctx_len, dnents, temp, snents; 3050 unsigned int sub_type, assoclen = req->assoclen; 3051 unsigned int authsize = crypto_aead_authsize(tfm); 3052 int error = -EINVAL; 3053 u8 *ivptr; 3054 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 3055 GFP_ATOMIC; 3056 struct adapter *adap = padap(a_ctx(tfm)->dev); 3057 3058 sub_type = get_aead_subtype(tfm); 3059 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) 3060 assoclen -= 8; 3061 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0); 3062 error = chcr_aead_common_init(req); 3063 if (error) 3064 return ERR_PTR(error); 3065 3066 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type); 3067 if (error) 3068 goto err; 3069 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen 3070 + (reqctx->op ? -authsize : authsize), 3071 CHCR_DST_SG_SIZE, 0); 3072 dnents += MIN_CCM_SG; // For IV and B0 3073 dst_size = get_space_for_phys_dsgl(dnents); 3074 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, 3075 CHCR_SRC_SG_SIZE, 0); 3076 snents += MIN_CCM_SG; //For B0 3077 kctx_len = roundup(aeadctx->enckey_len, 16) * 2; 3078 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 3079 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen + 3080 reqctx->b0_len) <= SGE_MAX_WR_LEN; 3081 temp = reqctx->imm ? 
roundup(req->assoclen + req->cryptlen + 3082 reqctx->b0_len, 16) : 3083 (sgl_len(snents) * 8); 3084 transhdr_len += temp; 3085 transhdr_len = roundup(transhdr_len, 16); 3086 3087 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - 3088 reqctx->b0_len, transhdr_len, reqctx->op)) { 3089 atomic_inc(&adap->chcr_stats.fallback); 3090 chcr_aead_common_exit(req); 3091 return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); 3092 } 3093 skb = alloc_skb(transhdr_len, flags); 3094 3095 if (!skb) { 3096 error = -ENOMEM; 3097 goto err; 3098 } 3099 3100 chcr_req = __skb_put_zero(skb, transhdr_len); 3101 3102 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op); 3103 3104 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; 3105 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); 3106 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), 3107 aeadctx->key, aeadctx->enckey_len); 3108 3109 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 3110 ivptr = (u8 *)(phys_cpl + 1) + dst_size; 3111 ulptx = (struct ulptx_sgl *)(ivptr + IV); 3112 error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen); 3113 if (error) 3114 goto dstmap_fail; 3115 chcr_add_aead_dst_ent(req, phys_cpl, qid); 3116 chcr_add_aead_src_ent(req, ulptx); 3117 3118 atomic_inc(&adap->chcr_stats.aead_rqst); 3119 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + 3120 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen + 3121 reqctx->b0_len) : 0); 3122 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0, 3123 transhdr_len, temp, 0); 3124 reqctx->skb = skb; 3125 3126 return skb; 3127 dstmap_fail: 3128 kfree_skb(skb); 3129 err: 3130 chcr_aead_common_exit(req); 3131 return ERR_PTR(error); 3132 } 3133 3134 static struct sk_buff *create_gcm_wr(struct aead_request *req, 3135 unsigned short qid, 3136 int size) 3137 { 3138 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 3139 struct chcr_context *ctx = a_ctx(tfm); 3140 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 3141 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 3142 struct sk_buff *skb = NULL; 3143 struct chcr_wr *chcr_req; 3144 struct cpl_rx_phys_dsgl *phys_cpl; 3145 struct ulptx_sgl *ulptx; 3146 unsigned int transhdr_len, dnents = 0, snents; 3147 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen; 3148 unsigned int authsize = crypto_aead_authsize(tfm); 3149 int error = -EINVAL; 3150 u8 *ivptr; 3151 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 3152 GFP_ATOMIC; 3153 struct adapter *adap = padap(ctx->dev); 3154 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; 3155 3156 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) 3157 assoclen = req->assoclen - 8; 3158 3159 reqctx->b0_len = 0; 3160 error = chcr_aead_common_init(req); 3161 if (error) 3162 return ERR_PTR(error); 3163 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen + 3164 (reqctx->op ? -authsize : authsize), 3165 CHCR_DST_SG_SIZE, 0); 3166 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, 3167 CHCR_SRC_SG_SIZE, 0); 3168 dnents += MIN_GCM_SG; // For IV 3169 dst_size = get_space_for_phys_dsgl(dnents); 3170 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE; 3171 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 3172 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <= 3173 SGE_MAX_WR_LEN; 3174 temp = reqctx->imm ? 
roundup(req->assoclen + req->cryptlen, 16) : 3175 (sgl_len(snents) * 8); 3176 transhdr_len += temp; 3177 transhdr_len = roundup(transhdr_len, 16); 3178 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, 3179 transhdr_len, reqctx->op)) { 3180 3181 atomic_inc(&adap->chcr_stats.fallback); 3182 chcr_aead_common_exit(req); 3183 return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); 3184 } 3185 skb = alloc_skb(transhdr_len, flags); 3186 if (!skb) { 3187 error = -ENOMEM; 3188 goto err; 3189 } 3190 3191 chcr_req = __skb_put_zero(skb, transhdr_len); 3192 3193 //Offset of tag from end 3194 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; 3195 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( 3196 rx_channel_id, 2, 1); 3197 chcr_req->sec_cpl.pldlen = 3198 htonl(req->assoclen + IV + req->cryptlen); 3199 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( 3200 assoclen ? 1 + IV : 0, 3201 assoclen ? IV + assoclen : 0, 3202 req->assoclen + IV + 1, 0); 3203 chcr_req->sec_cpl.cipherstop_lo_authinsert = 3204 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1, 3205 temp, temp); 3206 chcr_req->sec_cpl.seqno_numivs = 3207 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op == 3208 CHCR_ENCRYPT_OP) ? 1 : 0, 3209 CHCR_SCMD_CIPHER_MODE_AES_GCM, 3210 CHCR_SCMD_AUTH_MODE_GHASH, 3211 aeadctx->hmac_ctrl, IV >> 1); 3212 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 3213 0, 0, dst_size); 3214 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; 3215 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); 3216 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), 3217 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); 3218 3219 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 3220 ivptr = (u8 *)(phys_cpl + 1) + dst_size; 3221 /* prepare a 16 byte iv */ 3222 /* S A L T | IV | 0x00000001 */ 3223 if (get_aead_subtype(tfm) == 3224 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { 3225 memcpy(ivptr, aeadctx->salt, 4); 3226 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE); 3227 } else { 3228 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE); 3229 } 3230 put_unaligned_be32(0x01, &ivptr[12]); 3231 ulptx = (struct ulptx_sgl *)(ivptr + 16); 3232 3233 chcr_add_aead_dst_ent(req, phys_cpl, qid); 3234 chcr_add_aead_src_ent(req, ulptx); 3235 atomic_inc(&adap->chcr_stats.aead_rqst); 3236 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + 3237 kctx_len + (reqctx->imm ? 
(req->assoclen + req->cryptlen) : 0); 3238 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, 3239 transhdr_len, temp, reqctx->verify); 3240 reqctx->skb = skb; 3241 return skb; 3242 3243 err: 3244 chcr_aead_common_exit(req); 3245 return ERR_PTR(error); 3246 } 3247 3248 3249 3250 static int chcr_aead_cra_init(struct crypto_aead *tfm) 3251 { 3252 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3253 struct aead_alg *alg = crypto_aead_alg(tfm); 3254 3255 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, 3256 CRYPTO_ALG_NEED_FALLBACK | 3257 CRYPTO_ALG_ASYNC); 3258 if (IS_ERR(aeadctx->sw_cipher)) 3259 return PTR_ERR(aeadctx->sw_cipher); 3260 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx), 3261 sizeof(struct aead_request) + 3262 crypto_aead_reqsize(aeadctx->sw_cipher))); 3263 return chcr_device_init(a_ctx(tfm)); 3264 } 3265 3266 static void chcr_aead_cra_exit(struct crypto_aead *tfm) 3267 { 3268 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3269 3270 crypto_free_aead(aeadctx->sw_cipher); 3271 } 3272 3273 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, 3274 unsigned int authsize) 3275 { 3276 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3277 3278 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; 3279 aeadctx->mayverify = VERIFY_HW; 3280 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); 3281 } 3282 static int chcr_authenc_setauthsize(struct crypto_aead *tfm, 3283 unsigned int authsize) 3284 { 3285 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3286 u32 maxauth = crypto_aead_maxauthsize(tfm); 3287 3288 /*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not 3289 * true for sha1. authsize == 12 condition should be before 3290 * authsize == (maxauth >> 1) 3291 */ 3292 if (authsize == ICV_4) { 3293 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; 3294 aeadctx->mayverify = VERIFY_HW; 3295 } else if (authsize == ICV_6) { 3296 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; 3297 aeadctx->mayverify = VERIFY_HW; 3298 } else if (authsize == ICV_10) { 3299 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; 3300 aeadctx->mayverify = VERIFY_HW; 3301 } else if (authsize == ICV_12) { 3302 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; 3303 aeadctx->mayverify = VERIFY_HW; 3304 } else if (authsize == ICV_14) { 3305 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; 3306 aeadctx->mayverify = VERIFY_HW; 3307 } else if (authsize == (maxauth >> 1)) { 3308 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; 3309 aeadctx->mayverify = VERIFY_HW; 3310 } else if (authsize == maxauth) { 3311 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; 3312 aeadctx->mayverify = VERIFY_HW; 3313 } else { 3314 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; 3315 aeadctx->mayverify = VERIFY_SW; 3316 } 3317 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); 3318 } 3319 3320 3321 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) 3322 { 3323 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3324 3325 switch (authsize) { 3326 case ICV_4: 3327 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; 3328 aeadctx->mayverify = VERIFY_HW; 3329 break; 3330 case ICV_8: 3331 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; 3332 aeadctx->mayverify = VERIFY_HW; 3333 break; 3334 case ICV_12: 3335 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; 3336 aeadctx->mayverify = VERIFY_HW; 3337 break; 3338 case ICV_14: 3339 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; 3340 aeadctx->mayverify = VERIFY_HW; 3341 
break; 3342 case ICV_16: 3343 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; 3344 aeadctx->mayverify = VERIFY_HW; 3345 break; 3346 case ICV_13: 3347 case ICV_15: 3348 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; 3349 aeadctx->mayverify = VERIFY_SW; 3350 break; 3351 default: 3352 return -EINVAL; 3353 } 3354 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); 3355 } 3356 3357 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, 3358 unsigned int authsize) 3359 { 3360 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3361 3362 switch (authsize) { 3363 case ICV_8: 3364 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; 3365 aeadctx->mayverify = VERIFY_HW; 3366 break; 3367 case ICV_12: 3368 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; 3369 aeadctx->mayverify = VERIFY_HW; 3370 break; 3371 case ICV_16: 3372 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; 3373 aeadctx->mayverify = VERIFY_HW; 3374 break; 3375 default: 3376 return -EINVAL; 3377 } 3378 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); 3379 } 3380 3381 static int chcr_ccm_setauthsize(struct crypto_aead *tfm, 3382 unsigned int authsize) 3383 { 3384 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3385 3386 switch (authsize) { 3387 case ICV_4: 3388 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; 3389 aeadctx->mayverify = VERIFY_HW; 3390 break; 3391 case ICV_6: 3392 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; 3393 aeadctx->mayverify = VERIFY_HW; 3394 break; 3395 case ICV_8: 3396 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; 3397 aeadctx->mayverify = VERIFY_HW; 3398 break; 3399 case ICV_10: 3400 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; 3401 aeadctx->mayverify = VERIFY_HW; 3402 break; 3403 case ICV_12: 3404 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; 3405 aeadctx->mayverify = VERIFY_HW; 3406 break; 3407 case ICV_14: 3408 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; 3409 aeadctx->mayverify = VERIFY_HW; 3410 break; 3411 case ICV_16: 3412 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; 3413 aeadctx->mayverify = VERIFY_HW; 3414 break; 3415 default: 3416 return -EINVAL; 3417 } 3418 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); 3419 } 3420 3421 static int chcr_ccm_common_setkey(struct crypto_aead *aead, 3422 const u8 *key, 3423 unsigned int keylen) 3424 { 3425 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); 3426 unsigned char ck_size, mk_size; 3427 int key_ctx_size = 0; 3428 3429 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2; 3430 if (keylen == AES_KEYSIZE_128) { 3431 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 3432 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; 3433 } else if (keylen == AES_KEYSIZE_192) { 3434 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 3435 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; 3436 } else if (keylen == AES_KEYSIZE_256) { 3437 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 3438 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; 3439 } else { 3440 aeadctx->enckey_len = 0; 3441 return -EINVAL; 3442 } 3443 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0, 3444 key_ctx_size >> 4); 3445 memcpy(aeadctx->key, key, keylen); 3446 aeadctx->enckey_len = keylen; 3447 3448 return 0; 3449 } 3450 3451 static int chcr_aead_ccm_setkey(struct crypto_aead *aead, 3452 const u8 *key, 3453 unsigned int keylen) 3454 { 3455 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); 3456 int error; 3457 3458 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); 3459 crypto_aead_set_flags(aeadctx->sw_cipher, 
crypto_aead_get_flags(aead) & 3460 CRYPTO_TFM_REQ_MASK); 3461 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); 3462 if (error) 3463 return error; 3464 return chcr_ccm_common_setkey(aead, key, keylen); 3465 } 3466 3467 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, 3468 unsigned int keylen) 3469 { 3470 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); 3471 int error; 3472 3473 if (keylen < 3) { 3474 aeadctx->enckey_len = 0; 3475 return -EINVAL; 3476 } 3477 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); 3478 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & 3479 CRYPTO_TFM_REQ_MASK); 3480 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); 3481 if (error) 3482 return error; 3483 keylen -= 3; 3484 memcpy(aeadctx->salt, key + keylen, 3); 3485 return chcr_ccm_common_setkey(aead, key, keylen); 3486 } 3487 3488 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, 3489 unsigned int keylen) 3490 { 3491 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); 3492 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); 3493 unsigned int ck_size; 3494 int ret = 0, key_ctx_size = 0; 3495 struct crypto_aes_ctx aes; 3496 3497 aeadctx->enckey_len = 0; 3498 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); 3499 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) 3500 & CRYPTO_TFM_REQ_MASK); 3501 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); 3502 if (ret) 3503 goto out; 3504 3505 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 && 3506 keylen > 3) { 3507 keylen -= 4; /* nonce/salt is present in the last 4 bytes */ 3508 memcpy(aeadctx->salt, key + keylen, 4); 3509 } 3510 if (keylen == AES_KEYSIZE_128) { 3511 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 3512 } else if (keylen == AES_KEYSIZE_192) { 3513 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 3514 } else if (keylen == AES_KEYSIZE_256) { 3515 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 3516 } else { 3517 pr_err("GCM: Invalid key length %d\n", keylen); 3518 ret = -EINVAL; 3519 goto out; 3520 } 3521 3522 memcpy(aeadctx->key, key, keylen); 3523 aeadctx->enckey_len = keylen; 3524 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) + 3525 AEAD_H_SIZE; 3526 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, 3527 CHCR_KEYCTX_MAC_KEY_SIZE_128, 3528 0, 0, 3529 key_ctx_size >> 4); 3530 /* Calculate the H = CIPH(K, 0 repeated 16 times). 
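	 * H is the GHASH hash subkey; it is derived below with the kernel's
	 * software AES library.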
 * It will go in the key context.
 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
	memzero_explicit(&aes, sizeof(aes));

out:
	return ret;
}

static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains both the auth and cipher keys */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use the authkey to generate h(ipad)
	 * and h(opad), so the authkey is not needed again. authkeylen is the
	 * size of the hash digest.
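	 * Only the h(ipad) and h(opad) partial digests are retained for the
	 * hardware key context.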
3600 */ 3601 memcpy(aeadctx->key, keys.enckey, keys.enckeylen); 3602 aeadctx->enckey_len = keys.enckeylen; 3603 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || 3604 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { 3605 3606 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, 3607 aeadctx->enckey_len << 3); 3608 } 3609 base_hash = chcr_alloc_shash(max_authsize); 3610 if (IS_ERR(base_hash)) { 3611 pr_err("chcr : Base driver cannot be loaded\n"); 3612 aeadctx->enckey_len = 0; 3613 memzero_explicit(&keys, sizeof(keys)); 3614 return -EINVAL; 3615 } 3616 { 3617 SHASH_DESC_ON_STACK(shash, base_hash); 3618 3619 shash->tfm = base_hash; 3620 bs = crypto_shash_blocksize(base_hash); 3621 align = KEYCTX_ALIGN_PAD(max_authsize); 3622 o_ptr = actx->h_iopad + param.result_size + align; 3623 3624 if (keys.authkeylen > bs) { 3625 err = crypto_shash_digest(shash, keys.authkey, 3626 keys.authkeylen, 3627 o_ptr); 3628 if (err) { 3629 pr_err("chcr : Base driver cannot be loaded\n"); 3630 goto out; 3631 } 3632 keys.authkeylen = max_authsize; 3633 } else 3634 memcpy(o_ptr, keys.authkey, keys.authkeylen); 3635 3636 /* Compute the ipad-digest*/ 3637 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); 3638 memcpy(pad, o_ptr, keys.authkeylen); 3639 for (i = 0; i < bs >> 2; i++) 3640 *((unsigned int *)pad + i) ^= IPAD_DATA; 3641 3642 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad, 3643 max_authsize)) 3644 goto out; 3645 /* Compute the opad-digest */ 3646 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); 3647 memcpy(pad, o_ptr, keys.authkeylen); 3648 for (i = 0; i < bs >> 2; i++) 3649 *((unsigned int *)pad + i) ^= OPAD_DATA; 3650 3651 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize)) 3652 goto out; 3653 3654 /* convert the ipad and opad digest to network order */ 3655 chcr_change_order(actx->h_iopad, param.result_size); 3656 chcr_change_order(o_ptr, param.result_size); 3657 key_ctx_len = sizeof(struct _key_ctx) + 3658 roundup(keys.enckeylen, 16) + 3659 (param.result_size + align) * 2; 3660 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, 3661 0, 1, key_ctx_len >> 4); 3662 actx->auth_mode = param.auth_mode; 3663 chcr_free_shash(base_hash); 3664 3665 memzero_explicit(&keys, sizeof(keys)); 3666 return 0; 3667 } 3668 out: 3669 aeadctx->enckey_len = 0; 3670 memzero_explicit(&keys, sizeof(keys)); 3671 if (!IS_ERR(base_hash)) 3672 chcr_free_shash(base_hash); 3673 return -EINVAL; 3674 } 3675 3676 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, 3677 const u8 *key, unsigned int keylen) 3678 { 3679 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc)); 3680 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); 3681 struct crypto_authenc_keys keys; 3682 int err; 3683 /* it contains auth and cipher key both*/ 3684 unsigned int subtype; 3685 int key_ctx_len = 0; 3686 unsigned char ck_size = 0; 3687 3688 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); 3689 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) 3690 & CRYPTO_TFM_REQ_MASK); 3691 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); 3692 if (err) 3693 goto out; 3694 3695 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 3696 goto out; 3697 3698 subtype = get_aead_subtype(authenc); 3699 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || 3700 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { 3701 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) 3702 goto out; 3703 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen 3704 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); 3705 
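		/* The trailing CTR_RFC3686_NONCE_SIZE bytes of the cipher key
		 * are the nonce, not AES key material.
		 */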
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct crypto_authenc_keys keys;
	int err;
	/* the key buffer carries both the auth key and the cipher key */
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto out;

	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

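/*
 * chcr_aead_op - common submission path for all AEAD requests.
 * Takes a work-request reference on the device (chcr_inc_wrcount) and falls
 * back to the software implementation if the device is detaching. Backs off
 * with -ENOSPC when the crypto queue is full and the request may not be
 * backlogged. Otherwise builds a work request with @create_wr_fn, hands it to
 * the hardware and returns -EINPROGRESS; completion is reported asynchronously
 * through the request's completion callback.
 */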
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct chcr_dev *cdev;

	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
		pr_err("%s : No crypto device.\n", __func__);
		return -ENXIO;
	}

	if (chcr_inc_wrcount(cdev)) {
		/* Detach state for CHCR means lldi or padap is freed;
		 * we cannot take a work-request reference here, so complete
		 * the request through the software fallback.
		 */
		return chcr_aead_fallback(req, reqctx->op);
	}

	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   reqctx->txqidx) &&
	    (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		chcr_dec_wrcount(cdev);
		return -ENOSPC;
	}

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
		pr_err("RFC4106: Invalid value of assoclen %d\n",
		       req->assoclen);
		chcr_dec_wrcount(cdev);
		return -EINVAL;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);

	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
	}

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct chcr_context *ctx = a_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}

static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-chcr",
			.base.cra_blocksize = AES_BLOCK_SIZE,

			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_cbc_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS, 3876 .is_registered = 0, 3877 .alg.skcipher = { 3878 .base.cra_name = "xts(aes)", 3879 .base.cra_driver_name = "xts-aes-chcr", 3880 .base.cra_blocksize = AES_BLOCK_SIZE, 3881 3882 .init = chcr_init_tfm, 3883 .exit = chcr_exit_tfm, 3884 .min_keysize = 2 * AES_MIN_KEY_SIZE, 3885 .max_keysize = 2 * AES_MAX_KEY_SIZE, 3886 .ivsize = AES_BLOCK_SIZE, 3887 .setkey = chcr_aes_xts_setkey, 3888 .encrypt = chcr_aes_encrypt, 3889 .decrypt = chcr_aes_decrypt, 3890 } 3891 }, 3892 { 3893 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR, 3894 .is_registered = 0, 3895 .alg.skcipher = { 3896 .base.cra_name = "ctr(aes)", 3897 .base.cra_driver_name = "ctr-aes-chcr", 3898 .base.cra_blocksize = 1, 3899 3900 .init = chcr_init_tfm, 3901 .exit = chcr_exit_tfm, 3902 .min_keysize = AES_MIN_KEY_SIZE, 3903 .max_keysize = AES_MAX_KEY_SIZE, 3904 .ivsize = AES_BLOCK_SIZE, 3905 .setkey = chcr_aes_ctr_setkey, 3906 .encrypt = chcr_aes_encrypt, 3907 .decrypt = chcr_aes_decrypt, 3908 } 3909 }, 3910 { 3911 .type = CRYPTO_ALG_TYPE_SKCIPHER | 3912 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686, 3913 .is_registered = 0, 3914 .alg.skcipher = { 3915 .base.cra_name = "rfc3686(ctr(aes))", 3916 .base.cra_driver_name = "rfc3686-ctr-aes-chcr", 3917 .base.cra_blocksize = 1, 3918 3919 .init = chcr_rfc3686_init, 3920 .exit = chcr_exit_tfm, 3921 .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, 3922 .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, 3923 .ivsize = CTR_RFC3686_IV_SIZE, 3924 .setkey = chcr_aes_rfc3686_setkey, 3925 .encrypt = chcr_aes_encrypt, 3926 .decrypt = chcr_aes_decrypt, 3927 } 3928 }, 3929 /* SHA */ 3930 { 3931 .type = CRYPTO_ALG_TYPE_AHASH, 3932 .is_registered = 0, 3933 .alg.hash = { 3934 .halg.digestsize = SHA1_DIGEST_SIZE, 3935 .halg.base = { 3936 .cra_name = "sha1", 3937 .cra_driver_name = "sha1-chcr", 3938 .cra_blocksize = SHA1_BLOCK_SIZE, 3939 } 3940 } 3941 }, 3942 { 3943 .type = CRYPTO_ALG_TYPE_AHASH, 3944 .is_registered = 0, 3945 .alg.hash = { 3946 .halg.digestsize = SHA256_DIGEST_SIZE, 3947 .halg.base = { 3948 .cra_name = "sha256", 3949 .cra_driver_name = "sha256-chcr", 3950 .cra_blocksize = SHA256_BLOCK_SIZE, 3951 } 3952 } 3953 }, 3954 { 3955 .type = CRYPTO_ALG_TYPE_AHASH, 3956 .is_registered = 0, 3957 .alg.hash = { 3958 .halg.digestsize = SHA224_DIGEST_SIZE, 3959 .halg.base = { 3960 .cra_name = "sha224", 3961 .cra_driver_name = "sha224-chcr", 3962 .cra_blocksize = SHA224_BLOCK_SIZE, 3963 } 3964 } 3965 }, 3966 { 3967 .type = CRYPTO_ALG_TYPE_AHASH, 3968 .is_registered = 0, 3969 .alg.hash = { 3970 .halg.digestsize = SHA384_DIGEST_SIZE, 3971 .halg.base = { 3972 .cra_name = "sha384", 3973 .cra_driver_name = "sha384-chcr", 3974 .cra_blocksize = SHA384_BLOCK_SIZE, 3975 } 3976 } 3977 }, 3978 { 3979 .type = CRYPTO_ALG_TYPE_AHASH, 3980 .is_registered = 0, 3981 .alg.hash = { 3982 .halg.digestsize = SHA512_DIGEST_SIZE, 3983 .halg.base = { 3984 .cra_name = "sha512", 3985 .cra_driver_name = "sha512-chcr", 3986 .cra_blocksize = SHA512_BLOCK_SIZE, 3987 } 3988 } 3989 }, 3990 /* HMAC */ 3991 { 3992 .type = CRYPTO_ALG_TYPE_HMAC, 3993 .is_registered = 0, 3994 .alg.hash = { 3995 .halg.digestsize = SHA1_DIGEST_SIZE, 3996 .halg.base = { 3997 .cra_name = "hmac(sha1)", 3998 .cra_driver_name = "hmac-sha1-chcr", 3999 .cra_blocksize = SHA1_BLOCK_SIZE, 4000 } 4001 } 4002 }, 4003 { 4004 .type = CRYPTO_ALG_TYPE_HMAC, 4005 .is_registered = 0, 4006 .alg.hash = { 4007 .halg.digestsize = SHA224_DIGEST_SIZE, 4008 .halg.base = { 4009 .cra_name = "hmac(sha224)", 4010 
.cra_driver_name = "hmac-sha224-chcr", 4011 .cra_blocksize = SHA224_BLOCK_SIZE, 4012 } 4013 } 4014 }, 4015 { 4016 .type = CRYPTO_ALG_TYPE_HMAC, 4017 .is_registered = 0, 4018 .alg.hash = { 4019 .halg.digestsize = SHA256_DIGEST_SIZE, 4020 .halg.base = { 4021 .cra_name = "hmac(sha256)", 4022 .cra_driver_name = "hmac-sha256-chcr", 4023 .cra_blocksize = SHA256_BLOCK_SIZE, 4024 } 4025 } 4026 }, 4027 { 4028 .type = CRYPTO_ALG_TYPE_HMAC, 4029 .is_registered = 0, 4030 .alg.hash = { 4031 .halg.digestsize = SHA384_DIGEST_SIZE, 4032 .halg.base = { 4033 .cra_name = "hmac(sha384)", 4034 .cra_driver_name = "hmac-sha384-chcr", 4035 .cra_blocksize = SHA384_BLOCK_SIZE, 4036 } 4037 } 4038 }, 4039 { 4040 .type = CRYPTO_ALG_TYPE_HMAC, 4041 .is_registered = 0, 4042 .alg.hash = { 4043 .halg.digestsize = SHA512_DIGEST_SIZE, 4044 .halg.base = { 4045 .cra_name = "hmac(sha512)", 4046 .cra_driver_name = "hmac-sha512-chcr", 4047 .cra_blocksize = SHA512_BLOCK_SIZE, 4048 } 4049 } 4050 }, 4051 /* Add AEAD Algorithms */ 4052 { 4053 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM, 4054 .is_registered = 0, 4055 .alg.aead = { 4056 .base = { 4057 .cra_name = "gcm(aes)", 4058 .cra_driver_name = "gcm-aes-chcr", 4059 .cra_blocksize = 1, 4060 .cra_priority = CHCR_AEAD_PRIORITY, 4061 .cra_ctxsize = sizeof(struct chcr_context) + 4062 sizeof(struct chcr_aead_ctx) + 4063 sizeof(struct chcr_gcm_ctx), 4064 }, 4065 .ivsize = GCM_AES_IV_SIZE, 4066 .maxauthsize = GHASH_DIGEST_SIZE, 4067 .setkey = chcr_gcm_setkey, 4068 .setauthsize = chcr_gcm_setauthsize, 4069 } 4070 }, 4071 { 4072 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106, 4073 .is_registered = 0, 4074 .alg.aead = { 4075 .base = { 4076 .cra_name = "rfc4106(gcm(aes))", 4077 .cra_driver_name = "rfc4106-gcm-aes-chcr", 4078 .cra_blocksize = 1, 4079 .cra_priority = CHCR_AEAD_PRIORITY + 1, 4080 .cra_ctxsize = sizeof(struct chcr_context) + 4081 sizeof(struct chcr_aead_ctx) + 4082 sizeof(struct chcr_gcm_ctx), 4083 4084 }, 4085 .ivsize = GCM_RFC4106_IV_SIZE, 4086 .maxauthsize = GHASH_DIGEST_SIZE, 4087 .setkey = chcr_gcm_setkey, 4088 .setauthsize = chcr_4106_4309_setauthsize, 4089 } 4090 }, 4091 { 4092 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM, 4093 .is_registered = 0, 4094 .alg.aead = { 4095 .base = { 4096 .cra_name = "ccm(aes)", 4097 .cra_driver_name = "ccm-aes-chcr", 4098 .cra_blocksize = 1, 4099 .cra_priority = CHCR_AEAD_PRIORITY, 4100 .cra_ctxsize = sizeof(struct chcr_context) + 4101 sizeof(struct chcr_aead_ctx), 4102 4103 }, 4104 .ivsize = AES_BLOCK_SIZE, 4105 .maxauthsize = GHASH_DIGEST_SIZE, 4106 .setkey = chcr_aead_ccm_setkey, 4107 .setauthsize = chcr_ccm_setauthsize, 4108 } 4109 }, 4110 { 4111 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309, 4112 .is_registered = 0, 4113 .alg.aead = { 4114 .base = { 4115 .cra_name = "rfc4309(ccm(aes))", 4116 .cra_driver_name = "rfc4309-ccm-aes-chcr", 4117 .cra_blocksize = 1, 4118 .cra_priority = CHCR_AEAD_PRIORITY + 1, 4119 .cra_ctxsize = sizeof(struct chcr_context) + 4120 sizeof(struct chcr_aead_ctx), 4121 4122 }, 4123 .ivsize = 8, 4124 .maxauthsize = GHASH_DIGEST_SIZE, 4125 .setkey = chcr_aead_rfc4309_setkey, 4126 .setauthsize = chcr_4106_4309_setauthsize, 4127 } 4128 }, 4129 { 4130 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, 4131 .is_registered = 0, 4132 .alg.aead = { 4133 .base = { 4134 .cra_name = "authenc(hmac(sha1),cbc(aes))", 4135 .cra_driver_name = 4136 "authenc-hmac-sha1-cbc-aes-chcr", 4137 .cra_blocksize = AES_BLOCK_SIZE, 4138 .cra_priority = CHCR_AEAD_PRIORITY, 
4139 .cra_ctxsize = sizeof(struct chcr_context) + 4140 sizeof(struct chcr_aead_ctx) + 4141 sizeof(struct chcr_authenc_ctx), 4142 4143 }, 4144 .ivsize = AES_BLOCK_SIZE, 4145 .maxauthsize = SHA1_DIGEST_SIZE, 4146 .setkey = chcr_authenc_setkey, 4147 .setauthsize = chcr_authenc_setauthsize, 4148 } 4149 }, 4150 { 4151 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, 4152 .is_registered = 0, 4153 .alg.aead = { 4154 .base = { 4155 4156 .cra_name = "authenc(hmac(sha256),cbc(aes))", 4157 .cra_driver_name = 4158 "authenc-hmac-sha256-cbc-aes-chcr", 4159 .cra_blocksize = AES_BLOCK_SIZE, 4160 .cra_priority = CHCR_AEAD_PRIORITY, 4161 .cra_ctxsize = sizeof(struct chcr_context) + 4162 sizeof(struct chcr_aead_ctx) + 4163 sizeof(struct chcr_authenc_ctx), 4164 4165 }, 4166 .ivsize = AES_BLOCK_SIZE, 4167 .maxauthsize = SHA256_DIGEST_SIZE, 4168 .setkey = chcr_authenc_setkey, 4169 .setauthsize = chcr_authenc_setauthsize, 4170 } 4171 }, 4172 { 4173 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, 4174 .is_registered = 0, 4175 .alg.aead = { 4176 .base = { 4177 .cra_name = "authenc(hmac(sha224),cbc(aes))", 4178 .cra_driver_name = 4179 "authenc-hmac-sha224-cbc-aes-chcr", 4180 .cra_blocksize = AES_BLOCK_SIZE, 4181 .cra_priority = CHCR_AEAD_PRIORITY, 4182 .cra_ctxsize = sizeof(struct chcr_context) + 4183 sizeof(struct chcr_aead_ctx) + 4184 sizeof(struct chcr_authenc_ctx), 4185 }, 4186 .ivsize = AES_BLOCK_SIZE, 4187 .maxauthsize = SHA224_DIGEST_SIZE, 4188 .setkey = chcr_authenc_setkey, 4189 .setauthsize = chcr_authenc_setauthsize, 4190 } 4191 }, 4192 { 4193 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, 4194 .is_registered = 0, 4195 .alg.aead = { 4196 .base = { 4197 .cra_name = "authenc(hmac(sha384),cbc(aes))", 4198 .cra_driver_name = 4199 "authenc-hmac-sha384-cbc-aes-chcr", 4200 .cra_blocksize = AES_BLOCK_SIZE, 4201 .cra_priority = CHCR_AEAD_PRIORITY, 4202 .cra_ctxsize = sizeof(struct chcr_context) + 4203 sizeof(struct chcr_aead_ctx) + 4204 sizeof(struct chcr_authenc_ctx), 4205 4206 }, 4207 .ivsize = AES_BLOCK_SIZE, 4208 .maxauthsize = SHA384_DIGEST_SIZE, 4209 .setkey = chcr_authenc_setkey, 4210 .setauthsize = chcr_authenc_setauthsize, 4211 } 4212 }, 4213 { 4214 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, 4215 .is_registered = 0, 4216 .alg.aead = { 4217 .base = { 4218 .cra_name = "authenc(hmac(sha512),cbc(aes))", 4219 .cra_driver_name = 4220 "authenc-hmac-sha512-cbc-aes-chcr", 4221 .cra_blocksize = AES_BLOCK_SIZE, 4222 .cra_priority = CHCR_AEAD_PRIORITY, 4223 .cra_ctxsize = sizeof(struct chcr_context) + 4224 sizeof(struct chcr_aead_ctx) + 4225 sizeof(struct chcr_authenc_ctx), 4226 4227 }, 4228 .ivsize = AES_BLOCK_SIZE, 4229 .maxauthsize = SHA512_DIGEST_SIZE, 4230 .setkey = chcr_authenc_setkey, 4231 .setauthsize = chcr_authenc_setauthsize, 4232 } 4233 }, 4234 { 4235 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL, 4236 .is_registered = 0, 4237 .alg.aead = { 4238 .base = { 4239 .cra_name = "authenc(digest_null,cbc(aes))", 4240 .cra_driver_name = 4241 "authenc-digest_null-cbc-aes-chcr", 4242 .cra_blocksize = AES_BLOCK_SIZE, 4243 .cra_priority = CHCR_AEAD_PRIORITY, 4244 .cra_ctxsize = sizeof(struct chcr_context) + 4245 sizeof(struct chcr_aead_ctx) + 4246 sizeof(struct chcr_authenc_ctx), 4247 4248 }, 4249 .ivsize = AES_BLOCK_SIZE, 4250 .maxauthsize = 0, 4251 .setkey = chcr_aead_digest_null_setkey, 4252 .setauthsize = chcr_authenc_null_setauthsize, 4253 } 4254 }, 4255 { 4256 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 4257 .is_registered = 0, 
4258 .alg.aead = { 4259 .base = { 4260 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", 4261 .cra_driver_name = 4262 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr", 4263 .cra_blocksize = 1, 4264 .cra_priority = CHCR_AEAD_PRIORITY, 4265 .cra_ctxsize = sizeof(struct chcr_context) + 4266 sizeof(struct chcr_aead_ctx) + 4267 sizeof(struct chcr_authenc_ctx), 4268 4269 }, 4270 .ivsize = CTR_RFC3686_IV_SIZE, 4271 .maxauthsize = SHA1_DIGEST_SIZE, 4272 .setkey = chcr_authenc_setkey, 4273 .setauthsize = chcr_authenc_setauthsize, 4274 } 4275 }, 4276 { 4277 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 4278 .is_registered = 0, 4279 .alg.aead = { 4280 .base = { 4281 4282 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", 4283 .cra_driver_name = 4284 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr", 4285 .cra_blocksize = 1, 4286 .cra_priority = CHCR_AEAD_PRIORITY, 4287 .cra_ctxsize = sizeof(struct chcr_context) + 4288 sizeof(struct chcr_aead_ctx) + 4289 sizeof(struct chcr_authenc_ctx), 4290 4291 }, 4292 .ivsize = CTR_RFC3686_IV_SIZE, 4293 .maxauthsize = SHA256_DIGEST_SIZE, 4294 .setkey = chcr_authenc_setkey, 4295 .setauthsize = chcr_authenc_setauthsize, 4296 } 4297 }, 4298 { 4299 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 4300 .is_registered = 0, 4301 .alg.aead = { 4302 .base = { 4303 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", 4304 .cra_driver_name = 4305 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr", 4306 .cra_blocksize = 1, 4307 .cra_priority = CHCR_AEAD_PRIORITY, 4308 .cra_ctxsize = sizeof(struct chcr_context) + 4309 sizeof(struct chcr_aead_ctx) + 4310 sizeof(struct chcr_authenc_ctx), 4311 }, 4312 .ivsize = CTR_RFC3686_IV_SIZE, 4313 .maxauthsize = SHA224_DIGEST_SIZE, 4314 .setkey = chcr_authenc_setkey, 4315 .setauthsize = chcr_authenc_setauthsize, 4316 } 4317 }, 4318 { 4319 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 4320 .is_registered = 0, 4321 .alg.aead = { 4322 .base = { 4323 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", 4324 .cra_driver_name = 4325 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr", 4326 .cra_blocksize = 1, 4327 .cra_priority = CHCR_AEAD_PRIORITY, 4328 .cra_ctxsize = sizeof(struct chcr_context) + 4329 sizeof(struct chcr_aead_ctx) + 4330 sizeof(struct chcr_authenc_ctx), 4331 4332 }, 4333 .ivsize = CTR_RFC3686_IV_SIZE, 4334 .maxauthsize = SHA384_DIGEST_SIZE, 4335 .setkey = chcr_authenc_setkey, 4336 .setauthsize = chcr_authenc_setauthsize, 4337 } 4338 }, 4339 { 4340 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 4341 .is_registered = 0, 4342 .alg.aead = { 4343 .base = { 4344 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", 4345 .cra_driver_name = 4346 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr", 4347 .cra_blocksize = 1, 4348 .cra_priority = CHCR_AEAD_PRIORITY, 4349 .cra_ctxsize = sizeof(struct chcr_context) + 4350 sizeof(struct chcr_aead_ctx) + 4351 sizeof(struct chcr_authenc_ctx), 4352 4353 }, 4354 .ivsize = CTR_RFC3686_IV_SIZE, 4355 .maxauthsize = SHA512_DIGEST_SIZE, 4356 .setkey = chcr_authenc_setkey, 4357 .setauthsize = chcr_authenc_setauthsize, 4358 } 4359 }, 4360 { 4361 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL, 4362 .is_registered = 0, 4363 .alg.aead = { 4364 .base = { 4365 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))", 4366 .cra_driver_name = 4367 "authenc-digest_null-rfc3686-ctr-aes-chcr", 4368 .cra_blocksize = 1, 4369 .cra_priority = CHCR_AEAD_PRIORITY, 4370 .cra_ctxsize = sizeof(struct chcr_context) + 4371 sizeof(struct chcr_aead_ctx) + 4372 sizeof(struct chcr_authenc_ctx), 
4373 4374 }, 4375 .ivsize = CTR_RFC3686_IV_SIZE, 4376 .maxauthsize = 0, 4377 .setkey = chcr_aead_digest_null_setkey, 4378 .setauthsize = chcr_authenc_null_setauthsize, 4379 } 4380 }, 4381 }; 4382 4383 /* 4384 * chcr_unregister_alg - Deregister crypto algorithms with 4385 * kernel framework. 4386 */ 4387 static int chcr_unregister_alg(void) 4388 { 4389 int i; 4390 4391 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 4392 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { 4393 case CRYPTO_ALG_TYPE_SKCIPHER: 4394 if (driver_algs[i].is_registered && refcount_read( 4395 &driver_algs[i].alg.skcipher.base.cra_refcnt) 4396 == 1) { 4397 crypto_unregister_skcipher( 4398 &driver_algs[i].alg.skcipher); 4399 driver_algs[i].is_registered = 0; 4400 } 4401 break; 4402 case CRYPTO_ALG_TYPE_AEAD: 4403 if (driver_algs[i].is_registered && refcount_read( 4404 &driver_algs[i].alg.aead.base.cra_refcnt) == 1) { 4405 crypto_unregister_aead( 4406 &driver_algs[i].alg.aead); 4407 driver_algs[i].is_registered = 0; 4408 } 4409 break; 4410 case CRYPTO_ALG_TYPE_AHASH: 4411 if (driver_algs[i].is_registered && refcount_read( 4412 &driver_algs[i].alg.hash.halg.base.cra_refcnt) 4413 == 1) { 4414 crypto_unregister_ahash( 4415 &driver_algs[i].alg.hash); 4416 driver_algs[i].is_registered = 0; 4417 } 4418 break; 4419 } 4420 } 4421 return 0; 4422 } 4423 4424 #define SZ_AHASH_CTX sizeof(struct chcr_context) 4425 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx)) 4426 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx) 4427 4428 /* 4429 * chcr_register_alg - Register crypto algorithms with kernel framework. 4430 */ 4431 static int chcr_register_alg(void) 4432 { 4433 struct crypto_alg ai; 4434 struct ahash_alg *a_hash; 4435 int err = 0, i; 4436 char *name = NULL; 4437 4438 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 4439 if (driver_algs[i].is_registered) 4440 continue; 4441 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { 4442 case CRYPTO_ALG_TYPE_SKCIPHER: 4443 driver_algs[i].alg.skcipher.base.cra_priority = 4444 CHCR_CRA_PRIORITY; 4445 driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE; 4446 driver_algs[i].alg.skcipher.base.cra_flags = 4447 CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | 4448 CRYPTO_ALG_NEED_FALLBACK; 4449 driver_algs[i].alg.skcipher.base.cra_ctxsize = 4450 sizeof(struct chcr_context) + 4451 sizeof(struct ablk_ctx); 4452 driver_algs[i].alg.skcipher.base.cra_alignmask = 0; 4453 4454 err = crypto_register_skcipher(&driver_algs[i].alg.skcipher); 4455 name = driver_algs[i].alg.skcipher.base.cra_driver_name; 4456 break; 4457 case CRYPTO_ALG_TYPE_AEAD: 4458 driver_algs[i].alg.aead.base.cra_flags = 4459 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; 4460 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; 4461 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; 4462 driver_algs[i].alg.aead.init = chcr_aead_cra_init; 4463 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit; 4464 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE; 4465 err = crypto_register_aead(&driver_algs[i].alg.aead); 4466 name = driver_algs[i].alg.aead.base.cra_driver_name; 4467 break; 4468 case CRYPTO_ALG_TYPE_AHASH: 4469 a_hash = &driver_algs[i].alg.hash; 4470 a_hash->update = chcr_ahash_update; 4471 a_hash->final = chcr_ahash_final; 4472 a_hash->finup = chcr_ahash_finup; 4473 a_hash->digest = chcr_ahash_digest; 4474 a_hash->export = chcr_ahash_export; 4475 a_hash->import = chcr_ahash_import; 4476 a_hash->halg.statesize = SZ_AHASH_REQ_CTX; 4477 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; 
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("%s : Algorithm registration failed\n", name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once, when the first device comes up. After this the
 * kernel will start calling the driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once, when the last device goes down. After this the
 * kernel will no longer call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
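/*
 * Usage sketch (not built into the driver): once start_crypto() has registered
 * driver_algs[], a kernel consumer picks up these implementations through the
 * regular crypto API purely by cra_priority; nothing chcr-specific is needed.
 * The algorithm name "gcm(aes)" and the 16-byte key/tag sizes below are example
 * choices for illustration, not values mandated by this driver.
 *
 *	struct crypto_aead *tfm;
 *	u8 key[16] = { 0 };	// example key, normally supplied by the user
 *	int err;
 *
 *	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	// Resolves to "gcm-aes-chcr" when it is the highest-priority gcm(aes).
 *	err = crypto_aead_setkey(tfm, key, sizeof(key));
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, 16);
 *	crypto_free_aead(tfm);
 */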