1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/gcm.h>
57 #include <crypto/sha.h>
58 #include <crypto/authenc.h>
59 #include <crypto/ctr.h>
60 #include <crypto/gf128mul.h>
61 #include <crypto/internal/aead.h>
62 #include <crypto/null.h>
63 #include <crypto/internal/skcipher.h>
64 #include <crypto/aead.h>
65 #include <crypto/scatterwalk.h>
66 #include <crypto/internal/hash.h>
67 
68 #include "t4fw_api.h"
69 #include "t4_msg.h"
70 #include "chcr_core.h"
71 #include "chcr_algo.h"
72 #include "chcr_crypto.h"
73 
74 #define IV AES_BLOCK_SIZE
75 
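/*
 * WR space (in bytes) consumed by a source ULPTX SGL (sgl_ent_len) or a
 * destination PHYS_DSGL (dsgl_ent_len) holding the indexed number of
 * scatter-gather entries; used when deciding how much payload fits in a
 * single work request.
 */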
76 static unsigned int sgl_ent_len[] = {
77 	0, 0, 16, 24, 40, 48, 64, 72, 88,
78 	96, 112, 120, 136, 144, 160, 168, 184,
79 	192, 208, 216, 232, 240, 256, 264, 280,
80 	288, 304, 312, 328, 336, 352, 360, 376
81 };
82 
83 static unsigned int dsgl_ent_len[] = {
84 	0, 32, 32, 48, 48, 64, 64, 80, 80,
85 	112, 112, 128, 128, 144, 144, 160, 160,
86 	192, 192, 208, 208, 224, 224, 240, 240,
87 	272, 272, 288, 288, 304, 304, 320, 320
88 };
89 
90 static u32 round_constant[11] = {
91 	0x01000000, 0x02000000, 0x04000000, 0x08000000,
92 	0x10000000, 0x20000000, 0x40000000, 0x80000000,
93 	0x1B000000, 0x36000000, 0x6C000000
94 };
95 
96 static int chcr_handle_cipher_resp(struct skcipher_request *req,
97 				   unsigned char *input, int err);
98 
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
100 {
101 	return ctx->crypto_ctx->aeadctx;
102 }
103 
104 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
105 {
106 	return ctx->crypto_ctx->ablkctx;
107 }
108 
109 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
110 {
111 	return ctx->crypto_ctx->hmacctx;
112 }
113 
114 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
115 {
116 	return gctx->ctx->gcm;
117 }
118 
119 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
120 {
121 	return gctx->ctx->authenc;
122 }
123 
124 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
125 {
126 	return container_of(ctx->dev, struct uld_ctx, dev);
127 }
128 
129 static inline int is_ofld_imm(const struct sk_buff *skb)
130 {
131 	return (skb->len <= SGE_MAX_WR_LEN);
132 }
133 
134 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
135 {
136 	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
137 }
138 
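/*
 * Count the SGL entries needed to describe @reqlen bytes of @sg, skipping
 * the first @skip bytes and splitting each DMA segment into chunks of at
 * most @entlen bytes.
 */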
139 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
140 			 unsigned int entlen,
141 			 unsigned int skip)
142 {
143 	int nents = 0;
144 	unsigned int less;
145 	unsigned int skip_len = 0;
146 
147 	while (sg && skip) {
148 		if (sg_dma_len(sg) <= skip) {
149 			skip -= sg_dma_len(sg);
150 			skip_len = 0;
151 			sg = sg_next(sg);
152 		} else {
153 			skip_len = skip;
154 			skip = 0;
155 		}
156 	}
157 
158 	while (sg && reqlen) {
159 		less = min(reqlen, sg_dma_len(sg) - skip_len);
160 		nents += DIV_ROUND_UP(less, entlen);
161 		reqlen -= less;
162 		skip_len = 0;
163 		sg = sg_next(sg);
164 	}
165 	return nents;
166 }
167 
168 static inline int get_aead_subtype(struct crypto_aead *aead)
169 {
170 	struct aead_alg *alg = crypto_aead_alg(aead);
171 	struct chcr_alg_template *chcr_crypto_alg =
172 		container_of(alg, struct chcr_alg_template, alg.aead);
173 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
174 }
175 
176 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
177 {
178 	u8 temp[SHA512_DIGEST_SIZE];
179 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
180 	int authsize = crypto_aead_authsize(tfm);
181 	struct cpl_fw6_pld *fw6_pld;
182 	int cmp = 0;
183 
184 	fw6_pld = (struct cpl_fw6_pld *)input;
185 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
186 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
187 		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
190 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
191 				authsize, req->assoclen +
192 				req->cryptlen - authsize);
193 		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
194 	}
195 	if (cmp)
196 		*err = -EBADMSG;
197 	else
198 		*err = 0;
199 }
200 
201 static int chcr_inc_wrcount(struct chcr_dev *dev)
202 {
203 	if (dev->state == CHCR_DETACH)
204 		return 1;
205 	atomic_inc(&dev->inflight);
206 	return 0;
207 }
208 
209 static inline void chcr_dec_wrcount(struct chcr_dev *dev)
210 {
211 	atomic_dec(&dev->inflight);
212 }
213 
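/*
 * Completion path for AEAD work requests: run the common exit, verify the
 * authentication tag in software when VERIFY_SW is set, drop the inflight
 * count and complete the request.
 */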
214 static inline int chcr_handle_aead_resp(struct aead_request *req,
215 					 unsigned char *input,
216 					 int err)
217 {
218 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
219 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
220 	struct chcr_dev *dev = a_ctx(tfm)->dev;
221 
222 	chcr_aead_common_exit(req);
223 	if (reqctx->verify == VERIFY_SW) {
224 		chcr_verify_tag(req, input, &err);
225 		reqctx->verify = VERIFY_HW;
226 	}
227 	chcr_dec_wrcount(dev);
228 	req->base.complete(&req->base, err);
229 
230 	return err;
231 }
232 
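/*
 * Derive the "reverse round" key used for decryption: run the forward AES
 * key expansion, keeping only the last Nk schedule words in a ring, then
 * store them newest-first (big-endian) in @dec_key.
 */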
233 static void get_aes_decrypt_key(unsigned char *dec_key,
234 				       const unsigned char *key,
235 				       unsigned int keylength)
236 {
237 	u32 temp;
238 	u32 w_ring[MAX_NK];
239 	int i, j, k;
240 	u8  nr, nk;
241 
242 	switch (keylength) {
243 	case AES_KEYLENGTH_128BIT:
244 		nk = KEYLENGTH_4BYTES;
245 		nr = NUMBER_OF_ROUNDS_10;
246 		break;
247 	case AES_KEYLENGTH_192BIT:
248 		nk = KEYLENGTH_6BYTES;
249 		nr = NUMBER_OF_ROUNDS_12;
250 		break;
251 	case AES_KEYLENGTH_256BIT:
252 		nk = KEYLENGTH_8BYTES;
253 		nr = NUMBER_OF_ROUNDS_14;
254 		break;
255 	default:
256 		return;
257 	}
258 	for (i = 0; i < nk; i++)
259 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
260 
261 	i = 0;
262 	temp = w_ring[nk - 1];
263 	while (i + nk < (nr + 1) * 4) {
264 		if (!(i % nk)) {
265 			/* RotWord(temp) */
266 			temp = (temp << 8) | (temp >> 24);
267 			temp = aes_ks_subword(temp);
268 			temp ^= round_constant[i / nk];
269 		} else if (nk == 8 && (i % 4 == 0)) {
270 			temp = aes_ks_subword(temp);
271 		}
272 		w_ring[i % nk] ^= temp;
273 		temp = w_ring[i % nk];
274 		i++;
275 	}
276 	i--;
277 	for (k = 0, j = i % nk; k < nk; k++) {
278 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
279 		j--;
280 		if (j < 0)
281 			j += nk;
282 	}
283 }
284 
285 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
286 {
287 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
288 
289 	switch (ds) {
290 	case SHA1_DIGEST_SIZE:
291 		base_hash = crypto_alloc_shash("sha1", 0, 0);
292 		break;
293 	case SHA224_DIGEST_SIZE:
294 		base_hash = crypto_alloc_shash("sha224", 0, 0);
295 		break;
296 	case SHA256_DIGEST_SIZE:
297 		base_hash = crypto_alloc_shash("sha256", 0, 0);
298 		break;
299 	case SHA384_DIGEST_SIZE:
300 		base_hash = crypto_alloc_shash("sha384", 0, 0);
301 		break;
302 	case SHA512_DIGEST_SIZE:
303 		base_hash = crypto_alloc_shash("sha512", 0, 0);
304 		break;
305 	}
306 
307 	return base_hash;
308 }
309 
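/*
 * Hash one block of ipad/opad with the software shash and export the raw
 * intermediate state into @result_hash for use as a partial hash.
 */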
310 static int chcr_compute_partial_hash(struct shash_desc *desc,
311 				     char *iopad, char *result_hash,
312 				     int digest_size)
313 {
314 	struct sha1_state sha1_st;
315 	struct sha256_state sha256_st;
316 	struct sha512_state sha512_st;
317 	int error;
318 
319 	if (digest_size == SHA1_DIGEST_SIZE) {
320 		error = crypto_shash_init(desc) ?:
321 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
322 			crypto_shash_export(desc, (void *)&sha1_st);
323 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
324 	} else if (digest_size == SHA224_DIGEST_SIZE) {
325 		error = crypto_shash_init(desc) ?:
326 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
327 			crypto_shash_export(desc, (void *)&sha256_st);
328 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
329 
330 	} else if (digest_size == SHA256_DIGEST_SIZE) {
331 		error = crypto_shash_init(desc) ?:
332 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
333 			crypto_shash_export(desc, (void *)&sha256_st);
334 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
335 
336 	} else if (digest_size == SHA384_DIGEST_SIZE) {
337 		error = crypto_shash_init(desc) ?:
338 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
339 			crypto_shash_export(desc, (void *)&sha512_st);
340 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
341 
342 	} else if (digest_size == SHA512_DIGEST_SIZE) {
343 		error = crypto_shash_init(desc) ?:
344 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
345 			crypto_shash_export(desc, (void *)&sha512_st);
346 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
347 	} else {
348 		error = -EINVAL;
349 		pr_err("Unknown digest size %d\n", digest_size);
350 	}
351 	return error;
352 }
353 
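/*
 * Convert the words of @buf to big-endian: 64-bit words when @ds is the
 * SHA-512 digest size, 32-bit words otherwise.
 */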
354 static void chcr_change_order(char *buf, int ds)
355 {
356 	int i;
357 
358 	if (ds == SHA512_DIGEST_SIZE) {
359 		for (i = 0; i < (ds / sizeof(u64)); i++)
360 			*((__be64 *)buf + i) =
361 				cpu_to_be64(*((u64 *)buf + i));
362 	} else {
363 		for (i = 0; i < (ds / sizeof(u32)); i++)
364 			*((__be32 *)buf + i) =
365 				cpu_to_be32(*((u32 *)buf + i));
366 	}
367 }
368 
369 static inline int is_hmac(struct crypto_tfm *tfm)
370 {
371 	struct crypto_alg *alg = tfm->__crt_alg;
372 	struct chcr_alg_template *chcr_crypto_alg =
373 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
374 			     alg.hash);
375 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
376 		return 1;
377 	return 0;
378 }
379 
380 static inline void dsgl_walk_init(struct dsgl_walk *walk,
381 				   struct cpl_rx_phys_dsgl *dsgl)
382 {
383 	walk->dsgl = dsgl;
384 	walk->nents = 0;
385 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
386 }
387 
388 static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
389 				 int pci_chan_id)
390 {
391 	struct cpl_rx_phys_dsgl *phys_cpl;
392 
393 	phys_cpl = walk->dsgl;
394 
395 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
396 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
397 	phys_cpl->pcirlxorder_to_noofsgentr =
398 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
399 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
400 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
401 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
402 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
403 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
404 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
405 	phys_cpl->rss_hdr_int.qid = htons(qid);
406 	phys_cpl->rss_hdr_int.hash_val = 0;
407 	phys_cpl->rss_hdr_int.channel = pci_chan_id;
408 }
409 
410 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
411 					size_t size,
412 					dma_addr_t addr)
413 {
414 	int j;
415 
416 	if (!size)
417 		return;
418 	j = walk->nents;
419 	walk->to->len[j % 8] = htons(size);
420 	walk->to->addr[j % 8] = cpu_to_be64(addr);
421 	j++;
422 	if ((j % 8) == 0)
423 		walk->to++;
424 	walk->nents = j;
425 }
426 
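/*
 * Append up to @slen bytes of @sg (after skipping @skip bytes) to the
 * destination PHYS_DSGL, splitting entries at CHCR_DST_SG_SIZE and
 * remembering the last segment touched so a later WR can resume from it.
 */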
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
431 {
432 	int skip_len = 0;
433 	unsigned int left_size = slen, len = 0;
434 	unsigned int j = walk->nents;
435 	int offset, ent_len;
436 
437 	if (!slen)
438 		return;
439 	while (sg && skip) {
440 		if (sg_dma_len(sg) <= skip) {
441 			skip -= sg_dma_len(sg);
442 			skip_len = 0;
443 			sg = sg_next(sg);
444 		} else {
445 			skip_len = skip;
446 			skip = 0;
447 		}
448 	}
449 
450 	while (left_size && sg) {
451 		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
452 		offset = 0;
453 		while (len) {
454 			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
455 			walk->to->len[j % 8] = htons(ent_len);
456 			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
457 						      offset + skip_len);
458 			offset += ent_len;
459 			len -= ent_len;
460 			j++;
461 			if ((j % 8) == 0)
462 				walk->to++;
463 		}
464 		walk->last_sg = sg;
465 		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
466 					  skip_len) + skip_len;
467 		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
468 		skip_len = 0;
469 		sg = sg_next(sg);
470 	}
471 	walk->nents = j;
472 }
473 
474 static inline void ulptx_walk_init(struct ulptx_walk *walk,
475 				   struct ulptx_sgl *ulp)
476 {
477 	walk->sgl = ulp;
478 	walk->nents = 0;
479 	walk->pair_idx = 0;
480 	walk->pair = ulp->sge;
481 	walk->last_sg = NULL;
482 	walk->last_sg_len = 0;
483 }
484 
485 static inline void ulptx_walk_end(struct ulptx_walk *walk)
486 {
487 	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
488 			      ULPTX_NSGE_V(walk->nents));
489 }
490 
491 
492 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
493 					size_t size,
494 					dma_addr_t addr)
495 {
496 	if (!size)
497 		return;
498 
499 	if (walk->nents == 0) {
500 		walk->sgl->len0 = cpu_to_be32(size);
501 		walk->sgl->addr0 = cpu_to_be64(addr);
502 	} else {
503 		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
504 		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
505 		walk->pair_idx = !walk->pair_idx;
506 		if (!walk->pair_idx)
507 			walk->pair++;
508 	}
509 	walk->nents++;
510 }
511 
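/*
 * Append up to @len bytes of @sg (after skipping @skip bytes) to the source
 * ULPTX SGL: the first entry fills len0/addr0, later entries go into
 * addr/len pairs, each capped at CHCR_SRC_SG_SIZE.
 */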
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
516 {
517 	int small;
518 	int skip_len = 0;
519 	unsigned int sgmin;
520 
521 	if (!len)
522 		return;
523 	while (sg && skip) {
524 		if (sg_dma_len(sg) <= skip) {
525 			skip -= sg_dma_len(sg);
526 			skip_len = 0;
527 			sg = sg_next(sg);
528 		} else {
529 			skip_len = skip;
530 			skip = 0;
531 		}
532 	}
533 	WARN(!sg, "SG should not be null here\n");
534 	if (sg && (walk->nents == 0)) {
535 		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
536 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
537 		walk->sgl->len0 = cpu_to_be32(sgmin);
538 		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
539 		walk->nents++;
540 		len -= sgmin;
541 		walk->last_sg = sg;
542 		walk->last_sg_len = sgmin + skip_len;
543 		skip_len += sgmin;
544 		if (sg_dma_len(sg) == skip_len) {
545 			sg = sg_next(sg);
546 			skip_len = 0;
547 		}
548 	}
549 
550 	while (sg && len) {
551 		small = min(sg_dma_len(sg) - skip_len, len);
552 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
553 		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
554 		walk->pair->addr[walk->pair_idx] =
555 			cpu_to_be64(sg_dma_address(sg) + skip_len);
556 		walk->pair_idx = !walk->pair_idx;
557 		walk->nents++;
558 		if (!walk->pair_idx)
559 			walk->pair++;
560 		len -= sgmin;
561 		skip_len += sgmin;
562 		walk->last_sg = sg;
563 		walk->last_sg_len = skip_len;
564 		if (sg_dma_len(sg) == skip_len) {
565 			sg = sg_next(sg);
566 			skip_len = 0;
567 		}
568 	}
569 }
570 
571 static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
572 {
573 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
574 	struct chcr_alg_template *chcr_crypto_alg =
575 		container_of(alg, struct chcr_alg_template, alg.skcipher);
576 
577 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
578 }
579 
580 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
581 {
582 	struct adapter *adap = netdev2adap(dev);
583 	struct sge_uld_txq_info *txq_info =
584 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
585 	struct sge_uld_txq *txq;
586 	int ret = 0;
587 
588 	local_bh_disable();
589 	txq = &txq_info->uldtxq[idx];
590 	spin_lock(&txq->sendq.lock);
591 	if (txq->full)
592 		ret = -1;
593 	spin_unlock(&txq->sendq.lock);
594 	local_bh_enable();
595 	return ret;
596 }
597 
598 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
599 			       struct _key_ctx *key_ctx)
600 {
601 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
602 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
603 	} else {
604 		memcpy(key_ctx->key,
605 		       ablkctx->key + (ablkctx->enckey_len >> 1),
606 		       ablkctx->enckey_len >> 1);
607 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
608 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
609 	}
610 	return 0;
611 }
612 
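/*
 * Return how many source bytes can be carried in one hash WR given @space
 * bytes available for SGL entries, starting @srcskip bytes into @src.
 */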
613 static int chcr_hash_ent_in_wr(struct scatterlist *src,
614 			     unsigned int minsg,
615 			     unsigned int space,
616 			     unsigned int srcskip)
617 {
618 	int srclen = 0;
619 	int srcsg = minsg;
620 	int soffset = 0, sless;
621 
622 	if (sg_dma_len(src) == srcskip) {
623 		src = sg_next(src);
624 		srcskip = 0;
625 	}
626 	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
629 		srclen += sless;
630 		soffset += sless;
631 		srcsg++;
632 		if (sg_dma_len(src) == (soffset + srcskip)) {
633 			src = sg_next(src);
634 			soffset = 0;
635 			srcskip = 0;
636 		}
637 	}
638 	return srclen;
639 }
640 
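/*
 * Return how many payload bytes can be handled in one cipher WR: walk src
 * and dst together, charging SGL space per the sgl_ent_len/dsgl_ent_len
 * tables, and return the smaller of the source and destination counts.
 */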
641 static int chcr_sg_ent_in_wr(struct scatterlist *src,
642 			     struct scatterlist *dst,
643 			     unsigned int minsg,
644 			     unsigned int space,
645 			     unsigned int srcskip,
646 			     unsigned int dstskip)
647 {
648 	int srclen = 0, dstlen = 0;
649 	int srcsg = minsg, dstsg = minsg;
650 	int offset = 0, soffset = 0, less, sless = 0;
651 
652 	if (sg_dma_len(src) == srcskip) {
653 		src = sg_next(src);
654 		srcskip = 0;
655 	}
656 	if (sg_dma_len(dst) == dstskip) {
657 		dst = sg_next(dst);
658 		dstskip = 0;
659 	}
660 
661 	while (src && dst &&
662 	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
663 		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
664 				CHCR_SRC_SG_SIZE);
665 		srclen += sless;
666 		srcsg++;
667 		offset = 0;
668 		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
669 		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
670 			if (srclen <= dstlen)
671 				break;
672 			less = min_t(unsigned int, sg_dma_len(dst) - offset -
673 				     dstskip, CHCR_DST_SG_SIZE);
674 			dstlen += less;
675 			offset += less;
676 			if ((offset + dstskip) == sg_dma_len(dst)) {
677 				dst = sg_next(dst);
678 				offset = 0;
679 			}
680 			dstsg++;
681 			dstskip = 0;
682 		}
683 		soffset += sless;
684 		if ((soffset + srcskip) == sg_dma_len(src)) {
685 			src = sg_next(src);
686 			srcskip = 0;
687 			soffset = 0;
688 		}
689 
690 	}
691 	return min(srclen, dstlen);
692 }
693 
694 static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
695 				u32 flags,
696 				struct scatterlist *src,
697 				struct scatterlist *dst,
698 				unsigned int nbytes,
699 				u8 *iv,
700 				unsigned short op_type)
701 {
702 	int err;
703 
704 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
705 
706 	skcipher_request_set_sync_tfm(subreq, cipher);
707 	skcipher_request_set_callback(subreq, flags, NULL, NULL);
708 	skcipher_request_set_crypt(subreq, src, dst,
709 				   nbytes, iv);
710 
711 	err = op_type ? crypto_skcipher_decrypt(subreq) :
712 		crypto_skcipher_encrypt(subreq);
713 	skcipher_request_zero(subreq);
714 
715 	return err;
716 
717 }
718 
719 static inline int get_qidxs(struct crypto_async_request *req,
720 			    unsigned int *txqidx, unsigned int *rxqidx)
721 {
722 	struct crypto_tfm *tfm = req->tfm;
723 	int ret = 0;
724 
725 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
726 	case CRYPTO_ALG_TYPE_AEAD:
727 	{
728 		struct aead_request *aead_req =
729 			container_of(req, struct aead_request, base);
730 		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
731 		*txqidx = reqctx->txqidx;
732 		*rxqidx = reqctx->rxqidx;
733 		break;
734 	}
735 	case CRYPTO_ALG_TYPE_SKCIPHER:
736 	{
737 		struct skcipher_request *sk_req =
738 			container_of(req, struct skcipher_request, base);
739 		struct chcr_skcipher_req_ctx *reqctx =
740 			skcipher_request_ctx(sk_req);
741 		*txqidx = reqctx->txqidx;
742 		*rxqidx = reqctx->rxqidx;
743 		break;
744 	}
745 	case CRYPTO_ALG_TYPE_AHASH:
746 	{
747 		struct ahash_request *ahash_req =
748 			container_of(req, struct ahash_request, base);
749 		struct chcr_ahash_req_ctx *reqctx =
750 			ahash_request_ctx(ahash_req);
751 		*txqidx = reqctx->txqidx;
752 		*rxqidx = reqctx->rxqidx;
753 		break;
754 	}
755 	default:
756 		ret = -EINVAL;
757 		/* should never get here */
758 		BUG();
759 		break;
760 	}
761 	return ret;
762 }
763 
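/*
 * Fill the work request fields common to all chcr WRs: the
 * FW_CRYPTO_LOOKASIDE header, the ULPTX command and the immediate-data
 * sub-command.
 */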
764 static inline void create_wreq(struct chcr_context *ctx,
765 			       struct chcr_wr *chcr_req,
766 			       struct crypto_async_request *req,
767 			       unsigned int imm,
768 			       int hash_sz,
769 			       unsigned int len16,
770 			       unsigned int sc_len,
771 			       unsigned int lcb)
772 {
773 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
774 	unsigned int tx_channel_id, rx_channel_id;
775 	unsigned int txqidx = 0, rxqidx = 0;
776 	unsigned int qid, fid;
777 
778 	get_qidxs(req, &txqidx, &rxqidx);
779 	qid = u_ctx->lldi.rxq_ids[rxqidx];
780 	fid = u_ctx->lldi.rxq_ids[0];
781 	tx_channel_id = txqidx / ctx->txq_perchan;
782 	rx_channel_id = rxqidx / ctx->rxq_perchan;
783 
784 
785 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
786 	chcr_req->wreq.pld_size_hash_size =
787 		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
788 	chcr_req->wreq.len16_pkd =
789 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
790 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
791 	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
792 							    !!lcb, txqidx);
793 
794 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
795 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
796 				((sizeof(chcr_req->wreq)) >> 4)));
797 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
798 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
799 					   sizeof(chcr_req->key_ctx) + sc_len);
800 }
801 
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: cipher work request parameters: the skcipher request, the
 *		  ingress qid where the response to this WR should be
 *		  received, and the number of bytes to process.
 */
809 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
810 {
811 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
812 	struct chcr_context *ctx = c_ctx(tfm);
813 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
814 	struct sk_buff *skb = NULL;
815 	struct chcr_wr *chcr_req;
816 	struct cpl_rx_phys_dsgl *phys_cpl;
817 	struct ulptx_sgl *ulptx;
818 	struct chcr_skcipher_req_ctx *reqctx =
819 		skcipher_request_ctx(wrparam->req);
820 	unsigned int temp = 0, transhdr_len, dst_size;
821 	int error;
822 	int nents;
823 	unsigned int kctx_len;
824 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
825 			GFP_KERNEL : GFP_ATOMIC;
826 	struct adapter *adap = padap(ctx->dev);
827 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
828 
829 	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
830 			      reqctx->dst_ofst);
831 	dst_size = get_space_for_phys_dsgl(nents);
832 	kctx_len = roundup(ablkctx->enckey_len, 16);
833 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
834 	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
835 				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
836 	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
837 				     (sgl_len(nents) * 8);
838 	transhdr_len += temp;
839 	transhdr_len = roundup(transhdr_len, 16);
840 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
841 	if (!skb) {
842 		error = -ENOMEM;
843 		goto err;
844 	}
845 	chcr_req = __skb_put_zero(skb, transhdr_len);
846 	chcr_req->sec_cpl.op_ivinsrtofst =
847 			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
848 
849 	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
850 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
851 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
852 
853 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
854 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
855 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
856 							 ablkctx->ciph_mode,
857 							 0, 0, IV >> 1);
858 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
859 							  0, 1, dst_size);
860 
861 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
862 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
863 	    (!(get_cryptoalg_subtype(tfm) ==
864 	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
865 	    (!(get_cryptoalg_subtype(tfm) ==
866 	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
867 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
868 	} else {
869 		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
870 		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
871 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
872 			       ablkctx->enckey_len);
873 		} else {
874 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
875 			       (ablkctx->enckey_len >> 1),
876 			       ablkctx->enckey_len >> 1);
877 			memcpy(chcr_req->key_ctx.key +
878 			       (ablkctx->enckey_len >> 1),
879 			       ablkctx->key,
880 			       ablkctx->enckey_len >> 1);
881 		}
882 	}
883 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
884 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
885 	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
886 	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
887 
888 	atomic_inc(&adap->chcr_stats.cipher_rqst);
889 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
890 		+ (reqctx->imm ? (wrparam->bytes) : 0);
891 	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
892 		    transhdr_len, temp,
893 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
894 	reqctx->skb = skb;
895 
896 	if (reqctx->op && (ablkctx->ciph_mode ==
897 			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
898 		sg_pcopy_to_buffer(wrparam->req->src,
899 			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
900 			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
901 
902 	return skb;
903 err:
904 	return ERR_PTR(error);
905 }
906 
907 static inline int chcr_keyctx_ck_size(unsigned int keylen)
908 {
909 	int ck_size = 0;
910 
911 	if (keylen == AES_KEYSIZE_128)
912 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
913 	else if (keylen == AES_KEYSIZE_192)
914 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
915 	else if (keylen == AES_KEYSIZE_256)
916 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
917 	else
918 		ck_size = 0;
919 
920 	return ck_size;
}

922 static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
923 				       const u8 *key,
924 				       unsigned int keylen)
925 {
926 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
927 
928 	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
929 				CRYPTO_TFM_REQ_MASK);
930 	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
931 				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
932 	return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
933 }
934 
935 static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
936 			       const u8 *key,
937 			       unsigned int keylen)
938 {
939 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
940 	unsigned int ck_size, context_size;
941 	u16 alignment = 0;
942 	int err;
943 
944 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
945 	if (err)
946 		goto badkey_err;
947 
948 	ck_size = chcr_keyctx_ck_size(keylen);
949 	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
950 	memcpy(ablkctx->key, key, keylen);
951 	ablkctx->enckey_len = keylen;
952 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
953 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
954 			keylen + alignment) >> 4;
955 
956 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
957 						0, 0, context_size);
958 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
959 	return 0;
960 badkey_err:
961 	ablkctx->enckey_len = 0;
962 
963 	return err;
964 }
965 
966 static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
967 				   const u8 *key,
968 				   unsigned int keylen)
969 {
970 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
971 	unsigned int ck_size, context_size;
972 	u16 alignment = 0;
973 	int err;
974 
975 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
976 	if (err)
977 		goto badkey_err;
978 	ck_size = chcr_keyctx_ck_size(keylen);
979 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
980 	memcpy(ablkctx->key, key, keylen);
981 	ablkctx->enckey_len = keylen;
982 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
983 			keylen + alignment) >> 4;
984 
985 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
986 						0, 0, context_size);
987 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
988 
989 	return 0;
990 badkey_err:
991 	ablkctx->enckey_len = 0;
992 
993 	return err;
994 }
995 
996 static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
997 				   const u8 *key,
998 				   unsigned int keylen)
999 {
1000 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
1001 	unsigned int ck_size, context_size;
1002 	u16 alignment = 0;
1003 	int err;
1004 
1005 	if (keylen < CTR_RFC3686_NONCE_SIZE)
1006 		return -EINVAL;
1007 	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
1008 	       CTR_RFC3686_NONCE_SIZE);
1009 
1010 	keylen -= CTR_RFC3686_NONCE_SIZE;
1011 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
1012 	if (err)
1013 		goto badkey_err;
1014 
1015 	ck_size = chcr_keyctx_ck_size(keylen);
1016 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
1017 	memcpy(ablkctx->key, key, keylen);
1018 	ablkctx->enckey_len = keylen;
1019 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
1020 			keylen + alignment) >> 4;
1021 
1022 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
1023 						0, 0, context_size);
1024 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1025 
1026 	return 0;
1027 badkey_err:
1028 	ablkctx->enckey_len = 0;
1029 
1030 	return err;
1031 }
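
/*
 * Add @add to the big-endian 128-bit counter in @srciv, storing the result
 * in @dstiv and propagating any carry into the more significant words.
 */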
1032 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
1033 {
1034 	unsigned int size = AES_BLOCK_SIZE;
1035 	__be32 *b = (__be32 *)(dstiv + size);
1036 	u32 c, prev;
1037 
1038 	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1039 	for (; size >= 4; size -= 4) {
1040 		prev = be32_to_cpu(*--b);
1041 		c = prev + add;
1042 		*b = cpu_to_be32(c);
1043 		if (prev < c)
1044 			break;
1045 		add = 1;
1046 	}
1047 
1048 }
1049 
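/*
 * Trim @bytes so that the low 32-bit word of the CTR counter in @iv does
 * not wrap within a single work request.
 */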
1050 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1051 {
1052 	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1053 	u64 c;
1054 	u32 temp = be32_to_cpu(*--b);
1055 
1056 	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks we can process without overflow */
1058 	if ((bytes / AES_BLOCK_SIZE) > c)
1059 		bytes = c * AES_BLOCK_SIZE;
1060 	return bytes;
1061 }
1062 
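/*
 * Advance the XTS tweak past the blocks covered by the previous WR: encrypt
 * the saved IV with the second half of the key, multiply by x in GF(2^128)
 * once per block (eight at a time where possible) and, unless this is the
 * final IV, decrypt it back so the hardware can regenerate the tweak.
 */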
1063 static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1064 			     u32 isfinal)
1065 {
1066 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1067 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1068 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1069 	struct crypto_aes_ctx aes;
1070 	int ret, i;
1071 	u8 *key;
1072 	unsigned int keylen;
1073 	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1074 	int round8 = round / 8;
1075 
1076 	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1077 
1078 	keylen = ablkctx->enckey_len / 2;
1079 	key = ablkctx->key + keylen;
1080 	ret = aes_expandkey(&aes, key, keylen);
1081 	if (ret)
1082 		return ret;
1083 	aes_encrypt(&aes, iv, iv);
1084 	for (i = 0; i < round8; i++)
1085 		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1086 
1087 	for (i = 0; i < (round % 8); i++)
1088 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1089 
1090 	if (!isfinal)
1091 		aes_decrypt(&aes, iv, iv);
1092 
1093 	memzero_explicit(&aes, sizeof(aes));
1094 	return 0;
1095 }
1096 
1097 static int chcr_update_cipher_iv(struct skcipher_request *req,
1098 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1099 {
1100 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1101 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1102 	int subtype = get_cryptoalg_subtype(tfm);
1103 	int ret = 0;
1104 
1105 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1106 		ctr_add_iv(iv, req->iv, (reqctx->processed /
1107 			   AES_BLOCK_SIZE));
1108 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1109 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1110 			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1111 						AES_BLOCK_SIZE) + 1);
1112 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1113 		ret = chcr_update_tweak(req, iv, 0);
1114 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1115 		if (reqctx->op)
1116 			/*Updated before sending last WR*/
1117 			memcpy(iv, req->iv, AES_BLOCK_SIZE);
1118 		else
1119 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1120 	}
1121 
1122 	return ret;
1123 
1124 }
1125 
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts at 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
1130 
1131 static int chcr_final_cipher_iv(struct skcipher_request *req,
1132 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1133 {
1134 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1135 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1136 	int subtype = get_cryptoalg_subtype(tfm);
1137 	int ret = 0;
1138 
1139 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1140 		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1141 						       AES_BLOCK_SIZE));
1142 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1143 		if (!reqctx->partial_req)
1144 			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1145 		else
1146 			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1149 		/*Already updated for Decrypt*/
1150 		if (!reqctx->op)
1151 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1152 
1153 	}
1154 	return ret;
1155 
1156 }
1157 
1158 static int chcr_handle_cipher_resp(struct skcipher_request *req,
1159 				   unsigned char *input, int err)
1160 {
1161 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1162 	struct chcr_context *ctx = c_ctx(tfm);
1163 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1164 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1165 	struct sk_buff *skb;
1166 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1167 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1168 	struct cipher_wr_param wrparam;
1169 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1170 	int bytes;
1171 
1172 	if (err)
1173 		goto unmap;
1174 	if (req->cryptlen == reqctx->processed) {
1175 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1176 				      req);
1177 		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1178 		goto complete;
1179 	}
1180 
1181 	if (!reqctx->imm) {
1182 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1183 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1184 					  reqctx->src_ofst, reqctx->dst_ofst);
1185 		if ((bytes + reqctx->processed) >= req->cryptlen)
1186 			bytes  = req->cryptlen - reqctx->processed;
1187 		else
1188 			bytes = rounddown(bytes, 16);
1189 	} else {
		/* CTR mode counter overflow */
1191 		bytes  = req->cryptlen - reqctx->processed;
1192 	}
1193 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1194 	if (err)
1195 		goto unmap;
1196 
1197 	if (unlikely(bytes == 0)) {
1198 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1199 				      req);
1200 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1201 				     req->base.flags,
1202 				     req->src,
1203 				     req->dst,
1204 				     req->cryptlen,
1205 				     req->iv,
1206 				     reqctx->op);
1207 		goto complete;
1208 	}
1209 
1210 	if (get_cryptoalg_subtype(tfm) ==
1211 	    CRYPTO_ALG_SUB_TYPE_CTR)
1212 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1213 	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
1214 	wrparam.req = req;
1215 	wrparam.bytes = bytes;
1216 	skb = create_cipher_wr(&wrparam);
1217 	if (IS_ERR(skb)) {
		pr_err("%s: Failed to form WR. No memory\n", __func__);
1219 		err = PTR_ERR(skb);
1220 		goto unmap;
1221 	}
1222 	skb->dev = u_ctx->lldi.ports[0];
1223 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1224 	chcr_send_wr(skb);
1225 	reqctx->last_req_len = bytes;
1226 	reqctx->processed += bytes;
1227 	if (get_cryptoalg_subtype(tfm) ==
1228 		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
			CRYPTO_TFM_REQ_MAY_SLEEP) {
1230 		complete(&ctx->cbc_aes_aio_done);
1231 	}
1232 	return 0;
1233 unmap:
1234 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1235 complete:
1236 	if (get_cryptoalg_subtype(tfm) ==
1237 		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
			CRYPTO_TFM_REQ_MAY_SLEEP) {
1239 		complete(&ctx->cbc_aes_aio_done);
1240 	}
1241 	chcr_dec_wrcount(dev);
1242 	req->base.complete(&req->base, err);
1243 	return err;
1244 }
1245 
1246 static int process_cipher(struct skcipher_request *req,
1247 				  unsigned short qid,
1248 				  struct sk_buff **skb,
1249 				  unsigned short op_type)
1250 {
1251 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1252 	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1253 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1254 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1255 	struct	cipher_wr_param wrparam;
1256 	int bytes, err = -EINVAL;
1257 
1258 	reqctx->processed = 0;
1259 	reqctx->partial_req = 0;
1260 	if (!req->iv)
1261 		goto error;
1262 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1263 	    (req->cryptlen == 0) ||
1264 	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1265 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1266 		       ablkctx->enckey_len, req->cryptlen, ivsize);
1267 		goto error;
1268 	}
1269 
1270 	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1271 	if (err)
1272 		goto error;
1273 	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1274 					    AES_MIN_KEY_SIZE +
1275 					    sizeof(struct cpl_rx_phys_dsgl) +
1276 					/*Min dsgl size*/
1277 					    32))) {
1278 		/* Can be sent as Imm*/
1279 		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1280 
1281 		dnents = sg_nents_xlen(req->dst, req->cryptlen,
1282 				       CHCR_DST_SG_SIZE, 0);
1283 		phys_dsgl = get_space_for_phys_dsgl(dnents);
1284 		kctx_len = roundup(ablkctx->enckey_len, 16);
1285 		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1286 		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1287 			SGE_MAX_WR_LEN;
1288 		bytes = IV + req->cryptlen;
1289 
1290 	} else {
1291 		reqctx->imm = 0;
1292 	}
1293 
1294 	if (!reqctx->imm) {
1295 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1296 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1297 					  0, 0);
1298 		if ((bytes + reqctx->processed) >= req->cryptlen)
1299 			bytes  = req->cryptlen - reqctx->processed;
1300 		else
1301 			bytes = rounddown(bytes, 16);
1302 	} else {
1303 		bytes = req->cryptlen;
1304 	}
1305 	if (get_cryptoalg_subtype(tfm) ==
1306 	    CRYPTO_ALG_SUB_TYPE_CTR) {
1307 		bytes = adjust_ctr_overflow(req->iv, bytes);
1308 	}
1309 	if (get_cryptoalg_subtype(tfm) ==
1310 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1311 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1312 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1313 				CTR_RFC3686_IV_SIZE);
1314 
1315 		/* initialize counter portion of counter block */
1316 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1317 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1318 
	} else {
		memcpy(reqctx->iv, req->iv, IV);
1322 	}
1323 	if (unlikely(bytes == 0)) {
1324 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1325 				      req);
1326 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1327 					   req->base.flags,
1328 					   req->src,
1329 					   req->dst,
1330 					   req->cryptlen,
1331 					   reqctx->iv,
1332 					   op_type);
1333 		goto error;
1334 	}
1335 	reqctx->op = op_type;
1336 	reqctx->srcsg = req->src;
1337 	reqctx->dstsg = req->dst;
1338 	reqctx->src_ofst = 0;
1339 	reqctx->dst_ofst = 0;
1340 	wrparam.qid = qid;
1341 	wrparam.req = req;
1342 	wrparam.bytes = bytes;
1343 	*skb = create_cipher_wr(&wrparam);
1344 	if (IS_ERR(*skb)) {
1345 		err = PTR_ERR(*skb);
1346 		goto unmap;
1347 	}
1348 	reqctx->processed = bytes;
1349 	reqctx->last_req_len = bytes;
1350 	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1351 
1352 	return 0;
1353 unmap:
1354 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1355 error:
1356 	return err;
1357 }
1358 
1359 static int chcr_aes_encrypt(struct skcipher_request *req)
1360 {
1361 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1362 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1363 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1364 	struct sk_buff *skb = NULL;
1365 	int err;
1366 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1367 	struct chcr_context *ctx = c_ctx(tfm);
1368 	unsigned int cpu;
1369 
1370 	cpu = get_cpu();
1371 	reqctx->txqidx = cpu % ctx->ntxq;
1372 	reqctx->rxqidx = cpu % ctx->nrxq;
1373 	put_cpu();
1374 
1375 	err = chcr_inc_wrcount(dev);
1376 	if (err)
1377 		return -ENXIO;
1378 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1379 						reqctx->txqidx) &&
1380 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1381 			err = -ENOSPC;
1382 			goto error;
1383 	}
1384 
1385 	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1386 			     &skb, CHCR_ENCRYPT_OP);
1387 	if (err || !skb)
1388 		return  err;
1389 	skb->dev = u_ctx->lldi.ports[0];
1390 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1391 	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) ==
		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
			CRYPTO_TFM_REQ_MAY_SLEEP) {
		reqctx->partial_req = 1;
		wait_for_completion(&ctx->cbc_aes_aio_done);
	}
1398 	return -EINPROGRESS;
1399 error:
1400 	chcr_dec_wrcount(dev);
1401 	return err;
1402 }
1403 
1404 static int chcr_aes_decrypt(struct skcipher_request *req)
1405 {
1406 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1407 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1408 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1409 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1410 	struct sk_buff *skb = NULL;
1411 	int err;
1412 	struct chcr_context *ctx = c_ctx(tfm);
1413 	unsigned int cpu;
1414 
1415 	cpu = get_cpu();
1416 	reqctx->txqidx = cpu % ctx->ntxq;
1417 	reqctx->rxqidx = cpu % ctx->nrxq;
1418 	put_cpu();
1419 
1420 	err = chcr_inc_wrcount(dev);
1421 	if (err)
1422 		return -ENXIO;
1423 
1424 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1425 						reqctx->txqidx) &&
1426 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1427 			return -ENOSPC;
1428 	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1429 			     &skb, CHCR_DECRYPT_OP);
1430 	if (err || !skb)
1431 		return err;
1432 	skb->dev = u_ctx->lldi.ports[0];
1433 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1434 	chcr_send_wr(skb);
1435 	return -EINPROGRESS;
}

1437 static int chcr_device_init(struct chcr_context *ctx)
1438 {
1439 	struct uld_ctx *u_ctx = NULL;
1440 	int txq_perchan, ntxq;
1441 	int err = 0, rxq_perchan;
1442 
1443 	if (!ctx->dev) {
1444 		u_ctx = assign_chcr_device();
1445 		if (!u_ctx) {
			pr_err("chcr device assignment failed\n");
1447 			goto out;
1448 		}
1449 		ctx->dev = &u_ctx->dev;
1450 		ntxq = u_ctx->lldi.ntxq;
1451 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1452 		txq_perchan = ntxq / u_ctx->lldi.nchan;
1453 		ctx->ntxq = ntxq;
1454 		ctx->nrxq = u_ctx->lldi.nrxq;
1455 		ctx->rxq_perchan = rxq_perchan;
1456 		ctx->txq_perchan = txq_perchan;
1457 	}
1458 out:
1459 	return err;
1460 }
1461 
1462 static int chcr_init_tfm(struct crypto_skcipher *tfm)
1463 {
1464 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1465 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1466 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1467 
1468 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
1469 				CRYPTO_ALG_NEED_FALLBACK);
1470 	if (IS_ERR(ablkctx->sw_cipher)) {
1471 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1472 		return PTR_ERR(ablkctx->sw_cipher);
1473 	}
1474 	init_completion(&ctx->cbc_aes_aio_done);
1475 	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
1476 
1477 	return chcr_device_init(ctx);
1478 }
1479 
1480 static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1481 {
1482 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1483 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1484 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1485 
	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp();
	 * use plain ctr(aes) instead.
	 */
1489 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1490 				CRYPTO_ALG_NEED_FALLBACK);
1491 	if (IS_ERR(ablkctx->sw_cipher)) {
1492 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1493 		return PTR_ERR(ablkctx->sw_cipher);
1494 	}
1495 	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
1496 	return chcr_device_init(ctx);
1497 }
1498 
1499 
1500 static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1501 {
1502 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1503 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1504 
1505 	crypto_free_sync_skcipher(ablkctx->sw_cipher);
1506 }
1507 
1508 static int get_alg_config(struct algo_param *params,
1509 			  unsigned int auth_size)
1510 {
1511 	switch (auth_size) {
1512 	case SHA1_DIGEST_SIZE:
1513 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1514 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1515 		params->result_size = SHA1_DIGEST_SIZE;
1516 		break;
1517 	case SHA224_DIGEST_SIZE:
1518 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1519 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1520 		params->result_size = SHA256_DIGEST_SIZE;
1521 		break;
1522 	case SHA256_DIGEST_SIZE:
1523 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1524 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1525 		params->result_size = SHA256_DIGEST_SIZE;
1526 		break;
1527 	case SHA384_DIGEST_SIZE:
1528 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1529 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1530 		params->result_size = SHA512_DIGEST_SIZE;
1531 		break;
1532 	case SHA512_DIGEST_SIZE:
1533 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1534 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1535 		params->result_size = SHA512_DIGEST_SIZE;
1536 		break;
1537 	default:
		pr_err("ERROR, unsupported digest size\n");
1539 		return -EINVAL;
1540 	}
1541 	return 0;
1542 }
1543 
1544 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1545 {
1546 		crypto_free_shash(base_hash);
1547 }
1548 
/**
 *	create_hash_wr - Create hash work request
 *	@req: hash request
 *	@param: hash work request parameters
 */
1553 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1554 				      struct hash_wr_param *param)
1555 {
1556 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1557 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1558 	struct chcr_context *ctx = h_ctx(tfm);
1559 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1560 	struct sk_buff *skb = NULL;
1561 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1562 	struct chcr_wr *chcr_req;
1563 	struct ulptx_sgl *ulptx;
1564 	unsigned int nents = 0, transhdr_len;
1565 	unsigned int temp = 0;
1566 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1567 		GFP_ATOMIC;
1568 	struct adapter *adap = padap(h_ctx(tfm)->dev);
1569 	int error = 0;
1570 	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1571 
1572 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1573 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1574 				param->sg_len) <= SGE_MAX_WR_LEN;
1575 	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1576 		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1577 	nents += param->bfr_len ? 1 : 0;
1578 	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1579 				param->sg_len, 16) : (sgl_len(nents) * 8);
1580 	transhdr_len = roundup(transhdr_len, 16);
1581 
1582 	skb = alloc_skb(transhdr_len, flags);
1583 	if (!skb)
1584 		return ERR_PTR(-ENOMEM);
1585 	chcr_req = __skb_put_zero(skb, transhdr_len);
1586 
1587 	chcr_req->sec_cpl.op_ivinsrtofst =
1588 		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1589 
1590 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1591 
1592 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1593 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1594 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1595 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1596 	chcr_req->sec_cpl.seqno_numivs =
1597 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1598 					 param->opad_needed, 0);
1599 
1600 	chcr_req->sec_cpl.ivgen_hdrlen =
1601 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1602 
1603 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1604 	       param->alg_prm.result_size);
1605 
1606 	if (param->opad_needed)
1607 		memcpy(chcr_req->key_ctx.key +
1608 		       ((param->alg_prm.result_size <= 32) ? 32 :
1609 			CHCR_HASH_MAX_DIGEST_SIZE),
1610 		       hmacctx->opad, param->alg_prm.result_size);
1611 
1612 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1613 					    param->alg_prm.mk_size, 0,
1614 					    param->opad_needed,
1615 					    ((param->kctx_len +
1616 					     sizeof(chcr_req->key_ctx)) >> 4));
1617 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1618 	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1619 				     DUMMY_BYTES);
1620 	if (param->bfr_len != 0) {
1621 		req_ctx->hctx_wr.dma_addr =
1622 			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1623 				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
1626 			error = -ENOMEM;
1627 			goto err;
1628 		}
1629 		req_ctx->hctx_wr.dma_len = param->bfr_len;
1630 	} else {
1631 		req_ctx->hctx_wr.dma_addr = 0;
1632 	}
1633 	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
1635 	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1636 				(param->sg_len + param->bfr_len) : 0);
1637 	atomic_inc(&adap->chcr_stats.digest_rqst);
1638 	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1639 		    param->hash_size, transhdr_len,
1640 		    temp,  0);
1641 	req_ctx->hctx_wr.skb = skb;
1642 	return skb;
1643 err:
1644 	kfree_skb(skb);
1645 	return  ERR_PTR(error);
1646 }
1647 
1648 static int chcr_ahash_update(struct ahash_request *req)
1649 {
1650 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1651 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1652 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1653 	struct chcr_context *ctx = h_ctx(rtfm);
1654 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1655 	struct sk_buff *skb;
1656 	u8 remainder = 0, bs;
1657 	unsigned int nbytes = req->nbytes;
1658 	struct hash_wr_param params;
1659 	int error;
1660 	unsigned int cpu;
1661 
1662 	cpu = get_cpu();
1663 	req_ctx->txqidx = cpu % ctx->ntxq;
1664 	req_ctx->rxqidx = cpu % ctx->nrxq;
1665 	put_cpu();
1666 
1667 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1668 
1669 	if (nbytes + req_ctx->reqlen >= bs) {
1670 		remainder = (nbytes + req_ctx->reqlen) % bs;
1671 		nbytes = nbytes + req_ctx->reqlen - remainder;
1672 	} else {
1673 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1674 				   + req_ctx->reqlen, nbytes, 0);
1675 		req_ctx->reqlen += nbytes;
1676 		return 0;
1677 	}
1678 	error = chcr_inc_wrcount(dev);
1679 	if (error)
1680 		return -ENXIO;
	/* The CHCR detach state means lldi or padap has been freed.
	 * Increasing the inflight count for dev guarantees that lldi and
	 * padap remain valid.
	 */
1684 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1685 						req_ctx->txqidx) &&
1686 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1687 			error = -ENOSPC;
1688 			goto err;
1689 	}
1690 
1691 	chcr_init_hctx_per_wr(req_ctx);
1692 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1693 	if (error) {
1694 		error = -ENOMEM;
1695 		goto err;
1696 	}
1697 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1698 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1699 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1700 				     HASH_SPACE_LEFT(params.kctx_len), 0);
1701 	if (params.sg_len > req->nbytes)
1702 		params.sg_len = req->nbytes;
1703 	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1704 			req_ctx->reqlen;
1705 	params.opad_needed = 0;
1706 	params.more = 1;
1707 	params.last = 0;
1708 	params.bfr_len = req_ctx->reqlen;
1709 	params.scmd1 = 0;
1710 	req_ctx->hctx_wr.srcsg = req->src;
1711 
1712 	params.hash_size = params.alg_prm.result_size;
1713 	req_ctx->data_len += params.sg_len + params.bfr_len;
1714 	skb = create_hash_wr(req, &params);
1715 	if (IS_ERR(skb)) {
1716 		error = PTR_ERR(skb);
1717 		goto unmap;
1718 	}
1719 
1720 	req_ctx->hctx_wr.processed += params.sg_len;
1721 	if (remainder) {
1722 		/* Swap buffers */
1723 		swap(req_ctx->reqbfr, req_ctx->skbfr);
1724 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1725 				   req_ctx->reqbfr, remainder, req->nbytes -
1726 				   remainder);
1727 	}
1728 	req_ctx->reqlen = remainder;
1729 	skb->dev = u_ctx->lldi.ports[0];
1730 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1731 	chcr_send_wr(skb);
1732 	return -EINPROGRESS;
1733 unmap:
1734 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1735 err:
1736 	chcr_dec_wrcount(dev);
1737 	return error;
1738 }
1739 
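/*
 * Build a final MD-style padding block: 0x80 followed by zeroes, with the
 * total message length in bits (scmd1 << 3) stored in the last eight bytes.
 */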
1740 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1741 {
1742 	memset(bfr_ptr, 0, bs);
1743 	*bfr_ptr = 0x80;
1744 	if (bs == 64)
1745 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1746 	else
1747 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1748 }
1749 
1750 static int chcr_ahash_final(struct ahash_request *req)
1751 {
1752 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1753 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1754 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1755 	struct hash_wr_param params;
1756 	struct sk_buff *skb;
1757 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1758 	struct chcr_context *ctx = h_ctx(rtfm);
1759 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1760 	int error = -EINVAL;
1761 	unsigned int cpu;
1762 
1763 	cpu = get_cpu();
1764 	req_ctx->txqidx = cpu % ctx->ntxq;
1765 	req_ctx->rxqidx = cpu % ctx->nrxq;
1766 	put_cpu();
1767 
1768 	error = chcr_inc_wrcount(dev);
1769 	if (error)
1770 		return -ENXIO;
1771 
1772 	chcr_init_hctx_per_wr(req_ctx);
1773 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1774 		params.opad_needed = 1;
1775 	else
1776 		params.opad_needed = 0;
1777 	params.sg_len = 0;
1778 	req_ctx->hctx_wr.isfinal = 1;
1779 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1780 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1781 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1782 		params.opad_needed = 1;
1783 		params.kctx_len *= 2;
1784 	} else {
1785 		params.opad_needed = 0;
1786 	}
1787 
1788 	req_ctx->hctx_wr.result = 1;
1789 	params.bfr_len = req_ctx->reqlen;
1790 	req_ctx->data_len += params.bfr_len + params.sg_len;
1791 	req_ctx->hctx_wr.srcsg = req->src;
1792 	if (req_ctx->reqlen == 0) {
1793 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1794 		params.last = 0;
1795 		params.more = 1;
1796 		params.scmd1 = 0;
1797 		params.bfr_len = bs;
1798 
1799 	} else {
1800 		params.scmd1 = req_ctx->data_len;
1801 		params.last = 1;
1802 		params.more = 0;
1803 	}
1804 	params.hash_size = crypto_ahash_digestsize(rtfm);
1805 	skb = create_hash_wr(req, &params);
1806 	if (IS_ERR(skb)) {
1807 		error = PTR_ERR(skb);
1808 		goto err;
1809 	}
1810 	req_ctx->reqlen = 0;
1811 	skb->dev = u_ctx->lldi.ports[0];
1812 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1813 	chcr_send_wr(skb);
1814 	return -EINPROGRESS;
1815 err:
1816 	chcr_dec_wrcount(dev);
1817 	return error;
1818 }
1819 
1820 static int chcr_ahash_finup(struct ahash_request *req)
1821 {
1822 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1823 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1824 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1825 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1826 	struct chcr_context *ctx = h_ctx(rtfm);
1827 	struct sk_buff *skb;
1828 	struct hash_wr_param params;
1829 	u8  bs;
1830 	int error;
1831 	unsigned int cpu;
1832 
1833 	cpu = get_cpu();
1834 	req_ctx->txqidx = cpu % ctx->ntxq;
1835 	req_ctx->rxqidx = cpu % ctx->nrxq;
1836 	put_cpu();
1837 
1838 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1839 	error = chcr_inc_wrcount(dev);
1840 	if (error)
1841 		return -ENXIO;
1842 
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}
1849 	chcr_init_hctx_per_wr(req_ctx);
1850 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1851 	if (error) {
1852 		error = -ENOMEM;
1853 		goto err;
1854 	}
1855 
1856 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1857 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1858 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1859 		params.kctx_len *= 2;
1860 		params.opad_needed = 1;
1861 	} else {
1862 		params.opad_needed = 0;
1863 	}
1864 
1865 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1866 				    HASH_SPACE_LEFT(params.kctx_len), 0);
1867 	if (params.sg_len < req->nbytes) {
1868 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1869 			params.kctx_len /= 2;
1870 			params.opad_needed = 0;
1871 		}
1872 		params.last = 0;
1873 		params.more = 1;
1874 		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1875 					- req_ctx->reqlen;
1876 		params.hash_size = params.alg_prm.result_size;
1877 		params.scmd1 = 0;
1878 	} else {
1879 		params.last = 1;
1880 		params.more = 0;
1881 		params.sg_len = req->nbytes;
1882 		params.hash_size = crypto_ahash_digestsize(rtfm);
1883 		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1884 				params.sg_len;
1885 	}
1886 	params.bfr_len = req_ctx->reqlen;
1887 	req_ctx->data_len += params.bfr_len + params.sg_len;
1888 	req_ctx->hctx_wr.result = 1;
1889 	req_ctx->hctx_wr.srcsg = req->src;
1890 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1891 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1892 		params.last = 0;
1893 		params.more = 1;
1894 		params.scmd1 = 0;
1895 		params.bfr_len = bs;
1896 	}
1897 	skb = create_hash_wr(req, &params);
1898 	if (IS_ERR(skb)) {
1899 		error = PTR_ERR(skb);
1900 		goto unmap;
1901 	}
1902 	req_ctx->reqlen = 0;
1903 	req_ctx->hctx_wr.processed += params.sg_len;
1904 	skb->dev = u_ctx->lldi.ports[0];
1905 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1906 	chcr_send_wr(skb);
1907 	return -EINPROGRESS;
1908 unmap:
1909 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1910 err:
1911 	chcr_dec_wrcount(dev);
1912 	return error;
1913 }
1914 
1915 static int chcr_ahash_digest(struct ahash_request *req)
1916 {
1917 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1918 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1919 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1920 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1921 	struct chcr_context *ctx = h_ctx(rtfm);
1922 	struct sk_buff *skb;
1923 	struct hash_wr_param params;
1924 	u8  bs;
1925 	int error;
1926 	unsigned int cpu;
1927 
1928 	cpu = get_cpu();
1929 	req_ctx->txqidx = cpu % ctx->ntxq;
1930 	req_ctx->rxqidx = cpu % ctx->nrxq;
1931 	put_cpu();
1932 
1933 	rtfm->init(req);
1934 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1935 	error = chcr_inc_wrcount(dev);
1936 	if (error)
1937 		return -ENXIO;
1938 
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		     (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		error = -ENOSPC;
		goto err;
	}
1945 
1946 	chcr_init_hctx_per_wr(req_ctx);
1947 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1948 	if (error) {
1949 		error = -ENOMEM;
1950 		goto err;
1951 	}
1952 
1953 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1954 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1955 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1956 		params.kctx_len *= 2;
1957 		params.opad_needed = 1;
1958 	} else {
1959 		params.opad_needed = 0;
1960 	}
1961 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1962 				HASH_SPACE_LEFT(params.kctx_len), 0);
1963 	if (params.sg_len < req->nbytes) {
1964 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1965 			params.kctx_len /= 2;
1966 			params.opad_needed = 0;
1967 		}
1968 		params.last = 0;
1969 		params.more = 1;
1970 		params.scmd1 = 0;
1971 		params.sg_len = rounddown(params.sg_len, bs);
1972 		params.hash_size = params.alg_prm.result_size;
1973 	} else {
1974 		params.sg_len = req->nbytes;
1975 		params.hash_size = crypto_ahash_digestsize(rtfm);
1976 		params.last = 1;
1977 		params.more = 0;
1978 		params.scmd1 = req->nbytes + req_ctx->data_len;
1979 
1980 	}
1981 	params.bfr_len = 0;
1982 	req_ctx->hctx_wr.result = 1;
1983 	req_ctx->hctx_wr.srcsg = req->src;
1984 	req_ctx->data_len += params.bfr_len + params.sg_len;
1985 
1986 	if (req->nbytes == 0) {
1987 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1988 		params.more = 1;
1989 		params.bfr_len = bs;
1990 	}
1991 
1992 	skb = create_hash_wr(req, &params);
1993 	if (IS_ERR(skb)) {
1994 		error = PTR_ERR(skb);
1995 		goto unmap;
1996 	}
1997 	req_ctx->hctx_wr.processed += params.sg_len;
1998 	skb->dev = u_ctx->lldi.ports[0];
1999 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2000 	chcr_send_wr(skb);
2001 	return -EINPROGRESS;
2002 unmap:
2003 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2004 err:
2005 	chcr_dec_wrcount(dev);
2006 	return error;
2007 }
2008 
2009 static int chcr_ahash_continue(struct ahash_request *req)
2010 {
2011 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2012 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2013 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2014 	struct chcr_context *ctx = h_ctx(rtfm);
2015 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2016 	struct sk_buff *skb;
2017 	struct hash_wr_param params;
2018 	u8  bs;
2019 	int error;
2020 	unsigned int cpu;
2021 
2022 	cpu = get_cpu();
2023 	reqctx->txqidx = cpu % ctx->ntxq;
2024 	reqctx->rxqidx = cpu % ctx->nrxq;
2025 	put_cpu();
2026 
2027 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2028 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2029 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
2030 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2031 		params.kctx_len *= 2;
2032 		params.opad_needed = 1;
2033 	} else {
2034 		params.opad_needed = 0;
2035 	}
2036 	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2037 					    HASH_SPACE_LEFT(params.kctx_len),
2038 					    hctx_wr->src_ofst);
2039 	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2040 		params.sg_len = req->nbytes - hctx_wr->processed;
2041 	if (!hctx_wr->result ||
2042 	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2043 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
2044 			params.kctx_len /= 2;
2045 			params.opad_needed = 0;
2046 		}
2047 		params.last = 0;
2048 		params.more = 1;
2049 		params.sg_len = rounddown(params.sg_len, bs);
2050 		params.hash_size = params.alg_prm.result_size;
2051 		params.scmd1 = 0;
2052 	} else {
2053 		params.last = 1;
2054 		params.more = 0;
2055 		params.hash_size = crypto_ahash_digestsize(rtfm);
2056 		params.scmd1 = reqctx->data_len + params.sg_len;
2057 	}
2058 	params.bfr_len = 0;
2059 	reqctx->data_len += params.sg_len;
2060 	skb = create_hash_wr(req, &params);
2061 	if (IS_ERR(skb)) {
2062 		error = PTR_ERR(skb);
2063 		goto err;
2064 	}
2065 	hctx_wr->processed += params.sg_len;
2066 	skb->dev = u_ctx->lldi.ports[0];
2067 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2068 	chcr_send_wr(skb);
2069 	return 0;
2070 err:
2071 	return error;
2072 }
2073 
2074 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2075 					  unsigned char *input,
2076 					  int err)
2077 {
2078 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2079 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2080 	int digestsize, updated_digestsize;
2081 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2082 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2083 	struct chcr_dev *dev = h_ctx(tfm)->dev;
2084 
2085 	if (input == NULL)
2086 		goto out;
2087 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2088 	updated_digestsize = digestsize;
2089 	if (digestsize == SHA224_DIGEST_SIZE)
2090 		updated_digestsize = SHA256_DIGEST_SIZE;
2091 	else if (digestsize == SHA384_DIGEST_SIZE)
2092 		updated_digestsize = SHA512_DIGEST_SIZE;
2093 
2094 	if (hctx_wr->dma_addr) {
2095 		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2096 				 hctx_wr->dma_len, DMA_TO_DEVICE);
2097 		hctx_wr->dma_addr = 0;
2098 	}
2099 	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2100 				 req->nbytes)) {
2101 		if (hctx_wr->result == 1) {
2102 			hctx_wr->result = 0;
2103 			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2104 			       digestsize);
2105 		} else {
2106 			memcpy(reqctx->partial_hash,
2107 			       input + sizeof(struct cpl_fw6_pld),
2108 			       updated_digestsize);
2109 
2110 		}
2111 		goto unmap;
2112 	}
2113 	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2114 	       updated_digestsize);
2115 
2116 	err = chcr_ahash_continue(req);
2117 	if (err)
2118 		goto unmap;
2119 	return;
2120 unmap:
2121 	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
2126 	chcr_dec_wrcount(dev);
2127 	req->base.complete(&req->base, err);
2128 }
2129 
2130 /*
2131  *	chcr_handle_resp - Unmap the DMA buffers associated with the request
2132  *	@req: crypto request
2133  */
2134 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2135 			 int err)
2136 {
2137 	struct crypto_tfm *tfm = req->tfm;
2138 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2139 	struct adapter *adap = padap(ctx->dev);
2140 
	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_SKCIPHER:
		chcr_handle_cipher_resp(skcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
		break;
	}
2153 	atomic_inc(&adap->chcr_stats.complete);
2154 	return err;
2155 }
2156 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2157 {
2158 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2159 	struct chcr_ahash_req_ctx *state = out;
2160 
2161 	state->reqlen = req_ctx->reqlen;
2162 	state->data_len = req_ctx->data_len;
2163 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2164 	memcpy(state->partial_hash, req_ctx->partial_hash,
2165 	       CHCR_HASH_MAX_DIGEST_SIZE);
2166 	chcr_init_hctx_per_wr(state);
2167 	return 0;
2168 }
2169 
2170 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2171 {
2172 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2173 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2174 
2175 	req_ctx->reqlen = state->reqlen;
2176 	req_ctx->data_len = state->data_len;
2177 	req_ctx->reqbfr = req_ctx->bfr1;
2178 	req_ctx->skbfr = req_ctx->bfr2;
2179 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2180 	memcpy(req_ctx->partial_hash, state->partial_hash,
2181 	       CHCR_HASH_MAX_DIGEST_SIZE);
2182 	chcr_init_hctx_per_wr(req_ctx);
2183 	return 0;
2184 }
2185 
2186 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2187 			     unsigned int keylen)
2188 {
2189 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2190 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2191 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2192 	unsigned int i, err = 0, updated_digestsize;
2193 
2194 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2195 
	/* Use the key to calculate the ipad and opad. The ipad is sent with
	 * the first request's data and the opad with the final hash result;
	 * they are kept in hmacctx->ipad and hmacctx->opad respectively.
	 */
2200 	shash->tfm = hmacctx->base_hash;
2201 	if (keylen > bs) {
2202 		err = crypto_shash_digest(shash, key, keylen,
2203 					  hmacctx->ipad);
2204 		if (err)
2205 			goto out;
2206 		keylen = digestsize;
2207 	} else {
2208 		memcpy(hmacctx->ipad, key, keylen);
2209 	}
2210 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2211 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2212 
2213 	for (i = 0; i < bs / sizeof(int); i++) {
2214 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2215 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2216 	}
2217 
2218 	updated_digestsize = digestsize;
2219 	if (digestsize == SHA224_DIGEST_SIZE)
2220 		updated_digestsize = SHA256_DIGEST_SIZE;
2221 	else if (digestsize == SHA384_DIGEST_SIZE)
2222 		updated_digestsize = SHA512_DIGEST_SIZE;
2223 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2224 					hmacctx->ipad, digestsize);
2225 	if (err)
2226 		goto out;
2227 	chcr_change_order(hmacctx->ipad, updated_digestsize);
2228 
2229 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2230 					hmacctx->opad, digestsize);
2231 	if (err)
2232 		goto out;
2233 	chcr_change_order(hmacctx->opad, updated_digestsize);
2234 out:
2235 	return err;
2236 }
2237 
2238 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2239 			       unsigned int key_len)
2240 {
2241 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2242 	unsigned short context_size = 0;
2243 	int err;
2244 
2245 	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2246 	if (err)
2247 		goto badkey_err;
2248 
2249 	memcpy(ablkctx->key, key, key_len);
2250 	ablkctx->enckey_len = key_len;
2251 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2252 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2253 	ablkctx->key_ctx_hdr =
2254 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2255 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2256 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2257 				 CHCR_KEYCTX_NO_KEY, 1,
2258 				 0, context_size);
2259 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2260 	return 0;
2261 badkey_err:
2262 	ablkctx->enckey_len = 0;
2263 
2264 	return err;
2265 }
2266 
2267 static int chcr_sha_init(struct ahash_request *areq)
2268 {
2269 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2270 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2271 	int digestsize =  crypto_ahash_digestsize(tfm);
2272 
2273 	req_ctx->data_len = 0;
2274 	req_ctx->reqlen = 0;
2275 	req_ctx->reqbfr = req_ctx->bfr1;
2276 	req_ctx->skbfr = req_ctx->bfr2;
2277 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2278 
2279 	return 0;
2280 }
2281 
2282 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2283 {
2284 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2285 				 sizeof(struct chcr_ahash_req_ctx));
2286 	return chcr_device_init(crypto_tfm_ctx(tfm));
2287 }
2288 
2289 static int chcr_hmac_init(struct ahash_request *areq)
2290 {
2291 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2292 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2293 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2294 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2295 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2296 
2297 	chcr_sha_init(areq);
2298 	req_ctx->data_len = bs;
2299 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2300 		if (digestsize == SHA224_DIGEST_SIZE)
2301 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2302 			       SHA256_DIGEST_SIZE);
2303 		else if (digestsize == SHA384_DIGEST_SIZE)
2304 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2305 			       SHA512_DIGEST_SIZE);
2306 		else
2307 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2308 			       digestsize);
2309 	}
2310 	return 0;
2311 }
2312 
2313 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2314 {
2315 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2316 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2317 	unsigned int digestsize =
2318 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2319 
2320 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2321 				 sizeof(struct chcr_ahash_req_ctx));
2322 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2323 	if (IS_ERR(hmacctx->base_hash))
2324 		return PTR_ERR(hmacctx->base_hash);
2325 	return chcr_device_init(crypto_tfm_ctx(tfm));
2326 }
2327 
2328 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2329 {
2330 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2331 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2332 
2333 	if (hmacctx->base_hash) {
2334 		chcr_free_shash(hmacctx->base_hash);
2335 		hmacctx->base_hash = NULL;
2336 	}
2337 }
2338 
2339 inline void chcr_aead_common_exit(struct aead_request *req)
2340 {
2341 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2342 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2343 	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2344 
2345 	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2346 }
2347 
2348 static int chcr_aead_common_init(struct aead_request *req)
2349 {
2350 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2351 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2352 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2353 	unsigned int authsize = crypto_aead_authsize(tfm);
2354 	int error = -EINVAL;
2355 
2356 	/* validate key size */
2357 	if (aeadctx->enckey_len == 0)
2358 		goto err;
2359 	if (reqctx->op && req->cryptlen < authsize)
2360 		goto err;
2361 	if (reqctx->b0_len)
2362 		reqctx->scratch_pad = reqctx->iv + IV;
2363 	else
2364 		reqctx->scratch_pad = NULL;
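	/* For CCM, scratch_pad (placed right after the IV in reqctx->iv)
	 * carries B0 and the encoded AAD length; it is filled in later by
	 * ccm_format_packet().
	 */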
2365 
2366 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2367 				  reqctx->op);
2368 	if (error) {
2369 		error = -ENOMEM;
2370 		goto err;
2371 	}
2372 
2373 	return 0;
2374 err:
2375 	return error;
2376 }
2377 
2378 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2379 				   int aadmax, int wrlen,
2380 				   unsigned short op_type)
2381 {
2382 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2383 
2384 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2385 	    dst_nents > MAX_DSGL_ENT ||
2386 	    (req->assoclen > aadmax) ||
2387 	    (wrlen > SGE_MAX_WR_LEN))
2388 		return 1;
2389 	return 0;
2390 }
2391 
2392 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2393 {
2394 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2395 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2396 	struct aead_request *subreq = aead_request_ctx(req);
2397 
2398 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2399 	aead_request_set_callback(subreq, req->base.flags,
2400 				  req->base.complete, req->base.data);
2401 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2402 				 req->iv);
2403 	aead_request_set_ad(subreq, req->assoclen);
2404 	return op_type ? crypto_aead_decrypt(subreq) :
2405 		crypto_aead_encrypt(subreq);
2406 }
2407 
2408 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2409 					 unsigned short qid,
2410 					 int size)
2411 {
2412 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2413 	struct chcr_context *ctx = a_ctx(tfm);
2414 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2415 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2416 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2417 	struct sk_buff *skb = NULL;
2418 	struct chcr_wr *chcr_req;
2419 	struct cpl_rx_phys_dsgl *phys_cpl;
2420 	struct ulptx_sgl *ulptx;
2421 	unsigned int transhdr_len;
2422 	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2423 	unsigned int   kctx_len = 0, dnents, snents;
2424 	unsigned int  authsize = crypto_aead_authsize(tfm);
2425 	int error = -EINVAL;
2426 	u8 *ivptr;
2427 	int null = 0;
2428 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2429 		GFP_ATOMIC;
2430 	struct adapter *adap = padap(ctx->dev);
2431 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2432 
2433 	if (req->cryptlen == 0)
2434 		return NULL;
2435 
2436 	reqctx->b0_len = 0;
2437 	error = chcr_aead_common_init(req);
2438 	if (error)
2439 		return ERR_PTR(error);
2440 
2441 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2442 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2443 		null = 1;
2444 	}
2445 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2446 		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2447 	dnents += MIN_AUTH_SG; // For IV
2448 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2449 			       CHCR_SRC_SG_SIZE, 0);
2450 	dst_size = get_space_for_phys_dsgl(dnents);
2451 	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2452 		- sizeof(chcr_req->key_ctx);
2453 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2454 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2455 			SGE_MAX_WR_LEN;
2456 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2457 			: (sgl_len(snents) * 8);
2458 	transhdr_len += temp;
2459 	transhdr_len = roundup(transhdr_len, 16);
2460 
2461 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2462 				    transhdr_len, reqctx->op)) {
2463 		atomic_inc(&adap->chcr_stats.fallback);
2464 		chcr_aead_common_exit(req);
2465 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2466 	}
2467 	skb = alloc_skb(transhdr_len, flags);
2468 	if (!skb) {
2469 		error = -ENOMEM;
2470 		goto err;
2471 	}
2472 
2473 	chcr_req = __skb_put_zero(skb, transhdr_len);
2474 
2475 	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2476 
2477 	/*
2478 	 * Input order	is AAD,IV and Payload. where IV should be included as
2479 	 * the part of authdata. All other fields should be filled according
2480 	 * to the hardware spec
2481 	 */
2482 	chcr_req->sec_cpl.op_ivinsrtofst =
2483 				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2484 	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2485 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2486 					null ? 0 : 1 + IV,
2487 					null ? 0 : IV + req->assoclen,
2488 					req->assoclen + IV + 1,
2489 					(temp & 0x1F0) >> 4);
2490 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2491 					temp & 0xF,
2492 					null ? 0 : req->assoclen + IV + 1,
2493 					temp, temp);
2494 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2495 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2496 		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2497 	else
2498 		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2499 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2500 					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2501 					temp,
2502 					actx->auth_mode, aeadctx->hmac_ctrl,
2503 					IV >> 1);
2504 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2505 					 0, 0, dst_size);
2506 
2507 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2508 	if (reqctx->op == CHCR_ENCRYPT_OP ||
2509 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2510 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2511 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2512 		       aeadctx->enckey_len);
2513 	else
2514 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2515 		       aeadctx->enckey_len);
2516 
2517 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2518 	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2519 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2520 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2521 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2522 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2523 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2524 		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2525 		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2526 				CTR_RFC3686_IV_SIZE);
2527 		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2528 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2529 	} else {
2530 		memcpy(ivptr, req->iv, IV);
2531 	}
2532 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2533 	chcr_add_aead_src_ent(req, ulptx);
2534 	atomic_inc(&adap->chcr_stats.cipher_rqst);
2535 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2536 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2537 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2538 		   transhdr_len, temp, 0);
2539 	reqctx->skb = skb;
2540 
2541 	return skb;
2542 err:
2543 	chcr_aead_common_exit(req);
2544 
2545 	return ERR_PTR(error);
2546 }
2547 
2548 int chcr_aead_dma_map(struct device *dev,
2549 		      struct aead_request *req,
2550 		      unsigned short op_type)
2551 {
2552 	int error;
2553 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2554 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2555 	unsigned int authsize = crypto_aead_authsize(tfm);
2556 	int dst_size;
2557 
2558 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2559 				-authsize : authsize);
2560 	if (!req->cryptlen || !dst_size)
2561 		return 0;
2562 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2563 					DMA_BIDIRECTIONAL);
2564 	if (dma_mapping_error(dev, reqctx->iv_dma))
2565 		return -ENOMEM;
2566 	if (reqctx->b0_len)
2567 		reqctx->b0_dma = reqctx->iv_dma + IV;
2568 	else
2569 		reqctx->b0_dma = 0;
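	/* B0 (if present) shares the single IV mapping: it sits immediately
	 * after the 16-byte IV in the same DMA buffer.
	 */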
2570 	if (req->src == req->dst) {
2571 		error = dma_map_sg(dev, req->src,
2572 				sg_nents_for_len(req->src, dst_size),
2573 					DMA_BIDIRECTIONAL);
2574 		if (!error)
2575 			goto err;
2576 	} else {
2577 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2578 				   DMA_TO_DEVICE);
2579 		if (!error)
2580 			goto err;
2581 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2582 				   DMA_FROM_DEVICE);
2583 		if (!error) {
2584 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2585 				   DMA_TO_DEVICE);
2586 			goto err;
2587 		}
2588 	}
2589 
2590 	return 0;
2591 err:
	dma_unmap_single(dev, reqctx->iv_dma, IV + reqctx->b0_len,
			 DMA_BIDIRECTIONAL);
2593 	return -ENOMEM;
2594 }
2595 
2596 void chcr_aead_dma_unmap(struct device *dev,
2597 			 struct aead_request *req,
2598 			 unsigned short op_type)
2599 {
2600 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2601 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2602 	unsigned int authsize = crypto_aead_authsize(tfm);
2603 	int dst_size;
2604 
2605 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2606 					-authsize : authsize);
2607 	if (!req->cryptlen || !dst_size)
2608 		return;
2609 
2610 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2611 					DMA_BIDIRECTIONAL);
2612 	if (req->src == req->dst) {
2613 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2614 				   DMA_BIDIRECTIONAL);
2615 	} else {
2616 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2617 				   DMA_TO_DEVICE);
2618 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2619 				   DMA_FROM_DEVICE);
2620 	}
2621 }
2622 
2623 void chcr_add_aead_src_ent(struct aead_request *req,
2624 			   struct ulptx_sgl *ulptx)
2625 {
2626 	struct ulptx_walk ulp_walk;
2627 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2628 
2629 	if (reqctx->imm) {
2630 		u8 *buf = (u8 *)ulptx;
2631 
2632 		if (reqctx->b0_len) {
2633 			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2634 			buf += reqctx->b0_len;
2635 		}
2636 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2637 				   buf, req->cryptlen + req->assoclen, 0);
2638 	} else {
2639 		ulptx_walk_init(&ulp_walk, ulptx);
2640 		if (reqctx->b0_len)
2641 			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2642 					    reqctx->b0_dma);
2643 		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2644 				  req->assoclen,  0);
2645 		ulptx_walk_end(&ulp_walk);
2646 	}
2647 }
2648 
2649 void chcr_add_aead_dst_ent(struct aead_request *req,
2650 			   struct cpl_rx_phys_dsgl *phys_cpl,
2651 			   unsigned short qid)
2652 {
2653 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2654 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2655 	struct dsgl_walk dsgl_walk;
2656 	unsigned int authsize = crypto_aead_authsize(tfm);
2657 	struct chcr_context *ctx = a_ctx(tfm);
2658 	u32 temp;
2659 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2660 
2661 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2662 	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2663 	temp = req->assoclen + req->cryptlen +
2664 		(reqctx->op ? -authsize : authsize);
2665 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2666 	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2667 }
2668 
2669 void chcr_add_cipher_src_ent(struct skcipher_request *req,
2670 			     void *ulptx,
2671 			     struct  cipher_wr_param *wrparam)
2672 {
2673 	struct ulptx_walk ulp_walk;
2674 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2675 	u8 *buf = ulptx;
2676 
2677 	memcpy(buf, reqctx->iv, IV);
2678 	buf += IV;
2679 	if (reqctx->imm) {
2680 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2681 				   buf, wrparam->bytes, reqctx->processed);
2682 	} else {
2683 		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2684 		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2685 				  reqctx->src_ofst);
2686 		reqctx->srcsg = ulp_walk.last_sg;
2687 		reqctx->src_ofst = ulp_walk.last_sg_len;
2688 		ulptx_walk_end(&ulp_walk);
2689 	}
2690 }
2691 
2692 void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2693 			     struct cpl_rx_phys_dsgl *phys_cpl,
2694 			     struct  cipher_wr_param *wrparam,
2695 			     unsigned short qid)
2696 {
2697 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2698 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2699 	struct chcr_context *ctx = c_ctx(tfm);
2700 	struct dsgl_walk dsgl_walk;
2701 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2702 
2703 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2704 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2705 			 reqctx->dst_ofst);
2706 	reqctx->dstsg = dsgl_walk.last_sg;
2707 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2708 	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2709 }
2710 
2711 void chcr_add_hash_src_ent(struct ahash_request *req,
2712 			   struct ulptx_sgl *ulptx,
2713 			   struct hash_wr_param *param)
2714 {
2715 	struct ulptx_walk ulp_walk;
2716 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2717 
2718 	if (reqctx->hctx_wr.imm) {
2719 		u8 *buf = (u8 *)ulptx;
2720 
2721 		if (param->bfr_len) {
2722 			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2723 			buf += param->bfr_len;
2724 		}
2725 
2726 		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2727 				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2728 				   param->sg_len, 0);
2729 	} else {
2730 		ulptx_walk_init(&ulp_walk, ulptx);
2731 		if (param->bfr_len)
2732 			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2733 					    reqctx->hctx_wr.dma_addr);
2734 		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2735 				  param->sg_len, reqctx->hctx_wr.src_ofst);
2736 		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2737 		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2738 		ulptx_walk_end(&ulp_walk);
2739 	}
2740 }
2741 
2742 int chcr_hash_dma_map(struct device *dev,
2743 		      struct ahash_request *req)
2744 {
2745 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2746 	int error = 0;
2747 
2748 	if (!req->nbytes)
2749 		return 0;
2750 	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2751 			   DMA_TO_DEVICE);
2752 	if (!error)
2753 		return -ENOMEM;
2754 	req_ctx->hctx_wr.is_sg_map = 1;
2755 	return 0;
2756 }
2757 
2758 void chcr_hash_dma_unmap(struct device *dev,
2759 			 struct ahash_request *req)
2760 {
2761 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2762 
2763 	if (!req->nbytes)
2764 		return;
2765 
2766 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2767 			   DMA_TO_DEVICE);
	req_ctx->hctx_wr.is_sg_map = 0;
}
2771 
2772 int chcr_cipher_dma_map(struct device *dev,
2773 			struct skcipher_request *req)
2774 {
2775 	int error;
2776 
2777 	if (req->src == req->dst) {
2778 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2779 				   DMA_BIDIRECTIONAL);
2780 		if (!error)
2781 			goto err;
2782 	} else {
2783 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2784 				   DMA_TO_DEVICE);
2785 		if (!error)
2786 			goto err;
2787 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2788 				   DMA_FROM_DEVICE);
2789 		if (!error) {
2790 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2791 				   DMA_TO_DEVICE);
2792 			goto err;
2793 		}
2794 	}
2795 
2796 	return 0;
2797 err:
2798 	return -ENOMEM;
2799 }
2800 
2801 void chcr_cipher_dma_unmap(struct device *dev,
2802 			   struct skcipher_request *req)
2803 {
2804 	if (req->src == req->dst) {
2805 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2806 				   DMA_BIDIRECTIONAL);
2807 	} else {
2808 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2809 				   DMA_TO_DEVICE);
2810 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2811 				   DMA_FROM_DEVICE);
2812 	}
2813 }
2814 
2815 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2816 {
2817 	__be32 data;
2818 
2819 	memset(block, 0, csize);
2820 	block += csize;
2821 
2822 	if (csize >= 4)
2823 		csize = 4;
2824 	else if (msglen > (unsigned int)(1 << (8 * csize)))
2825 		return -EOVERFLOW;
2826 
2827 	data = cpu_to_be32(msglen);
2828 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2829 
2830 	return 0;
2831 }
2832 
2833 static int generate_b0(struct aead_request *req, u8 *ivptr,
2834 			unsigned short op_type)
2835 {
2836 	unsigned int l, lp, m;
2837 	int rc;
2838 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2839 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2840 	u8 *b0 = reqctx->scratch_pad;
2841 
2842 	m = crypto_aead_authsize(aead);
2843 
2844 	memcpy(b0, ivptr, 16);
2845 
2846 	lp = b0[0];
2847 	l = lp + 1;
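	/* Per RFC 3610, iv[0] carries L' = L - 1 in its low 3 bits, so l is
	 * the number of length octets; the M' and Adata flag bits of B0 are
	 * ORed in below.
	 */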
2848 
2849 	/* set m, bits 3-5 */
2850 	*b0 |= (8 * ((m - 2) / 2));
2851 
2852 	/* set adata, bit 6, if associated data is used */
2853 	if (req->assoclen)
2854 		*b0 |= 64;
2855 	rc = set_msg_len(b0 + 16 - l,
2856 			 (op_type == CHCR_DECRYPT_OP) ?
2857 			 req->cryptlen - m : req->cryptlen, l);
2858 
2859 	return rc;
2860 }
2861 
2862 static inline int crypto_ccm_check_iv(const u8 *iv)
2863 {
2864 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2865 	if (iv[0] < 1 || iv[0] > 7)
2866 		return -EINVAL;
2867 
2868 	return 0;
2869 }
2870 
2871 static int ccm_format_packet(struct aead_request *req,
2872 			     u8 *ivptr,
2873 			     unsigned int sub_type,
2874 			     unsigned short op_type,
2875 			     unsigned int assoclen)
2876 {
2877 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2878 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2879 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2880 	int rc = 0;
2881 
2882 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2883 		ivptr[0] = 3;
2884 		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2885 		memcpy(ivptr + 4, req->iv, 8);
2886 		memset(ivptr + 12, 0, 4);
2887 	} else {
2888 		memcpy(ivptr, req->iv, 16);
2889 	}
2890 	if (assoclen)
2891 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2892 				htons(assoclen);
2893 
2894 	rc = generate_b0(req, ivptr, op_type);
2895 	/* zero the ctr value */
2896 	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2897 	return rc;
2898 }
2899 
2900 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2901 				  unsigned int dst_size,
2902 				  struct aead_request *req,
2903 				  unsigned short op_type)
2904 {
2905 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2906 	struct chcr_context *ctx = a_ctx(tfm);
2907 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2908 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2909 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2910 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2911 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2912 	unsigned int ccm_xtra;
2913 	unsigned char tag_offset = 0, auth_offset = 0;
2914 	unsigned int assoclen;
2915 
2916 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2917 		assoclen = req->assoclen - 8;
2918 	else
2919 		assoclen = req->assoclen;
2920 	ccm_xtra = CCM_B0_SIZE +
2921 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2922 
2923 	auth_offset = req->cryptlen ?
2924 		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2925 	if (op_type == CHCR_DECRYPT_OP) {
2926 		if (crypto_aead_authsize(tfm) != req->cryptlen)
2927 			tag_offset = crypto_aead_authsize(tfm);
2928 		else
2929 			auth_offset = 0;
2930 	}
2931 
2932 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2933 	sec_cpl->pldlen =
2934 		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will be b0 always. So AAD start will be 1 always */
2936 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2937 				1 + IV,	IV + assoclen + ccm_xtra,
2938 				req->assoclen + IV + 1 + ccm_xtra, 0);
2939 
2940 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2941 					auth_offset, tag_offset,
2942 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2943 					crypto_aead_authsize(tfm));
2944 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2945 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2946 					cipher_mode, mac_mode,
2947 					aeadctx->hmac_ctrl, IV >> 1);
2948 
2949 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2950 					0, dst_size);
2951 }
2952 
2953 static int aead_ccm_validate_input(unsigned short op_type,
2954 				   struct aead_request *req,
2955 				   struct chcr_aead_ctx *aeadctx,
2956 				   unsigned int sub_type)
2957 {
2958 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2959 		if (crypto_ccm_check_iv(req->iv)) {
2960 			pr_err("CCM: IV check fails\n");
2961 			return -EINVAL;
2962 		}
2963 	} else {
2964 		if (req->assoclen != 16 && req->assoclen != 20) {
2965 			pr_err("RFC4309: Invalid AAD length %d\n",
2966 			       req->assoclen);
2967 			return -EINVAL;
2968 		}
2969 	}
2970 	return 0;
2971 }
2972 
2973 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2974 					  unsigned short qid,
2975 					  int size)
2976 {
2977 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2978 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2979 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2980 	struct sk_buff *skb = NULL;
2981 	struct chcr_wr *chcr_req;
2982 	struct cpl_rx_phys_dsgl *phys_cpl;
2983 	struct ulptx_sgl *ulptx;
2984 	unsigned int transhdr_len;
2985 	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2986 	unsigned int sub_type, assoclen = req->assoclen;
2987 	unsigned int authsize = crypto_aead_authsize(tfm);
2988 	int error = -EINVAL;
2989 	u8 *ivptr;
2990 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2991 		GFP_ATOMIC;
2992 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2993 
2994 	sub_type = get_aead_subtype(tfm);
2995 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2996 		assoclen -= 8;
2997 	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2998 	error = chcr_aead_common_init(req);
2999 	if (error)
3000 		return ERR_PTR(error);
3001 
3002 	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3003 	if (error)
3004 		goto err;
3005 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3006 			+ (reqctx->op ? -authsize : authsize),
3007 			CHCR_DST_SG_SIZE, 0);
3008 	dnents += MIN_CCM_SG; // For IV and B0
3009 	dst_size = get_space_for_phys_dsgl(dnents);
3010 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3011 			       CHCR_SRC_SG_SIZE, 0);
3012 	snents += MIN_CCM_SG; //For B0
3013 	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
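	/* CCM uses the same AES key for both CTR encryption and the CBC-MAC,
	 * so the key context carries two copies of the (rounded-up) key.
	 */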
3014 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3015 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3016 		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
3017 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3018 				     reqctx->b0_len, 16) :
		(sgl_len(snents) * 8);
3020 	transhdr_len += temp;
3021 	transhdr_len = roundup(transhdr_len, 16);
3022 
3023 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3024 				reqctx->b0_len, transhdr_len, reqctx->op)) {
3025 		atomic_inc(&adap->chcr_stats.fallback);
3026 		chcr_aead_common_exit(req);
3027 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3028 	}
	skb = alloc_skb(transhdr_len, flags);
3031 	if (!skb) {
3032 		error = -ENOMEM;
3033 		goto err;
3034 	}
3035 
3036 	chcr_req = __skb_put_zero(skb, transhdr_len);
3037 
3038 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3039 
3040 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3041 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3042 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3043 			aeadctx->key, aeadctx->enckey_len);
3044 
3045 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3046 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3047 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
3048 	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3049 	if (error)
3050 		goto dstmap_fail;
3051 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3052 	chcr_add_aead_src_ent(req, ulptx);
3053 
3054 	atomic_inc(&adap->chcr_stats.aead_rqst);
3055 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3056 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3057 		reqctx->b0_len) : 0);
3058 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3059 		    transhdr_len, temp, 0);
3060 	reqctx->skb = skb;
3061 
3062 	return skb;
3063 dstmap_fail:
3064 	kfree_skb(skb);
3065 err:
3066 	chcr_aead_common_exit(req);
3067 	return ERR_PTR(error);
3068 }
3069 
3070 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3071 				     unsigned short qid,
3072 				     int size)
3073 {
3074 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3075 	struct chcr_context *ctx = a_ctx(tfm);
3076 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3077 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3078 	struct sk_buff *skb = NULL;
3079 	struct chcr_wr *chcr_req;
3080 	struct cpl_rx_phys_dsgl *phys_cpl;
3081 	struct ulptx_sgl *ulptx;
3082 	unsigned int transhdr_len, dnents = 0, snents;
3083 	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3084 	unsigned int authsize = crypto_aead_authsize(tfm);
3085 	int error = -EINVAL;
3086 	u8 *ivptr;
3087 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3088 		GFP_ATOMIC;
3089 	struct adapter *adap = padap(ctx->dev);
3090 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3091 
3092 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3093 		assoclen = req->assoclen - 8;
3094 
3095 	reqctx->b0_len = 0;
3096 	error = chcr_aead_common_init(req);
3097 	if (error)
3098 		return ERR_PTR(error);
3099 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3100 				(reqctx->op ? -authsize : authsize),
3101 				CHCR_DST_SG_SIZE, 0);
3102 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3103 			       CHCR_SRC_SG_SIZE, 0);
3104 	dnents += MIN_GCM_SG; // For IV
3105 	dst_size = get_space_for_phys_dsgl(dnents);
3106 	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3107 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3108 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3109 			SGE_MAX_WR_LEN;
3110 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3111 		(sgl_len(snents) * 8);
3112 	transhdr_len += temp;
3113 	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
3118 		chcr_aead_common_exit(req);
3119 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3120 	}
3121 	skb = alloc_skb(transhdr_len, flags);
3122 	if (!skb) {
3123 		error = -ENOMEM;
3124 		goto err;
3125 	}
3126 
3127 	chcr_req = __skb_put_zero(skb, transhdr_len);
3128 
	// Offset of tag from end
3130 	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3131 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3132 						rx_channel_id, 2, 1);
3133 	chcr_req->sec_cpl.pldlen =
3134 		htonl(req->assoclen + IV + req->cryptlen);
3135 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3136 					assoclen ? 1 + IV : 0,
3137 					assoclen ? IV + assoclen : 0,
3138 					req->assoclen + IV + 1, 0);
3139 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3140 			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3141 						temp, temp);
3142 	chcr_req->sec_cpl.seqno_numivs =
3143 			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3144 					CHCR_ENCRYPT_OP) ? 1 : 0,
3145 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3146 					CHCR_SCMD_AUTH_MODE_GHASH,
3147 					aeadctx->hmac_ctrl, IV >> 1);
3148 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3149 					0, 0, dst_size);
3150 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3151 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3152 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3153 	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3154 
3155 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3156 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	/* Prepare a 16-byte IV: SALT | IV | 0x00000001 */
3159 	if (get_aead_subtype(tfm) ==
3160 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3161 		memcpy(ivptr, aeadctx->salt, 4);
3162 		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3163 	} else {
3164 		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3165 	}
3166 	*((unsigned int *)(ivptr + 12)) = htonl(0x01);
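	/* With a 96-bit IV the GCM pre-counter block J0 is IV || 0x00000001
	 * (NIST SP 800-38D), so the trailing 32-bit counter starts at 1.
	 */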
3167 
3168 	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3169 
3170 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3171 	chcr_add_aead_src_ent(req, ulptx);
3172 	atomic_inc(&adap->chcr_stats.aead_rqst);
3173 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3174 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3175 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3176 		    transhdr_len, temp, reqctx->verify);
3177 	reqctx->skb = skb;
3178 	return skb;
3179 
3180 err:
3181 	chcr_aead_common_exit(req);
3182 	return ERR_PTR(error);
3183 }
3184 
3185 
3186 
3187 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3188 {
3189 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3190 	struct aead_alg *alg = crypto_aead_alg(tfm);
3191 
3192 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3193 					       CRYPTO_ALG_NEED_FALLBACK |
3194 					       CRYPTO_ALG_ASYNC);
3195 	if  (IS_ERR(aeadctx->sw_cipher))
3196 		return PTR_ERR(aeadctx->sw_cipher);
3197 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3198 				 sizeof(struct aead_request) +
3199 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3200 	return chcr_device_init(a_ctx(tfm));
3201 }
3202 
3203 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3204 {
3205 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3206 
3207 	crypto_free_aead(aeadctx->sw_cipher);
3208 }
3209 
3210 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3211 					unsigned int authsize)
3212 {
3213 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3214 
3215 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3216 	aeadctx->mayverify = VERIFY_HW;
3217 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3218 }
3219 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3220 				    unsigned int authsize)
3221 {
3222 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3223 	u32 maxauth = crypto_aead_maxauthsize(tfm);
3224 
	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
	 * does not hold for SHA1, so the authsize == 12 check must come
	 * before the authsize == (maxauth >> 1) check.
	 */
3229 	if (authsize == ICV_4) {
3230 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3231 		aeadctx->mayverify = VERIFY_HW;
3232 	} else if (authsize == ICV_6) {
3233 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3234 		aeadctx->mayverify = VERIFY_HW;
3235 	} else if (authsize == ICV_10) {
3236 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3237 		aeadctx->mayverify = VERIFY_HW;
3238 	} else if (authsize == ICV_12) {
3239 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3240 		aeadctx->mayverify = VERIFY_HW;
3241 	} else if (authsize == ICV_14) {
3242 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3243 		aeadctx->mayverify = VERIFY_HW;
3244 	} else if (authsize == (maxauth >> 1)) {
3245 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3246 		aeadctx->mayverify = VERIFY_HW;
3247 	} else if (authsize == maxauth) {
3248 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3249 		aeadctx->mayverify = VERIFY_HW;
3250 	} else {
3251 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3252 		aeadctx->mayverify = VERIFY_SW;
3253 	}
3254 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3255 }
3256 
3257 
3258 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3259 {
3260 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3261 
3262 	switch (authsize) {
3263 	case ICV_4:
3264 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3265 		aeadctx->mayverify = VERIFY_HW;
3266 		break;
3267 	case ICV_8:
3268 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3269 		aeadctx->mayverify = VERIFY_HW;
3270 		break;
3271 	case ICV_12:
3272 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3273 		aeadctx->mayverify = VERIFY_HW;
3274 		break;
3275 	case ICV_14:
3276 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3277 		aeadctx->mayverify = VERIFY_HW;
3278 		break;
3279 	case ICV_16:
3280 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3281 		aeadctx->mayverify = VERIFY_HW;
3282 		break;
3283 	case ICV_13:
3284 	case ICV_15:
3285 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3286 		aeadctx->mayverify = VERIFY_SW;
3287 		break;
3288 	default:
3289 		return -EINVAL;
3290 	}
3291 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3292 }
3293 
3294 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3295 					  unsigned int authsize)
3296 {
3297 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3298 
3299 	switch (authsize) {
3300 	case ICV_8:
3301 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3302 		aeadctx->mayverify = VERIFY_HW;
3303 		break;
3304 	case ICV_12:
3305 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3306 		aeadctx->mayverify = VERIFY_HW;
3307 		break;
3308 	case ICV_16:
3309 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3310 		aeadctx->mayverify = VERIFY_HW;
3311 		break;
3312 	default:
3313 		return -EINVAL;
3314 	}
3315 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3316 }
3317 
3318 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3319 				unsigned int authsize)
3320 {
3321 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3322 
3323 	switch (authsize) {
3324 	case ICV_4:
3325 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3326 		aeadctx->mayverify = VERIFY_HW;
3327 		break;
3328 	case ICV_6:
3329 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3330 		aeadctx->mayverify = VERIFY_HW;
3331 		break;
3332 	case ICV_8:
3333 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3334 		aeadctx->mayverify = VERIFY_HW;
3335 		break;
3336 	case ICV_10:
3337 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3338 		aeadctx->mayverify = VERIFY_HW;
3339 		break;
3340 	case ICV_12:
3341 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3342 		aeadctx->mayverify = VERIFY_HW;
3343 		break;
3344 	case ICV_14:
3345 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3346 		aeadctx->mayverify = VERIFY_HW;
3347 		break;
3348 	case ICV_16:
3349 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3350 		aeadctx->mayverify = VERIFY_HW;
3351 		break;
3352 	default:
3353 		return -EINVAL;
3354 	}
3355 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3356 }
3357 
3358 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3359 				const u8 *key,
3360 				unsigned int keylen)
3361 {
3362 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3363 	unsigned char ck_size, mk_size;
3364 	int key_ctx_size = 0;
3365 
3366 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3367 	if (keylen == AES_KEYSIZE_128) {
3368 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3369 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3370 	} else if (keylen == AES_KEYSIZE_192) {
3371 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3372 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3373 	} else if (keylen == AES_KEYSIZE_256) {
3374 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3375 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3376 	} else {
3377 		aeadctx->enckey_len = 0;
3378 		return	-EINVAL;
3379 	}
3380 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3381 						key_ctx_size >> 4);
3382 	memcpy(aeadctx->key, key, keylen);
3383 	aeadctx->enckey_len = keylen;
3384 
3385 	return 0;
3386 }
3387 
3388 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3389 				const u8 *key,
3390 				unsigned int keylen)
3391 {
3392 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3393 	int error;
3394 
3395 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3396 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3397 			      CRYPTO_TFM_REQ_MASK);
3398 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3399 	if (error)
3400 		return error;
3401 	return chcr_ccm_common_setkey(aead, key, keylen);
3402 }
3403 
3404 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3405 				    unsigned int keylen)
3406 {
3407 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3408 	int error;
3409 
3410 	if (keylen < 3) {
3411 		aeadctx->enckey_len = 0;
3412 		return	-EINVAL;
3413 	}
3414 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3415 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3416 			      CRYPTO_TFM_REQ_MASK);
3417 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3418 	if (error)
3419 		return error;
3420 	keylen -= 3;
3421 	memcpy(aeadctx->salt, key + keylen, 3);
3422 	return chcr_ccm_common_setkey(aead, key, keylen);
3423 }
3424 
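/* gcm(aes)/rfc4106 setkey: program the software fallback, peel off the
 * 4-byte salt for RFC4106, fill in the key-context header and derive the
 * GHASH subkey H below.
 */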
3425 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3426 			   unsigned int keylen)
3427 {
3428 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3429 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3430 	unsigned int ck_size;
3431 	int ret = 0, key_ctx_size = 0;
3432 	struct crypto_aes_ctx aes;
3433 
3434 	aeadctx->enckey_len = 0;
3435 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3436 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3437 			      & CRYPTO_TFM_REQ_MASK);
3438 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3439 	if (ret)
3440 		goto out;
3441 
3442 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3443 	    keylen > 3) {
3444 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3445 		memcpy(aeadctx->salt, key + keylen, 4);
3446 	}
3447 	if (keylen == AES_KEYSIZE_128) {
3448 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3449 	} else if (keylen == AES_KEYSIZE_192) {
3450 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3451 	} else if (keylen == AES_KEYSIZE_256) {
3452 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3453 	} else {
3454 		pr_err("GCM: Invalid key length %d\n", keylen);
3455 		ret = -EINVAL;
3456 		goto out;
3457 	}
3458 
3459 	memcpy(aeadctx->key, key, keylen);
3460 	aeadctx->enckey_len = keylen;
3461 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3462 		AEAD_H_SIZE;
3463 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3464 						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3465 						0, 0,
3466 						key_ctx_size >> 4);
3467 	/* Calculate the GHASH subkey H = CIPH_K(0^128), the AES encryption of
3468 	 * a 16-byte all-zero block. It goes into the key context.
3469 	 */
3470 	ret = aes_expandkey(&aes, key, keylen);
3471 	if (ret) {
3472 		aeadctx->enckey_len = 0;
3473 		goto out;
3474 	}
3475 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3476 	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3477 	memzero_explicit(&aes, sizeof(aes));
3478 
3479 out:
3480 	return ret;
3481 }
3482 
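/* authenc(hmac(shaX),cbc/ctr(aes)) setkey: split the authenc key blob,
 * store the cipher key (plus, for CBC, the derived decryption round key)
 * and precompute the HMAC ipad/opad partial hashes for the key context,
 * following HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
 */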
3483 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3484 				   unsigned int keylen)
3485 {
3486 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3487 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3488 	/* 'keys' holds both the authentication key and the cipher key */
3489 	struct crypto_authenc_keys keys;
3490 	unsigned int bs, subtype;
3491 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3492 	int err = 0, i, key_ctx_len = 0;
3493 	unsigned char ck_size = 0;
3494 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3495 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3496 	struct algo_param param;
3497 	int align;
3498 	u8 *o_ptr = NULL;
3499 
3500 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3501 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3502 			      & CRYPTO_TFM_REQ_MASK);
3503 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3504 	if (err)
3505 		goto out;
3506 
3507 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3508 		goto out;
3509 
3510 	if (get_alg_config(&param, max_authsize)) {
3511 		pr_err("Unsupported digest size\n");
3512 		goto out;
3513 	}
3514 	subtype = get_aead_subtype(authenc);
3515 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3516 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3517 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3518 			goto out;
3519 		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3520 		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3521 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3522 	}
3523 	if (keys.enckeylen == AES_KEYSIZE_128) {
3524 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3525 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3526 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3527 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3528 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3529 	} else {
3530 		pr_err("Unsupported cipher key length\n");
3531 		goto out;
3532 	}
3533 
3534 	/* Copy only the encryption key. The authentication key is used here
3535 	 * only to derive h(ipad) and h(opad), so it need not be stored; the
3536 	 * digest step below caps its length at the hash digest size.
3537 	 */
3538 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3539 	aeadctx->enckey_len = keys.enckeylen;
3540 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3541 		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3542 
3543 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3544 			    aeadctx->enckey_len << 3);
3545 	}
3546 	base_hash  = chcr_alloc_shash(max_authsize);
3547 	if (IS_ERR(base_hash)) {
3548 		pr_err("Base driver cannot be loaded\n");
3549 		aeadctx->enckey_len = 0;
3550 		memzero_explicit(&keys, sizeof(keys));
3551 		return -EINVAL;
3552 	}
3553 	{
3554 		SHASH_DESC_ON_STACK(shash, base_hash);
3555 
3556 		shash->tfm = base_hash;
3557 		bs = crypto_shash_blocksize(base_hash);
3558 		align = KEYCTX_ALIGN_PAD(max_authsize);
3559 		o_ptr =  actx->h_iopad + param.result_size + align;
3560 
3561 		if (keys.authkeylen > bs) {
3562 			err = crypto_shash_digest(shash, keys.authkey,
3563 						  keys.authkeylen,
3564 						  o_ptr);
3565 			if (err) {
3566 				pr_err("Hashing of the authentication key failed\n");
3567 				goto out;
3568 			}
3569 			keys.authkeylen = max_authsize;
3570 		} else
3571 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3572 
3573 		/* Compute the ipad digest */
3574 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3575 		memcpy(pad, o_ptr, keys.authkeylen);
3576 		for (i = 0; i < bs >> 2; i++)
3577 			*((unsigned int *)pad + i) ^= IPAD_DATA;
3578 
3579 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3580 					      max_authsize))
3581 			goto out;
3582 		/* Compute the opad-digest */
3583 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3584 		memcpy(pad, o_ptr, keys.authkeylen);
3585 		for (i = 0; i < bs >> 2; i++)
3586 			*((unsigned int *)pad + i) ^= OPAD_DATA;
3587 
3588 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3589 			goto out;
3590 
3591 		/* convert the ipad and opad digest to network order */
3592 		chcr_change_order(actx->h_iopad, param.result_size);
3593 		chcr_change_order(o_ptr, param.result_size);
3594 		key_ctx_len = sizeof(struct _key_ctx) +
3595 			roundup(keys.enckeylen, 16) +
3596 			(param.result_size + align) * 2;
3597 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3598 						0, 1, key_ctx_len >> 4);
3599 		actx->auth_mode = param.auth_mode;
3600 		chcr_free_shash(base_hash);
3601 
3602 		memzero_explicit(&keys, sizeof(keys));
3603 		return 0;
3604 	}
3605 out:
3606 	aeadctx->enckey_len = 0;
3607 	memzero_explicit(&keys, sizeof(keys));
3608 	if (!IS_ERR(base_hash))
3609 		chcr_free_shash(base_hash);
3610 	return -EINVAL;
3611 }
3612 
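/* Setkey for the digest_null authenc variants: only the cipher key is
 * programmed and the authentication mode is set to NOP.
 */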
3613 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3614 					const u8 *key, unsigned int keylen)
3615 {
3616 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3617 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3618 	/* 'keys' holds both the authentication key and the cipher key */
3619 	struct crypto_authenc_keys keys;
3620 	int err;
3621 	unsigned int subtype;
3622 	int key_ctx_len = 0;
3623 	unsigned char ck_size = 0;
3624 
3625 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3626 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3627 			      & CRYPTO_TFM_REQ_MASK);
3628 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3629 	if (err)
3630 		goto out;
3631 
3632 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3633 		goto out;
3634 
3635 	subtype = get_aead_subtype(authenc);
3636 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3637 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3638 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3639 			goto out;
3640 		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3641 			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3642 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3643 	}
3644 	if (keys.enckeylen == AES_KEYSIZE_128) {
3645 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3646 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3647 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3648 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3649 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3650 	} else {
3651 		pr_err("Unsupported cipher key length %d\n", keys.enckeylen);
3652 		goto out;
3653 	}
3654 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3655 	aeadctx->enckey_len = keys.enckeylen;
3656 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3657 	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3658 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3659 				aeadctx->enckey_len << 3);
3660 	}
3661 	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3662 
3663 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3664 						0, key_ctx_len >> 4);
3665 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3666 	memzero_explicit(&keys, sizeof(keys));
3667 	return 0;
3668 out:
3669 	aeadctx->enckey_len = 0;
3670 	memzero_explicit(&keys, sizeof(keys));
3671 	return -EINVAL;
3672 }
3673 
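/* Common AEAD submission path: make sure a device is attached, account the
 * work request, honour queue-full/backlog semantics, then build the work
 * request via the supplied callback and post it to the selected Tx queue.
 * Returns -EINPROGRESS once the request has been handed to hardware.
 */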
3674 static int chcr_aead_op(struct aead_request *req,
3675 			int size,
3676 			create_wr_t create_wr_fn)
3677 {
3678 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3679 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3680 	struct chcr_context *ctx = a_ctx(tfm);
3681 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3682 	struct sk_buff *skb;
3683 	struct chcr_dev *cdev;
3684 
3685 	cdev = a_ctx(tfm)->dev;
3686 	if (!cdev) {
3687 		pr_err("%s: no crypto device\n", __func__);
3688 		return -ENXIO;
3689 	}
3690 
3691 	if (chcr_inc_wrcount(cdev)) {
3692 		/* A failure here means the device is detaching (lldi/padap is
3693 		 * being freed); fall back to software instead of queuing work.
3694 		 */
3695 		return chcr_aead_fallback(req, reqctx->op);
3696 	}
3697 
3698 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3699 				   reqctx->txqidx) &&
3700 	    !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3701 		chcr_dec_wrcount(cdev);
3702 		return -ENOSPC;
3703 	}
3704 
3705 	/* Form a WR from req */
3706 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3707 
3708 	if (IS_ERR_OR_NULL(skb)) {
3709 		chcr_dec_wrcount(cdev);
3710 		return PTR_ERR_OR_ZERO(skb);
3711 	}
3712 
3713 	skb->dev = u_ctx->lldi.ports[0];
3714 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3715 	chcr_send_wr(skb);
3716 	return -EINPROGRESS;
3717 }
3718 
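/* Encrypt entry point: derive the Tx/Rx queue indices from the current CPU,
 * then dispatch to the authenc, CCM or GCM work-request builder according
 * to the algorithm subtype.
 */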
3719 static int chcr_aead_encrypt(struct aead_request *req)
3720 {
3721 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3722 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3723 	struct chcr_context *ctx = a_ctx(tfm);
3724 	unsigned int cpu;
3725 
3726 	cpu = get_cpu();
3727 	reqctx->txqidx = cpu % ctx->ntxq;
3728 	reqctx->rxqidx = cpu % ctx->nrxq;
3729 	put_cpu();
3730 
3731 	reqctx->verify = VERIFY_HW;
3732 	reqctx->op = CHCR_ENCRYPT_OP;
3733 
3734 	switch (get_aead_subtype(tfm)) {
3735 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3736 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3737 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3738 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3739 		return chcr_aead_op(req, 0, create_authenc_wr);
3740 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3741 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3742 		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3743 	default:
3744 		return chcr_aead_op(req, 0, create_gcm_wr);
3745 	}
3746 }
3747 
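/* Decrypt entry point: as for encryption, but also choose between hardware
 * and software ICV verification, passing the authentication-tag size down
 * when the verification is done in software.
 */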
3748 static int chcr_aead_decrypt(struct aead_request *req)
3749 {
3750 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3751 	struct chcr_context *ctx = a_ctx(tfm);
3752 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3753 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3754 	int size;
3755 	unsigned int cpu;
3756 
3757 	cpu = get_cpu();
3758 	reqctx->txqidx = cpu % ctx->ntxq;
3759 	reqctx->rxqidx = cpu % ctx->nrxq;
3760 	put_cpu();
3761 
3762 	if (aeadctx->mayverify == VERIFY_SW) {
3763 		size = crypto_aead_maxauthsize(tfm);
3764 		reqctx->verify = VERIFY_SW;
3765 	} else {
3766 		size = 0;
3767 		reqctx->verify = VERIFY_HW;
3768 	}
3769 	reqctx->op = CHCR_DECRYPT_OP;
3770 	switch (get_aead_subtype(tfm)) {
3771 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3772 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3773 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3774 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3775 		return chcr_aead_op(req, size, create_authenc_wr);
3776 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3777 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3778 		return chcr_aead_op(req, size, create_aead_ccm_wr);
3779 	default:
3780 		return chcr_aead_op(req, size, create_gcm_wr);
3781 	}
3782 }
3783 
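/* Table of algorithms advertised to the kernel crypto framework. Once
 * registered they are reached through the generic crypto API rather than
 * called directly. As a rough illustration only (not part of this driver;
 * identifiers such as key, iv and the scatterlists are placeholders and
 * error handling is omitted), a user of "gcm(aes)" - which the core may
 * back with gcm-aes-chcr - would do something like:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *	aead_request_set_callback(req, 0, done_cb, NULL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 */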
3784 static struct chcr_alg_template driver_algs[] = {
3785 	/* AES-CBC */
3786 	{
3787 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3788 		.is_registered = 0,
3789 		.alg.skcipher = {
3790 			.base.cra_name		= "cbc(aes)",
3791 			.base.cra_driver_name	= "cbc-aes-chcr",
3792 			.base.cra_blocksize	= AES_BLOCK_SIZE,
3793 
3794 			.init			= chcr_init_tfm,
3795 			.exit			= chcr_exit_tfm,
3796 			.min_keysize		= AES_MIN_KEY_SIZE,
3797 			.max_keysize		= AES_MAX_KEY_SIZE,
3798 			.ivsize			= AES_BLOCK_SIZE,
3799 			.setkey			= chcr_aes_cbc_setkey,
3800 			.encrypt		= chcr_aes_encrypt,
3801 			.decrypt		= chcr_aes_decrypt,
3802 			}
3803 	},
3804 	{
3805 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3806 		.is_registered = 0,
3807 		.alg.skcipher = {
3808 			.base.cra_name		= "xts(aes)",
3809 			.base.cra_driver_name	= "xts-aes-chcr",
3810 			.base.cra_blocksize	= AES_BLOCK_SIZE,
3811 
3812 			.init			= chcr_init_tfm,
3813 			.exit			= chcr_exit_tfm,
3814 			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
3815 			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
3816 			.ivsize			= AES_BLOCK_SIZE,
3817 			.setkey			= chcr_aes_xts_setkey,
3818 			.encrypt		= chcr_aes_encrypt,
3819 			.decrypt		= chcr_aes_decrypt,
3820 			}
3821 	},
3822 	{
3823 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3824 		.is_registered = 0,
3825 		.alg.skcipher = {
3826 			.base.cra_name		= "ctr(aes)",
3827 			.base.cra_driver_name	= "ctr-aes-chcr",
3828 			.base.cra_blocksize	= 1,
3829 
3830 			.init			= chcr_init_tfm,
3831 			.exit			= chcr_exit_tfm,
3832 			.min_keysize		= AES_MIN_KEY_SIZE,
3833 			.max_keysize		= AES_MAX_KEY_SIZE,
3834 			.ivsize			= AES_BLOCK_SIZE,
3835 			.setkey			= chcr_aes_ctr_setkey,
3836 			.encrypt		= chcr_aes_encrypt,
3837 			.decrypt		= chcr_aes_decrypt,
3838 		}
3839 	},
3840 	{
3841 		.type = CRYPTO_ALG_TYPE_SKCIPHER |
3842 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3843 		.is_registered = 0,
3844 		.alg.skcipher = {
3845 			.base.cra_name		= "rfc3686(ctr(aes))",
3846 			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3847 			.base.cra_blocksize	= 1,
3848 
3849 			.init			= chcr_rfc3686_init,
3850 			.exit			= chcr_exit_tfm,
3851 			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3852 			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3853 			.ivsize			= CTR_RFC3686_IV_SIZE,
3854 			.setkey			= chcr_aes_rfc3686_setkey,
3855 			.encrypt		= chcr_aes_encrypt,
3856 			.decrypt		= chcr_aes_decrypt,
3857 		}
3858 	},
3859 	/* SHA */
3860 	{
3861 		.type = CRYPTO_ALG_TYPE_AHASH,
3862 		.is_registered = 0,
3863 		.alg.hash = {
3864 			.halg.digestsize = SHA1_DIGEST_SIZE,
3865 			.halg.base = {
3866 				.cra_name = "sha1",
3867 				.cra_driver_name = "sha1-chcr",
3868 				.cra_blocksize = SHA1_BLOCK_SIZE,
3869 			}
3870 		}
3871 	},
3872 	{
3873 		.type = CRYPTO_ALG_TYPE_AHASH,
3874 		.is_registered = 0,
3875 		.alg.hash = {
3876 			.halg.digestsize = SHA256_DIGEST_SIZE,
3877 			.halg.base = {
3878 				.cra_name = "sha256",
3879 				.cra_driver_name = "sha256-chcr",
3880 				.cra_blocksize = SHA256_BLOCK_SIZE,
3881 			}
3882 		}
3883 	},
3884 	{
3885 		.type = CRYPTO_ALG_TYPE_AHASH,
3886 		.is_registered = 0,
3887 		.alg.hash = {
3888 			.halg.digestsize = SHA224_DIGEST_SIZE,
3889 			.halg.base = {
3890 				.cra_name = "sha224",
3891 				.cra_driver_name = "sha224-chcr",
3892 				.cra_blocksize = SHA224_BLOCK_SIZE,
3893 			}
3894 		}
3895 	},
3896 	{
3897 		.type = CRYPTO_ALG_TYPE_AHASH,
3898 		.is_registered = 0,
3899 		.alg.hash = {
3900 			.halg.digestsize = SHA384_DIGEST_SIZE,
3901 			.halg.base = {
3902 				.cra_name = "sha384",
3903 				.cra_driver_name = "sha384-chcr",
3904 				.cra_blocksize = SHA384_BLOCK_SIZE,
3905 			}
3906 		}
3907 	},
3908 	{
3909 		.type = CRYPTO_ALG_TYPE_AHASH,
3910 		.is_registered = 0,
3911 		.alg.hash = {
3912 			.halg.digestsize = SHA512_DIGEST_SIZE,
3913 			.halg.base = {
3914 				.cra_name = "sha512",
3915 				.cra_driver_name = "sha512-chcr",
3916 				.cra_blocksize = SHA512_BLOCK_SIZE,
3917 			}
3918 		}
3919 	},
3920 	/* HMAC */
3921 	{
3922 		.type = CRYPTO_ALG_TYPE_HMAC,
3923 		.is_registered = 0,
3924 		.alg.hash = {
3925 			.halg.digestsize = SHA1_DIGEST_SIZE,
3926 			.halg.base = {
3927 				.cra_name = "hmac(sha1)",
3928 				.cra_driver_name = "hmac-sha1-chcr",
3929 				.cra_blocksize = SHA1_BLOCK_SIZE,
3930 			}
3931 		}
3932 	},
3933 	{
3934 		.type = CRYPTO_ALG_TYPE_HMAC,
3935 		.is_registered = 0,
3936 		.alg.hash = {
3937 			.halg.digestsize = SHA224_DIGEST_SIZE,
3938 			.halg.base = {
3939 				.cra_name = "hmac(sha224)",
3940 				.cra_driver_name = "hmac-sha224-chcr",
3941 				.cra_blocksize = SHA224_BLOCK_SIZE,
3942 			}
3943 		}
3944 	},
3945 	{
3946 		.type = CRYPTO_ALG_TYPE_HMAC,
3947 		.is_registered = 0,
3948 		.alg.hash = {
3949 			.halg.digestsize = SHA256_DIGEST_SIZE,
3950 			.halg.base = {
3951 				.cra_name = "hmac(sha256)",
3952 				.cra_driver_name = "hmac-sha256-chcr",
3953 				.cra_blocksize = SHA256_BLOCK_SIZE,
3954 			}
3955 		}
3956 	},
3957 	{
3958 		.type = CRYPTO_ALG_TYPE_HMAC,
3959 		.is_registered = 0,
3960 		.alg.hash = {
3961 			.halg.digestsize = SHA384_DIGEST_SIZE,
3962 			.halg.base = {
3963 				.cra_name = "hmac(sha384)",
3964 				.cra_driver_name = "hmac-sha384-chcr",
3965 				.cra_blocksize = SHA384_BLOCK_SIZE,
3966 			}
3967 		}
3968 	},
3969 	{
3970 		.type = CRYPTO_ALG_TYPE_HMAC,
3971 		.is_registered = 0,
3972 		.alg.hash = {
3973 			.halg.digestsize = SHA512_DIGEST_SIZE,
3974 			.halg.base = {
3975 				.cra_name = "hmac(sha512)",
3976 				.cra_driver_name = "hmac-sha512-chcr",
3977 				.cra_blocksize = SHA512_BLOCK_SIZE,
3978 			}
3979 		}
3980 	},
3981 	/* AEAD */
3982 	{
3983 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3984 		.is_registered = 0,
3985 		.alg.aead = {
3986 			.base = {
3987 				.cra_name = "gcm(aes)",
3988 				.cra_driver_name = "gcm-aes-chcr",
3989 				.cra_blocksize	= 1,
3990 				.cra_priority = CHCR_AEAD_PRIORITY,
3991 				.cra_ctxsize =	sizeof(struct chcr_context) +
3992 						sizeof(struct chcr_aead_ctx) +
3993 						sizeof(struct chcr_gcm_ctx),
3994 			},
3995 			.ivsize = GCM_AES_IV_SIZE,
3996 			.maxauthsize = GHASH_DIGEST_SIZE,
3997 			.setkey = chcr_gcm_setkey,
3998 			.setauthsize = chcr_gcm_setauthsize,
3999 		}
4000 	},
4001 	{
4002 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4003 		.is_registered = 0,
4004 		.alg.aead = {
4005 			.base = {
4006 				.cra_name = "rfc4106(gcm(aes))",
4007 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
4008 				.cra_blocksize	 = 1,
4009 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4010 				.cra_ctxsize =	sizeof(struct chcr_context) +
4011 						sizeof(struct chcr_aead_ctx) +
4012 						sizeof(struct chcr_gcm_ctx),
4013 
4014 			},
4015 			.ivsize = GCM_RFC4106_IV_SIZE,
4016 			.maxauthsize	= GHASH_DIGEST_SIZE,
4017 			.setkey = chcr_gcm_setkey,
4018 			.setauthsize	= chcr_4106_4309_setauthsize,
4019 		}
4020 	},
4021 	{
4022 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4023 		.is_registered = 0,
4024 		.alg.aead = {
4025 			.base = {
4026 				.cra_name = "ccm(aes)",
4027 				.cra_driver_name = "ccm-aes-chcr",
4028 				.cra_blocksize	 = 1,
4029 				.cra_priority = CHCR_AEAD_PRIORITY,
4030 				.cra_ctxsize =	sizeof(struct chcr_context) +
4031 						sizeof(struct chcr_aead_ctx),
4032 
4033 			},
4034 			.ivsize = AES_BLOCK_SIZE,
4035 			.maxauthsize	= GHASH_DIGEST_SIZE,
4036 			.setkey = chcr_aead_ccm_setkey,
4037 			.setauthsize	= chcr_ccm_setauthsize,
4038 		}
4039 	},
4040 	{
4041 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4042 		.is_registered = 0,
4043 		.alg.aead = {
4044 			.base = {
4045 				.cra_name = "rfc4309(ccm(aes))",
4046 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
4047 				.cra_blocksize	 = 1,
4048 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4049 				.cra_ctxsize =	sizeof(struct chcr_context) +
4050 						sizeof(struct chcr_aead_ctx),
4051 
4052 			},
4053 			.ivsize = 8,
4054 			.maxauthsize	= GHASH_DIGEST_SIZE,
4055 			.setkey = chcr_aead_rfc4309_setkey,
4056 			.setauthsize = chcr_4106_4309_setauthsize,
4057 		}
4058 	},
4059 	{
4060 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4061 		.is_registered = 0,
4062 		.alg.aead = {
4063 			.base = {
4064 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
4065 				.cra_driver_name =
4066 					"authenc-hmac-sha1-cbc-aes-chcr",
4067 				.cra_blocksize	 = AES_BLOCK_SIZE,
4068 				.cra_priority = CHCR_AEAD_PRIORITY,
4069 				.cra_ctxsize =	sizeof(struct chcr_context) +
4070 						sizeof(struct chcr_aead_ctx) +
4071 						sizeof(struct chcr_authenc_ctx),
4072 
4073 			},
4074 			.ivsize = AES_BLOCK_SIZE,
4075 			.maxauthsize = SHA1_DIGEST_SIZE,
4076 			.setkey = chcr_authenc_setkey,
4077 			.setauthsize = chcr_authenc_setauthsize,
4078 		}
4079 	},
4080 	{
4081 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4082 		.is_registered = 0,
4083 		.alg.aead = {
4084 			.base = {
4085 
4086 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4087 				.cra_driver_name =
4088 					"authenc-hmac-sha256-cbc-aes-chcr",
4089 				.cra_blocksize	 = AES_BLOCK_SIZE,
4090 				.cra_priority = CHCR_AEAD_PRIORITY,
4091 				.cra_ctxsize =	sizeof(struct chcr_context) +
4092 						sizeof(struct chcr_aead_ctx) +
4093 						sizeof(struct chcr_authenc_ctx),
4094 
4095 			},
4096 			.ivsize = AES_BLOCK_SIZE,
4097 			.maxauthsize	= SHA256_DIGEST_SIZE,
4098 			.setkey = chcr_authenc_setkey,
4099 			.setauthsize = chcr_authenc_setauthsize,
4100 		}
4101 	},
4102 	{
4103 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4104 		.is_registered = 0,
4105 		.alg.aead = {
4106 			.base = {
4107 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4108 				.cra_driver_name =
4109 					"authenc-hmac-sha224-cbc-aes-chcr",
4110 				.cra_blocksize	 = AES_BLOCK_SIZE,
4111 				.cra_priority = CHCR_AEAD_PRIORITY,
4112 				.cra_ctxsize =	sizeof(struct chcr_context) +
4113 						sizeof(struct chcr_aead_ctx) +
4114 						sizeof(struct chcr_authenc_ctx),
4115 			},
4116 			.ivsize = AES_BLOCK_SIZE,
4117 			.maxauthsize = SHA224_DIGEST_SIZE,
4118 			.setkey = chcr_authenc_setkey,
4119 			.setauthsize = chcr_authenc_setauthsize,
4120 		}
4121 	},
4122 	{
4123 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4124 		.is_registered = 0,
4125 		.alg.aead = {
4126 			.base = {
4127 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4128 				.cra_driver_name =
4129 					"authenc-hmac-sha384-cbc-aes-chcr",
4130 				.cra_blocksize	 = AES_BLOCK_SIZE,
4131 				.cra_priority = CHCR_AEAD_PRIORITY,
4132 				.cra_ctxsize =	sizeof(struct chcr_context) +
4133 						sizeof(struct chcr_aead_ctx) +
4134 						sizeof(struct chcr_authenc_ctx),
4135 
4136 			},
4137 			.ivsize = AES_BLOCK_SIZE,
4138 			.maxauthsize = SHA384_DIGEST_SIZE,
4139 			.setkey = chcr_authenc_setkey,
4140 			.setauthsize = chcr_authenc_setauthsize,
4141 		}
4142 	},
4143 	{
4144 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4145 		.is_registered = 0,
4146 		.alg.aead = {
4147 			.base = {
4148 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4149 				.cra_driver_name =
4150 					"authenc-hmac-sha512-cbc-aes-chcr",
4151 				.cra_blocksize	 = AES_BLOCK_SIZE,
4152 				.cra_priority = CHCR_AEAD_PRIORITY,
4153 				.cra_ctxsize =	sizeof(struct chcr_context) +
4154 						sizeof(struct chcr_aead_ctx) +
4155 						sizeof(struct chcr_authenc_ctx),
4156 
4157 			},
4158 			.ivsize = AES_BLOCK_SIZE,
4159 			.maxauthsize = SHA512_DIGEST_SIZE,
4160 			.setkey = chcr_authenc_setkey,
4161 			.setauthsize = chcr_authenc_setauthsize,
4162 		}
4163 	},
4164 	{
4165 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4166 		.is_registered = 0,
4167 		.alg.aead = {
4168 			.base = {
4169 				.cra_name = "authenc(digest_null,cbc(aes))",
4170 				.cra_driver_name =
4171 					"authenc-digest_null-cbc-aes-chcr",
4172 				.cra_blocksize	 = AES_BLOCK_SIZE,
4173 				.cra_priority = CHCR_AEAD_PRIORITY,
4174 				.cra_ctxsize =	sizeof(struct chcr_context) +
4175 						sizeof(struct chcr_aead_ctx) +
4176 						sizeof(struct chcr_authenc_ctx),
4177 
4178 			},
4179 			.ivsize  = AES_BLOCK_SIZE,
4180 			.maxauthsize = 0,
4181 			.setkey  = chcr_aead_digest_null_setkey,
4182 			.setauthsize = chcr_authenc_null_setauthsize,
4183 		}
4184 	},
4185 	{
4186 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4187 		.is_registered = 0,
4188 		.alg.aead = {
4189 			.base = {
4190 				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4191 				.cra_driver_name =
4192 				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4193 				.cra_blocksize	 = 1,
4194 				.cra_priority = CHCR_AEAD_PRIORITY,
4195 				.cra_ctxsize =	sizeof(struct chcr_context) +
4196 						sizeof(struct chcr_aead_ctx) +
4197 						sizeof(struct chcr_authenc_ctx),
4198 
4199 			},
4200 			.ivsize = CTR_RFC3686_IV_SIZE,
4201 			.maxauthsize = SHA1_DIGEST_SIZE,
4202 			.setkey = chcr_authenc_setkey,
4203 			.setauthsize = chcr_authenc_setauthsize,
4204 		}
4205 	},
4206 	{
4207 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4208 		.is_registered = 0,
4209 		.alg.aead = {
4210 			.base = {
4211 
4212 				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4213 				.cra_driver_name =
4214 				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4215 				.cra_blocksize	 = 1,
4216 				.cra_priority = CHCR_AEAD_PRIORITY,
4217 				.cra_ctxsize =	sizeof(struct chcr_context) +
4218 						sizeof(struct chcr_aead_ctx) +
4219 						sizeof(struct chcr_authenc_ctx),
4220 
4221 			},
4222 			.ivsize = CTR_RFC3686_IV_SIZE,
4223 			.maxauthsize	= SHA256_DIGEST_SIZE,
4224 			.setkey = chcr_authenc_setkey,
4225 			.setauthsize = chcr_authenc_setauthsize,
4226 		}
4227 	},
4228 	{
4229 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4230 		.is_registered = 0,
4231 		.alg.aead = {
4232 			.base = {
4233 				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4234 				.cra_driver_name =
4235 				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4236 				.cra_blocksize	 = 1,
4237 				.cra_priority = CHCR_AEAD_PRIORITY,
4238 				.cra_ctxsize =	sizeof(struct chcr_context) +
4239 						sizeof(struct chcr_aead_ctx) +
4240 						sizeof(struct chcr_authenc_ctx),
4241 			},
4242 			.ivsize = CTR_RFC3686_IV_SIZE,
4243 			.maxauthsize = SHA224_DIGEST_SIZE,
4244 			.setkey = chcr_authenc_setkey,
4245 			.setauthsize = chcr_authenc_setauthsize,
4246 		}
4247 	},
4248 	{
4249 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4250 		.is_registered = 0,
4251 		.alg.aead = {
4252 			.base = {
4253 				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4254 				.cra_driver_name =
4255 				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4256 				.cra_blocksize	 = 1,
4257 				.cra_priority = CHCR_AEAD_PRIORITY,
4258 				.cra_ctxsize =	sizeof(struct chcr_context) +
4259 						sizeof(struct chcr_aead_ctx) +
4260 						sizeof(struct chcr_authenc_ctx),
4261 
4262 			},
4263 			.ivsize = CTR_RFC3686_IV_SIZE,
4264 			.maxauthsize = SHA384_DIGEST_SIZE,
4265 			.setkey = chcr_authenc_setkey,
4266 			.setauthsize = chcr_authenc_setauthsize,
4267 		}
4268 	},
4269 	{
4270 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4271 		.is_registered = 0,
4272 		.alg.aead = {
4273 			.base = {
4274 				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4275 				.cra_driver_name =
4276 				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4277 				.cra_blocksize	 = 1,
4278 				.cra_priority = CHCR_AEAD_PRIORITY,
4279 				.cra_ctxsize =	sizeof(struct chcr_context) +
4280 						sizeof(struct chcr_aead_ctx) +
4281 						sizeof(struct chcr_authenc_ctx),
4282 
4283 			},
4284 			.ivsize = CTR_RFC3686_IV_SIZE,
4285 			.maxauthsize = SHA512_DIGEST_SIZE,
4286 			.setkey = chcr_authenc_setkey,
4287 			.setauthsize = chcr_authenc_setauthsize,
4288 		}
4289 	},
4290 	{
4291 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4292 		.is_registered = 0,
4293 		.alg.aead = {
4294 			.base = {
4295 				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4296 				.cra_driver_name =
4297 				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4298 				.cra_blocksize	 = 1,
4299 				.cra_priority = CHCR_AEAD_PRIORITY,
4300 				.cra_ctxsize =	sizeof(struct chcr_context) +
4301 						sizeof(struct chcr_aead_ctx) +
4302 						sizeof(struct chcr_authenc_ctx),
4303 
4304 			},
4305 			.ivsize  = CTR_RFC3686_IV_SIZE,
4306 			.maxauthsize = 0,
4307 			.setkey  = chcr_aead_digest_null_setkey,
4308 			.setauthsize = chcr_authenc_null_setauthsize,
4309 		}
4310 	},
4311 };
4312 
4313 /*
4314  *	chcr_unregister_alg - Deregister the driver's crypto algorithms
4315  *	from the kernel crypto framework.
4316  */
4317 static int chcr_unregister_alg(void)
4318 {
4319 	int i;
4320 
4321 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4322 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4323 		case CRYPTO_ALG_TYPE_SKCIPHER:
4324 			if (driver_algs[i].is_registered)
4325 				crypto_unregister_skcipher(
4326 						&driver_algs[i].alg.skcipher);
4327 			break;
4328 		case CRYPTO_ALG_TYPE_AEAD:
4329 			if (driver_algs[i].is_registered)
4330 				crypto_unregister_aead(
4331 						&driver_algs[i].alg.aead);
4332 			break;
4333 		case CRYPTO_ALG_TYPE_AHASH:
4334 			if (driver_algs[i].is_registered)
4335 				crypto_unregister_ahash(
4336 						&driver_algs[i].alg.hash);
4337 			break;
4338 		}
4339 		driver_algs[i].is_registered = 0;
4340 	}
4341 	return 0;
4342 }
4343 
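/* Per-transform context and request-state sizes used when registering the
 * ahash algorithms below.
 */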
4344 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4345 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4346 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4347 
4348 /*
4349  *	chcr_register_alg - Register crypto algorithms with kernel framework.
4350  */
4351 static int chcr_register_alg(void)
4352 {
4354 	struct ahash_alg *a_hash;
4355 	int err = 0, i;
4356 	char *name = NULL;
4357 
4358 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4359 		if (driver_algs[i].is_registered)
4360 			continue;
4361 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4362 		case CRYPTO_ALG_TYPE_SKCIPHER:
4363 			driver_algs[i].alg.skcipher.base.cra_priority =
4364 				CHCR_CRA_PRIORITY;
4365 			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4366 			driver_algs[i].alg.skcipher.base.cra_flags =
4367 				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4368 				CRYPTO_ALG_NEED_FALLBACK;
4369 			driver_algs[i].alg.skcipher.base.cra_ctxsize =
4370 				sizeof(struct chcr_context) +
4371 				sizeof(struct ablk_ctx);
4372 			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4373 
4374 			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4375 			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4376 			break;
4377 		case CRYPTO_ALG_TYPE_AEAD:
4378 			driver_algs[i].alg.aead.base.cra_flags =
4379 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4380 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4381 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4382 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4383 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4384 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4385 			err = crypto_register_aead(&driver_algs[i].alg.aead);
4386 			name = driver_algs[i].alg.aead.base.cra_driver_name;
4387 			break;
4388 		case CRYPTO_ALG_TYPE_AHASH:
4389 			a_hash = &driver_algs[i].alg.hash;
4390 			a_hash->update = chcr_ahash_update;
4391 			a_hash->final = chcr_ahash_final;
4392 			a_hash->finup = chcr_ahash_finup;
4393 			a_hash->digest = chcr_ahash_digest;
4394 			a_hash->export = chcr_ahash_export;
4395 			a_hash->import = chcr_ahash_import;
4396 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4397 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4398 			a_hash->halg.base.cra_module = THIS_MODULE;
4399 			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4400 			a_hash->halg.base.cra_alignmask = 0;
4401 			a_hash->halg.base.cra_exit = NULL;
4402 
4403 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4404 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4405 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4406 				a_hash->init = chcr_hmac_init;
4407 				a_hash->setkey = chcr_ahash_setkey;
4408 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4409 			} else {
4410 				a_hash->init = chcr_sha_init;
4411 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4412 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4413 			}
4414 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4415 			name = driver_algs[i].alg.hash.halg.base.cra_driver_name;
4417 			break;
4418 		}
4419 		if (err) {
4420 			pr_err("%s: algorithm registration failed\n", name);
4422 			goto register_err;
4423 		} else {
4424 			driver_algs[i].is_registered = 1;
4425 		}
4426 	}
4427 	return 0;
4428 
4429 register_err:
4430 	chcr_unregister_alg();
4431 	return err;
4432 }
4433 
4434 /*
4435  *	start_crypto - Register the crypto algorithms.
4436  *	This should be called once, when the first device comes up. After
4437  *	this the kernel starts calling the driver APIs for crypto operations.
4438  */
4439 int start_crypto(void)
4440 {
4441 	return chcr_register_alg();
4442 }
4443 
4444 /*
4445  *	stop_crypto - Deregister all the crypto algorithms from the kernel.
4446  *	This should be called once, when the last device goes down. After
4447  *	this the kernel no longer calls the driver APIs for crypto operations.
4448  */
4449 int stop_crypto(void)
4450 {
4451 	chcr_unregister_alg();
4452 	return 0;
4453 }
4454