1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/skbuff.h>
48 #include <linux/rtnetlink.h>
49 #include <linux/highmem.h>
50 #include <linux/scatterlist.h>
51 
52 #include <crypto/aes.h>
53 #include <crypto/algapi.h>
54 #include <crypto/hash.h>
55 #include <crypto/gcm.h>
56 #include <crypto/sha.h>
57 #include <crypto/authenc.h>
58 #include <crypto/ctr.h>
59 #include <crypto/gf128mul.h>
60 #include <crypto/internal/aead.h>
61 #include <crypto/null.h>
62 #include <crypto/internal/skcipher.h>
63 #include <crypto/aead.h>
64 #include <crypto/scatterwalk.h>
65 #include <crypto/internal/hash.h>
66 
67 #include "t4fw_api.h"
68 #include "t4_msg.h"
69 #include "chcr_core.h"
70 #include "chcr_algo.h"
71 #include "chcr_crypto.h"
72 
73 #define IV AES_BLOCK_SIZE
74 
75 static unsigned int sgl_ent_len[] = {
76 	0, 0, 16, 24, 40, 48, 64, 72, 88,
77 	96, 112, 120, 136, 144, 160, 168, 184,
78 	192, 208, 216, 232, 240, 256, 264, 280,
79 	288, 304, 312, 328, 336, 352, 360, 376
80 };
81 
82 static unsigned int dsgl_ent_len[] = {
83 	0, 32, 32, 48, 48, 64, 64, 80, 80,
84 	112, 112, 128, 128, 144, 144, 160, 160,
85 	192, 192, 208, 208, 224, 224, 240, 240,
86 	272, 272, 288, 288, 304, 304, 320, 320
87 };
88 
89 static u32 round_constant[11] = {
90 	0x01000000, 0x02000000, 0x04000000, 0x08000000,
91 	0x10000000, 0x20000000, 0x40000000, 0x80000000,
92 	0x1B000000, 0x36000000, 0x6C000000
93 };
94 
95 static int chcr_handle_cipher_resp(struct skcipher_request *req,
96 				   unsigned char *input, int err);
97 
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
99 {
100 	return ctx->crypto_ctx->aeadctx;
101 }
102 
103 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
104 {
105 	return ctx->crypto_ctx->ablkctx;
106 }
107 
108 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
109 {
110 	return ctx->crypto_ctx->hmacctx;
111 }
112 
113 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
114 {
115 	return gctx->ctx->gcm;
116 }
117 
118 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
119 {
120 	return gctx->ctx->authenc;
121 }
122 
123 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
124 {
125 	return container_of(ctx->dev, struct uld_ctx, dev);
126 }
127 
128 static inline int is_ofld_imm(const struct sk_buff *skb)
129 {
130 	return (skb->len <= SGE_MAX_WR_LEN);
131 }
132 
133 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
134 {
135 	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
136 }
137 
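/*
 * sg_nents_xlen - count the SGL entries needed to cover @reqlen bytes of
 * @sg, skipping the first @skip bytes and splitting each DMA mapping
 * into chunks of at most @entlen bytes.
 */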
138 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
139 			 unsigned int entlen,
140 			 unsigned int skip)
141 {
142 	int nents = 0;
143 	unsigned int less;
144 	unsigned int skip_len = 0;
145 
146 	while (sg && skip) {
147 		if (sg_dma_len(sg) <= skip) {
148 			skip -= sg_dma_len(sg);
149 			skip_len = 0;
150 			sg = sg_next(sg);
151 		} else {
152 			skip_len = skip;
153 			skip = 0;
154 		}
155 	}
156 
157 	while (sg && reqlen) {
158 		less = min(reqlen, sg_dma_len(sg) - skip_len);
159 		nents += DIV_ROUND_UP(less, entlen);
160 		reqlen -= less;
161 		skip_len = 0;
162 		sg = sg_next(sg);
163 	}
164 	return nents;
165 }
166 
167 static inline int get_aead_subtype(struct crypto_aead *aead)
168 {
169 	struct aead_alg *alg = crypto_aead_alg(aead);
170 	struct chcr_alg_template *chcr_crypto_alg =
171 		container_of(alg, struct chcr_alg_template, alg.aead);
172 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
173 }
174 
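/*
 * chcr_verify_tag - verify the authentication tag in software: compare
 * the tag returned after the CPL_FW6_PLD header with the expected tag,
 * taken from the CPL data for GCM modes or from the tail of the source
 * SGL otherwise, and set *err to -EBADMSG on mismatch.
 */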
175 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
176 {
177 	u8 temp[SHA512_DIGEST_SIZE];
178 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
179 	int authsize = crypto_aead_authsize(tfm);
180 	struct cpl_fw6_pld *fw6_pld;
181 	int cmp = 0;
182 
183 	fw6_pld = (struct cpl_fw6_pld *)input;
184 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
185 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
186 		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
189 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
190 				authsize, req->assoclen +
191 				req->cryptlen - authsize);
192 		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
193 	}
194 	if (cmp)
195 		*err = -EBADMSG;
196 	else
197 		*err = 0;
198 }
199 
200 static int chcr_inc_wrcount(struct chcr_dev *dev)
201 {
202 	if (dev->state == CHCR_DETACH)
203 		return 1;
204 	atomic_inc(&dev->inflight);
205 	return 0;
206 }
207 
208 static inline void chcr_dec_wrcount(struct chcr_dev *dev)
209 {
210 	atomic_dec(&dev->inflight);
211 }
212 
213 static inline int chcr_handle_aead_resp(struct aead_request *req,
214 					 unsigned char *input,
215 					 int err)
216 {
217 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
218 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
219 	struct chcr_dev *dev = a_ctx(tfm)->dev;
220 
221 	chcr_aead_common_exit(req);
222 	if (reqctx->verify == VERIFY_SW) {
223 		chcr_verify_tag(req, input, &err);
224 		reqctx->verify = VERIFY_HW;
225 	}
226 	chcr_dec_wrcount(dev);
227 	req->base.complete(&req->base, err);
228 
229 	return err;
230 }
231 
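/*
 * get_aes_decrypt_key - run the AES key schedule on @key and emit the
 * final round-key words in reverse order, which is the form the
 * hardware needs to start decryption from the last round.
 */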
232 static void get_aes_decrypt_key(unsigned char *dec_key,
233 				       const unsigned char *key,
234 				       unsigned int keylength)
235 {
236 	u32 temp;
237 	u32 w_ring[MAX_NK];
238 	int i, j, k;
239 	u8  nr, nk;
240 
241 	switch (keylength) {
242 	case AES_KEYLENGTH_128BIT:
243 		nk = KEYLENGTH_4BYTES;
244 		nr = NUMBER_OF_ROUNDS_10;
245 		break;
246 	case AES_KEYLENGTH_192BIT:
247 		nk = KEYLENGTH_6BYTES;
248 		nr = NUMBER_OF_ROUNDS_12;
249 		break;
250 	case AES_KEYLENGTH_256BIT:
251 		nk = KEYLENGTH_8BYTES;
252 		nr = NUMBER_OF_ROUNDS_14;
253 		break;
254 	default:
255 		return;
256 	}
257 	for (i = 0; i < nk; i++)
258 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
259 
260 	i = 0;
261 	temp = w_ring[nk - 1];
262 	while (i + nk < (nr + 1) * 4) {
263 		if (!(i % nk)) {
264 			/* RotWord(temp) */
265 			temp = (temp << 8) | (temp >> 24);
266 			temp = aes_ks_subword(temp);
267 			temp ^= round_constant[i / nk];
268 		} else if (nk == 8 && (i % 4 == 0)) {
269 			temp = aes_ks_subword(temp);
270 		}
271 		w_ring[i % nk] ^= temp;
272 		temp = w_ring[i % nk];
273 		i++;
274 	}
275 	i--;
276 	for (k = 0, j = i % nk; k < nk; k++) {
277 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
278 		j--;
279 		if (j < 0)
280 			j += nk;
281 	}
282 }
283 
284 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
285 {
286 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
287 
288 	switch (ds) {
289 	case SHA1_DIGEST_SIZE:
290 		base_hash = crypto_alloc_shash("sha1", 0, 0);
291 		break;
292 	case SHA224_DIGEST_SIZE:
293 		base_hash = crypto_alloc_shash("sha224", 0, 0);
294 		break;
295 	case SHA256_DIGEST_SIZE:
296 		base_hash = crypto_alloc_shash("sha256", 0, 0);
297 		break;
298 	case SHA384_DIGEST_SIZE:
299 		base_hash = crypto_alloc_shash("sha384", 0, 0);
300 		break;
301 	case SHA512_DIGEST_SIZE:
302 		base_hash = crypto_alloc_shash("sha512", 0, 0);
303 		break;
304 	}
305 
306 	return base_hash;
307 }
308 
309 static int chcr_compute_partial_hash(struct shash_desc *desc,
310 				     char *iopad, char *result_hash,
311 				     int digest_size)
312 {
313 	struct sha1_state sha1_st;
314 	struct sha256_state sha256_st;
315 	struct sha512_state sha512_st;
316 	int error;
317 
318 	if (digest_size == SHA1_DIGEST_SIZE) {
319 		error = crypto_shash_init(desc) ?:
320 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
321 			crypto_shash_export(desc, (void *)&sha1_st);
322 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
323 	} else if (digest_size == SHA224_DIGEST_SIZE) {
324 		error = crypto_shash_init(desc) ?:
325 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
326 			crypto_shash_export(desc, (void *)&sha256_st);
327 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
328 
329 	} else if (digest_size == SHA256_DIGEST_SIZE) {
330 		error = crypto_shash_init(desc) ?:
331 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
332 			crypto_shash_export(desc, (void *)&sha256_st);
333 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
334 
335 	} else if (digest_size == SHA384_DIGEST_SIZE) {
336 		error = crypto_shash_init(desc) ?:
337 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
338 			crypto_shash_export(desc, (void *)&sha512_st);
339 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
340 
341 	} else if (digest_size == SHA512_DIGEST_SIZE) {
342 		error = crypto_shash_init(desc) ?:
343 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
344 			crypto_shash_export(desc, (void *)&sha512_st);
345 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
346 	} else {
347 		error = -EINVAL;
348 		pr_err("Unknown digest size %d\n", digest_size);
349 	}
350 	return error;
351 }
352 
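/*
 * chcr_change_order - convert the words of an exported partial-hash
 * state to the big-endian layout expected by the hardware (64-bit words
 * when @ds is the SHA-512 digest size, 32-bit words otherwise).
 */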
353 static void chcr_change_order(char *buf, int ds)
354 {
355 	int i;
356 
357 	if (ds == SHA512_DIGEST_SIZE) {
358 		for (i = 0; i < (ds / sizeof(u64)); i++)
359 			*((__be64 *)buf + i) =
360 				cpu_to_be64(*((u64 *)buf + i));
361 	} else {
362 		for (i = 0; i < (ds / sizeof(u32)); i++)
363 			*((__be32 *)buf + i) =
364 				cpu_to_be32(*((u32 *)buf + i));
365 	}
366 }
367 
368 static inline int is_hmac(struct crypto_tfm *tfm)
369 {
370 	struct crypto_alg *alg = tfm->__crt_alg;
371 	struct chcr_alg_template *chcr_crypto_alg =
372 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
373 			     alg.hash);
374 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
375 		return 1;
376 	return 0;
377 }
378 
379 static inline void dsgl_walk_init(struct dsgl_walk *walk,
380 				   struct cpl_rx_phys_dsgl *dsgl)
381 {
382 	walk->dsgl = dsgl;
383 	walk->nents = 0;
384 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
385 }
386 
387 static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
388 				 int pci_chan_id)
389 {
390 	struct cpl_rx_phys_dsgl *phys_cpl;
391 
392 	phys_cpl = walk->dsgl;
393 
394 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
395 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
396 	phys_cpl->pcirlxorder_to_noofsgentr =
397 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
398 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
399 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
400 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
401 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
402 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
403 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
404 	phys_cpl->rss_hdr_int.qid = htons(qid);
405 	phys_cpl->rss_hdr_int.hash_val = 0;
406 	phys_cpl->rss_hdr_int.channel = pci_chan_id;
407 }
408 
409 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
410 					size_t size,
411 					dma_addr_t addr)
412 {
413 	int j;
414 
415 	if (!size)
416 		return;
417 	j = walk->nents;
418 	walk->to->len[j % 8] = htons(size);
419 	walk->to->addr[j % 8] = cpu_to_be64(addr);
420 	j++;
421 	if ((j % 8) == 0)
422 		walk->to++;
423 	walk->nents = j;
424 }
425 
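/*
 * dsgl_walk_add_sg - append @slen bytes of @sg, after skipping @skip
 * bytes, to the destination PHYS_DSGL, splitting each mapping into
 * entries of at most CHCR_DST_SG_SIZE bytes.
 */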
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
430 {
431 	int skip_len = 0;
432 	unsigned int left_size = slen, len = 0;
433 	unsigned int j = walk->nents;
434 	int offset, ent_len;
435 
436 	if (!slen)
437 		return;
438 	while (sg && skip) {
439 		if (sg_dma_len(sg) <= skip) {
440 			skip -= sg_dma_len(sg);
441 			skip_len = 0;
442 			sg = sg_next(sg);
443 		} else {
444 			skip_len = skip;
445 			skip = 0;
446 		}
447 	}
448 
449 	while (left_size && sg) {
450 		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
451 		offset = 0;
452 		while (len) {
453 			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
454 			walk->to->len[j % 8] = htons(ent_len);
455 			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
456 						      offset + skip_len);
457 			offset += ent_len;
458 			len -= ent_len;
459 			j++;
460 			if ((j % 8) == 0)
461 				walk->to++;
462 		}
463 		walk->last_sg = sg;
464 		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
465 					  skip_len) + skip_len;
466 		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
467 		skip_len = 0;
468 		sg = sg_next(sg);
469 	}
470 	walk->nents = j;
471 }
472 
473 static inline void ulptx_walk_init(struct ulptx_walk *walk,
474 				   struct ulptx_sgl *ulp)
475 {
476 	walk->sgl = ulp;
477 	walk->nents = 0;
478 	walk->pair_idx = 0;
479 	walk->pair = ulp->sge;
480 	walk->last_sg = NULL;
481 	walk->last_sg_len = 0;
482 }
483 
484 static inline void ulptx_walk_end(struct ulptx_walk *walk)
485 {
486 	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
487 			      ULPTX_NSGE_V(walk->nents));
488 }
491 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
492 					size_t size,
493 					dma_addr_t addr)
494 {
495 	if (!size)
496 		return;
497 
498 	if (walk->nents == 0) {
499 		walk->sgl->len0 = cpu_to_be32(size);
500 		walk->sgl->addr0 = cpu_to_be64(addr);
501 	} else {
502 		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
503 		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
504 		walk->pair_idx = !walk->pair_idx;
505 		if (!walk->pair_idx)
506 			walk->pair++;
507 	}
508 	walk->nents++;
509 }
510 
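/*
 * ulptx_walk_add_sg - append @len bytes of @sg, after skipping @skip
 * bytes, to the ULPTX SGL: the first entry goes into len0/addr0 and the
 * rest are packed as address/length pairs, each entry capped at
 * CHCR_SRC_SG_SIZE bytes.
 */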
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
515 {
516 	int small;
517 	int skip_len = 0;
518 	unsigned int sgmin;
519 
520 	if (!len)
521 		return;
522 	while (sg && skip) {
523 		if (sg_dma_len(sg) <= skip) {
524 			skip -= sg_dma_len(sg);
525 			skip_len = 0;
526 			sg = sg_next(sg);
527 		} else {
528 			skip_len = skip;
529 			skip = 0;
530 		}
531 	}
532 	WARN(!sg, "SG should not be null here\n");
533 	if (sg && (walk->nents == 0)) {
534 		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
535 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
536 		walk->sgl->len0 = cpu_to_be32(sgmin);
537 		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
538 		walk->nents++;
539 		len -= sgmin;
540 		walk->last_sg = sg;
541 		walk->last_sg_len = sgmin + skip_len;
542 		skip_len += sgmin;
543 		if (sg_dma_len(sg) == skip_len) {
544 			sg = sg_next(sg);
545 			skip_len = 0;
546 		}
547 	}
548 
549 	while (sg && len) {
550 		small = min(sg_dma_len(sg) - skip_len, len);
551 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
552 		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
553 		walk->pair->addr[walk->pair_idx] =
554 			cpu_to_be64(sg_dma_address(sg) + skip_len);
555 		walk->pair_idx = !walk->pair_idx;
556 		walk->nents++;
557 		if (!walk->pair_idx)
558 			walk->pair++;
559 		len -= sgmin;
560 		skip_len += sgmin;
561 		walk->last_sg = sg;
562 		walk->last_sg_len = skip_len;
563 		if (sg_dma_len(sg) == skip_len) {
564 			sg = sg_next(sg);
565 			skip_len = 0;
566 		}
567 	}
568 }
569 
570 static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
571 {
572 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
573 	struct chcr_alg_template *chcr_crypto_alg =
574 		container_of(alg, struct chcr_alg_template, alg.skcipher);
575 
576 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
577 }
578 
579 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
580 {
581 	struct adapter *adap = netdev2adap(dev);
582 	struct sge_uld_txq_info *txq_info =
583 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
584 	struct sge_uld_txq *txq;
585 	int ret = 0;
586 
587 	local_bh_disable();
588 	txq = &txq_info->uldtxq[idx];
589 	spin_lock(&txq->sendq.lock);
590 	if (txq->full)
591 		ret = -1;
592 	spin_unlock(&txq->sendq.lock);
593 	local_bh_enable();
594 	return ret;
595 }
596 
597 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
598 			       struct _key_ctx *key_ctx)
599 {
600 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
601 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
602 	} else {
603 		memcpy(key_ctx->key,
604 		       ablkctx->key + (ablkctx->enckey_len >> 1),
605 		       ablkctx->enckey_len >> 1);
606 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
607 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
608 	}
609 	return 0;
610 }
611 
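/*
 * chcr_hash_ent_in_wr - return how many source bytes fit in the @space
 * left in a hash WR, charging one SGL entry per CHCR_SRC_SG_SIZE chunk;
 * sgl_ent_len[] maps an entry count to the resulting ULPTX SGL size.
 */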
612 static int chcr_hash_ent_in_wr(struct scatterlist *src,
613 			     unsigned int minsg,
614 			     unsigned int space,
615 			     unsigned int srcskip)
616 {
617 	int srclen = 0;
618 	int srcsg = minsg;
619 	int soffset = 0, sless;
620 
621 	if (sg_dma_len(src) == srcskip) {
622 		src = sg_next(src);
623 		srcskip = 0;
624 	}
625 	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
628 		srclen += sless;
629 		soffset += sless;
630 		srcsg++;
631 		if (sg_dma_len(src) == (soffset + srcskip)) {
632 			src = sg_next(src);
633 			soffset = 0;
634 			srcskip = 0;
635 		}
636 	}
637 	return srclen;
638 }
639 
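/*
 * chcr_sg_ent_in_wr - return how many bytes of @src and @dst can be
 * described within @space, growing the source SGL and the destination
 * DSGL in step; the caller can transfer the smaller of the two counts.
 */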
640 static int chcr_sg_ent_in_wr(struct scatterlist *src,
641 			     struct scatterlist *dst,
642 			     unsigned int minsg,
643 			     unsigned int space,
644 			     unsigned int srcskip,
645 			     unsigned int dstskip)
646 {
647 	int srclen = 0, dstlen = 0;
648 	int srcsg = minsg, dstsg = minsg;
649 	int offset = 0, soffset = 0, less, sless = 0;
650 
651 	if (sg_dma_len(src) == srcskip) {
652 		src = sg_next(src);
653 		srcskip = 0;
654 	}
655 	if (sg_dma_len(dst) == dstskip) {
656 		dst = sg_next(dst);
657 		dstskip = 0;
658 	}
659 
660 	while (src && dst &&
661 	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
662 		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
663 				CHCR_SRC_SG_SIZE);
664 		srclen += sless;
665 		srcsg++;
666 		offset = 0;
667 		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
668 		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
669 			if (srclen <= dstlen)
670 				break;
671 			less = min_t(unsigned int, sg_dma_len(dst) - offset -
672 				     dstskip, CHCR_DST_SG_SIZE);
673 			dstlen += less;
674 			offset += less;
675 			if ((offset + dstskip) == sg_dma_len(dst)) {
676 				dst = sg_next(dst);
677 				offset = 0;
678 			}
679 			dstsg++;
680 			dstskip = 0;
681 		}
682 		soffset += sless;
683 		if ((soffset + srcskip) == sg_dma_len(src)) {
684 			src = sg_next(src);
685 			srcskip = 0;
686 			soffset = 0;
687 		}
688 
689 	}
690 	return min(srclen, dstlen);
691 }
692 
693 static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
694 				u32 flags,
695 				struct scatterlist *src,
696 				struct scatterlist *dst,
697 				unsigned int nbytes,
698 				u8 *iv,
699 				unsigned short op_type)
700 {
701 	int err;
702 
703 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
704 
705 	skcipher_request_set_sync_tfm(subreq, cipher);
706 	skcipher_request_set_callback(subreq, flags, NULL, NULL);
707 	skcipher_request_set_crypt(subreq, src, dst,
708 				   nbytes, iv);
709 
710 	err = op_type ? crypto_skcipher_decrypt(subreq) :
711 		crypto_skcipher_encrypt(subreq);
712 	skcipher_request_zero(subreq);
713 
	return err;
}
717 
718 static inline int get_qidxs(struct crypto_async_request *req,
719 			    unsigned int *txqidx, unsigned int *rxqidx)
720 {
721 	struct crypto_tfm *tfm = req->tfm;
722 	int ret = 0;
723 
724 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
725 	case CRYPTO_ALG_TYPE_AEAD:
726 	{
727 		struct aead_request *aead_req =
728 			container_of(req, struct aead_request, base);
729 		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
730 		*txqidx = reqctx->txqidx;
731 		*rxqidx = reqctx->rxqidx;
732 		break;
733 	}
734 	case CRYPTO_ALG_TYPE_SKCIPHER:
735 	{
736 		struct skcipher_request *sk_req =
737 			container_of(req, struct skcipher_request, base);
738 		struct chcr_skcipher_req_ctx *reqctx =
739 			skcipher_request_ctx(sk_req);
740 		*txqidx = reqctx->txqidx;
741 		*rxqidx = reqctx->rxqidx;
742 		break;
743 	}
744 	case CRYPTO_ALG_TYPE_AHASH:
745 	{
746 		struct ahash_request *ahash_req =
747 			container_of(req, struct ahash_request, base);
748 		struct chcr_ahash_req_ctx *reqctx =
749 			ahash_request_ctx(ahash_req);
750 		*txqidx = reqctx->txqidx;
751 		*rxqidx = reqctx->rxqidx;
752 		break;
753 	}
754 	default:
755 		ret = -EINVAL;
756 		/* should never get here */
757 		BUG();
758 		break;
759 	}
760 	return ret;
761 }
762 
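/*
 * create_wreq - fill the FW_CRYPTO_LOOKASIDE_WR, ULPTX and immediate
 * sub-command headers that are common to cipher, hash and AEAD work
 * requests.
 */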
763 static inline void create_wreq(struct chcr_context *ctx,
764 			       struct chcr_wr *chcr_req,
765 			       struct crypto_async_request *req,
766 			       unsigned int imm,
767 			       int hash_sz,
768 			       unsigned int len16,
769 			       unsigned int sc_len,
770 			       unsigned int lcb)
771 {
772 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
773 	unsigned int tx_channel_id, rx_channel_id;
774 	unsigned int txqidx = 0, rxqidx = 0;
775 	unsigned int qid, fid;
776 
777 	get_qidxs(req, &txqidx, &rxqidx);
778 	qid = u_ctx->lldi.rxq_ids[rxqidx];
779 	fid = u_ctx->lldi.rxq_ids[0];
780 	tx_channel_id = txqidx / ctx->txq_perchan;
781 	rx_channel_id = rxqidx / ctx->rxq_perchan;
784 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
785 	chcr_req->wreq.pld_size_hash_size =
786 		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
787 	chcr_req->wreq.len16_pkd =
788 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
789 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
790 	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
791 							    !!lcb, txqidx);
792 
793 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
794 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
795 				((sizeof(chcr_req->wreq)) >> 4)));
796 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
797 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
798 					   sizeof(chcr_req->key_ctx) + sc_len);
799 }
800 
801 /**
802  *	create_cipher_wr - form the WR for cipher operations
803  *	@req: cipher req.
804  *	@ctx: crypto driver context of the request.
805  *	@qid: ingress qid where response of this WR should be received.
806  *	@op_type:	encryption or decryption
807  */
808 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
809 {
810 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
811 	struct chcr_context *ctx = c_ctx(tfm);
812 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
813 	struct sk_buff *skb = NULL;
814 	struct chcr_wr *chcr_req;
815 	struct cpl_rx_phys_dsgl *phys_cpl;
816 	struct ulptx_sgl *ulptx;
817 	struct chcr_skcipher_req_ctx *reqctx =
818 		skcipher_request_ctx(wrparam->req);
819 	unsigned int temp = 0, transhdr_len, dst_size;
820 	int error;
821 	int nents;
822 	unsigned int kctx_len;
823 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
824 			GFP_KERNEL : GFP_ATOMIC;
825 	struct adapter *adap = padap(ctx->dev);
826 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
827 
828 	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
829 			      reqctx->dst_ofst);
830 	dst_size = get_space_for_phys_dsgl(nents);
831 	kctx_len = roundup(ablkctx->enckey_len, 16);
832 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
833 	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
834 				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
835 	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
836 				     (sgl_len(nents) * 8);
837 	transhdr_len += temp;
838 	transhdr_len = roundup(transhdr_len, 16);
839 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
840 	if (!skb) {
841 		error = -ENOMEM;
842 		goto err;
843 	}
844 	chcr_req = __skb_put_zero(skb, transhdr_len);
845 	chcr_req->sec_cpl.op_ivinsrtofst =
846 			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
847 
848 	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
849 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
850 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
851 
852 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
853 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
854 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
855 							 ablkctx->ciph_mode,
856 							 0, 0, IV >> 1);
857 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
858 							  0, 1, dst_size);
859 
860 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
861 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
862 	    (!(get_cryptoalg_subtype(tfm) ==
863 	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
864 	    (!(get_cryptoalg_subtype(tfm) ==
865 	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
866 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
867 	} else {
868 		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
869 		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
870 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
871 			       ablkctx->enckey_len);
872 		} else {
873 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
874 			       (ablkctx->enckey_len >> 1),
875 			       ablkctx->enckey_len >> 1);
876 			memcpy(chcr_req->key_ctx.key +
877 			       (ablkctx->enckey_len >> 1),
878 			       ablkctx->key,
879 			       ablkctx->enckey_len >> 1);
880 		}
881 	}
882 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
883 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
884 	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
885 	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
886 
887 	atomic_inc(&adap->chcr_stats.cipher_rqst);
888 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
889 		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
893 	reqctx->skb = skb;
894 
895 	if (reqctx->op && (ablkctx->ciph_mode ==
896 			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
897 		sg_pcopy_to_buffer(wrparam->req->src,
898 			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
899 			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
900 
901 	return skb;
902 err:
903 	return ERR_PTR(error);
904 }
905 
906 static inline int chcr_keyctx_ck_size(unsigned int keylen)
907 {
908 	int ck_size = 0;
909 
910 	if (keylen == AES_KEYSIZE_128)
911 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
912 	else if (keylen == AES_KEYSIZE_192)
913 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
914 	else if (keylen == AES_KEYSIZE_256)
915 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
916 	else
917 		ck_size = 0;
918 
919 	return ck_size;
920 }
921 static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
922 				       const u8 *key,
923 				       unsigned int keylen)
924 {
925 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
926 
927 	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
928 				CRYPTO_TFM_REQ_MASK);
929 	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
930 				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
931 	return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
932 }
933 
934 static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
935 			       const u8 *key,
936 			       unsigned int keylen)
937 {
938 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
939 	unsigned int ck_size, context_size;
940 	u16 alignment = 0;
941 	int err;
942 
943 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
944 	if (err)
945 		goto badkey_err;
946 
947 	ck_size = chcr_keyctx_ck_size(keylen);
948 	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
949 	memcpy(ablkctx->key, key, keylen);
950 	ablkctx->enckey_len = keylen;
951 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
952 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
953 			keylen + alignment) >> 4;
954 
955 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
956 						0, 0, context_size);
957 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
958 	return 0;
959 badkey_err:
960 	ablkctx->enckey_len = 0;
961 
962 	return err;
963 }
964 
965 static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
966 				   const u8 *key,
967 				   unsigned int keylen)
968 {
969 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
970 	unsigned int ck_size, context_size;
971 	u16 alignment = 0;
972 	int err;
973 
974 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
975 	if (err)
976 		goto badkey_err;
977 	ck_size = chcr_keyctx_ck_size(keylen);
978 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
979 	memcpy(ablkctx->key, key, keylen);
980 	ablkctx->enckey_len = keylen;
981 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
982 			keylen + alignment) >> 4;
983 
984 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
985 						0, 0, context_size);
986 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
987 
988 	return 0;
989 badkey_err:
990 	ablkctx->enckey_len = 0;
991 
992 	return err;
993 }
994 
995 static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
996 				   const u8 *key,
997 				   unsigned int keylen)
998 {
999 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
1000 	unsigned int ck_size, context_size;
1001 	u16 alignment = 0;
1002 	int err;
1003 
1004 	if (keylen < CTR_RFC3686_NONCE_SIZE)
1005 		return -EINVAL;
1006 	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
1007 	       CTR_RFC3686_NONCE_SIZE);
1008 
1009 	keylen -= CTR_RFC3686_NONCE_SIZE;
1010 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
1011 	if (err)
1012 		goto badkey_err;
1013 
1014 	ck_size = chcr_keyctx_ck_size(keylen);
1015 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
1016 	memcpy(ablkctx->key, key, keylen);
1017 	ablkctx->enckey_len = keylen;
1018 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
1019 			keylen + alignment) >> 4;
1020 
1021 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
1022 						0, 0, context_size);
1023 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1024 
1025 	return 0;
1026 badkey_err:
1027 	ablkctx->enckey_len = 0;
1028 
1029 	return err;
1030 }
1031 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
1032 {
1033 	unsigned int size = AES_BLOCK_SIZE;
1034 	__be32 *b = (__be32 *)(dstiv + size);
1035 	u32 c, prev;
1036 
1037 	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1038 	for (; size >= 4; size -= 4) {
1039 		prev = be32_to_cpu(*--b);
1040 		c = prev + add;
1041 		*b = cpu_to_be32(c);
1042 		if (prev < c)
1043 			break;
1044 		add = 1;
1045 	}
1046 
1047 }
1048 
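/*
 * adjust_ctr_overflow - clamp @bytes to the number of AES blocks the
 * 32-bit counter word of @iv can count before wrapping; the remainder
 * of the request is carried in a follow-up WR.
 */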
1049 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1050 {
1051 	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1052 	u64 c;
1053 	u32 temp = be32_to_cpu(*--b);
1054 
1055 	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks processable without overflow */
1057 	if ((bytes / AES_BLOCK_SIZE) > c)
1058 		bytes = c * AES_BLOCK_SIZE;
1059 	return bytes;
1060 }
1061 
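/*
 * chcr_update_tweak - recompute the XTS tweak for the next chunk of a
 * multi-WR request: encrypt the IV with the second half of the key,
 * advance it by one gf(2^128) multiplication by x per block already
 * processed, and for intermediate chunks (!isfinal) decrypt it again so
 * the hardware can re-derive the tweak itself.
 */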
1062 static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1063 			     u32 isfinal)
1064 {
1065 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1066 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1067 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1068 	struct crypto_aes_ctx aes;
1069 	int ret, i;
1070 	u8 *key;
1071 	unsigned int keylen;
1072 	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1073 	int round8 = round / 8;
1074 
1075 	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1076 
1077 	keylen = ablkctx->enckey_len / 2;
1078 	key = ablkctx->key + keylen;
1079 	ret = aes_expandkey(&aes, key, keylen);
1080 	if (ret)
1081 		return ret;
1082 	aes_encrypt(&aes, iv, iv);
1083 	for (i = 0; i < round8; i++)
1084 		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1085 
1086 	for (i = 0; i < (round % 8); i++)
1087 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1088 
1089 	if (!isfinal)
1090 		aes_decrypt(&aes, iv, iv);
1091 
1092 	memzero_explicit(&aes, sizeof(aes));
1093 	return 0;
1094 }
1095 
1096 static int chcr_update_cipher_iv(struct skcipher_request *req,
1097 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1098 {
1099 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1100 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1101 	int subtype = get_cryptoalg_subtype(tfm);
1102 	int ret = 0;
1103 
1104 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1105 		ctr_add_iv(iv, req->iv, (reqctx->processed /
1106 			   AES_BLOCK_SIZE));
1107 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1108 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1109 			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1110 						AES_BLOCK_SIZE) + 1);
1111 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1112 		ret = chcr_update_tweak(req, iv, 0);
1113 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1114 		if (reqctx->op)
1115 			/*Updated before sending last WR*/
1116 			memcpy(iv, req->iv, AES_BLOCK_SIZE);
1117 		else
1118 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1119 	}
1120 
	return ret;
}
1124 
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts at 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
1129 
1130 static int chcr_final_cipher_iv(struct skcipher_request *req,
1131 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1132 {
1133 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1134 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1135 	int subtype = get_cryptoalg_subtype(tfm);
1136 	int ret = 0;
1137 
1138 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1139 		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1140 						       AES_BLOCK_SIZE));
1141 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1142 		if (!reqctx->partial_req)
1143 			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1144 		else
1145 			ret = chcr_update_tweak(req, iv, 1);
1146 	}
1147 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1148 		/*Already updated for Decrypt*/
1149 		if (!reqctx->op)
1150 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1151 
1152 	}
	return ret;
}
1156 
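/*
 * chcr_handle_cipher_resp - completion handler for cipher WRs: update
 * the IV from the response and either complete the request, fall back
 * to software when no further bytes fit in a WR, or issue the next WR
 * for the remaining data.
 */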
1157 static int chcr_handle_cipher_resp(struct skcipher_request *req,
1158 				   unsigned char *input, int err)
1159 {
1160 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1161 	struct chcr_context *ctx = c_ctx(tfm);
1162 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1163 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1164 	struct sk_buff *skb;
1165 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1166 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1167 	struct cipher_wr_param wrparam;
1168 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1169 	int bytes;
1170 
1171 	if (err)
1172 		goto unmap;
1173 	if (req->cryptlen == reqctx->processed) {
1174 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1175 				      req);
1176 		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1177 		goto complete;
1178 	}
1179 
1180 	if (!reqctx->imm) {
1181 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1182 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1183 					  reqctx->src_ofst, reqctx->dst_ofst);
1184 		if ((bytes + reqctx->processed) >= req->cryptlen)
1185 			bytes  = req->cryptlen - reqctx->processed;
1186 		else
1187 			bytes = rounddown(bytes, 16);
1188 	} else {
		/* CTR mode counter overflow */
1190 		bytes  = req->cryptlen - reqctx->processed;
1191 	}
1192 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1193 	if (err)
1194 		goto unmap;
1195 
1196 	if (unlikely(bytes == 0)) {
1197 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1198 				      req);
1199 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1200 				     req->base.flags,
1201 				     req->src,
1202 				     req->dst,
1203 				     req->cryptlen,
1204 				     req->iv,
1205 				     reqctx->op);
1206 		goto complete;
1207 	}
1208 
1209 	if (get_cryptoalg_subtype(tfm) ==
1210 	    CRYPTO_ALG_SUB_TYPE_CTR)
1211 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1212 	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
1213 	wrparam.req = req;
1214 	wrparam.bytes = bytes;
1215 	skb = create_cipher_wr(&wrparam);
1216 	if (IS_ERR(skb)) {
		pr_err("%s: failed to form WR, no memory\n", __func__);
1218 		err = PTR_ERR(skb);
1219 		goto unmap;
1220 	}
1221 	skb->dev = u_ctx->lldi.ports[0];
1222 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1223 	chcr_send_wr(skb);
1224 	reqctx->last_req_len = bytes;
1225 	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CBC &&
	    req->base.flags == CRYPTO_TFM_REQ_MAY_SLEEP) {
1229 		complete(&ctx->cbc_aes_aio_done);
1230 	}
1231 	return 0;
1232 unmap:
1233 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1234 complete:
	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CBC &&
	    req->base.flags == CRYPTO_TFM_REQ_MAY_SLEEP) {
1238 		complete(&ctx->cbc_aes_aio_done);
1239 	}
1240 	chcr_dec_wrcount(dev);
1241 	req->base.complete(&req->base, err);
1242 	return err;
1243 }
1244 
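/*
 * process_cipher - DMA-map the request, decide between immediate data
 * and an SGL, set up the per-mode IV (including the RFC3686 nonce and
 * counter), and build the first cipher WR; falls back to the software
 * cipher when nothing can be carried in a WR.
 */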
1245 static int process_cipher(struct skcipher_request *req,
1246 				  unsigned short qid,
1247 				  struct sk_buff **skb,
1248 				  unsigned short op_type)
1249 {
1250 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1251 	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1252 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1253 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
1255 	int bytes, err = -EINVAL;
1256 
1257 	reqctx->processed = 0;
1258 	reqctx->partial_req = 0;
1259 	if (!req->iv)
1260 		goto error;
1261 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1262 	    (req->cryptlen == 0) ||
1263 	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1264 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1265 		       ablkctx->enckey_len, req->cryptlen, ivsize);
1266 		goto error;
1267 	}
1268 
1269 	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1270 	if (err)
1271 		goto error;
1272 	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1273 					    AES_MIN_KEY_SIZE +
1274 					    sizeof(struct cpl_rx_phys_dsgl) +
1275 					/*Min dsgl size*/
1276 					    32))) {
1277 		/* Can be sent as Imm*/
1278 		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1279 
1280 		dnents = sg_nents_xlen(req->dst, req->cryptlen,
1281 				       CHCR_DST_SG_SIZE, 0);
1282 		phys_dsgl = get_space_for_phys_dsgl(dnents);
1283 		kctx_len = roundup(ablkctx->enckey_len, 16);
1284 		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1285 		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1286 			SGE_MAX_WR_LEN;
1287 		bytes = IV + req->cryptlen;
1288 
1289 	} else {
1290 		reqctx->imm = 0;
1291 	}
1292 
1293 	if (!reqctx->imm) {
1294 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1295 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1296 					  0, 0);
1297 		if ((bytes + reqctx->processed) >= req->cryptlen)
1298 			bytes  = req->cryptlen - reqctx->processed;
1299 		else
1300 			bytes = rounddown(bytes, 16);
1301 	} else {
1302 		bytes = req->cryptlen;
1303 	}
1304 	if (get_cryptoalg_subtype(tfm) ==
1305 	    CRYPTO_ALG_SUB_TYPE_CTR) {
1306 		bytes = adjust_ctr_overflow(req->iv, bytes);
1307 	}
1308 	if (get_cryptoalg_subtype(tfm) ==
1309 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1310 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1311 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1312 				CTR_RFC3686_IV_SIZE);
1313 
1314 		/* initialize counter portion of counter block */
1315 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1316 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1317 
1318 	} else {
1319 
1320 		memcpy(reqctx->iv, req->iv, IV);
1321 	}
1322 	if (unlikely(bytes == 0)) {
1323 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1324 				      req);
1325 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1326 					   req->base.flags,
1327 					   req->src,
1328 					   req->dst,
1329 					   req->cryptlen,
1330 					   reqctx->iv,
1331 					   op_type);
1332 		goto error;
1333 	}
1334 	reqctx->op = op_type;
1335 	reqctx->srcsg = req->src;
1336 	reqctx->dstsg = req->dst;
1337 	reqctx->src_ofst = 0;
1338 	reqctx->dst_ofst = 0;
1339 	wrparam.qid = qid;
1340 	wrparam.req = req;
1341 	wrparam.bytes = bytes;
1342 	*skb = create_cipher_wr(&wrparam);
1343 	if (IS_ERR(*skb)) {
1344 		err = PTR_ERR(*skb);
1345 		goto unmap;
1346 	}
1347 	reqctx->processed = bytes;
1348 	reqctx->last_req_len = bytes;
1349 	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1350 
1351 	return 0;
1352 unmap:
1353 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1354 error:
1355 	return err;
1356 }
1357 
1358 static int chcr_aes_encrypt(struct skcipher_request *req)
1359 {
1360 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1361 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1362 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1363 	struct sk_buff *skb = NULL;
1364 	int err;
1365 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1366 	struct chcr_context *ctx = c_ctx(tfm);
1367 	unsigned int cpu;
1368 
1369 	cpu = get_cpu();
1370 	reqctx->txqidx = cpu % ctx->ntxq;
1371 	reqctx->rxqidx = cpu % ctx->nrxq;
1372 	put_cpu();
1373 
1374 	err = chcr_inc_wrcount(dev);
1375 	if (err)
1376 		return -ENXIO;
1377 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1378 						reqctx->txqidx) &&
1379 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1380 			err = -ENOSPC;
1381 			goto error;
1382 	}
1383 
1384 	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1385 			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
1388 	skb->dev = u_ctx->lldi.ports[0];
1389 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1390 	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CBC &&
	    req->base.flags == CRYPTO_TFM_REQ_MAY_SLEEP) {
		reqctx->partial_req = 1;
		wait_for_completion(&ctx->cbc_aes_aio_done);
	}
1397 	return -EINPROGRESS;
1398 error:
1399 	chcr_dec_wrcount(dev);
1400 	return err;
1401 }
1402 
1403 static int chcr_aes_decrypt(struct skcipher_request *req)
1404 {
1405 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1406 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1407 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1408 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1409 	struct sk_buff *skb = NULL;
1410 	int err;
1411 	struct chcr_context *ctx = c_ctx(tfm);
1412 	unsigned int cpu;
1413 
1414 	cpu = get_cpu();
1415 	reqctx->txqidx = cpu % ctx->ntxq;
1416 	reqctx->rxqidx = cpu % ctx->nrxq;
1417 	put_cpu();
1418 
1419 	err = chcr_inc_wrcount(dev);
1420 	if (err)
1421 		return -ENXIO;
1422 
1423 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1424 						reqctx->txqidx) &&
1425 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1426 			return -ENOSPC;
1427 	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1428 			     &skb, CHCR_DECRYPT_OP);
1429 	if (err || !skb)
1430 		return err;
1431 	skb->dev = u_ctx->lldi.ports[0];
1432 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1433 	chcr_send_wr(skb);
1434 	return -EINPROGRESS;
1435 }
1436 static int chcr_device_init(struct chcr_context *ctx)
1437 {
1438 	struct uld_ctx *u_ctx = NULL;
1439 	int txq_perchan, ntxq;
1440 	int err = 0, rxq_perchan;
1441 
1442 	if (!ctx->dev) {
1443 		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment fails\n");
			goto out;
		}
1448 		ctx->dev = &u_ctx->dev;
1449 		ntxq = u_ctx->lldi.ntxq;
1450 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1451 		txq_perchan = ntxq / u_ctx->lldi.nchan;
1452 		ctx->ntxq = ntxq;
1453 		ctx->nrxq = u_ctx->lldi.nrxq;
1454 		ctx->rxq_perchan = rxq_perchan;
1455 		ctx->txq_perchan = txq_perchan;
1456 	}
1457 out:
1458 	return err;
1459 }
1460 
1461 static int chcr_init_tfm(struct crypto_skcipher *tfm)
1462 {
1463 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1464 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1465 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1466 
1467 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
1468 				CRYPTO_ALG_NEED_FALLBACK);
1469 	if (IS_ERR(ablkctx->sw_cipher)) {
1470 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1471 		return PTR_ERR(ablkctx->sw_cipher);
1472 	}
1473 	init_completion(&ctx->cbc_aes_aio_done);
1474 	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
1475 
1476 	return chcr_device_init(ctx);
1477 }
1478 
1479 static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1480 {
1481 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1482 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1483 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1484 
	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp.
	 */
1488 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1489 				CRYPTO_ALG_NEED_FALLBACK);
1490 	if (IS_ERR(ablkctx->sw_cipher)) {
1491 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1492 		return PTR_ERR(ablkctx->sw_cipher);
1493 	}
1494 	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
1495 	return chcr_device_init(ctx);
1496 }
1499 static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1500 {
1501 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1502 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1503 
1504 	crypto_free_sync_skcipher(ablkctx->sw_cipher);
1505 }
1506 
1507 static int get_alg_config(struct algo_param *params,
1508 			  unsigned int auth_size)
1509 {
1510 	switch (auth_size) {
1511 	case SHA1_DIGEST_SIZE:
1512 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1513 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1514 		params->result_size = SHA1_DIGEST_SIZE;
1515 		break;
1516 	case SHA224_DIGEST_SIZE:
1517 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1518 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1519 		params->result_size = SHA256_DIGEST_SIZE;
1520 		break;
1521 	case SHA256_DIGEST_SIZE:
1522 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1523 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1524 		params->result_size = SHA256_DIGEST_SIZE;
1525 		break;
1526 	case SHA384_DIGEST_SIZE:
1527 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1528 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1529 		params->result_size = SHA512_DIGEST_SIZE;
1530 		break;
1531 	case SHA512_DIGEST_SIZE:
1532 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1533 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1534 		params->result_size = SHA512_DIGEST_SIZE;
1535 		break;
1536 	default:
		pr_err("ERROR, unsupported digest size\n");
1538 		return -EINVAL;
1539 	}
1540 	return 0;
1541 }
1542 
1543 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1544 {
1545 		crypto_free_shash(base_hash);
1546 }
1547 
1548 /**
1549  *	create_hash_wr - Create hash work request
1550  *	@req - Cipher req base
1551  */
1552 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1553 				      struct hash_wr_param *param)
1554 {
1555 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1556 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1557 	struct chcr_context *ctx = h_ctx(tfm);
1558 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1559 	struct sk_buff *skb = NULL;
1560 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1561 	struct chcr_wr *chcr_req;
1562 	struct ulptx_sgl *ulptx;
1563 	unsigned int nents = 0, transhdr_len;
1564 	unsigned int temp = 0;
1565 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1566 		GFP_ATOMIC;
1567 	struct adapter *adap = padap(h_ctx(tfm)->dev);
1568 	int error = 0;
1569 	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1570 
1571 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1572 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1573 				param->sg_len) <= SGE_MAX_WR_LEN;
1574 	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1575 		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1576 	nents += param->bfr_len ? 1 : 0;
1577 	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1578 				param->sg_len, 16) : (sgl_len(nents) * 8);
1579 	transhdr_len = roundup(transhdr_len, 16);
1580 
1581 	skb = alloc_skb(transhdr_len, flags);
1582 	if (!skb)
1583 		return ERR_PTR(-ENOMEM);
1584 	chcr_req = __skb_put_zero(skb, transhdr_len);
1585 
1586 	chcr_req->sec_cpl.op_ivinsrtofst =
1587 		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1588 
1589 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1590 
1591 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1592 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1593 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1594 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1595 	chcr_req->sec_cpl.seqno_numivs =
1596 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1597 					 param->opad_needed, 0);
1598 
1599 	chcr_req->sec_cpl.ivgen_hdrlen =
1600 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1601 
1602 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1603 	       param->alg_prm.result_size);
1604 
1605 	if (param->opad_needed)
1606 		memcpy(chcr_req->key_ctx.key +
1607 		       ((param->alg_prm.result_size <= 32) ? 32 :
1608 			CHCR_HASH_MAX_DIGEST_SIZE),
1609 		       hmacctx->opad, param->alg_prm.result_size);
1610 
1611 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1612 					    param->alg_prm.mk_size, 0,
1613 					    param->opad_needed,
1614 					    ((param->kctx_len +
1615 					     sizeof(chcr_req->key_ctx)) >> 4));
1616 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1617 	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1618 				     DUMMY_BYTES);
1619 	if (param->bfr_len != 0) {
1620 		req_ctx->hctx_wr.dma_addr =
1621 			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1622 				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
1625 			error = -ENOMEM;
1626 			goto err;
1627 		}
1628 		req_ctx->hctx_wr.dma_len = param->bfr_len;
1629 	} else {
1630 		req_ctx->hctx_wr.dma_addr = 0;
1631 	}
1632 	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
1634 	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1635 				(param->sg_len + param->bfr_len) : 0);
1636 	atomic_inc(&adap->chcr_stats.digest_rqst);
1637 	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1638 		    param->hash_size, transhdr_len,
1639 		    temp,  0);
1640 	req_ctx->hctx_wr.skb = skb;
1641 	return skb;
1642 err:
1643 	kfree_skb(skb);
1644 	return  ERR_PTR(error);
1645 }
1646 
1647 static int chcr_ahash_update(struct ahash_request *req)
1648 {
1649 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1650 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1651 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1652 	struct chcr_context *ctx = h_ctx(rtfm);
1653 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1654 	struct sk_buff *skb;
1655 	u8 remainder = 0, bs;
1656 	unsigned int nbytes = req->nbytes;
1657 	struct hash_wr_param params;
1658 	int error;
1659 	unsigned int cpu;
1660 
1661 	cpu = get_cpu();
1662 	req_ctx->txqidx = cpu % ctx->ntxq;
1663 	req_ctx->rxqidx = cpu % ctx->nrxq;
1664 	put_cpu();
1665 
1666 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1667 
1668 	if (nbytes + req_ctx->reqlen >= bs) {
1669 		remainder = (nbytes + req_ctx->reqlen) % bs;
1670 		nbytes = nbytes + req_ctx->reqlen - remainder;
1671 	} else {
1672 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1673 				   + req_ctx->reqlen, nbytes, 0);
1674 		req_ctx->reqlen += nbytes;
1675 		return 0;
1676 	}
1677 	error = chcr_inc_wrcount(dev);
1678 	if (error)
1679 		return -ENXIO;
	/* The CHCR detach state means lldi or padap has been freed;
	 * increasing the inflight count for dev guarantees that lldi
	 * and padap remain valid.
	 */
1683 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1684 						req_ctx->txqidx) &&
1685 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1686 			error = -ENOSPC;
1687 			goto err;
1688 	}
1689 
1690 	chcr_init_hctx_per_wr(req_ctx);
1691 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1692 	if (error) {
1693 		error = -ENOMEM;
1694 		goto err;
1695 	}
1696 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1697 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1698 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1699 				     HASH_SPACE_LEFT(params.kctx_len), 0);
1700 	if (params.sg_len > req->nbytes)
1701 		params.sg_len = req->nbytes;
1702 	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1703 			req_ctx->reqlen;
1704 	params.opad_needed = 0;
1705 	params.more = 1;
1706 	params.last = 0;
1707 	params.bfr_len = req_ctx->reqlen;
1708 	params.scmd1 = 0;
1709 	req_ctx->hctx_wr.srcsg = req->src;
1710 
1711 	params.hash_size = params.alg_prm.result_size;
1712 	req_ctx->data_len += params.sg_len + params.bfr_len;
1713 	skb = create_hash_wr(req, &params);
1714 	if (IS_ERR(skb)) {
1715 		error = PTR_ERR(skb);
1716 		goto unmap;
1717 	}
1718 
1719 	req_ctx->hctx_wr.processed += params.sg_len;
1720 	if (remainder) {
1721 		/* Swap buffers */
1722 		swap(req_ctx->reqbfr, req_ctx->skbfr);
1723 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1724 				   req_ctx->reqbfr, remainder, req->nbytes -
1725 				   remainder);
1726 	}
1727 	req_ctx->reqlen = remainder;
1728 	skb->dev = u_ctx->lldi.ports[0];
1729 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1730 	chcr_send_wr(skb);
1731 	return -EINPROGRESS;
1732 unmap:
1733 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1734 err:
1735 	chcr_dec_wrcount(dev);
1736 	return error;
1737 }
1738 
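/*
 * create_last_hash_block - build the final SHA padding block: 0x80
 * followed by zeros, with the total message length in bits stored in
 * the last eight bytes of the 64- or 128-byte block.
 */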
1739 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1740 {
1741 	memset(bfr_ptr, 0, bs);
1742 	*bfr_ptr = 0x80;
1743 	if (bs == 64)
1744 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1745 	else
1746 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1747 }
1748 
1749 static int chcr_ahash_final(struct ahash_request *req)
1750 {
1751 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1752 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1753 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1754 	struct hash_wr_param params;
1755 	struct sk_buff *skb;
1756 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1757 	struct chcr_context *ctx = h_ctx(rtfm);
1758 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1759 	int error;
1760 	unsigned int cpu;
1761 
1762 	cpu = get_cpu();
1763 	req_ctx->txqidx = cpu % ctx->ntxq;
1764 	req_ctx->rxqidx = cpu % ctx->nrxq;
1765 	put_cpu();
1766 
1767 	error = chcr_inc_wrcount(dev);
1768 	if (error)
1769 		return -ENXIO;
1770 
1771 	chcr_init_hctx_per_wr(req_ctx);
1776 	params.sg_len = 0;
1777 	req_ctx->hctx_wr.isfinal = 1;
1778 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1779 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1780 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1781 		params.opad_needed = 1;
1782 		params.kctx_len *= 2;
1783 	} else {
1784 		params.opad_needed = 0;
1785 	}
1786 
1787 	req_ctx->hctx_wr.result = 1;
1788 	params.bfr_len = req_ctx->reqlen;
1789 	req_ctx->data_len += params.bfr_len + params.sg_len;
1790 	req_ctx->hctx_wr.srcsg = req->src;
1791 	if (req_ctx->reqlen == 0) {
1792 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1793 		params.last = 0;
1794 		params.more = 1;
1795 		params.scmd1 = 0;
1796 		params.bfr_len = bs;
1797 
1798 	} else {
1799 		params.scmd1 = req_ctx->data_len;
1800 		params.last = 1;
1801 		params.more = 0;
1802 	}
1803 	params.hash_size = crypto_ahash_digestsize(rtfm);
1804 	skb = create_hash_wr(req, &params);
1805 	if (IS_ERR(skb)) {
1806 		error = PTR_ERR(skb);
1807 		goto err;
1808 	}
1809 	req_ctx->reqlen = 0;
1810 	skb->dev = u_ctx->lldi.ports[0];
1811 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1812 	chcr_send_wr(skb);
1813 	return -EINPROGRESS;
1814 err:
1815 	chcr_dec_wrcount(dev);
1816 	return error;
1817 }
1818 
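/*
 * finup: hash the new data together with any buffered remainder and
 * produce the final digest in one request. When the source does not
 * fit into a single work request, only a block-aligned prefix is sent
 * (params.more = 1) and the rest follows via chcr_ahash_continue().
 */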
1819 static int chcr_ahash_finup(struct ahash_request *req)
1820 {
1821 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1822 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1823 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1824 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1825 	struct chcr_context *ctx = h_ctx(rtfm);
1826 	struct sk_buff *skb;
1827 	struct hash_wr_param params;
1828 	u8  bs;
1829 	int error;
1830 	unsigned int cpu;
1831 
1832 	cpu = get_cpu();
1833 	req_ctx->txqidx = cpu % ctx->ntxq;
1834 	req_ctx->rxqidx = cpu % ctx->nrxq;
1835 	put_cpu();
1836 
1837 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1838 	error = chcr_inc_wrcount(dev);
1839 	if (error)
1840 		return -ENXIO;
1841 
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		     !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		error = -ENOSPC;
		goto err;
	}
1848 	chcr_init_hctx_per_wr(req_ctx);
1849 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1850 	if (error) {
1851 		error = -ENOMEM;
1852 		goto err;
1853 	}
1854 
1855 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1856 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1857 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1858 		params.kctx_len *= 2;
1859 		params.opad_needed = 1;
1860 	} else {
1861 		params.opad_needed = 0;
1862 	}
1863 
1864 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1865 				    HASH_SPACE_LEFT(params.kctx_len), 0);
1866 	if (params.sg_len < req->nbytes) {
1867 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1868 			params.kctx_len /= 2;
1869 			params.opad_needed = 0;
1870 		}
1871 		params.last = 0;
1872 		params.more = 1;
1873 		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1874 					- req_ctx->reqlen;
1875 		params.hash_size = params.alg_prm.result_size;
1876 		params.scmd1 = 0;
1877 	} else {
1878 		params.last = 1;
1879 		params.more = 0;
1880 		params.sg_len = req->nbytes;
1881 		params.hash_size = crypto_ahash_digestsize(rtfm);
1882 		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1883 				params.sg_len;
1884 	}
1885 	params.bfr_len = req_ctx->reqlen;
1886 	req_ctx->data_len += params.bfr_len + params.sg_len;
1887 	req_ctx->hctx_wr.result = 1;
1888 	req_ctx->hctx_wr.srcsg = req->src;
1889 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1890 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1891 		params.last = 0;
1892 		params.more = 1;
1893 		params.scmd1 = 0;
1894 		params.bfr_len = bs;
1895 	}
1896 	skb = create_hash_wr(req, &params);
1897 	if (IS_ERR(skb)) {
1898 		error = PTR_ERR(skb);
1899 		goto unmap;
1900 	}
1901 	req_ctx->reqlen = 0;
1902 	req_ctx->hctx_wr.processed += params.sg_len;
1903 	skb->dev = u_ctx->lldi.ports[0];
1904 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1905 	chcr_send_wr(skb);
1906 	return -EINPROGRESS;
1907 unmap:
1908 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1909 err:
1910 	chcr_dec_wrcount(dev);
1911 	return error;
1912 }
1913 
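/*
 * digest: init + update + final over the whole request in one shot;
 * like finup it degrades to a chunked more/last sequence when the
 * source scatterlist exceeds a single work request.
 */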
1914 static int chcr_ahash_digest(struct ahash_request *req)
1915 {
1916 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1917 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1918 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1919 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1920 	struct chcr_context *ctx = h_ctx(rtfm);
1921 	struct sk_buff *skb;
1922 	struct hash_wr_param params;
1923 	u8  bs;
1924 	int error;
1925 	unsigned int cpu;
1926 
1927 	cpu = get_cpu();
1928 	req_ctx->txqidx = cpu % ctx->ntxq;
1929 	req_ctx->rxqidx = cpu % ctx->nrxq;
1930 	put_cpu();
1931 
1932 	rtfm->init(req);
1933 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1934 	error = chcr_inc_wrcount(dev);
1935 	if (error)
1936 		return -ENXIO;
1937 
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    req_ctx->txqidx) &&
		     !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		error = -ENOSPC;
		goto err;
	}
1944 
1945 	chcr_init_hctx_per_wr(req_ctx);
1946 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1947 	if (error) {
1948 		error = -ENOMEM;
1949 		goto err;
1950 	}
1951 
1952 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1953 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1954 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1955 		params.kctx_len *= 2;
1956 		params.opad_needed = 1;
1957 	} else {
1958 		params.opad_needed = 0;
1959 	}
1960 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1961 				HASH_SPACE_LEFT(params.kctx_len), 0);
1962 	if (params.sg_len < req->nbytes) {
1963 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1964 			params.kctx_len /= 2;
1965 			params.opad_needed = 0;
1966 		}
1967 		params.last = 0;
1968 		params.more = 1;
1969 		params.scmd1 = 0;
1970 		params.sg_len = rounddown(params.sg_len, bs);
1971 		params.hash_size = params.alg_prm.result_size;
1972 	} else {
1973 		params.sg_len = req->nbytes;
1974 		params.hash_size = crypto_ahash_digestsize(rtfm);
1975 		params.last = 1;
1976 		params.more = 0;
1977 		params.scmd1 = req->nbytes + req_ctx->data_len;
1978 
1979 	}
1980 	params.bfr_len = 0;
1981 	req_ctx->hctx_wr.result = 1;
1982 	req_ctx->hctx_wr.srcsg = req->src;
1983 	req_ctx->data_len += params.bfr_len + params.sg_len;
1984 
1985 	if (req->nbytes == 0) {
1986 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1987 		params.more = 1;
1988 		params.bfr_len = bs;
1989 	}
1990 
1991 	skb = create_hash_wr(req, &params);
1992 	if (IS_ERR(skb)) {
1993 		error = PTR_ERR(skb);
1994 		goto unmap;
1995 	}
1996 	req_ctx->hctx_wr.processed += params.sg_len;
1997 	skb->dev = u_ctx->lldi.ports[0];
1998 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1999 	chcr_send_wr(skb);
2000 	return -EINPROGRESS;
2001 unmap:
2002 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2003 err:
2004 	chcr_dec_wrcount(dev);
2005 	return error;
2006 }
2007 
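/*
 * Send the next work request of a chunked hash; invoked from the
 * response handler once the partial hash of the previous chunk has
 * been stored in the request context.
 */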
2008 static int chcr_ahash_continue(struct ahash_request *req)
2009 {
2010 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2011 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2012 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2013 	struct chcr_context *ctx = h_ctx(rtfm);
2014 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2015 	struct sk_buff *skb;
2016 	struct hash_wr_param params;
2017 	u8  bs;
2018 	int error;
2019 	unsigned int cpu;
2020 
2021 	cpu = get_cpu();
2022 	reqctx->txqidx = cpu % ctx->ntxq;
2023 	reqctx->rxqidx = cpu % ctx->nrxq;
2024 	put_cpu();
2025 
2026 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2027 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2028 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
2029 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2030 		params.kctx_len *= 2;
2031 		params.opad_needed = 1;
2032 	} else {
2033 		params.opad_needed = 0;
2034 	}
2035 	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2036 					    HASH_SPACE_LEFT(params.kctx_len),
2037 					    hctx_wr->src_ofst);
2038 	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2039 		params.sg_len = req->nbytes - hctx_wr->processed;
2040 	if (!hctx_wr->result ||
2041 	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2042 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
2043 			params.kctx_len /= 2;
2044 			params.opad_needed = 0;
2045 		}
2046 		params.last = 0;
2047 		params.more = 1;
2048 		params.sg_len = rounddown(params.sg_len, bs);
2049 		params.hash_size = params.alg_prm.result_size;
2050 		params.scmd1 = 0;
2051 	} else {
2052 		params.last = 1;
2053 		params.more = 0;
2054 		params.hash_size = crypto_ahash_digestsize(rtfm);
2055 		params.scmd1 = reqctx->data_len + params.sg_len;
2056 	}
2057 	params.bfr_len = 0;
2058 	reqctx->data_len += params.sg_len;
2059 	skb = create_hash_wr(req, &params);
2060 	if (IS_ERR(skb)) {
2061 		error = PTR_ERR(skb);
2062 		goto err;
2063 	}
2064 	hctx_wr->processed += params.sg_len;
2065 	skb->dev = u_ctx->lldi.ports[0];
2066 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2067 	chcr_send_wr(skb);
2068 	return 0;
2069 err:
2070 	return error;
2071 }
2072 
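/*
 * Hash completion handler: copy out the final digest or the partial
 * hash (SHA-224/SHA-384 carry the full SHA-256/SHA-512 intermediate
 * state), then complete the request or launch the next chunk through
 * chcr_ahash_continue().
 */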
2073 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2074 					  unsigned char *input,
2075 					  int err)
2076 {
2077 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2078 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2079 	int digestsize, updated_digestsize;
2080 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2081 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2082 	struct chcr_dev *dev = h_ctx(tfm)->dev;
2083 
2084 	if (input == NULL)
2085 		goto out;
2086 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2087 	updated_digestsize = digestsize;
2088 	if (digestsize == SHA224_DIGEST_SIZE)
2089 		updated_digestsize = SHA256_DIGEST_SIZE;
2090 	else if (digestsize == SHA384_DIGEST_SIZE)
2091 		updated_digestsize = SHA512_DIGEST_SIZE;
2092 
2093 	if (hctx_wr->dma_addr) {
2094 		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2095 				 hctx_wr->dma_len, DMA_TO_DEVICE);
2096 		hctx_wr->dma_addr = 0;
2097 	}
2098 	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2099 				 req->nbytes)) {
2100 		if (hctx_wr->result == 1) {
2101 			hctx_wr->result = 0;
2102 			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2103 			       digestsize);
2104 		} else {
2105 			memcpy(reqctx->partial_hash,
2106 			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
2110 		goto unmap;
2111 	}
2112 	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2113 	       updated_digestsize);
2114 
2115 	err = chcr_ahash_continue(req);
2116 	if (err)
2117 		goto unmap;
2118 	return;
2119 unmap:
2120 	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

2124 out:
2125 	chcr_dec_wrcount(dev);
2126 	req->base.complete(&req->base, err);
2127 }
2128 
2129 /*
2130  *	chcr_handle_resp - Unmap the DMA buffers associated with the request
2131  *	@req: crypto request
2132  */
2133 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2134 			 int err)
2135 {
2136 	struct crypto_tfm *tfm = req->tfm;
2137 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2138 	struct adapter *adap = padap(ctx->dev);
2139 
2140 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2141 	case CRYPTO_ALG_TYPE_AEAD:
2142 		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2143 		break;
2144 
	case CRYPTO_ALG_TYPE_SKCIPHER:
		chcr_handle_cipher_resp(skcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
		break;
	}
2152 	atomic_inc(&adap->chcr_stats.complete);
2153 	return err;
2154 }
2155 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2156 {
2157 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2158 	struct chcr_ahash_req_ctx *state = out;
2159 
2160 	state->reqlen = req_ctx->reqlen;
2161 	state->data_len = req_ctx->data_len;
2162 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2163 	memcpy(state->partial_hash, req_ctx->partial_hash,
2164 	       CHCR_HASH_MAX_DIGEST_SIZE);
2165 	chcr_init_hctx_per_wr(state);
2166 	return 0;
2167 }
2168 
2169 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2170 {
2171 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2172 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2173 
2174 	req_ctx->reqlen = state->reqlen;
2175 	req_ctx->data_len = state->data_len;
2176 	req_ctx->reqbfr = req_ctx->bfr1;
2177 	req_ctx->skbfr = req_ctx->bfr2;
2178 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2179 	memcpy(req_ctx->partial_hash, state->partial_hash,
2180 	       CHCR_HASH_MAX_DIGEST_SIZE);
2181 	chcr_init_hctx_per_wr(req_ctx);
2182 	return 0;
2183 }
2184 
2185 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2186 			     unsigned int keylen)
2187 {
2188 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2189 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2190 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2191 	unsigned int i, err = 0, updated_digestsize;
2192 
2193 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2194 
	/* Use the key to calculate the ipad and opad. The ipad will be
	 * sent with the first request's data; the opad will be sent with
	 * the final hash result. They live in hmacctx->ipad and
	 * hmacctx->opad respectively.
	 */
2199 	shash->tfm = hmacctx->base_hash;
2200 	if (keylen > bs) {
2201 		err = crypto_shash_digest(shash, key, keylen,
2202 					  hmacctx->ipad);
2203 		if (err)
2204 			goto out;
2205 		keylen = digestsize;
2206 	} else {
2207 		memcpy(hmacctx->ipad, key, keylen);
2208 	}
2209 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2210 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2211 
2212 	for (i = 0; i < bs / sizeof(int); i++) {
2213 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2214 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2215 	}
2216 
2217 	updated_digestsize = digestsize;
2218 	if (digestsize == SHA224_DIGEST_SIZE)
2219 		updated_digestsize = SHA256_DIGEST_SIZE;
2220 	else if (digestsize == SHA384_DIGEST_SIZE)
2221 		updated_digestsize = SHA512_DIGEST_SIZE;
2222 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2223 					hmacctx->ipad, digestsize);
2224 	if (err)
2225 		goto out;
2226 	chcr_change_order(hmacctx->ipad, updated_digestsize);
2227 
2228 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2229 					hmacctx->opad, digestsize);
2230 	if (err)
2231 		goto out;
2232 	chcr_change_order(hmacctx->opad, updated_digestsize);
2233 out:
2234 	return err;
2235 }
2236 
2237 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2238 			       unsigned int key_len)
2239 {
2240 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2241 	unsigned short context_size = 0;
2242 	int err;
2243 
2244 	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2245 	if (err)
2246 		goto badkey_err;
2247 
2248 	memcpy(ablkctx->key, key, key_len);
2249 	ablkctx->enckey_len = key_len;
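	/* An XTS key holds two AES keys back to back, so key_len << 2
	 * (key_len * 8 / 2) is the bit width of each half; likewise a
	 * 32-byte XTS key maps to the 128-bit cipher key size below.
	 */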
2250 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2251 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2252 	ablkctx->key_ctx_hdr =
2253 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2254 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2255 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2256 				 CHCR_KEYCTX_NO_KEY, 1,
2257 				 0, context_size);
2258 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2259 	return 0;
2260 badkey_err:
2261 	ablkctx->enckey_len = 0;
2262 
2263 	return err;
2264 }
2265 
2266 static int chcr_sha_init(struct ahash_request *areq)
2267 {
2268 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2269 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2270 	int digestsize =  crypto_ahash_digestsize(tfm);
2271 
2272 	req_ctx->data_len = 0;
2273 	req_ctx->reqlen = 0;
2274 	req_ctx->reqbfr = req_ctx->bfr1;
2275 	req_ctx->skbfr = req_ctx->bfr2;
2276 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2277 
2278 	return 0;
2279 }
2280 
2281 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2282 {
2283 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2284 				 sizeof(struct chcr_ahash_req_ctx));
2285 	return chcr_device_init(crypto_tfm_ctx(tfm));
2286 }
2287 
2288 static int chcr_hmac_init(struct ahash_request *areq)
2289 {
2290 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2291 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2292 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2293 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2294 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2295 
2296 	chcr_sha_init(areq);
2297 	req_ctx->data_len = bs;
2298 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2299 		if (digestsize == SHA224_DIGEST_SIZE)
2300 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2301 			       SHA256_DIGEST_SIZE);
2302 		else if (digestsize == SHA384_DIGEST_SIZE)
2303 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2304 			       SHA512_DIGEST_SIZE);
2305 		else
2306 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2307 			       digestsize);
2308 	}
2309 	return 0;
2310 }
2311 
2312 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2313 {
2314 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2315 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2316 	unsigned int digestsize =
2317 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2318 
2319 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2320 				 sizeof(struct chcr_ahash_req_ctx));
2321 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2322 	if (IS_ERR(hmacctx->base_hash))
2323 		return PTR_ERR(hmacctx->base_hash);
2324 	return chcr_device_init(crypto_tfm_ctx(tfm));
2325 }
2326 
2327 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2328 {
2329 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2330 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2331 
2332 	if (hmacctx->base_hash) {
2333 		chcr_free_shash(hmacctx->base_hash);
2334 		hmacctx->base_hash = NULL;
2335 	}
2336 }
2337 
2338 inline void chcr_aead_common_exit(struct aead_request *req)
2339 {
2340 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2341 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2342 	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2343 
2344 	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2345 }
2346 
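/*
 * Common AEAD setup: reject unset keys and, on decrypt, cryptlen
 * shorter than the tag; place the CCM scratch pad right after the IV
 * when a B0 block is in use; DMA-map the request.
 */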
2347 static int chcr_aead_common_init(struct aead_request *req)
2348 {
2349 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2350 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2351 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2352 	unsigned int authsize = crypto_aead_authsize(tfm);
2353 	int error = -EINVAL;
2354 
2355 	/* validate key size */
2356 	if (aeadctx->enckey_len == 0)
2357 		goto err;
2358 	if (reqctx->op && req->cryptlen < authsize)
2359 		goto err;
2360 	if (reqctx->b0_len)
2361 		reqctx->scratch_pad = reqctx->iv + IV;
2362 	else
2363 		reqctx->scratch_pad = NULL;
2364 
2365 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2366 				  reqctx->op);
2367 	if (error) {
2368 		error = -ENOMEM;
2369 		goto err;
2370 	}
2371 
2372 	return 0;
2373 err:
2374 	return error;
2375 }
2376 
2377 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2378 				   int aadmax, int wrlen,
2379 				   unsigned short op_type)
2380 {
2381 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2382 
2383 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2384 	    dst_nents > MAX_DSGL_ENT ||
2385 	    (req->assoclen > aadmax) ||
2386 	    (wrlen > SGE_MAX_WR_LEN))
2387 		return 1;
2388 	return 0;
2389 }
2390 
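/*
 * Punt the request to the software AEAD allocated at cra_init time
 * (aeadctx->sw_cipher) when the hardware path cannot handle it.
 */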
2391 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2392 {
2393 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2394 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2395 	struct aead_request *subreq = aead_request_ctx(req);
2396 
2397 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2398 	aead_request_set_callback(subreq, req->base.flags,
2399 				  req->base.complete, req->base.data);
2400 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2401 				 req->iv);
2402 	aead_request_set_ad(subreq, req->assoclen);
2403 	return op_type ? crypto_aead_decrypt(subreq) :
2404 		crypto_aead_encrypt(subreq);
2405 }
2406 
2407 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2408 					 unsigned short qid,
2409 					 int size)
2410 {
2411 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2412 	struct chcr_context *ctx = a_ctx(tfm);
2413 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2414 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2415 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2416 	struct sk_buff *skb = NULL;
2417 	struct chcr_wr *chcr_req;
2418 	struct cpl_rx_phys_dsgl *phys_cpl;
2419 	struct ulptx_sgl *ulptx;
2420 	unsigned int transhdr_len;
2421 	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2422 	unsigned int   kctx_len = 0, dnents, snents;
2423 	unsigned int  authsize = crypto_aead_authsize(tfm);
2424 	int error = -EINVAL;
2425 	u8 *ivptr;
2426 	int null = 0;
2427 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2428 		GFP_ATOMIC;
2429 	struct adapter *adap = padap(ctx->dev);
2430 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2431 
2432 	if (req->cryptlen == 0)
2433 		return NULL;
2434 
2435 	reqctx->b0_len = 0;
2436 	error = chcr_aead_common_init(req);
2437 	if (error)
2438 		return ERR_PTR(error);
2439 
2440 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2441 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2442 		null = 1;
2443 	}
2444 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2445 		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2446 	dnents += MIN_AUTH_SG; // For IV
2447 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2448 			       CHCR_SRC_SG_SIZE, 0);
2449 	dst_size = get_space_for_phys_dsgl(dnents);
2450 	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2451 		- sizeof(chcr_req->key_ctx);
2452 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2453 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2454 			SGE_MAX_WR_LEN;
2455 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2456 			: (sgl_len(snents) * 8);
2457 	transhdr_len += temp;
2458 	transhdr_len = roundup(transhdr_len, 16);
2459 
2460 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2461 				    transhdr_len, reqctx->op)) {
2462 		atomic_inc(&adap->chcr_stats.fallback);
2463 		chcr_aead_common_exit(req);
2464 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2465 	}
2466 	skb = alloc_skb(transhdr_len, flags);
2467 	if (!skb) {
2468 		error = -ENOMEM;
2469 		goto err;
2470 	}
2471 
2472 	chcr_req = __skb_put_zero(skb, transhdr_len);
2473 
2474 	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2475 
2476 	/*
2477 	 * Input order	is AAD,IV and Payload. where IV should be included as
2478 	 * the part of authdata. All other fields should be filled according
2479 	 * to the hardware spec
2480 	 */
2481 	chcr_req->sec_cpl.op_ivinsrtofst =
2482 				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2483 	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2484 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2485 					null ? 0 : 1 + IV,
2486 					null ? 0 : IV + req->assoclen,
2487 					req->assoclen + IV + 1,
2488 					(temp & 0x1F0) >> 4);
2489 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2490 					temp & 0xF,
2491 					null ? 0 : req->assoclen + IV + 1,
2492 					temp, temp);
2493 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2494 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2495 		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2496 	else
2497 		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2498 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2499 					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2500 					temp,
2501 					actx->auth_mode, aeadctx->hmac_ctrl,
2502 					IV >> 1);
2503 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2504 					 0, 0, dst_size);
2505 
2506 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2507 	if (reqctx->op == CHCR_ENCRYPT_OP ||
2508 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2509 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2510 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2511 		       aeadctx->enckey_len);
2512 	else
2513 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2514 		       aeadctx->enckey_len);
2515 
2516 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2517 	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2518 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2519 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2520 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2521 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2522 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2523 		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2524 		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2525 				CTR_RFC3686_IV_SIZE);
2526 		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2527 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2528 	} else {
2529 		memcpy(ivptr, req->iv, IV);
2530 	}
2531 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2532 	chcr_add_aead_src_ent(req, ulptx);
2533 	atomic_inc(&adap->chcr_stats.cipher_rqst);
2534 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2535 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2536 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2537 		   transhdr_len, temp, 0);
2538 	reqctx->skb = skb;
2539 
2540 	return skb;
2541 err:
2542 	chcr_aead_common_exit(req);
2543 
2544 	return ERR_PTR(error);
2545 }
2546 
2547 int chcr_aead_dma_map(struct device *dev,
2548 		      struct aead_request *req,
2549 		      unsigned short op_type)
2550 {
2551 	int error;
2552 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2553 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2554 	unsigned int authsize = crypto_aead_authsize(tfm);
2555 	int dst_size;
2556 
2557 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2558 				-authsize : authsize);
2559 	if (!req->cryptlen || !dst_size)
2560 		return 0;
2561 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2562 					DMA_BIDIRECTIONAL);
2563 	if (dma_mapping_error(dev, reqctx->iv_dma))
2564 		return -ENOMEM;
2565 	if (reqctx->b0_len)
2566 		reqctx->b0_dma = reqctx->iv_dma + IV;
2567 	else
2568 		reqctx->b0_dma = 0;
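	/* dma_map_sg() returns the number of mapped entries, 0 on failure */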
2569 	if (req->src == req->dst) {
2570 		error = dma_map_sg(dev, req->src,
2571 				sg_nents_for_len(req->src, dst_size),
2572 					DMA_BIDIRECTIONAL);
2573 		if (!error)
2574 			goto err;
2575 	} else {
2576 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2577 				   DMA_TO_DEVICE);
2578 		if (!error)
2579 			goto err;
2580 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2581 				   DMA_FROM_DEVICE);
2582 		if (!error) {
2583 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2584 				   DMA_TO_DEVICE);
2585 			goto err;
2586 		}
2587 	}
2588 
2589 	return 0;
2590 err:
2591 	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2592 	return -ENOMEM;
2593 }
2594 
2595 void chcr_aead_dma_unmap(struct device *dev,
2596 			 struct aead_request *req,
2597 			 unsigned short op_type)
2598 {
2599 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2600 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2601 	unsigned int authsize = crypto_aead_authsize(tfm);
2602 	int dst_size;
2603 
2604 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2605 					-authsize : authsize);
2606 	if (!req->cryptlen || !dst_size)
2607 		return;
2608 
2609 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2610 					DMA_BIDIRECTIONAL);
2611 	if (req->src == req->dst) {
2612 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2613 				   DMA_BIDIRECTIONAL);
2614 	} else {
2615 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2616 				   DMA_TO_DEVICE);
2617 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2618 				   DMA_FROM_DEVICE);
2619 	}
2620 }
2621 
2622 void chcr_add_aead_src_ent(struct aead_request *req,
2623 			   struct ulptx_sgl *ulptx)
2624 {
2625 	struct ulptx_walk ulp_walk;
2626 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2627 
2628 	if (reqctx->imm) {
2629 		u8 *buf = (u8 *)ulptx;
2630 
2631 		if (reqctx->b0_len) {
2632 			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2633 			buf += reqctx->b0_len;
2634 		}
2635 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2636 				   buf, req->cryptlen + req->assoclen, 0);
2637 	} else {
2638 		ulptx_walk_init(&ulp_walk, ulptx);
2639 		if (reqctx->b0_len)
2640 			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2641 					    reqctx->b0_dma);
2642 		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2643 				  req->assoclen,  0);
2644 		ulptx_walk_end(&ulp_walk);
2645 	}
2646 }
2647 
2648 void chcr_add_aead_dst_ent(struct aead_request *req,
2649 			   struct cpl_rx_phys_dsgl *phys_cpl,
2650 			   unsigned short qid)
2651 {
2652 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2653 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2654 	struct dsgl_walk dsgl_walk;
2655 	unsigned int authsize = crypto_aead_authsize(tfm);
2656 	struct chcr_context *ctx = a_ctx(tfm);
2657 	u32 temp;
2658 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2659 
2660 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2661 	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2662 	temp = req->assoclen + req->cryptlen +
2663 		(reqctx->op ? -authsize : authsize);
2664 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2665 	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2666 }
2667 
2668 void chcr_add_cipher_src_ent(struct skcipher_request *req,
2669 			     void *ulptx,
2670 			     struct  cipher_wr_param *wrparam)
2671 {
2672 	struct ulptx_walk ulp_walk;
2673 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2674 	u8 *buf = ulptx;
2675 
2676 	memcpy(buf, reqctx->iv, IV);
2677 	buf += IV;
2678 	if (reqctx->imm) {
2679 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2680 				   buf, wrparam->bytes, reqctx->processed);
2681 	} else {
2682 		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2683 		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2684 				  reqctx->src_ofst);
2685 		reqctx->srcsg = ulp_walk.last_sg;
2686 		reqctx->src_ofst = ulp_walk.last_sg_len;
2687 		ulptx_walk_end(&ulp_walk);
2688 	}
2689 }
2690 
2691 void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2692 			     struct cpl_rx_phys_dsgl *phys_cpl,
2693 			     struct  cipher_wr_param *wrparam,
2694 			     unsigned short qid)
2695 {
2696 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2697 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2698 	struct chcr_context *ctx = c_ctx(tfm);
2699 	struct dsgl_walk dsgl_walk;
2700 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2701 
2702 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2703 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2704 			 reqctx->dst_ofst);
2705 	reqctx->dstsg = dsgl_walk.last_sg;
2706 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2707 	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2708 }
2709 
2710 void chcr_add_hash_src_ent(struct ahash_request *req,
2711 			   struct ulptx_sgl *ulptx,
2712 			   struct hash_wr_param *param)
2713 {
2714 	struct ulptx_walk ulp_walk;
2715 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2716 
2717 	if (reqctx->hctx_wr.imm) {
2718 		u8 *buf = (u8 *)ulptx;
2719 
2720 		if (param->bfr_len) {
2721 			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2722 			buf += param->bfr_len;
2723 		}
2724 
2725 		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2726 				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2727 				   param->sg_len, 0);
2728 	} else {
2729 		ulptx_walk_init(&ulp_walk, ulptx);
2730 		if (param->bfr_len)
2731 			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2732 					    reqctx->hctx_wr.dma_addr);
2733 		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2734 				  param->sg_len, reqctx->hctx_wr.src_ofst);
2735 		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2736 		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2737 		ulptx_walk_end(&ulp_walk);
2738 	}
2739 }
2740 
2741 int chcr_hash_dma_map(struct device *dev,
2742 		      struct ahash_request *req)
2743 {
2744 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2745 	int error = 0;
2746 
2747 	if (!req->nbytes)
2748 		return 0;
2749 	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2750 			   DMA_TO_DEVICE);
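	/* dma_map_sg() returns the number of mapped entries, 0 on failure */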
2751 	if (!error)
2752 		return -ENOMEM;
2753 	req_ctx->hctx_wr.is_sg_map = 1;
2754 	return 0;
2755 }
2756 
2757 void chcr_hash_dma_unmap(struct device *dev,
2758 			 struct ahash_request *req)
2759 {
2760 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2761 
2762 	if (!req->nbytes)
2763 		return;
2764 
2765 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2766 			   DMA_TO_DEVICE);
2767 	req_ctx->hctx_wr.is_sg_map = 0;
2768 
2769 }
2770 
2771 int chcr_cipher_dma_map(struct device *dev,
2772 			struct skcipher_request *req)
2773 {
2774 	int error;
2775 
2776 	if (req->src == req->dst) {
2777 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2778 				   DMA_BIDIRECTIONAL);
2779 		if (!error)
2780 			goto err;
2781 	} else {
2782 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2783 				   DMA_TO_DEVICE);
2784 		if (!error)
2785 			goto err;
2786 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2787 				   DMA_FROM_DEVICE);
2788 		if (!error) {
2789 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2790 				   DMA_TO_DEVICE);
2791 			goto err;
2792 		}
2793 	}
2794 
2795 	return 0;
2796 err:
2797 	return -ENOMEM;
2798 }
2799 
2800 void chcr_cipher_dma_unmap(struct device *dev,
2801 			   struct skcipher_request *req)
2802 {
2803 	if (req->src == req->dst) {
2804 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2805 				   DMA_BIDIRECTIONAL);
2806 	} else {
2807 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2808 				   DMA_TO_DEVICE);
2809 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2810 				   DMA_FROM_DEVICE);
2811 	}
2812 }
2813 
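/*
 * Encode the CCM message length big-endian into the trailing csize
 * bytes of the length field, mirroring set_msg_len() in crypto/ccm.c;
 * returns -EOVERFLOW when msglen does not fit in csize bytes.
 */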
2814 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2815 {
2816 	__be32 data;
2817 
2818 	memset(block, 0, csize);
2819 	block += csize;
2820 
2821 	if (csize >= 4)
2822 		csize = 4;
2823 	else if (msglen > (unsigned int)(1 << (8 * csize)))
2824 		return -EOVERFLOW;
2825 
2826 	data = cpu_to_be32(msglen);
2827 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2828 
2829 	return 0;
2830 }
2831 
2832 static int generate_b0(struct aead_request *req, u8 *ivptr,
2833 			unsigned short op_type)
2834 {
2835 	unsigned int l, lp, m;
2836 	int rc;
2837 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2838 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2839 	u8 *b0 = reqctx->scratch_pad;
2840 
2841 	m = crypto_aead_authsize(aead);
2842 
2843 	memcpy(b0, ivptr, 16);
2844 
2845 	lp = b0[0];
2846 	l = lp + 1;
2847 
2848 	/* set m, bits 3-5 */
2849 	*b0 |= (8 * ((m - 2) / 2));
2850 
2851 	/* set adata, bit 6, if associated data is used */
2852 	if (req->assoclen)
2853 		*b0 |= 64;
2854 	rc = set_msg_len(b0 + 16 - l,
2855 			 (op_type == CHCR_DECRYPT_OP) ?
2856 			 req->cryptlen - m : req->cryptlen, l);
2857 
2858 	return rc;
2859 }
2860 
2861 static inline int crypto_ccm_check_iv(const u8 *iv)
2862 {
2863 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2864 	if (iv[0] < 1 || iv[0] > 7)
2865 		return -EINVAL;
2866 
2867 	return 0;
2868 }
2869 
2870 static int ccm_format_packet(struct aead_request *req,
2871 			     u8 *ivptr,
2872 			     unsigned int sub_type,
2873 			     unsigned short op_type,
2874 			     unsigned int assoclen)
2875 {
2876 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2877 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2878 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2879 	int rc = 0;
2880 
2881 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2882 		ivptr[0] = 3;
2883 		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2884 		memcpy(ivptr + 4, req->iv, 8);
2885 		memset(ivptr + 12, 0, 4);
2886 	} else {
2887 		memcpy(ivptr, req->iv, 16);
2888 	}
2889 	if (assoclen)
2890 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2891 				htons(assoclen);
2892 
2893 	rc = generate_b0(req, ivptr, op_type);
2894 	/* zero the ctr value */
2895 	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2896 	return rc;
2897 }
2898 
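/*
 * Fill the SEC_PDU control words for CCM. The B0 block (plus the AAD
 * length field when assoclen is non-zero) is carried as extra payload
 * in front of the AAD proper, hence the ccm_xtra adjustment applied
 * to every offset below.
 */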
2899 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2900 				  unsigned int dst_size,
2901 				  struct aead_request *req,
2902 				  unsigned short op_type)
2903 {
2904 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2905 	struct chcr_context *ctx = a_ctx(tfm);
2906 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2907 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2908 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2909 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2910 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2911 	unsigned int ccm_xtra;
2912 	unsigned char tag_offset = 0, auth_offset = 0;
2913 	unsigned int assoclen;
2914 
2915 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2916 		assoclen = req->assoclen - 8;
2917 	else
2918 		assoclen = req->assoclen;
2919 	ccm_xtra = CCM_B0_SIZE +
2920 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2921 
2922 	auth_offset = req->cryptlen ?
2923 		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2924 	if (op_type == CHCR_DECRYPT_OP) {
2925 		if (crypto_aead_authsize(tfm) != req->cryptlen)
2926 			tag_offset = crypto_aead_authsize(tfm);
2927 		else
2928 			auth_offset = 0;
2929 	}
2930 
2931 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2932 	sec_cpl->pldlen =
2933 		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will be a b0 always. So AAD start will be 1 always */
2935 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2936 				1 + IV,	IV + assoclen + ccm_xtra,
2937 				req->assoclen + IV + 1 + ccm_xtra, 0);
2938 
2939 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2940 					auth_offset, tag_offset,
2941 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2942 					crypto_aead_authsize(tfm));
2943 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2944 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2945 					cipher_mode, mac_mode,
2946 					aeadctx->hmac_ctrl, IV >> 1);
2947 
2948 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2949 					0, dst_size);
2950 }
2951 
2952 static int aead_ccm_validate_input(unsigned short op_type,
2953 				   struct aead_request *req,
2954 				   struct chcr_aead_ctx *aeadctx,
2955 				   unsigned int sub_type)
2956 {
2957 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2958 		if (crypto_ccm_check_iv(req->iv)) {
2959 			pr_err("CCM: IV check fails\n");
2960 			return -EINVAL;
2961 		}
2962 	} else {
2963 		if (req->assoclen != 16 && req->assoclen != 20) {
2964 			pr_err("RFC4309: Invalid AAD length %d\n",
2965 			       req->assoclen);
2966 			return -EINVAL;
2967 		}
2968 	}
2969 	return 0;
2970 }
2971 
2972 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2973 					  unsigned short qid,
2974 					  int size)
2975 {
2976 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2977 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2978 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2979 	struct sk_buff *skb = NULL;
2980 	struct chcr_wr *chcr_req;
2981 	struct cpl_rx_phys_dsgl *phys_cpl;
2982 	struct ulptx_sgl *ulptx;
2983 	unsigned int transhdr_len;
2984 	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2985 	unsigned int sub_type, assoclen = req->assoclen;
2986 	unsigned int authsize = crypto_aead_authsize(tfm);
2987 	int error = -EINVAL;
2988 	u8 *ivptr;
2989 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2990 		GFP_ATOMIC;
2991 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2992 
2993 	sub_type = get_aead_subtype(tfm);
2994 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2995 		assoclen -= 8;
2996 	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2997 	error = chcr_aead_common_init(req);
2998 	if (error)
2999 		return ERR_PTR(error);
3000 
3001 	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3002 	if (error)
3003 		goto err;
3004 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3005 			+ (reqctx->op ? -authsize : authsize),
3006 			CHCR_DST_SG_SIZE, 0);
3007 	dnents += MIN_CCM_SG; // For IV and B0
3008 	dst_size = get_space_for_phys_dsgl(dnents);
3009 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3010 			       CHCR_SRC_SG_SIZE, 0);
3011 	snents += MIN_CCM_SG; //For B0
3012 	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3013 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3014 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3015 		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
3016 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3017 				     reqctx->b0_len, 16) :
3018 		(sgl_len(snents) *  8);
3019 	transhdr_len += temp;
3020 	transhdr_len = roundup(transhdr_len, 16);
3021 
3022 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3023 				reqctx->b0_len, transhdr_len, reqctx->op)) {
3024 		atomic_inc(&adap->chcr_stats.fallback);
3025 		chcr_aead_common_exit(req);
3026 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3027 	}
	skb = alloc_skb(transhdr_len, flags);
	if (!skb) {
3031 		error = -ENOMEM;
3032 		goto err;
3033 	}
3034 
3035 	chcr_req = __skb_put_zero(skb, transhdr_len);
3036 
3037 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3038 
3039 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3040 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3041 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3042 			aeadctx->key, aeadctx->enckey_len);
3043 
3044 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3045 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3046 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
3047 	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3048 	if (error)
3049 		goto dstmap_fail;
3050 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3051 	chcr_add_aead_src_ent(req, ulptx);
3052 
3053 	atomic_inc(&adap->chcr_stats.aead_rqst);
3054 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3055 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3056 		reqctx->b0_len) : 0);
3057 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3058 		    transhdr_len, temp, 0);
3059 	reqctx->skb = skb;
3060 
3061 	return skb;
3062 dstmap_fail:
3063 	kfree_skb(skb);
3064 err:
3065 	chcr_aead_common_exit(req);
3066 	return ERR_PTR(error);
3067 }
3068 
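/*
 * Build a GCM work request: the key context carries the AES key
 * followed by the GHASH subkey H, and the 16-byte IV is laid out as
 * SALT | IV | 0x00000001 for RFC 4106 or IV | 0x00000001 otherwise.
 */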
3069 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3070 				     unsigned short qid,
3071 				     int size)
3072 {
3073 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3074 	struct chcr_context *ctx = a_ctx(tfm);
3075 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3076 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3077 	struct sk_buff *skb = NULL;
3078 	struct chcr_wr *chcr_req;
3079 	struct cpl_rx_phys_dsgl *phys_cpl;
3080 	struct ulptx_sgl *ulptx;
3081 	unsigned int transhdr_len, dnents = 0, snents;
3082 	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3083 	unsigned int authsize = crypto_aead_authsize(tfm);
3084 	int error = -EINVAL;
3085 	u8 *ivptr;
3086 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3087 		GFP_ATOMIC;
3088 	struct adapter *adap = padap(ctx->dev);
3089 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3090 
3091 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3092 		assoclen = req->assoclen - 8;
3093 
3094 	reqctx->b0_len = 0;
3095 	error = chcr_aead_common_init(req);
3096 	if (error)
3097 		return ERR_PTR(error);
3098 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3099 				(reqctx->op ? -authsize : authsize),
3100 				CHCR_DST_SG_SIZE, 0);
3101 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3102 			       CHCR_SRC_SG_SIZE, 0);
3103 	dnents += MIN_GCM_SG; // For IV
3104 	dst_size = get_space_for_phys_dsgl(dnents);
3105 	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3106 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3107 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3108 			SGE_MAX_WR_LEN;
3109 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3110 		(sgl_len(snents) * 8);
3111 	transhdr_len += temp;
3112 	transhdr_len = roundup(transhdr_len, 16);
3113 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
			    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
3117 		chcr_aead_common_exit(req);
3118 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3119 	}
3120 	skb = alloc_skb(transhdr_len, flags);
3121 	if (!skb) {
3122 		error = -ENOMEM;
3123 		goto err;
3124 	}
3125 
3126 	chcr_req = __skb_put_zero(skb, transhdr_len);
3127 
	// Offset of tag from end
3129 	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3130 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3131 						rx_channel_id, 2, 1);
3132 	chcr_req->sec_cpl.pldlen =
3133 		htonl(req->assoclen + IV + req->cryptlen);
3134 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3135 					assoclen ? 1 + IV : 0,
3136 					assoclen ? IV + assoclen : 0,
3137 					req->assoclen + IV + 1, 0);
3138 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3139 			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3140 						temp, temp);
3141 	chcr_req->sec_cpl.seqno_numivs =
3142 			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3143 					CHCR_ENCRYPT_OP) ? 1 : 0,
3144 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3145 					CHCR_SCMD_AUTH_MODE_GHASH,
3146 					aeadctx->hmac_ctrl, IV >> 1);
3147 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3148 					0, 0, dst_size);
3149 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3150 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3151 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3152 	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3153 
3154 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3155 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	/* Prepare a 16-byte IV: SALT | IV | 0x00000001 */
3158 	if (get_aead_subtype(tfm) ==
3159 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3160 		memcpy(ivptr, aeadctx->salt, 4);
3161 		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3162 	} else {
3163 		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3164 	}
3165 	*((unsigned int *)(ivptr + 12)) = htonl(0x01);
3166 
3167 	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3168 
3169 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3170 	chcr_add_aead_src_ent(req, ulptx);
3171 	atomic_inc(&adap->chcr_stats.aead_rqst);
3172 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3173 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3174 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3175 		    transhdr_len, temp, reqctx->verify);
3176 	reqctx->skb = skb;
3177 	return skb;
3178 
3179 err:
3180 	chcr_aead_common_exit(req);
3181 	return ERR_PTR(error);
3182 }
3183 
3184 
3185 
3186 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3187 {
3188 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3189 	struct aead_alg *alg = crypto_aead_alg(tfm);
3190 
3191 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3192 					       CRYPTO_ALG_NEED_FALLBACK |
3193 					       CRYPTO_ALG_ASYNC);
3194 	if  (IS_ERR(aeadctx->sw_cipher))
3195 		return PTR_ERR(aeadctx->sw_cipher);
3196 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3197 				 sizeof(struct aead_request) +
3198 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3199 	return chcr_device_init(a_ctx(tfm));
3200 }
3201 
3202 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3203 {
3204 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3205 
3206 	crypto_free_aead(aeadctx->sw_cipher);
3207 }
3208 
3209 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3210 					unsigned int authsize)
3211 {
3212 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3213 
3214 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3215 	aeadctx->mayverify = VERIFY_HW;
3216 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3217 }

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3219 				    unsigned int authsize)
3220 {
3221 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3222 	u32 maxauth = crypto_aead_maxauthsize(tfm);
3223 
	/* The SHA1 authsize in IPsec is 12 rather than 10, i.e. the
	 * maxauthsize / 2 rule does not hold for SHA1, so the
	 * authsize == 12 check must come before the
	 * authsize == (maxauth >> 1) check.
	 */
3228 	if (authsize == ICV_4) {
3229 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3230 		aeadctx->mayverify = VERIFY_HW;
3231 	} else if (authsize == ICV_6) {
3232 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3233 		aeadctx->mayverify = VERIFY_HW;
3234 	} else if (authsize == ICV_10) {
3235 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3236 		aeadctx->mayverify = VERIFY_HW;
3237 	} else if (authsize == ICV_12) {
3238 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3239 		aeadctx->mayverify = VERIFY_HW;
3240 	} else if (authsize == ICV_14) {
3241 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3242 		aeadctx->mayverify = VERIFY_HW;
3243 	} else if (authsize == (maxauth >> 1)) {
3244 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3245 		aeadctx->mayverify = VERIFY_HW;
3246 	} else if (authsize == maxauth) {
3247 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3248 		aeadctx->mayverify = VERIFY_HW;
3249 	} else {
3250 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3251 		aeadctx->mayverify = VERIFY_SW;
3252 	}
3253 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3254 }
3255 
3256 
3257 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3258 {
3259 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3260 
3261 	switch (authsize) {
3262 	case ICV_4:
3263 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3264 		aeadctx->mayverify = VERIFY_HW;
3265 		break;
3266 	case ICV_8:
3267 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3268 		aeadctx->mayverify = VERIFY_HW;
3269 		break;
3270 	case ICV_12:
3271 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3272 		aeadctx->mayverify = VERIFY_HW;
3273 		break;
3274 	case ICV_14:
3275 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3276 		aeadctx->mayverify = VERIFY_HW;
3277 		break;
3278 	case ICV_16:
3279 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3280 		aeadctx->mayverify = VERIFY_HW;
3281 		break;
3282 	case ICV_13:
3283 	case ICV_15:
3284 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3285 		aeadctx->mayverify = VERIFY_SW;
3286 		break;
3287 	default:
3288 		return -EINVAL;
3289 	}
3290 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3291 }
3292 
3293 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3294 					  unsigned int authsize)
3295 {
3296 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3297 
3298 	switch (authsize) {
3299 	case ICV_8:
3300 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3301 		aeadctx->mayverify = VERIFY_HW;
3302 		break;
3303 	case ICV_12:
3304 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3305 		aeadctx->mayverify = VERIFY_HW;
3306 		break;
3307 	case ICV_16:
3308 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3309 		aeadctx->mayverify = VERIFY_HW;
3310 		break;
3311 	default:
3312 		return -EINVAL;
3313 	}
3314 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3315 }
3316 
3317 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3318 				unsigned int authsize)
3319 {
3320 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3321 
3322 	switch (authsize) {
3323 	case ICV_4:
3324 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3325 		aeadctx->mayverify = VERIFY_HW;
3326 		break;
3327 	case ICV_6:
3328 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3329 		aeadctx->mayverify = VERIFY_HW;
3330 		break;
3331 	case ICV_8:
3332 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3333 		aeadctx->mayverify = VERIFY_HW;
3334 		break;
3335 	case ICV_10:
3336 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3337 		aeadctx->mayverify = VERIFY_HW;
3338 		break;
3339 	case ICV_12:
3340 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3341 		aeadctx->mayverify = VERIFY_HW;
3342 		break;
3343 	case ICV_14:
3344 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3345 		aeadctx->mayverify = VERIFY_HW;
3346 		break;
3347 	case ICV_16:
3348 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3349 		aeadctx->mayverify = VERIFY_HW;
3350 		break;
3351 	default:
3352 		return -EINVAL;
3353 	}
3354 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3355 }
3356 
3357 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3358 				const u8 *key,
3359 				unsigned int keylen)
3360 {
3361 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3362 	unsigned char ck_size, mk_size;
3363 	int key_ctx_size = 0;
3364 
3365 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3366 	if (keylen == AES_KEYSIZE_128) {
3367 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3368 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3369 	} else if (keylen == AES_KEYSIZE_192) {
3370 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3371 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3372 	} else if (keylen == AES_KEYSIZE_256) {
3373 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3374 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3375 	} else {
3376 		aeadctx->enckey_len = 0;
3377 		return	-EINVAL;
3378 	}
3379 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3380 						key_ctx_size >> 4);
3381 	memcpy(aeadctx->key, key, keylen);
3382 	aeadctx->enckey_len = keylen;
3383 
3384 	return 0;
3385 }
3386 
3387 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3388 				const u8 *key,
3389 				unsigned int keylen)
3390 {
3391 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3392 	int error;
3393 
3394 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3395 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3396 			      CRYPTO_TFM_REQ_MASK);
3397 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3398 	if (error)
3399 		return error;
3400 	return chcr_ccm_common_setkey(aead, key, keylen);
3401 }
3402 
3403 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3404 				    unsigned int keylen)
3405 {
3406 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3407 	int error;
3408 
3409 	if (keylen < 3) {
3410 		aeadctx->enckey_len = 0;
3411 		return	-EINVAL;
3412 	}
3413 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3414 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3415 			      CRYPTO_TFM_REQ_MASK);
3416 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3417 	if (error)
3418 		return error;
3419 	keylen -= 3;
3420 	memcpy(aeadctx->salt, key + keylen, 3);
3421 	return chcr_ccm_common_setkey(aead, key, keylen);
3422 }
3423 
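/*
 *	chcr_gcm_setkey - setkey entry point for the GCM modes.
 *	For "rfc4106(gcm(aes))" the trailing 4 key bytes are the nonce
 *	salt and are stashed separately. The GHASH hash subkey
 *	H = CIPH(K, 0^128) is precomputed with a software AES expansion
 *	so it can be placed in the hardware key context next to the
 *	cipher key.
 */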
3424 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3425 			   unsigned int keylen)
3426 {
3427 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3428 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3429 	unsigned int ck_size;
3430 	int ret = 0, key_ctx_size = 0;
3431 	struct crypto_aes_ctx aes;
3432 
3433 	aeadctx->enckey_len = 0;
3434 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3435 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3436 			      & CRYPTO_TFM_REQ_MASK);
3437 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3438 	if (ret)
3439 		goto out;
3440 
3441 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3442 	    keylen > 3) {
3443 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3444 		memcpy(aeadctx->salt, key + keylen, 4);
3445 	}
3446 	if (keylen == AES_KEYSIZE_128) {
3447 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3448 	} else if (keylen == AES_KEYSIZE_192) {
3449 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3450 	} else if (keylen == AES_KEYSIZE_256) {
3451 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3452 	} else {
		pr_err("GCM: Invalid key length %u\n", keylen);
3454 		ret = -EINVAL;
3455 		goto out;
3456 	}
3457 
3458 	memcpy(aeadctx->key, key, keylen);
3459 	aeadctx->enckey_len = keylen;
3460 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3461 		AEAD_H_SIZE;
3462 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3463 						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3464 						0, 0,
3465 						key_ctx_size >> 4);
	/* Calculate the hash subkey H = CIPH(K, 0 repeated 16 times).
	 * It will go in the key context.
	 */
3469 	ret = aes_expandkey(&aes, key, keylen);
3470 	if (ret) {
3471 		aeadctx->enckey_len = 0;
3472 		goto out;
3473 	}
3474 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3475 	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3476 	memzero_explicit(&aes, sizeof(aes));
3477 
3478 out:
3479 	return ret;
3480 }
3481 
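/*
 *	chcr_authenc_setkey - setkey entry point for the
 *	authenc(hmac(shaN),...) modes. Splits the key blob into cipher
 *	and auth keys, programs the cipher key (plus the decrypt round
 *	key for the CBC variants) and precomputes the HMAC partial
 *	digests h(ipad) and h(opad), which are all the hardware needs to
 *	finish the MAC per request.
 */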
3482 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3483 				   unsigned int keylen)
3484 {
3485 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3486 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* holds both the authentication and cipher keys */
3488 	struct crypto_authenc_keys keys;
3489 	unsigned int bs, subtype;
3490 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3491 	int err = 0, i, key_ctx_len = 0;
3492 	unsigned char ck_size = 0;
3493 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3494 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3495 	struct algo_param param;
3496 	int align;
3497 	u8 *o_ptr = NULL;
3498 
3499 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3500 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3501 			      & CRYPTO_TFM_REQ_MASK);
3502 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3503 	if (err)
3504 		goto out;
3505 
3506 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3507 		goto out;
3508 
3509 	if (get_alg_config(&param, max_authsize)) {
		pr_err("Unsupported digest size\n");
3511 		goto out;
3512 	}
3513 	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + keys.enckeylen -
		       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
3522 	if (keys.enckeylen == AES_KEYSIZE_128) {
3523 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3524 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3525 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3526 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3527 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3528 	} else {
		pr_err("Unsupported cipher key\n");
3530 		goto out;
3531 	}
3532 
	/* Copy only the encryption key. The authentication key is used
	 * here only to derive h(ipad) and h(opad), so it does not need
	 * to be stored; an over-long auth key is first hashed down to
	 * the digest size.
	 */
3537 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3538 	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
3552 	{
3553 		SHASH_DESC_ON_STACK(shash, base_hash);
3554 
3555 		shash->tfm = base_hash;
3556 		bs = crypto_shash_blocksize(base_hash);
3557 		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;
3559 
3560 		if (keys.authkeylen > bs) {
3561 			err = crypto_shash_digest(shash, keys.authkey,
3562 						  keys.authkeylen,
3563 						  o_ptr);
3564 			if (err) {
				pr_err("Hashing of the auth key failed\n");
3566 				goto out;
3567 			}
3568 			keys.authkeylen = max_authsize;
		} else {
			memcpy(o_ptr, keys.authkey, keys.authkeylen);
		}
3571 
3572 		/* Compute the ipad-digest*/
3573 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3574 		memcpy(pad, o_ptr, keys.authkeylen);
3575 		for (i = 0; i < bs >> 2; i++)
3576 			*((unsigned int *)pad + i) ^= IPAD_DATA;
3577 
3578 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3579 					      max_authsize))
3580 			goto out;
3581 		/* Compute the opad-digest */
3582 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3583 		memcpy(pad, o_ptr, keys.authkeylen);
3584 		for (i = 0; i < bs >> 2; i++)
3585 			*((unsigned int *)pad + i) ^= OPAD_DATA;
3586 
3587 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3588 			goto out;
3589 
3590 		/* convert the ipad and opad digest to network order */
3591 		chcr_change_order(actx->h_iopad, param.result_size);
3592 		chcr_change_order(o_ptr, param.result_size);
3593 		key_ctx_len = sizeof(struct _key_ctx) +
3594 			roundup(keys.enckeylen, 16) +
3595 			(param.result_size + align) * 2;
3596 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3597 						0, 1, key_ctx_len >> 4);
3598 		actx->auth_mode = param.auth_mode;
3599 		chcr_free_shash(base_hash);
3600 
3601 		memzero_explicit(&keys, sizeof(keys));
3602 		return 0;
3603 	}
3604 out:
3605 	aeadctx->enckey_len = 0;
3606 	memzero_explicit(&keys, sizeof(keys));
3607 	if (!IS_ERR(base_hash))
3608 		chcr_free_shash(base_hash);
3609 	return -EINVAL;
3610 }
3611 
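/*
 *	chcr_aead_digest_null_setkey - setkey entry point for the
 *	authenc(digest_null,...) modes. Only the cipher key is
 *	programmed; authentication is a NOP, so the key context carries
 *	no MAC key (CHCR_KEYCTX_NO_KEY).
 */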
3612 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3613 					const u8 *key, unsigned int keylen)
3614 {
3615 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3616 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* holds both the authentication and cipher keys */
	struct crypto_authenc_keys keys;
	int err;
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;
3623 
3624 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3625 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3626 			      & CRYPTO_TFM_REQ_MASK);
3627 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3628 	if (err)
3629 		goto out;
3630 
3631 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3632 		goto out;
3633 
3634 	subtype = get_aead_subtype(authenc);
3635 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3636 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3637 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3638 			goto out;
		memcpy(aeadctx->nonce, keys.enckey + keys.enckeylen -
		       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
3641 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3642 	}
3643 	if (keys.enckeylen == AES_KEYSIZE_128) {
3644 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3645 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3646 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3647 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3648 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3649 	} else {
		pr_err("Unsupported cipher key length %u\n", keys.enckeylen);
3651 		goto out;
3652 	}
3653 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3654 	aeadctx->enckey_len = keys.enckeylen;
3655 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3656 	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3657 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3658 				aeadctx->enckey_len << 3);
3659 	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3661 
3662 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3663 						0, key_ctx_len >> 4);
3664 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3665 	memzero_explicit(&keys, sizeof(keys));
3666 	return 0;
3667 out:
3668 	aeadctx->enckey_len = 0;
3669 	memzero_explicit(&keys, sizeof(keys));
3670 	return -EINVAL;
3671 }
3672 
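/*
 *	chcr_aead_op - Common AEAD submission path. Builds a work request
 *	via @create_wr_fn and posts it on the transmit queue recorded in
 *	the request context. Returns -EINPROGRESS on successful
 *	submission, -ENOSPC when the queue is full and the request may
 *	not be backlogged, or falls back to software while the device is
 *	detaching.
 */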
3673 static int chcr_aead_op(struct aead_request *req,
3674 			int size,
3675 			create_wr_t create_wr_fn)
3676 {
3677 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3678 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3679 	struct chcr_context *ctx = a_ctx(tfm);
3680 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3681 	struct sk_buff *skb;
3682 	struct chcr_dev *cdev;
3683 
3684 	cdev = a_ctx(tfm)->dev;
3685 	if (!cdev) {
		pr_err("%s: No crypto device\n", __func__);
3687 		return -ENXIO;
3688 	}
3689 
	if (chcr_inc_wrcount(cdev)) {
		/* The device is detaching, i.e. its lldi/padap state is
		 * being freed, so the work-request count cannot be
		 * incremented; hand the request to the software fallback
		 * instead.
		 */
		return chcr_aead_fallback(req, reqctx->op);
	}
3696 
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], reqctx->txqidx) &&
	    !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		chcr_dec_wrcount(cdev);
		return -ENOSPC;
	}
3703 
3704 	/* Form a WR from req */
3705 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3706 
3707 	if (IS_ERR_OR_NULL(skb)) {
3708 		chcr_dec_wrcount(cdev);
3709 		return PTR_ERR_OR_ZERO(skb);
3710 	}
3711 
3712 	skb->dev = u_ctx->lldi.ports[0];
3713 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3714 	chcr_send_wr(skb);
3715 	return -EINPROGRESS;
3716 }
3717 
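/*
 *	chcr_aead_encrypt - Pin the request to the current CPU's tx/rx
 *	queues, then dispatch to the work-request builder that matches
 *	the algorithm subtype (authenc, CCM or GCM).
 */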
3718 static int chcr_aead_encrypt(struct aead_request *req)
3719 {
3720 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3721 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3722 	struct chcr_context *ctx = a_ctx(tfm);
3723 	unsigned int cpu;
3724 
3725 	cpu = get_cpu();
3726 	reqctx->txqidx = cpu % ctx->ntxq;
3727 	reqctx->rxqidx = cpu % ctx->nrxq;
3728 	put_cpu();
3729 
3730 	reqctx->verify = VERIFY_HW;
3731 	reqctx->op = CHCR_ENCRYPT_OP;
3732 
3733 	switch (get_aead_subtype(tfm)) {
3734 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3735 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3736 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3737 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3738 		return chcr_aead_op(req, 0, create_authenc_wr);
3739 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3740 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3741 		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3742 	default:
3743 		return chcr_aead_op(req, 0, create_gcm_wr);
3744 	}
3745 }
3746 
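/*
 *	chcr_aead_decrypt - As chcr_aead_encrypt, but for decryption.
 *	When the tag is to be verified in software, the maximum tag size
 *	is passed down to the work-request builder as extra size.
 */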
3747 static int chcr_aead_decrypt(struct aead_request *req)
3748 {
3749 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3750 	struct chcr_context *ctx = a_ctx(tfm);
3751 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3752 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3753 	int size;
3754 	unsigned int cpu;
3755 
3756 	cpu = get_cpu();
3757 	reqctx->txqidx = cpu % ctx->ntxq;
3758 	reqctx->rxqidx = cpu % ctx->nrxq;
3759 	put_cpu();
3760 
3761 	if (aeadctx->mayverify == VERIFY_SW) {
3762 		size = crypto_aead_maxauthsize(tfm);
3763 		reqctx->verify = VERIFY_SW;
3764 	} else {
3765 		size = 0;
3766 		reqctx->verify = VERIFY_HW;
3767 	}
3768 	reqctx->op = CHCR_DECRYPT_OP;
3769 	switch (get_aead_subtype(tfm)) {
3770 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3771 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3772 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3773 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3774 		return chcr_aead_op(req, size, create_authenc_wr);
3775 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3776 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3777 		return chcr_aead_op(req, size, create_aead_ccm_wr);
3778 	default:
3779 		return chcr_aead_op(req, size, create_gcm_wr);
3780 	}
3781 }
3782 
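/*
 *	Template table of every algorithm this driver exposes. The fields
 *	common to each algorithm type (module, flags, ops) are filled in
 *	by chcr_register_alg() at registration time.
 */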
3783 static struct chcr_alg_template driver_algs[] = {
3784 	/* AES-CBC */
3785 	{
3786 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3787 		.is_registered = 0,
3788 		.alg.skcipher = {
3789 			.base.cra_name		= "cbc(aes)",
3790 			.base.cra_driver_name	= "cbc-aes-chcr",
3791 			.base.cra_blocksize	= AES_BLOCK_SIZE,
3792 
3793 			.init			= chcr_init_tfm,
3794 			.exit			= chcr_exit_tfm,
3795 			.min_keysize		= AES_MIN_KEY_SIZE,
3796 			.max_keysize		= AES_MAX_KEY_SIZE,
3797 			.ivsize			= AES_BLOCK_SIZE,
3798 			.setkey			= chcr_aes_cbc_setkey,
3799 			.encrypt		= chcr_aes_encrypt,
3800 			.decrypt		= chcr_aes_decrypt,
3801 			}
3802 	},
3803 	{
3804 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3805 		.is_registered = 0,
3806 		.alg.skcipher = {
3807 			.base.cra_name		= "xts(aes)",
3808 			.base.cra_driver_name	= "xts-aes-chcr",
3809 			.base.cra_blocksize	= AES_BLOCK_SIZE,
3810 
3811 			.init			= chcr_init_tfm,
3812 			.exit			= chcr_exit_tfm,
3813 			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
3814 			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
3815 			.ivsize			= AES_BLOCK_SIZE,
3816 			.setkey			= chcr_aes_xts_setkey,
3817 			.encrypt		= chcr_aes_encrypt,
3818 			.decrypt		= chcr_aes_decrypt,
3819 			}
3820 	},
3821 	{
3822 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3823 		.is_registered = 0,
3824 		.alg.skcipher = {
3825 			.base.cra_name		= "ctr(aes)",
3826 			.base.cra_driver_name	= "ctr-aes-chcr",
3827 			.base.cra_blocksize	= 1,
3828 
3829 			.init			= chcr_init_tfm,
3830 			.exit			= chcr_exit_tfm,
3831 			.min_keysize		= AES_MIN_KEY_SIZE,
3832 			.max_keysize		= AES_MAX_KEY_SIZE,
3833 			.ivsize			= AES_BLOCK_SIZE,
3834 			.setkey			= chcr_aes_ctr_setkey,
3835 			.encrypt		= chcr_aes_encrypt,
3836 			.decrypt		= chcr_aes_decrypt,
3837 		}
3838 	},
3839 	{
3840 		.type = CRYPTO_ALG_TYPE_SKCIPHER |
3841 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3842 		.is_registered = 0,
3843 		.alg.skcipher = {
3844 			.base.cra_name		= "rfc3686(ctr(aes))",
3845 			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3846 			.base.cra_blocksize	= 1,
3847 
3848 			.init			= chcr_rfc3686_init,
3849 			.exit			= chcr_exit_tfm,
3850 			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3851 			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3852 			.ivsize			= CTR_RFC3686_IV_SIZE,
3853 			.setkey			= chcr_aes_rfc3686_setkey,
3854 			.encrypt		= chcr_aes_encrypt,
3855 			.decrypt		= chcr_aes_decrypt,
3856 		}
3857 	},
3858 	/* SHA */
3859 	{
3860 		.type = CRYPTO_ALG_TYPE_AHASH,
3861 		.is_registered = 0,
3862 		.alg.hash = {
3863 			.halg.digestsize = SHA1_DIGEST_SIZE,
3864 			.halg.base = {
3865 				.cra_name = "sha1",
3866 				.cra_driver_name = "sha1-chcr",
3867 				.cra_blocksize = SHA1_BLOCK_SIZE,
3868 			}
3869 		}
3870 	},
3871 	{
3872 		.type = CRYPTO_ALG_TYPE_AHASH,
3873 		.is_registered = 0,
3874 		.alg.hash = {
3875 			.halg.digestsize = SHA256_DIGEST_SIZE,
3876 			.halg.base = {
3877 				.cra_name = "sha256",
3878 				.cra_driver_name = "sha256-chcr",
3879 				.cra_blocksize = SHA256_BLOCK_SIZE,
3880 			}
3881 		}
3882 	},
3883 	{
3884 		.type = CRYPTO_ALG_TYPE_AHASH,
3885 		.is_registered = 0,
3886 		.alg.hash = {
3887 			.halg.digestsize = SHA224_DIGEST_SIZE,
3888 			.halg.base = {
3889 				.cra_name = "sha224",
3890 				.cra_driver_name = "sha224-chcr",
3891 				.cra_blocksize = SHA224_BLOCK_SIZE,
3892 			}
3893 		}
3894 	},
3895 	{
3896 		.type = CRYPTO_ALG_TYPE_AHASH,
3897 		.is_registered = 0,
3898 		.alg.hash = {
3899 			.halg.digestsize = SHA384_DIGEST_SIZE,
3900 			.halg.base = {
3901 				.cra_name = "sha384",
3902 				.cra_driver_name = "sha384-chcr",
3903 				.cra_blocksize = SHA384_BLOCK_SIZE,
3904 			}
3905 		}
3906 	},
3907 	{
3908 		.type = CRYPTO_ALG_TYPE_AHASH,
3909 		.is_registered = 0,
3910 		.alg.hash = {
3911 			.halg.digestsize = SHA512_DIGEST_SIZE,
3912 			.halg.base = {
3913 				.cra_name = "sha512",
3914 				.cra_driver_name = "sha512-chcr",
3915 				.cra_blocksize = SHA512_BLOCK_SIZE,
3916 			}
3917 		}
3918 	},
3919 	/* HMAC */
3920 	{
3921 		.type = CRYPTO_ALG_TYPE_HMAC,
3922 		.is_registered = 0,
3923 		.alg.hash = {
3924 			.halg.digestsize = SHA1_DIGEST_SIZE,
3925 			.halg.base = {
3926 				.cra_name = "hmac(sha1)",
3927 				.cra_driver_name = "hmac-sha1-chcr",
3928 				.cra_blocksize = SHA1_BLOCK_SIZE,
3929 			}
3930 		}
3931 	},
3932 	{
3933 		.type = CRYPTO_ALG_TYPE_HMAC,
3934 		.is_registered = 0,
3935 		.alg.hash = {
3936 			.halg.digestsize = SHA224_DIGEST_SIZE,
3937 			.halg.base = {
3938 				.cra_name = "hmac(sha224)",
3939 				.cra_driver_name = "hmac-sha224-chcr",
3940 				.cra_blocksize = SHA224_BLOCK_SIZE,
3941 			}
3942 		}
3943 	},
3944 	{
3945 		.type = CRYPTO_ALG_TYPE_HMAC,
3946 		.is_registered = 0,
3947 		.alg.hash = {
3948 			.halg.digestsize = SHA256_DIGEST_SIZE,
3949 			.halg.base = {
3950 				.cra_name = "hmac(sha256)",
3951 				.cra_driver_name = "hmac-sha256-chcr",
3952 				.cra_blocksize = SHA256_BLOCK_SIZE,
3953 			}
3954 		}
3955 	},
3956 	{
3957 		.type = CRYPTO_ALG_TYPE_HMAC,
3958 		.is_registered = 0,
3959 		.alg.hash = {
3960 			.halg.digestsize = SHA384_DIGEST_SIZE,
3961 			.halg.base = {
3962 				.cra_name = "hmac(sha384)",
3963 				.cra_driver_name = "hmac-sha384-chcr",
3964 				.cra_blocksize = SHA384_BLOCK_SIZE,
3965 			}
3966 		}
3967 	},
3968 	{
3969 		.type = CRYPTO_ALG_TYPE_HMAC,
3970 		.is_registered = 0,
3971 		.alg.hash = {
3972 			.halg.digestsize = SHA512_DIGEST_SIZE,
3973 			.halg.base = {
3974 				.cra_name = "hmac(sha512)",
3975 				.cra_driver_name = "hmac-sha512-chcr",
3976 				.cra_blocksize = SHA512_BLOCK_SIZE,
3977 			}
3978 		}
3979 	},
3980 	/* Add AEAD Algorithms */
3981 	{
3982 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3983 		.is_registered = 0,
3984 		.alg.aead = {
3985 			.base = {
3986 				.cra_name = "gcm(aes)",
3987 				.cra_driver_name = "gcm-aes-chcr",
3988 				.cra_blocksize	= 1,
3989 				.cra_priority = CHCR_AEAD_PRIORITY,
3990 				.cra_ctxsize =	sizeof(struct chcr_context) +
3991 						sizeof(struct chcr_aead_ctx) +
3992 						sizeof(struct chcr_gcm_ctx),
3993 			},
3994 			.ivsize = GCM_AES_IV_SIZE,
3995 			.maxauthsize = GHASH_DIGEST_SIZE,
3996 			.setkey = chcr_gcm_setkey,
3997 			.setauthsize = chcr_gcm_setauthsize,
3998 		}
3999 	},
4000 	{
4001 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4002 		.is_registered = 0,
4003 		.alg.aead = {
4004 			.base = {
4005 				.cra_name = "rfc4106(gcm(aes))",
4006 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
4007 				.cra_blocksize	 = 1,
4008 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4009 				.cra_ctxsize =	sizeof(struct chcr_context) +
4010 						sizeof(struct chcr_aead_ctx) +
4011 						sizeof(struct chcr_gcm_ctx),
4012 
4013 			},
4014 			.ivsize = GCM_RFC4106_IV_SIZE,
4015 			.maxauthsize	= GHASH_DIGEST_SIZE,
4016 			.setkey = chcr_gcm_setkey,
4017 			.setauthsize	= chcr_4106_4309_setauthsize,
4018 		}
4019 	},
4020 	{
4021 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4022 		.is_registered = 0,
4023 		.alg.aead = {
4024 			.base = {
4025 				.cra_name = "ccm(aes)",
4026 				.cra_driver_name = "ccm-aes-chcr",
4027 				.cra_blocksize	 = 1,
4028 				.cra_priority = CHCR_AEAD_PRIORITY,
4029 				.cra_ctxsize =	sizeof(struct chcr_context) +
4030 						sizeof(struct chcr_aead_ctx),
4031 
4032 			},
4033 			.ivsize = AES_BLOCK_SIZE,
4034 			.maxauthsize	= GHASH_DIGEST_SIZE,
4035 			.setkey = chcr_aead_ccm_setkey,
4036 			.setauthsize	= chcr_ccm_setauthsize,
4037 		}
4038 	},
4039 	{
4040 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4041 		.is_registered = 0,
4042 		.alg.aead = {
4043 			.base = {
4044 				.cra_name = "rfc4309(ccm(aes))",
4045 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
4046 				.cra_blocksize	 = 1,
4047 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4048 				.cra_ctxsize =	sizeof(struct chcr_context) +
4049 						sizeof(struct chcr_aead_ctx),
4050 
4051 			},
4052 			.ivsize = 8,
4053 			.maxauthsize	= GHASH_DIGEST_SIZE,
4054 			.setkey = chcr_aead_rfc4309_setkey,
4055 			.setauthsize = chcr_4106_4309_setauthsize,
4056 		}
4057 	},
4058 	{
4059 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4060 		.is_registered = 0,
4061 		.alg.aead = {
4062 			.base = {
4063 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
4064 				.cra_driver_name =
4065 					"authenc-hmac-sha1-cbc-aes-chcr",
4066 				.cra_blocksize	 = AES_BLOCK_SIZE,
4067 				.cra_priority = CHCR_AEAD_PRIORITY,
4068 				.cra_ctxsize =	sizeof(struct chcr_context) +
4069 						sizeof(struct chcr_aead_ctx) +
4070 						sizeof(struct chcr_authenc_ctx),
4071 
4072 			},
4073 			.ivsize = AES_BLOCK_SIZE,
4074 			.maxauthsize = SHA1_DIGEST_SIZE,
4075 			.setkey = chcr_authenc_setkey,
4076 			.setauthsize = chcr_authenc_setauthsize,
4077 		}
4078 	},
4079 	{
4080 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4081 		.is_registered = 0,
4082 		.alg.aead = {
4083 			.base = {
4084 
4085 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4086 				.cra_driver_name =
4087 					"authenc-hmac-sha256-cbc-aes-chcr",
4088 				.cra_blocksize	 = AES_BLOCK_SIZE,
4089 				.cra_priority = CHCR_AEAD_PRIORITY,
4090 				.cra_ctxsize =	sizeof(struct chcr_context) +
4091 						sizeof(struct chcr_aead_ctx) +
4092 						sizeof(struct chcr_authenc_ctx),
4093 
4094 			},
4095 			.ivsize = AES_BLOCK_SIZE,
4096 			.maxauthsize	= SHA256_DIGEST_SIZE,
4097 			.setkey = chcr_authenc_setkey,
4098 			.setauthsize = chcr_authenc_setauthsize,
4099 		}
4100 	},
4101 	{
4102 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4103 		.is_registered = 0,
4104 		.alg.aead = {
4105 			.base = {
4106 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4107 				.cra_driver_name =
4108 					"authenc-hmac-sha224-cbc-aes-chcr",
4109 				.cra_blocksize	 = AES_BLOCK_SIZE,
4110 				.cra_priority = CHCR_AEAD_PRIORITY,
4111 				.cra_ctxsize =	sizeof(struct chcr_context) +
4112 						sizeof(struct chcr_aead_ctx) +
4113 						sizeof(struct chcr_authenc_ctx),
4114 			},
4115 			.ivsize = AES_BLOCK_SIZE,
4116 			.maxauthsize = SHA224_DIGEST_SIZE,
4117 			.setkey = chcr_authenc_setkey,
4118 			.setauthsize = chcr_authenc_setauthsize,
4119 		}
4120 	},
4121 	{
4122 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4123 		.is_registered = 0,
4124 		.alg.aead = {
4125 			.base = {
4126 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4127 				.cra_driver_name =
4128 					"authenc-hmac-sha384-cbc-aes-chcr",
4129 				.cra_blocksize	 = AES_BLOCK_SIZE,
4130 				.cra_priority = CHCR_AEAD_PRIORITY,
4131 				.cra_ctxsize =	sizeof(struct chcr_context) +
4132 						sizeof(struct chcr_aead_ctx) +
4133 						sizeof(struct chcr_authenc_ctx),
4134 
4135 			},
4136 			.ivsize = AES_BLOCK_SIZE,
4137 			.maxauthsize = SHA384_DIGEST_SIZE,
4138 			.setkey = chcr_authenc_setkey,
4139 			.setauthsize = chcr_authenc_setauthsize,
4140 		}
4141 	},
4142 	{
4143 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4144 		.is_registered = 0,
4145 		.alg.aead = {
4146 			.base = {
4147 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4148 				.cra_driver_name =
4149 					"authenc-hmac-sha512-cbc-aes-chcr",
4150 				.cra_blocksize	 = AES_BLOCK_SIZE,
4151 				.cra_priority = CHCR_AEAD_PRIORITY,
4152 				.cra_ctxsize =	sizeof(struct chcr_context) +
4153 						sizeof(struct chcr_aead_ctx) +
4154 						sizeof(struct chcr_authenc_ctx),
4155 
4156 			},
4157 			.ivsize = AES_BLOCK_SIZE,
4158 			.maxauthsize = SHA512_DIGEST_SIZE,
4159 			.setkey = chcr_authenc_setkey,
4160 			.setauthsize = chcr_authenc_setauthsize,
4161 		}
4162 	},
4163 	{
4164 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4165 		.is_registered = 0,
4166 		.alg.aead = {
4167 			.base = {
4168 				.cra_name = "authenc(digest_null,cbc(aes))",
4169 				.cra_driver_name =
4170 					"authenc-digest_null-cbc-aes-chcr",
4171 				.cra_blocksize	 = AES_BLOCK_SIZE,
4172 				.cra_priority = CHCR_AEAD_PRIORITY,
4173 				.cra_ctxsize =	sizeof(struct chcr_context) +
4174 						sizeof(struct chcr_aead_ctx) +
4175 						sizeof(struct chcr_authenc_ctx),
4176 
4177 			},
4178 			.ivsize  = AES_BLOCK_SIZE,
4179 			.maxauthsize = 0,
4180 			.setkey  = chcr_aead_digest_null_setkey,
4181 			.setauthsize = chcr_authenc_null_setauthsize,
4182 		}
4183 	},
4184 	{
4185 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4186 		.is_registered = 0,
4187 		.alg.aead = {
4188 			.base = {
4189 				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4190 				.cra_driver_name =
4191 				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4192 				.cra_blocksize	 = 1,
4193 				.cra_priority = CHCR_AEAD_PRIORITY,
4194 				.cra_ctxsize =	sizeof(struct chcr_context) +
4195 						sizeof(struct chcr_aead_ctx) +
4196 						sizeof(struct chcr_authenc_ctx),
4197 
4198 			},
4199 			.ivsize = CTR_RFC3686_IV_SIZE,
4200 			.maxauthsize = SHA1_DIGEST_SIZE,
4201 			.setkey = chcr_authenc_setkey,
4202 			.setauthsize = chcr_authenc_setauthsize,
4203 		}
4204 	},
4205 	{
4206 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4207 		.is_registered = 0,
4208 		.alg.aead = {
4209 			.base = {
4210 
4211 				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4212 				.cra_driver_name =
4213 				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4214 				.cra_blocksize	 = 1,
4215 				.cra_priority = CHCR_AEAD_PRIORITY,
4216 				.cra_ctxsize =	sizeof(struct chcr_context) +
4217 						sizeof(struct chcr_aead_ctx) +
4218 						sizeof(struct chcr_authenc_ctx),
4219 
4220 			},
4221 			.ivsize = CTR_RFC3686_IV_SIZE,
4222 			.maxauthsize	= SHA256_DIGEST_SIZE,
4223 			.setkey = chcr_authenc_setkey,
4224 			.setauthsize = chcr_authenc_setauthsize,
4225 		}
4226 	},
4227 	{
4228 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4229 		.is_registered = 0,
4230 		.alg.aead = {
4231 			.base = {
4232 				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4233 				.cra_driver_name =
4234 				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4235 				.cra_blocksize	 = 1,
4236 				.cra_priority = CHCR_AEAD_PRIORITY,
4237 				.cra_ctxsize =	sizeof(struct chcr_context) +
4238 						sizeof(struct chcr_aead_ctx) +
4239 						sizeof(struct chcr_authenc_ctx),
4240 			},
4241 			.ivsize = CTR_RFC3686_IV_SIZE,
4242 			.maxauthsize = SHA224_DIGEST_SIZE,
4243 			.setkey = chcr_authenc_setkey,
4244 			.setauthsize = chcr_authenc_setauthsize,
4245 		}
4246 	},
4247 	{
4248 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4249 		.is_registered = 0,
4250 		.alg.aead = {
4251 			.base = {
4252 				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4253 				.cra_driver_name =
4254 				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4255 				.cra_blocksize	 = 1,
4256 				.cra_priority = CHCR_AEAD_PRIORITY,
4257 				.cra_ctxsize =	sizeof(struct chcr_context) +
4258 						sizeof(struct chcr_aead_ctx) +
4259 						sizeof(struct chcr_authenc_ctx),
4260 
4261 			},
4262 			.ivsize = CTR_RFC3686_IV_SIZE,
4263 			.maxauthsize = SHA384_DIGEST_SIZE,
4264 			.setkey = chcr_authenc_setkey,
4265 			.setauthsize = chcr_authenc_setauthsize,
4266 		}
4267 	},
4268 	{
4269 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4270 		.is_registered = 0,
4271 		.alg.aead = {
4272 			.base = {
4273 				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4274 				.cra_driver_name =
4275 				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4276 				.cra_blocksize	 = 1,
4277 				.cra_priority = CHCR_AEAD_PRIORITY,
4278 				.cra_ctxsize =	sizeof(struct chcr_context) +
4279 						sizeof(struct chcr_aead_ctx) +
4280 						sizeof(struct chcr_authenc_ctx),
4281 
4282 			},
4283 			.ivsize = CTR_RFC3686_IV_SIZE,
4284 			.maxauthsize = SHA512_DIGEST_SIZE,
4285 			.setkey = chcr_authenc_setkey,
4286 			.setauthsize = chcr_authenc_setauthsize,
4287 		}
4288 	},
4289 	{
4290 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4291 		.is_registered = 0,
4292 		.alg.aead = {
4293 			.base = {
4294 				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4295 				.cra_driver_name =
4296 				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4297 				.cra_blocksize	 = 1,
4298 				.cra_priority = CHCR_AEAD_PRIORITY,
4299 				.cra_ctxsize =	sizeof(struct chcr_context) +
4300 						sizeof(struct chcr_aead_ctx) +
4301 						sizeof(struct chcr_authenc_ctx),
4302 
4303 			},
4304 			.ivsize  = CTR_RFC3686_IV_SIZE,
4305 			.maxauthsize = 0,
4306 			.setkey  = chcr_aead_digest_null_setkey,
4307 			.setauthsize = chcr_authenc_null_setauthsize,
4308 		}
4309 	},
4310 };
4311 
/*
 *	chcr_unregister_alg - Deregister the driver's crypto algorithms
 *	from the kernel crypto framework.
 */
4316 static int chcr_unregister_alg(void)
4317 {
4318 	int i;
4319 
4320 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4321 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4322 		case CRYPTO_ALG_TYPE_SKCIPHER:
4323 			if (driver_algs[i].is_registered)
4324 				crypto_unregister_skcipher(
4325 						&driver_algs[i].alg.skcipher);
4326 			break;
4327 		case CRYPTO_ALG_TYPE_AEAD:
4328 			if (driver_algs[i].is_registered)
4329 				crypto_unregister_aead(
4330 						&driver_algs[i].alg.aead);
4331 			break;
4332 		case CRYPTO_ALG_TYPE_AHASH:
4333 			if (driver_algs[i].is_registered)
4334 				crypto_unregister_ahash(
4335 						&driver_algs[i].alg.hash);
4336 			break;
4337 		}
4338 		driver_algs[i].is_registered = 0;
4339 	}
4340 	return 0;
4341 }
4342 
4343 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4344 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4345 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4346 
/*
 *	chcr_register_alg - Register the driver's crypto algorithms with
 *	the kernel crypto framework.
 */
4350 static int chcr_register_alg(void)
4351 {
4352 	struct crypto_alg ai;
4353 	struct ahash_alg *a_hash;
4354 	int err = 0, i;
4355 	char *name = NULL;
4356 
4357 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4358 		if (driver_algs[i].is_registered)
4359 			continue;
4360 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4361 		case CRYPTO_ALG_TYPE_SKCIPHER:
4362 			driver_algs[i].alg.skcipher.base.cra_priority =
4363 				CHCR_CRA_PRIORITY;
4364 			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4365 			driver_algs[i].alg.skcipher.base.cra_flags =
4366 				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4367 				CRYPTO_ALG_NEED_FALLBACK;
4368 			driver_algs[i].alg.skcipher.base.cra_ctxsize =
4369 				sizeof(struct chcr_context) +
4370 				sizeof(struct ablk_ctx);
4371 			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4372 
4373 			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4374 			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4375 			break;
4376 		case CRYPTO_ALG_TYPE_AEAD:
4377 			driver_algs[i].alg.aead.base.cra_flags =
4378 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4379 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4380 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4381 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4382 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4383 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4384 			err = crypto_register_aead(&driver_algs[i].alg.aead);
4385 			name = driver_algs[i].alg.aead.base.cra_driver_name;
4386 			break;
4387 		case CRYPTO_ALG_TYPE_AHASH:
4388 			a_hash = &driver_algs[i].alg.hash;
4389 			a_hash->update = chcr_ahash_update;
4390 			a_hash->final = chcr_ahash_final;
4391 			a_hash->finup = chcr_ahash_finup;
4392 			a_hash->digest = chcr_ahash_digest;
4393 			a_hash->export = chcr_ahash_export;
4394 			a_hash->import = chcr_ahash_import;
4395 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4396 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4397 			a_hash->halg.base.cra_module = THIS_MODULE;
4398 			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4399 			a_hash->halg.base.cra_alignmask = 0;
4400 			a_hash->halg.base.cra_exit = NULL;
4401 
4402 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4403 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4404 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4405 				a_hash->init = chcr_hmac_init;
4406 				a_hash->setkey = chcr_ahash_setkey;
4407 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4408 			} else {
4409 				a_hash->init = chcr_sha_init;
4410 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4411 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4412 			}
4413 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4414 			ai = driver_algs[i].alg.hash.halg.base;
4415 			name = ai.cra_driver_name;
4416 			break;
4417 		}
		if (err) {
			pr_err("%s: Algorithm registration failed\n", name);
			goto register_err;
		}
		driver_algs[i].is_registered = 1;
4425 	}
4426 	return 0;
4427 
4428 register_err:
4429 	chcr_unregister_alg();
4430 	return err;
4431 }
4432 
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once, when the first device comes up. After
 *	this the kernel will start calling the driver APIs for crypto
 *	operations.
 */
4438 int start_crypto(void)
4439 {
4440 	return chcr_register_alg();
4441 }
4442 
/*
 *	stop_crypto - Deregister all the crypto algorithms from the kernel.
 *	This should be called once, when the last device goes down. After
 *	this the kernel will no longer call the driver APIs for crypto
 *	operations.
 */
4448 int stop_crypto(void)
4449 {
4450 	chcr_unregister_alg();
4451 	return 0;
4452 }
4453