1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/gcm.h>
57 #include <crypto/sha.h>
58 #include <crypto/authenc.h>
59 #include <crypto/ctr.h>
60 #include <crypto/gf128mul.h>
61 #include <crypto/internal/aead.h>
62 #include <crypto/null.h>
63 #include <crypto/internal/skcipher.h>
64 #include <crypto/aead.h>
65 #include <crypto/scatterwalk.h>
66 #include <crypto/internal/hash.h>
67 
68 #include "t4fw_api.h"
69 #include "t4_msg.h"
70 #include "chcr_core.h"
71 #include "chcr_algo.h"
72 #include "chcr_crypto.h"
73 
74 #define IV AES_BLOCK_SIZE
75 
76 static unsigned int sgl_ent_len[] = {
77 	0, 0, 16, 24, 40, 48, 64, 72, 88,
78 	96, 112, 120, 136, 144, 160, 168, 184,
79 	192, 208, 216, 232, 240, 256, 264, 280,
80 	288, 304, 312, 328, 336, 352, 360, 376
81 };
82 
83 static unsigned int dsgl_ent_len[] = {
84 	0, 32, 32, 48, 48, 64, 64, 80, 80,
85 	112, 112, 128, 128, 144, 144, 160, 160,
86 	192, 192, 208, 208, 224, 224, 240, 240,
87 	272, 272, 288, 288, 304, 304, 320, 320
88 };
89 
90 static u32 round_constant[11] = {
91 	0x01000000, 0x02000000, 0x04000000, 0x08000000,
92 	0x10000000, 0x20000000, 0x40000000, 0x80000000,
93 	0x1B000000, 0x36000000, 0x6C000000
94 };
95 
96 static int chcr_handle_cipher_resp(struct skcipher_request *req,
97 				   unsigned char *input, int err);
98 
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
100 {
101 	return ctx->crypto_ctx->aeadctx;
102 }
103 
104 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
105 {
106 	return ctx->crypto_ctx->ablkctx;
107 }
108 
109 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
110 {
111 	return ctx->crypto_ctx->hmacctx;
112 }
113 
114 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
115 {
116 	return gctx->ctx->gcm;
117 }
118 
119 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
120 {
121 	return gctx->ctx->authenc;
122 }
123 
124 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
125 {
126 	return container_of(ctx->dev, struct uld_ctx, dev);
127 }
128 
129 static inline int is_ofld_imm(const struct sk_buff *skb)
130 {
131 	return (skb->len <= SGE_MAX_WR_LEN);
132 }
133 
134 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
135 {
136 	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
137 }
138 
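/* Count the scatterlist entries needed to cover reqlen bytes, splitting
 * each DMA segment into chunks of at most entlen bytes and skipping the
 * first skip bytes of the list.
 */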
139 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
140 			 unsigned int entlen,
141 			 unsigned int skip)
142 {
143 	int nents = 0;
144 	unsigned int less;
145 	unsigned int skip_len = 0;
146 
147 	while (sg && skip) {
148 		if (sg_dma_len(sg) <= skip) {
149 			skip -= sg_dma_len(sg);
150 			skip_len = 0;
151 			sg = sg_next(sg);
152 		} else {
153 			skip_len = skip;
154 			skip = 0;
155 		}
156 	}
157 
158 	while (sg && reqlen) {
159 		less = min(reqlen, sg_dma_len(sg) - skip_len);
160 		nents += DIV_ROUND_UP(less, entlen);
161 		reqlen -= less;
162 		skip_len = 0;
163 		sg = sg_next(sg);
164 	}
165 	return nents;
166 }
167 
168 static inline int get_aead_subtype(struct crypto_aead *aead)
169 {
170 	struct aead_alg *alg = crypto_aead_alg(aead);
171 	struct chcr_alg_template *chcr_crypto_alg =
172 		container_of(alg, struct chcr_alg_template, alg.aead);
173 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
174 }
175 
176 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
177 {
178 	u8 temp[SHA512_DIGEST_SIZE];
179 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
180 	int authsize = crypto_aead_authsize(tfm);
181 	struct cpl_fw6_pld *fw6_pld;
182 	int cmp = 0;
183 
184 	fw6_pld = (struct cpl_fw6_pld *)input;
185 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
186 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
187 		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
193 		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
194 	}
195 	if (cmp)
196 		*err = -EBADMSG;
197 	else
198 		*err = 0;
199 }
200 
201 static int chcr_inc_wrcount(struct chcr_dev *dev)
202 {
203 	if (dev->state == CHCR_DETACH)
204 		return 1;
205 	atomic_inc(&dev->inflight);
206 	return 0;
207 }
208 
209 static inline void chcr_dec_wrcount(struct chcr_dev *dev)
210 {
211 	atomic_dec(&dev->inflight);
212 }
213 
214 static inline int chcr_handle_aead_resp(struct aead_request *req,
215 					 unsigned char *input,
216 					 int err)
217 {
218 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
219 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
220 	struct chcr_dev *dev = a_ctx(tfm)->dev;
221 
222 	chcr_aead_common_exit(req);
223 	if (reqctx->verify == VERIFY_SW) {
224 		chcr_verify_tag(req, input, &err);
225 		reqctx->verify = VERIFY_HW;
226 	}
227 	chcr_dec_wrcount(dev);
228 	req->base.complete(&req->base, err);
229 
230 	return err;
231 }
232 
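/* Derive the AES decryption key for the hardware: run the standard
 * FIPS-197 key expansion over the encryption key and emit the last nk
 * round-key words in reverse order.
 */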
233 static void get_aes_decrypt_key(unsigned char *dec_key,
234 				       const unsigned char *key,
235 				       unsigned int keylength)
236 {
237 	u32 temp;
238 	u32 w_ring[MAX_NK];
239 	int i, j, k;
240 	u8  nr, nk;
241 
242 	switch (keylength) {
243 	case AES_KEYLENGTH_128BIT:
244 		nk = KEYLENGTH_4BYTES;
245 		nr = NUMBER_OF_ROUNDS_10;
246 		break;
247 	case AES_KEYLENGTH_192BIT:
248 		nk = KEYLENGTH_6BYTES;
249 		nr = NUMBER_OF_ROUNDS_12;
250 		break;
251 	case AES_KEYLENGTH_256BIT:
252 		nk = KEYLENGTH_8BYTES;
253 		nr = NUMBER_OF_ROUNDS_14;
254 		break;
255 	default:
256 		return;
257 	}
258 	for (i = 0; i < nk; i++)
259 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
260 
261 	i = 0;
262 	temp = w_ring[nk - 1];
263 	while (i + nk < (nr + 1) * 4) {
264 		if (!(i % nk)) {
265 			/* RotWord(temp) */
266 			temp = (temp << 8) | (temp >> 24);
267 			temp = aes_ks_subword(temp);
268 			temp ^= round_constant[i / nk];
269 		} else if (nk == 8 && (i % 4 == 0)) {
270 			temp = aes_ks_subword(temp);
271 		}
272 		w_ring[i % nk] ^= temp;
273 		temp = w_ring[i % nk];
274 		i++;
275 	}
276 	i--;
277 	for (k = 0, j = i % nk; k < nk; k++) {
278 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
279 		j--;
280 		if (j < 0)
281 			j += nk;
282 	}
283 }
284 
285 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
286 {
287 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
288 
289 	switch (ds) {
290 	case SHA1_DIGEST_SIZE:
291 		base_hash = crypto_alloc_shash("sha1", 0, 0);
292 		break;
293 	case SHA224_DIGEST_SIZE:
294 		base_hash = crypto_alloc_shash("sha224", 0, 0);
295 		break;
296 	case SHA256_DIGEST_SIZE:
297 		base_hash = crypto_alloc_shash("sha256", 0, 0);
298 		break;
299 	case SHA384_DIGEST_SIZE:
300 		base_hash = crypto_alloc_shash("sha384", 0, 0);
301 		break;
302 	case SHA512_DIGEST_SIZE:
303 		base_hash = crypto_alloc_shash("sha512", 0, 0);
304 		break;
305 	}
306 
307 	return base_hash;
308 }
309 
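/* Hash a single ipad/opad block and export the raw internal state; this
 * partial hash is later loaded into the hardware key context for HMAC.
 */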
310 static int chcr_compute_partial_hash(struct shash_desc *desc,
311 				     char *iopad, char *result_hash,
312 				     int digest_size)
313 {
314 	struct sha1_state sha1_st;
315 	struct sha256_state sha256_st;
316 	struct sha512_state sha512_st;
317 	int error;
318 
319 	if (digest_size == SHA1_DIGEST_SIZE) {
320 		error = crypto_shash_init(desc) ?:
321 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
322 			crypto_shash_export(desc, (void *)&sha1_st);
323 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
324 	} else if (digest_size == SHA224_DIGEST_SIZE) {
325 		error = crypto_shash_init(desc) ?:
326 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
327 			crypto_shash_export(desc, (void *)&sha256_st);
328 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
329 
330 	} else if (digest_size == SHA256_DIGEST_SIZE) {
331 		error = crypto_shash_init(desc) ?:
332 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
333 			crypto_shash_export(desc, (void *)&sha256_st);
334 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
335 
336 	} else if (digest_size == SHA384_DIGEST_SIZE) {
337 		error = crypto_shash_init(desc) ?:
338 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
339 			crypto_shash_export(desc, (void *)&sha512_st);
340 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
341 
342 	} else if (digest_size == SHA512_DIGEST_SIZE) {
343 		error = crypto_shash_init(desc) ?:
344 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
345 			crypto_shash_export(desc, (void *)&sha512_st);
346 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
347 	} else {
348 		error = -EINVAL;
349 		pr_err("Unknown digest size %d\n", digest_size);
350 	}
351 	return error;
352 }
353 
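/* Convert the exported partial hash state from host word order to the
 * big-endian layout expected by the hardware.
 */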
354 static void chcr_change_order(char *buf, int ds)
355 {
356 	int i;
357 
358 	if (ds == SHA512_DIGEST_SIZE) {
359 		for (i = 0; i < (ds / sizeof(u64)); i++)
360 			*((__be64 *)buf + i) =
361 				cpu_to_be64(*((u64 *)buf + i));
362 	} else {
363 		for (i = 0; i < (ds / sizeof(u32)); i++)
364 			*((__be32 *)buf + i) =
365 				cpu_to_be32(*((u32 *)buf + i));
366 	}
367 }
368 
369 static inline int is_hmac(struct crypto_tfm *tfm)
370 {
371 	struct crypto_alg *alg = tfm->__crt_alg;
372 	struct chcr_alg_template *chcr_crypto_alg =
373 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
374 			     alg.hash);
375 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
376 		return 1;
377 	return 0;
378 }
379 
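/* dsgl_walk_*: build the destination physical SG list (DSGL) consumed by
 * CPL_RX_PHYS_DSGL; entries are packed as eight addr/len pairs per
 * phys_sge_pairs block.
 */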
380 static inline void dsgl_walk_init(struct dsgl_walk *walk,
381 				   struct cpl_rx_phys_dsgl *dsgl)
382 {
383 	walk->dsgl = dsgl;
384 	walk->nents = 0;
385 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
386 }
387 
388 static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
389 				 int pci_chan_id)
390 {
391 	struct cpl_rx_phys_dsgl *phys_cpl;
392 
393 	phys_cpl = walk->dsgl;
394 
395 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
396 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
397 	phys_cpl->pcirlxorder_to_noofsgentr =
398 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
399 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
400 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
401 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
402 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
403 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
404 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
405 	phys_cpl->rss_hdr_int.qid = htons(qid);
406 	phys_cpl->rss_hdr_int.hash_val = 0;
407 	phys_cpl->rss_hdr_int.channel = pci_chan_id;
408 }
409 
410 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
411 					size_t size,
412 					dma_addr_t addr)
413 {
414 	int j;
415 
416 	if (!size)
417 		return;
418 	j = walk->nents;
419 	walk->to->len[j % 8] = htons(size);
420 	walk->to->addr[j % 8] = cpu_to_be64(addr);
421 	j++;
422 	if ((j % 8) == 0)
423 		walk->to++;
424 	walk->nents = j;
425 }
426 
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
431 {
432 	int skip_len = 0;
433 	unsigned int left_size = slen, len = 0;
434 	unsigned int j = walk->nents;
435 	int offset, ent_len;
436 
437 	if (!slen)
438 		return;
439 	while (sg && skip) {
440 		if (sg_dma_len(sg) <= skip) {
441 			skip -= sg_dma_len(sg);
442 			skip_len = 0;
443 			sg = sg_next(sg);
444 		} else {
445 			skip_len = skip;
446 			skip = 0;
447 		}
448 	}
449 
450 	while (left_size && sg) {
451 		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
452 		offset = 0;
453 		while (len) {
454 			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
455 			walk->to->len[j % 8] = htons(ent_len);
456 			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
457 						      offset + skip_len);
458 			offset += ent_len;
459 			len -= ent_len;
460 			j++;
461 			if ((j % 8) == 0)
462 				walk->to++;
463 		}
464 		walk->last_sg = sg;
465 		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
466 					  skip_len) + skip_len;
467 		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
468 		skip_len = 0;
469 		sg = sg_next(sg);
470 	}
471 	walk->nents = j;
472 }
473 
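/* ulptx_walk_*: build the source ULPTX SG list; the first entry lives in
 * the ulptx_sgl header (len0/addr0), the rest are packed as len/addr
 * pairs.
 */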
474 static inline void ulptx_walk_init(struct ulptx_walk *walk,
475 				   struct ulptx_sgl *ulp)
476 {
477 	walk->sgl = ulp;
478 	walk->nents = 0;
479 	walk->pair_idx = 0;
480 	walk->pair = ulp->sge;
481 	walk->last_sg = NULL;
482 	walk->last_sg_len = 0;
483 }
484 
485 static inline void ulptx_walk_end(struct ulptx_walk *walk)
486 {
487 	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
488 			      ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
493 					size_t size,
494 					dma_addr_t addr)
495 {
496 	if (!size)
497 		return;
498 
499 	if (walk->nents == 0) {
500 		walk->sgl->len0 = cpu_to_be32(size);
501 		walk->sgl->addr0 = cpu_to_be64(addr);
502 	} else {
503 		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
504 		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
505 		walk->pair_idx = !walk->pair_idx;
506 		if (!walk->pair_idx)
507 			walk->pair++;
508 	}
509 	walk->nents++;
510 }
511 
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
516 {
517 	int small;
518 	int skip_len = 0;
519 	unsigned int sgmin;
520 
521 	if (!len)
522 		return;
523 	while (sg && skip) {
524 		if (sg_dma_len(sg) <= skip) {
525 			skip -= sg_dma_len(sg);
526 			skip_len = 0;
527 			sg = sg_next(sg);
528 		} else {
529 			skip_len = skip;
530 			skip = 0;
531 		}
532 	}
533 	WARN(!sg, "SG should not be null here\n");
534 	if (sg && (walk->nents == 0)) {
535 		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
536 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
537 		walk->sgl->len0 = cpu_to_be32(sgmin);
538 		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
539 		walk->nents++;
540 		len -= sgmin;
541 		walk->last_sg = sg;
542 		walk->last_sg_len = sgmin + skip_len;
543 		skip_len += sgmin;
544 		if (sg_dma_len(sg) == skip_len) {
545 			sg = sg_next(sg);
546 			skip_len = 0;
547 		}
548 	}
549 
550 	while (sg && len) {
551 		small = min(sg_dma_len(sg) - skip_len, len);
552 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
553 		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
554 		walk->pair->addr[walk->pair_idx] =
555 			cpu_to_be64(sg_dma_address(sg) + skip_len);
556 		walk->pair_idx = !walk->pair_idx;
557 		walk->nents++;
558 		if (!walk->pair_idx)
559 			walk->pair++;
560 		len -= sgmin;
561 		skip_len += sgmin;
562 		walk->last_sg = sg;
563 		walk->last_sg_len = skip_len;
564 		if (sg_dma_len(sg) == skip_len) {
565 			sg = sg_next(sg);
566 			skip_len = 0;
567 		}
568 	}
569 }
570 
571 static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
572 {
573 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
574 	struct chcr_alg_template *chcr_crypto_alg =
575 		container_of(alg, struct chcr_alg_template, alg.skcipher);
576 
577 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
578 }
579 
580 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
581 {
582 	struct adapter *adap = netdev2adap(dev);
583 	struct sge_uld_txq_info *txq_info =
584 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
585 	struct sge_uld_txq *txq;
586 	int ret = 0;
587 
588 	local_bh_disable();
589 	txq = &txq_info->uldtxq[idx];
590 	spin_lock(&txq->sendq.lock);
591 	if (txq->full)
592 		ret = -1;
593 	spin_unlock(&txq->sendq.lock);
594 	local_bh_enable();
595 	return ret;
596 }
597 
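/* Lay out the decrypt key context: CBC copies the precomputed reverse
 * round key as-is; other modes place the second half of the key first,
 * followed by half of the reverse round key.
 */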
598 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
599 			       struct _key_ctx *key_ctx)
600 {
601 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
602 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
603 	} else {
604 		memcpy(key_ctx->key,
605 		       ablkctx->key + (ablkctx->enckey_len >> 1),
606 		       ablkctx->enckey_len >> 1);
607 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
608 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
609 	}
610 	return 0;
611 }
612 
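/* Return how many source bytes fit in a single hash work request given
 * the available space, walking the SG list in CHCR_SRC_SG_SIZE chunks.
 */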
613 static int chcr_hash_ent_in_wr(struct scatterlist *src,
614 			     unsigned int minsg,
615 			     unsigned int space,
616 			     unsigned int srcskip)
617 {
618 	int srclen = 0;
619 	int srcsg = minsg;
620 	int soffset = 0, sless;
621 
622 	if (sg_dma_len(src) == srcskip) {
623 		src = sg_next(src);
624 		srcskip = 0;
625 	}
626 	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
629 		srclen += sless;
630 		soffset += sless;
631 		srcsg++;
632 		if (sg_dma_len(src) == (soffset + srcskip)) {
633 			src = sg_next(src);
634 			soffset = 0;
635 			srcskip = 0;
636 		}
637 	}
638 	return srclen;
639 }
640 
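/* Return how many bytes of the src/dst SG pair fit in a single work
 * request without exceeding the available space, advancing the source
 * and destination walks in step.
 */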
641 static int chcr_sg_ent_in_wr(struct scatterlist *src,
642 			     struct scatterlist *dst,
643 			     unsigned int minsg,
644 			     unsigned int space,
645 			     unsigned int srcskip,
646 			     unsigned int dstskip)
647 {
648 	int srclen = 0, dstlen = 0;
649 	int srcsg = minsg, dstsg = minsg;
650 	int offset = 0, soffset = 0, less, sless = 0;
651 
652 	if (sg_dma_len(src) == srcskip) {
653 		src = sg_next(src);
654 		srcskip = 0;
655 	}
656 	if (sg_dma_len(dst) == dstskip) {
657 		dst = sg_next(dst);
658 		dstskip = 0;
659 	}
660 
661 	while (src && dst &&
662 	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
663 		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
664 				CHCR_SRC_SG_SIZE);
665 		srclen += sless;
666 		srcsg++;
667 		offset = 0;
668 		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
669 		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
670 			if (srclen <= dstlen)
671 				break;
672 			less = min_t(unsigned int, sg_dma_len(dst) - offset -
673 				     dstskip, CHCR_DST_SG_SIZE);
674 			dstlen += less;
675 			offset += less;
676 			if ((offset + dstskip) == sg_dma_len(dst)) {
677 				dst = sg_next(dst);
678 				offset = 0;
679 			}
680 			dstsg++;
681 			dstskip = 0;
682 		}
683 		soffset += sless;
684 		if ((soffset + srcskip) == sg_dma_len(src)) {
685 			src = sg_next(src);
686 			srcskip = 0;
687 			soffset = 0;
688 		}
689 
690 	}
691 	return min(srclen, dstlen);
692 }
693 
694 static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
695 				u32 flags,
696 				struct scatterlist *src,
697 				struct scatterlist *dst,
698 				unsigned int nbytes,
699 				u8 *iv,
700 				unsigned short op_type)
701 {
702 	int err;
703 
704 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
705 
706 	skcipher_request_set_sync_tfm(subreq, cipher);
707 	skcipher_request_set_callback(subreq, flags, NULL, NULL);
708 	skcipher_request_set_crypt(subreq, src, dst,
709 				   nbytes, iv);
710 
711 	err = op_type ? crypto_skcipher_decrypt(subreq) :
712 		crypto_skcipher_encrypt(subreq);
713 	skcipher_request_zero(subreq);
714 
	return err;
}

static inline void create_wreq(struct chcr_context *ctx,
719 			       struct chcr_wr *chcr_req,
720 			       struct crypto_async_request *req,
721 			       unsigned int imm,
722 			       int hash_sz,
723 			       unsigned int len16,
724 			       unsigned int sc_len,
725 			       unsigned int lcb)
726 {
727 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
732 	chcr_req->wreq.pld_size_hash_size =
733 		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
734 	chcr_req->wreq.len16_pkd =
735 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
736 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
737 	chcr_req->wreq.rx_chid_to_rx_q_id =
738 		FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
739 				!!lcb, ctx->tx_qidx);
740 
741 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
742 						       qid);
743 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
744 				     ((sizeof(chcr_req->wreq)) >> 4)));
745 
746 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
747 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
748 					   sizeof(chcr_req->key_ctx) + sc_len);
749 }
750 
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: cipher WR parameters: the request, the ingress qid on which
 *		  the response for this WR should be received, and the number
 *		  of bytes to process
 */
758 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
759 {
760 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
761 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
762 	struct sk_buff *skb = NULL;
763 	struct chcr_wr *chcr_req;
764 	struct cpl_rx_phys_dsgl *phys_cpl;
765 	struct ulptx_sgl *ulptx;
766 	struct chcr_skcipher_req_ctx *reqctx =
767 		skcipher_request_ctx(wrparam->req);
768 	unsigned int temp = 0, transhdr_len, dst_size;
769 	int error;
770 	int nents;
771 	unsigned int kctx_len;
772 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
773 			GFP_KERNEL : GFP_ATOMIC;
774 	struct adapter *adap = padap(c_ctx(tfm)->dev);
775 
776 	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
777 			      reqctx->dst_ofst);
778 	dst_size = get_space_for_phys_dsgl(nents);
779 	kctx_len = roundup(ablkctx->enckey_len, 16);
780 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
781 	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
782 				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
783 	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
784 				     (sgl_len(nents) * 8);
785 	transhdr_len += temp;
786 	transhdr_len = roundup(transhdr_len, 16);
787 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
788 	if (!skb) {
789 		error = -ENOMEM;
790 		goto err;
791 	}
792 	chcr_req = __skb_put_zero(skb, transhdr_len);
793 	chcr_req->sec_cpl.op_ivinsrtofst =
794 		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
795 
796 	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
797 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
798 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
799 
800 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
801 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
802 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
803 							 ablkctx->ciph_mode,
804 							 0, 0, IV >> 1);
805 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
806 							  0, 1, dst_size);
807 
808 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
809 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
810 	    (!(get_cryptoalg_subtype(tfm) ==
811 	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
812 	    (!(get_cryptoalg_subtype(tfm) ==
813 	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
814 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
815 	} else {
816 		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
817 		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
818 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
819 			       ablkctx->enckey_len);
820 		} else {
821 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
822 			       (ablkctx->enckey_len >> 1),
823 			       ablkctx->enckey_len >> 1);
824 			memcpy(chcr_req->key_ctx.key +
825 			       (ablkctx->enckey_len >> 1),
826 			       ablkctx->key,
827 			       ablkctx->enckey_len >> 1);
828 		}
829 	}
830 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
831 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
832 	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
833 	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
834 
835 	atomic_inc(&adap->chcr_stats.cipher_rqst);
836 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
837 		+ (reqctx->imm ? (wrparam->bytes) : 0);
838 	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
839 		    transhdr_len, temp,
840 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
841 	reqctx->skb = skb;
842 
843 	if (reqctx->op && (ablkctx->ciph_mode ==
844 			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
845 		sg_pcopy_to_buffer(wrparam->req->src,
846 			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
847 			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
848 
849 	return skb;
850 err:
851 	return ERR_PTR(error);
852 }
853 
854 static inline int chcr_keyctx_ck_size(unsigned int keylen)
855 {
856 	int ck_size = 0;
857 
858 	if (keylen == AES_KEYSIZE_128)
859 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
860 	else if (keylen == AES_KEYSIZE_192)
861 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
862 	else if (keylen == AES_KEYSIZE_256)
863 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
864 	else
865 		ck_size = 0;
866 
867 	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
870 				       const u8 *key,
871 				       unsigned int keylen)
872 {
873 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
874 
875 	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
876 				CRYPTO_TFM_REQ_MASK);
877 	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
878 				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
879 	return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
880 }
881 
882 static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
883 			       const u8 *key,
884 			       unsigned int keylen)
885 {
886 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
887 	unsigned int ck_size, context_size;
888 	u16 alignment = 0;
889 	int err;
890 
891 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
892 	if (err)
893 		goto badkey_err;
894 
895 	ck_size = chcr_keyctx_ck_size(keylen);
896 	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
897 	memcpy(ablkctx->key, key, keylen);
898 	ablkctx->enckey_len = keylen;
899 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
900 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
901 			keylen + alignment) >> 4;
902 
903 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
904 						0, 0, context_size);
905 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
906 	return 0;
907 badkey_err:
908 	ablkctx->enckey_len = 0;
909 
910 	return err;
911 }
912 
913 static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
914 				   const u8 *key,
915 				   unsigned int keylen)
916 {
917 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
918 	unsigned int ck_size, context_size;
919 	u16 alignment = 0;
920 	int err;
921 
922 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
923 	if (err)
924 		goto badkey_err;
925 	ck_size = chcr_keyctx_ck_size(keylen);
926 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
927 	memcpy(ablkctx->key, key, keylen);
928 	ablkctx->enckey_len = keylen;
929 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
930 			keylen + alignment) >> 4;
931 
932 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
933 						0, 0, context_size);
934 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
935 
936 	return 0;
937 badkey_err:
938 	ablkctx->enckey_len = 0;
939 
940 	return err;
941 }
942 
943 static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
944 				   const u8 *key,
945 				   unsigned int keylen)
946 {
947 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
948 	unsigned int ck_size, context_size;
949 	u16 alignment = 0;
950 	int err;
951 
952 	if (keylen < CTR_RFC3686_NONCE_SIZE)
953 		return -EINVAL;
954 	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
955 	       CTR_RFC3686_NONCE_SIZE);
956 
957 	keylen -= CTR_RFC3686_NONCE_SIZE;
958 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
959 	if (err)
960 		goto badkey_err;
961 
962 	ck_size = chcr_keyctx_ck_size(keylen);
963 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
964 	memcpy(ablkctx->key, key, keylen);
965 	ablkctx->enckey_len = keylen;
966 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
967 			keylen + alignment) >> 4;
968 
969 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
970 						0, 0, context_size);
971 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
972 
973 	return 0;
974 badkey_err:
975 	ablkctx->enckey_len = 0;
976 
977 	return err;
978 }
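
/* Add "add" to the 128-bit big-endian counter block in srciv, propagating
 * any carry, and write the result to dstiv.
 */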
979 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
980 {
981 	unsigned int size = AES_BLOCK_SIZE;
982 	__be32 *b = (__be32 *)(dstiv + size);
983 	u32 c, prev;
984 
985 	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
986 	for (; size >= 4; size -= 4) {
987 		prev = be32_to_cpu(*--b);
988 		c = prev + add;
989 		*b = cpu_to_be32(c);
990 		if (prev < c)
991 			break;
992 		add = 1;
	}
}
996 
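/* Clamp the byte count so the low 32-bit CTR counter does not wrap within
 * a single work request; any remainder is processed in a subsequent
 * request with an updated counter.
 */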
997 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
998 {
999 	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1000 	u64 c;
1001 	u32 temp = be32_to_cpu(*--b);
1002 
1003 	temp = ~temp;
	/* number of blocks that can be processed without overflow */
	c = (u64)temp + 1;
1005 	if ((bytes / AES_BLOCK_SIZE) > c)
1006 		bytes = c * AES_BLOCK_SIZE;
1007 	return bytes;
1008 }
1009 
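/* Recompute the XTS tweak for the next chunk: encrypt the request IV with
 * the second key half, then multiply by x in GF(2^128) once per AES block
 * already processed; for non-final chunks the tweak is decrypted back so
 * the hardware can re-derive it from the IV.
 */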
1010 static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1011 			     u32 isfinal)
1012 {
1013 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1014 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1015 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1016 	struct crypto_aes_ctx aes;
1017 	int ret, i;
1018 	u8 *key;
1019 	unsigned int keylen;
1020 	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1021 	int round8 = round / 8;
1022 
1023 	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1024 
1025 	keylen = ablkctx->enckey_len / 2;
1026 	key = ablkctx->key + keylen;
1027 	ret = aes_expandkey(&aes, key, keylen);
1028 	if (ret)
1029 		return ret;
1030 	aes_encrypt(&aes, iv, iv);
1031 	for (i = 0; i < round8; i++)
1032 		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1033 
1034 	for (i = 0; i < (round % 8); i++)
1035 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1036 
1037 	if (!isfinal)
1038 		aes_decrypt(&aes, iv, iv);
1039 
1040 	memzero_explicit(&aes, sizeof(aes));
1041 	return 0;
1042 }
1043 
1044 static int chcr_update_cipher_iv(struct skcipher_request *req,
1045 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1046 {
1047 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1048 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1049 	int subtype = get_cryptoalg_subtype(tfm);
1050 	int ret = 0;
1051 
1052 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1053 		ctr_add_iv(iv, req->iv, (reqctx->processed /
1054 			   AES_BLOCK_SIZE));
1055 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1056 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1057 			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1058 						AES_BLOCK_SIZE) + 1);
1059 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1060 		ret = chcr_update_tweak(req, iv, 0);
1061 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1062 		if (reqctx->op)
			/* Updated before sending the last WR */
1064 			memcpy(iv, req->iv, AES_BLOCK_SIZE);
1065 		else
1066 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1067 	}
1068 
	return ret;
}
1072 
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1, and the IV buffer is only 8 bytes, which
 * remains constant for subsequent update requests.
 */
1077 
1078 static int chcr_final_cipher_iv(struct skcipher_request *req,
1079 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1080 {
1081 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1082 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1083 	int subtype = get_cryptoalg_subtype(tfm);
1084 	int ret = 0;
1085 
1086 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1087 		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1088 						       AES_BLOCK_SIZE));
1089 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1090 		ret = chcr_update_tweak(req, iv, 1);
1091 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for decrypt */
1093 		if (!reqctx->op)
1094 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1095 
1096 	}
	return ret;
}
1100 
1101 static int chcr_handle_cipher_resp(struct skcipher_request *req,
1102 				   unsigned char *input, int err)
1103 {
1104 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1105 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1106 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1107 	struct sk_buff *skb;
1108 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1109 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
1111 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1112 	int bytes;
1113 
1114 	if (err)
1115 		goto unmap;
1116 	if (req->cryptlen == reqctx->processed) {
1117 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1118 				      req);
1119 		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1120 		goto complete;
1121 	}
1122 
1123 	if (!reqctx->imm) {
1124 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1125 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1126 					  reqctx->src_ofst, reqctx->dst_ofst);
1127 		if ((bytes + reqctx->processed) >= req->cryptlen)
1128 			bytes  = req->cryptlen - reqctx->processed;
1129 		else
1130 			bytes = rounddown(bytes, 16);
1131 	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
1134 	}
1135 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1136 	if (err)
1137 		goto unmap;
1138 
1139 	if (unlikely(bytes == 0)) {
1140 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1141 				      req);
1142 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1143 				     req->base.flags,
1144 				     req->src,
1145 				     req->dst,
1146 				     req->cryptlen,
1147 				     req->iv,
1148 				     reqctx->op);
1149 		goto complete;
1150 	}
1151 
1152 	if (get_cryptoalg_subtype(tfm) ==
1153 	    CRYPTO_ALG_SUB_TYPE_CTR)
1154 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1155 	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
1156 	wrparam.req = req;
1157 	wrparam.bytes = bytes;
1158 	skb = create_cipher_wr(&wrparam);
1159 	if (IS_ERR(skb)) {
		pr_err("%s: failed to form WR, no memory\n", __func__);
1161 		err = PTR_ERR(skb);
1162 		goto unmap;
1163 	}
1164 	skb->dev = u_ctx->lldi.ports[0];
1165 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1166 	chcr_send_wr(skb);
1167 	reqctx->last_req_len = bytes;
1168 	reqctx->processed += bytes;
1169 	return 0;
1170 unmap:
1171 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1172 complete:
1173 	chcr_dec_wrcount(dev);
1174 	req->base.complete(&req->base, err);
1175 	return err;
1176 }
1177 
1178 static int process_cipher(struct skcipher_request *req,
1179 				  unsigned short qid,
1180 				  struct sk_buff **skb,
1181 				  unsigned short op_type)
1182 {
1183 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1184 	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1185 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1186 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
1188 	int bytes, err = -EINVAL;
1189 
1190 	reqctx->processed = 0;
1191 	if (!req->iv)
1192 		goto error;
1193 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1194 	    (req->cryptlen == 0) ||
1195 	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1196 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1197 		       ablkctx->enckey_len, req->cryptlen, ivsize);
1198 		goto error;
1199 	}
1200 
1201 	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1202 	if (err)
1203 		goto error;
1204 	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1205 					    AES_MIN_KEY_SIZE +
1206 					    sizeof(struct cpl_rx_phys_dsgl) +
					/* Min DSGL size */
					    32))) {
		/* Can be sent as immediate data */
1210 		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1211 
1212 		dnents = sg_nents_xlen(req->dst, req->cryptlen,
1213 				       CHCR_DST_SG_SIZE, 0);
1214 		phys_dsgl = get_space_for_phys_dsgl(dnents);
1215 		kctx_len = roundup(ablkctx->enckey_len, 16);
1216 		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1217 		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1218 			SGE_MAX_WR_LEN;
1219 		bytes = IV + req->cryptlen;
1220 
1221 	} else {
1222 		reqctx->imm = 0;
1223 	}
1224 
1225 	if (!reqctx->imm) {
1226 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1227 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1228 					  0, 0);
1229 		if ((bytes + reqctx->processed) >= req->cryptlen)
1230 			bytes  = req->cryptlen - reqctx->processed;
1231 		else
1232 			bytes = rounddown(bytes, 16);
1233 	} else {
1234 		bytes = req->cryptlen;
1235 	}
1236 	if (get_cryptoalg_subtype(tfm) ==
1237 	    CRYPTO_ALG_SUB_TYPE_CTR) {
1238 		bytes = adjust_ctr_overflow(req->iv, bytes);
1239 	}
1240 	if (get_cryptoalg_subtype(tfm) ==
1241 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1242 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1243 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1244 				CTR_RFC3686_IV_SIZE);
1245 
1246 		/* initialize counter portion of counter block */
1247 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1248 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1249 
	} else {
		memcpy(reqctx->iv, req->iv, IV);
1253 	}
1254 	if (unlikely(bytes == 0)) {
1255 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1256 				      req);
1257 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1258 					   req->base.flags,
1259 					   req->src,
1260 					   req->dst,
1261 					   req->cryptlen,
1262 					   reqctx->iv,
1263 					   op_type);
1264 		goto error;
1265 	}
1266 	reqctx->op = op_type;
1267 	reqctx->srcsg = req->src;
1268 	reqctx->dstsg = req->dst;
1269 	reqctx->src_ofst = 0;
1270 	reqctx->dst_ofst = 0;
1271 	wrparam.qid = qid;
1272 	wrparam.req = req;
1273 	wrparam.bytes = bytes;
1274 	*skb = create_cipher_wr(&wrparam);
1275 	if (IS_ERR(*skb)) {
1276 		err = PTR_ERR(*skb);
1277 		goto unmap;
1278 	}
1279 	reqctx->processed = bytes;
1280 	reqctx->last_req_len = bytes;
1281 
1282 	return 0;
1283 unmap:
1284 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1285 error:
1286 	return err;
1287 }
1288 
1289 static int chcr_aes_encrypt(struct skcipher_request *req)
1290 {
1291 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1292 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1293 	struct sk_buff *skb = NULL;
1294 	int err, isfull = 0;
1295 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1296 
1297 	err = chcr_inc_wrcount(dev);
1298 	if (err)
1299 		return -ENXIO;
1300 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1301 					    c_ctx(tfm)->tx_qidx))) {
1302 		isfull = 1;
1303 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1304 			err = -ENOSPC;
1305 			goto error;
1306 		}
1307 	}
1308 
1309 	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1310 			     &skb, CHCR_ENCRYPT_OP);
1311 	if (err || !skb)
1312 		return  err;
1313 	skb->dev = u_ctx->lldi.ports[0];
1314 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1315 	chcr_send_wr(skb);
1316 	return isfull ? -EBUSY : -EINPROGRESS;
1317 error:
1318 	chcr_dec_wrcount(dev);
1319 	return err;
1320 }
1321 
1322 static int chcr_aes_decrypt(struct skcipher_request *req)
1323 {
1324 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1325 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1326 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1327 	struct sk_buff *skb = NULL;
1328 	int err, isfull = 0;
1329 
1330 	err = chcr_inc_wrcount(dev);
1331 	if (err)
1332 		return -ENXIO;
1333 
1334 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1335 					    c_ctx(tfm)->tx_qidx))) {
1336 		isfull = 1;
1337 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1338 			return -ENOSPC;
1339 	}
1340 
1341 	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1342 			     &skb, CHCR_DECRYPT_OP);
1343 	if (err || !skb)
1344 		return err;
1345 	skb->dev = u_ctx->lldi.ports[0];
1346 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1347 	chcr_send_wr(skb);
1348 	return isfull ? -EBUSY : -EINPROGRESS;
1349 }
1350 
1351 static int chcr_device_init(struct chcr_context *ctx)
1352 {
1353 	struct uld_ctx *u_ctx = NULL;
1354 	unsigned int id;
1355 	int txq_perchan, txq_idx, ntxq;
1356 	int err = 0, rxq_perchan, rxq_idx;
1357 
1358 	id = smp_processor_id();
1359 	if (!ctx->dev) {
1360 		u_ctx = assign_chcr_device();
1361 		if (!u_ctx) {
1362 			err = -ENXIO;
			pr_err("chcr device assignment failed\n");
1364 			goto out;
1365 		}
1366 		ctx->dev = &u_ctx->dev;
1367 		ntxq = u_ctx->lldi.ntxq;
1368 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1369 		txq_perchan = ntxq / u_ctx->lldi.nchan;
1370 		spin_lock(&ctx->dev->lock_chcr_dev);
1371 		ctx->tx_chan_id = ctx->dev->tx_channel_id;
1372 		ctx->dev->tx_channel_id =
1373 			(ctx->dev->tx_channel_id + 1) %  u_ctx->lldi.nchan;
1374 		spin_unlock(&ctx->dev->lock_chcr_dev);
1375 		rxq_idx = ctx->tx_chan_id * rxq_perchan;
1376 		rxq_idx += id % rxq_perchan;
1377 		txq_idx = ctx->tx_chan_id * txq_perchan;
1378 		txq_idx += id % txq_perchan;
1379 		ctx->rx_qidx = rxq_idx;
1380 		ctx->tx_qidx = txq_idx;
		/* Channel ID used by the SGE to forward packets to the host.
		 * The same value should be used by FW in the RSS_CH field of
		 * cpl_fw6_pld. The driver programs the PCI channel ID to be
		 * used by FW at queue-allocation time, with the value
		 * "pi->tx_chan".
		 */
1386 		ctx->pci_chan_id = txq_idx / txq_perchan;
1387 	}
1388 out:
1389 	return err;
1390 }
1391 
1392 static int chcr_init_tfm(struct crypto_skcipher *tfm)
1393 {
1394 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1395 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1396 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1397 
1398 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
1399 				CRYPTO_ALG_NEED_FALLBACK);
1400 	if (IS_ERR(ablkctx->sw_cipher)) {
1401 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1402 		return PTR_ERR(ablkctx->sw_cipher);
1403 	}
1404 
1405 	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
1406 
1407 	return chcr_device_init(ctx);
1408 }
1409 
1410 static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1411 {
1412 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1413 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1414 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1415 
	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp().
	 */
1419 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1420 				CRYPTO_ALG_NEED_FALLBACK);
1421 	if (IS_ERR(ablkctx->sw_cipher)) {
1422 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1423 		return PTR_ERR(ablkctx->sw_cipher);
1424 	}
1425 	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
1426 	return chcr_device_init(ctx);
}

static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1431 {
1432 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1433 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1434 
1435 	crypto_free_sync_skcipher(ablkctx->sw_cipher);
1436 }
1437 
1438 static int get_alg_config(struct algo_param *params,
1439 			  unsigned int auth_size)
1440 {
1441 	switch (auth_size) {
1442 	case SHA1_DIGEST_SIZE:
1443 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1444 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1445 		params->result_size = SHA1_DIGEST_SIZE;
1446 		break;
1447 	case SHA224_DIGEST_SIZE:
1448 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1449 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1450 		params->result_size = SHA256_DIGEST_SIZE;
1451 		break;
1452 	case SHA256_DIGEST_SIZE:
1453 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1454 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1455 		params->result_size = SHA256_DIGEST_SIZE;
1456 		break;
1457 	case SHA384_DIGEST_SIZE:
1458 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1459 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1460 		params->result_size = SHA512_DIGEST_SIZE;
1461 		break;
1462 	case SHA512_DIGEST_SIZE:
1463 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1464 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1465 		params->result_size = SHA512_DIGEST_SIZE;
1466 		break;
1467 	default:
		pr_err("ERROR, unsupported digest size\n");
1469 		return -EINVAL;
1470 	}
1471 	return 0;
1472 }
1473 
1474 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1475 {
1476 		crypto_free_shash(base_hash);
1477 }
1478 
/**
 *	create_hash_wr - create the work request for a hash operation
 *	@req: hash request
 *	@param: hash WR parameters (key context length, SG length, buffer
 *		length, last/more flags) for this request
 */
1483 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1484 				      struct hash_wr_param *param)
1485 {
1486 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1487 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1488 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1489 	struct sk_buff *skb = NULL;
1490 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1491 	struct chcr_wr *chcr_req;
1492 	struct ulptx_sgl *ulptx;
1493 	unsigned int nents = 0, transhdr_len;
1494 	unsigned int temp = 0;
1495 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1496 		GFP_ATOMIC;
1497 	struct adapter *adap = padap(h_ctx(tfm)->dev);
1498 	int error = 0;
1499 
1500 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1501 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1502 				param->sg_len) <= SGE_MAX_WR_LEN;
1503 	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1504 		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1505 	nents += param->bfr_len ? 1 : 0;
1506 	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1507 				param->sg_len, 16) : (sgl_len(nents) * 8);
1508 	transhdr_len = roundup(transhdr_len, 16);
1509 
1510 	skb = alloc_skb(transhdr_len, flags);
1511 	if (!skb)
1512 		return ERR_PTR(-ENOMEM);
1513 	chcr_req = __skb_put_zero(skb, transhdr_len);
1514 
1515 	chcr_req->sec_cpl.op_ivinsrtofst =
1516 		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
1517 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1518 
1519 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1520 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1521 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1522 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1523 	chcr_req->sec_cpl.seqno_numivs =
1524 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1525 					 param->opad_needed, 0);
1526 
1527 	chcr_req->sec_cpl.ivgen_hdrlen =
1528 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1529 
1530 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1531 	       param->alg_prm.result_size);
1532 
1533 	if (param->opad_needed)
1534 		memcpy(chcr_req->key_ctx.key +
1535 		       ((param->alg_prm.result_size <= 32) ? 32 :
1536 			CHCR_HASH_MAX_DIGEST_SIZE),
1537 		       hmacctx->opad, param->alg_prm.result_size);
1538 
1539 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1540 					    param->alg_prm.mk_size, 0,
1541 					    param->opad_needed,
1542 					    ((param->kctx_len +
1543 					     sizeof(chcr_req->key_ctx)) >> 4));
1544 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1545 	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1546 				     DUMMY_BYTES);
1547 	if (param->bfr_len != 0) {
1548 		req_ctx->hctx_wr.dma_addr =
1549 			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1550 				       param->bfr_len, DMA_TO_DEVICE);
1551 		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
1553 			error = -ENOMEM;
1554 			goto err;
1555 		}
1556 		req_ctx->hctx_wr.dma_len = param->bfr_len;
1557 	} else {
1558 		req_ctx->hctx_wr.dma_addr = 0;
1559 	}
1560 	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
1562 	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1563 				(param->sg_len + param->bfr_len) : 0);
1564 	atomic_inc(&adap->chcr_stats.digest_rqst);
1565 	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1566 		    param->hash_size, transhdr_len,
1567 		    temp,  0);
1568 	req_ctx->hctx_wr.skb = skb;
1569 	return skb;
1570 err:
1571 	kfree_skb(skb);
1572 	return  ERR_PTR(error);
1573 }
1574 
1575 static int chcr_ahash_update(struct ahash_request *req)
1576 {
1577 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1578 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1579 	struct uld_ctx *u_ctx = NULL;
1580 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1581 	struct sk_buff *skb;
1582 	u8 remainder = 0, bs;
1583 	unsigned int nbytes = req->nbytes;
1584 	struct hash_wr_param params;
1585 	int error, isfull = 0;
1586 
1587 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1588 	u_ctx = ULD_CTX(h_ctx(rtfm));
1589 
1590 	if (nbytes + req_ctx->reqlen >= bs) {
1591 		remainder = (nbytes + req_ctx->reqlen) % bs;
1592 		nbytes = nbytes + req_ctx->reqlen - remainder;
1593 	} else {
1594 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1595 				   + req_ctx->reqlen, nbytes, 0);
1596 		req_ctx->reqlen += nbytes;
1597 		return 0;
1598 	}
1599 	error = chcr_inc_wrcount(dev);
1600 	if (error)
1601 		return -ENXIO;
	/* Detach state for CHCR means lldi or padap has been freed. Increasing
	 * the inflight count for the dev guarantees that lldi and padap
	 * remain valid.
	 */
1605 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1606 					    h_ctx(rtfm)->tx_qidx))) {
1607 		isfull = 1;
1608 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1609 			error = -ENOSPC;
1610 			goto err;
1611 		}
1612 	}
1613 
1614 	chcr_init_hctx_per_wr(req_ctx);
1615 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1616 	if (error) {
1617 		error = -ENOMEM;
1618 		goto err;
1619 	}
1620 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1621 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1622 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1623 				     HASH_SPACE_LEFT(params.kctx_len), 0);
1624 	if (params.sg_len > req->nbytes)
1625 		params.sg_len = req->nbytes;
1626 	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1627 			req_ctx->reqlen;
1628 	params.opad_needed = 0;
1629 	params.more = 1;
1630 	params.last = 0;
1631 	params.bfr_len = req_ctx->reqlen;
1632 	params.scmd1 = 0;
1633 	req_ctx->hctx_wr.srcsg = req->src;
1634 
1635 	params.hash_size = params.alg_prm.result_size;
1636 	req_ctx->data_len += params.sg_len + params.bfr_len;
1637 	skb = create_hash_wr(req, &params);
1638 	if (IS_ERR(skb)) {
1639 		error = PTR_ERR(skb);
1640 		goto unmap;
1641 	}
1642 
1643 	req_ctx->hctx_wr.processed += params.sg_len;
1644 	if (remainder) {
1645 		/* Swap buffers */
1646 		swap(req_ctx->reqbfr, req_ctx->skbfr);
1647 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1648 				   req_ctx->reqbfr, remainder, req->nbytes -
1649 				   remainder);
1650 	}
1651 	req_ctx->reqlen = remainder;
1652 	skb->dev = u_ctx->lldi.ports[0];
1653 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1654 	chcr_send_wr(skb);
1655 
1656 	return isfull ? -EBUSY : -EINPROGRESS;
1657 unmap:
1658 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1659 err:
1660 	chcr_dec_wrcount(dev);
1661 	return error;
1662 }
1663 
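/* Build the final padding block by hand: 0x80 followed by zeroes, with the
 * total message length in bits stored big-endian in the last 8 bytes
 * (64-byte blocks for SHA-1/224/256, 128-byte blocks for SHA-384/512).
 */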
1664 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1665 {
1666 	memset(bfr_ptr, 0, bs);
1667 	*bfr_ptr = 0x80;
1668 	if (bs == 64)
1669 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1670 	else
1671 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1672 }
1673 
1674 static int chcr_ahash_final(struct ahash_request *req)
1675 {
1676 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1677 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1678 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1679 	struct hash_wr_param params;
1680 	struct sk_buff *skb;
1681 	struct uld_ctx *u_ctx = NULL;
1682 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1683 	int error = -EINVAL;
1684 
1685 	error = chcr_inc_wrcount(dev);
1686 	if (error)
1687 		return -ENXIO;
1688 
1689 	chcr_init_hctx_per_wr(req_ctx);
1690 	u_ctx = ULD_CTX(h_ctx(rtfm));
1691 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1692 		params.opad_needed = 1;
1693 	else
1694 		params.opad_needed = 0;
1695 	params.sg_len = 0;
1696 	req_ctx->hctx_wr.isfinal = 1;
1697 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1698 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1699 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1700 		params.opad_needed = 1;
1701 		params.kctx_len *= 2;
1702 	} else {
1703 		params.opad_needed = 0;
1704 	}
1705 
1706 	req_ctx->hctx_wr.result = 1;
1707 	params.bfr_len = req_ctx->reqlen;
1708 	req_ctx->data_len += params.bfr_len + params.sg_len;
1709 	req_ctx->hctx_wr.srcsg = req->src;
1710 	if (req_ctx->reqlen == 0) {
1711 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1712 		params.last = 0;
1713 		params.more = 1;
1714 		params.scmd1 = 0;
1715 		params.bfr_len = bs;
1716 
1717 	} else {
1718 		params.scmd1 = req_ctx->data_len;
1719 		params.last = 1;
1720 		params.more = 0;
1721 	}
1722 	params.hash_size = crypto_ahash_digestsize(rtfm);
1723 	skb = create_hash_wr(req, &params);
1724 	if (IS_ERR(skb)) {
1725 		error = PTR_ERR(skb);
1726 		goto err;
1727 	}
1728 	req_ctx->reqlen = 0;
1729 	skb->dev = u_ctx->lldi.ports[0];
1730 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1731 	chcr_send_wr(skb);
1732 	return -EINPROGRESS;
1733 err:
1734 	chcr_dec_wrcount(dev);
1735 	return error;
1736 }
1737 
1738 static int chcr_ahash_finup(struct ahash_request *req)
1739 {
1740 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1741 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1742 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1743 	struct uld_ctx *u_ctx = NULL;
1744 	struct sk_buff *skb;
1745 	struct hash_wr_param params;
1746 	u8  bs;
1747 	int error, isfull = 0;
1748 
1749 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1750 	u_ctx = ULD_CTX(h_ctx(rtfm));
1751 	error = chcr_inc_wrcount(dev);
1752 	if (error)
1753 		return -ENXIO;
1754 
1755 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1756 					    h_ctx(rtfm)->tx_qidx))) {
1757 		isfull = 1;
1758 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1759 			error = -ENOSPC;
1760 			goto err;
1761 		}
1762 	}
1763 	chcr_init_hctx_per_wr(req_ctx);
1764 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1765 	if (error) {
1766 		error = -ENOMEM;
1767 		goto err;
1768 	}
1769 
1770 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1771 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1772 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1773 		params.kctx_len *= 2;
1774 		params.opad_needed = 1;
1775 	} else {
1776 		params.opad_needed = 0;
1777 	}
1778 
1779 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1780 				    HASH_SPACE_LEFT(params.kctx_len), 0);
1781 	if (params.sg_len < req->nbytes) {
1782 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1783 			params.kctx_len /= 2;
1784 			params.opad_needed = 0;
1785 		}
1786 		params.last = 0;
1787 		params.more = 1;
1788 		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1789 					- req_ctx->reqlen;
1790 		params.hash_size = params.alg_prm.result_size;
1791 		params.scmd1 = 0;
1792 	} else {
1793 		params.last = 1;
1794 		params.more = 0;
1795 		params.sg_len = req->nbytes;
1796 		params.hash_size = crypto_ahash_digestsize(rtfm);
1797 		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1798 				params.sg_len;
1799 	}
1800 	params.bfr_len = req_ctx->reqlen;
1801 	req_ctx->data_len += params.bfr_len + params.sg_len;
1802 	req_ctx->hctx_wr.result = 1;
1803 	req_ctx->hctx_wr.srcsg = req->src;
1804 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1805 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1806 		params.last = 0;
1807 		params.more = 1;
1808 		params.scmd1 = 0;
1809 		params.bfr_len = bs;
1810 	}
1811 	skb = create_hash_wr(req, &params);
1812 	if (IS_ERR(skb)) {
1813 		error = PTR_ERR(skb);
1814 		goto unmap;
1815 	}
1816 	req_ctx->reqlen = 0;
1817 	req_ctx->hctx_wr.processed += params.sg_len;
1818 	skb->dev = u_ctx->lldi.ports[0];
1819 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1820 	chcr_send_wr(skb);
1821 
1822 	return isfull ? -EBUSY : -EINPROGRESS;
1823 unmap:
1824 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1825 err:
1826 	chcr_dec_wrcount(dev);
1827 	return error;
1828 }
1829 
1830 static int chcr_ahash_digest(struct ahash_request *req)
1831 {
1832 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1833 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1834 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1835 	struct uld_ctx *u_ctx = NULL;
1836 	struct sk_buff *skb;
1837 	struct hash_wr_param params;
1838 	u8  bs;
1839 	int error, isfull = 0;
1840 
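	/* digest() is a one-shot init() + update() + final(). */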
1841 	rtfm->init(req);
1842 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1843 	error = chcr_inc_wrcount(dev);
1844 	if (error)
1845 		return -ENXIO;
1846 
1847 	u_ctx = ULD_CTX(h_ctx(rtfm));
1848 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1849 					    h_ctx(rtfm)->tx_qidx))) {
1850 		isfull = 1;
1851 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1852 			error = -ENOSPC;
1853 			goto err;
1854 		}
1855 	}
1856 
1857 	chcr_init_hctx_per_wr(req_ctx);
1858 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1859 	if (error) {
1860 		error = -ENOMEM;
1861 		goto err;
1862 	}
1863 
1864 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1865 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1866 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1867 		params.kctx_len *= 2;
1868 		params.opad_needed = 1;
1869 	} else {
1870 		params.opad_needed = 0;
1871 	}
1872 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1873 				HASH_SPACE_LEFT(params.kctx_len), 0);
1874 	if (params.sg_len < req->nbytes) {
1875 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1876 			params.kctx_len /= 2;
1877 			params.opad_needed = 0;
1878 		}
1879 		params.last = 0;
1880 		params.more = 1;
1881 		params.scmd1 = 0;
1882 		params.sg_len = rounddown(params.sg_len, bs);
1883 		params.hash_size = params.alg_prm.result_size;
1884 	} else {
1885 		params.sg_len = req->nbytes;
1886 		params.hash_size = crypto_ahash_digestsize(rtfm);
1887 		params.last = 1;
1888 		params.more = 0;
1889 		params.scmd1 = req->nbytes + req_ctx->data_len;
1891 	}
1892 	params.bfr_len = 0;
1893 	req_ctx->hctx_wr.result = 1;
1894 	req_ctx->hctx_wr.srcsg = req->src;
1895 	req_ctx->data_len += params.bfr_len + params.sg_len;
1896 
1897 	if (req->nbytes == 0) {
1898 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1899 		params.more = 1;
1900 		params.bfr_len = bs;
1901 	}
1902 
1903 	skb = create_hash_wr(req, &params);
1904 	if (IS_ERR(skb)) {
1905 		error = PTR_ERR(skb);
1906 		goto unmap;
1907 	}
1908 	req_ctx->hctx_wr.processed += params.sg_len;
1909 	skb->dev = u_ctx->lldi.ports[0];
1910 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1911 	chcr_send_wr(skb);
1912 	return isfull ? -EBUSY : -EINPROGRESS;
1913 unmap:
1914 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1915 err:
1916 	chcr_dec_wrcount(dev);
1917 	return error;
1918 }
1919 
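/*
 * Issue the next work request of a transfer that spans multiple WRs,
 * resuming from the scatterlist position saved in hctx_wr and sized
 * to fit HASH_SPACE_LEFT() bytes of payload.
 */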
1920 static int chcr_ahash_continue(struct ahash_request *req)
1921 {
1922 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1923 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1924 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1925 	struct uld_ctx *u_ctx = NULL;
1926 	struct sk_buff *skb;
1927 	struct hash_wr_param params;
1928 	u8  bs;
1929 	int error;
1930 
1931 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1932 	u_ctx = ULD_CTX(h_ctx(rtfm));
1933 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1934 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1935 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1936 		params.kctx_len *= 2;
1937 		params.opad_needed = 1;
1938 	} else {
1939 		params.opad_needed = 0;
1940 	}
1941 	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1942 					    HASH_SPACE_LEFT(params.kctx_len),
1943 					    hctx_wr->src_ofst);
1944 	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1945 		params.sg_len = req->nbytes - hctx_wr->processed;
1946 	if (!hctx_wr->result ||
1947 	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1948 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1949 			params.kctx_len /= 2;
1950 			params.opad_needed = 0;
1951 		}
1952 		params.last = 0;
1953 		params.more = 1;
1954 		params.sg_len = rounddown(params.sg_len, bs);
1955 		params.hash_size = params.alg_prm.result_size;
1956 		params.scmd1 = 0;
1957 	} else {
1958 		params.last = 1;
1959 		params.more = 0;
1960 		params.hash_size = crypto_ahash_digestsize(rtfm);
1961 		params.scmd1 = reqctx->data_len + params.sg_len;
1962 	}
1963 	params.bfr_len = 0;
1964 	reqctx->data_len += params.sg_len;
1965 	skb = create_hash_wr(req, &params);
1966 	if (IS_ERR(skb)) {
1967 		error = PTR_ERR(skb);
1968 		goto err;
1969 	}
1970 	hctx_wr->processed += params.sg_len;
1971 	skb->dev = u_ctx->lldi.ports[0];
1972 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1973 	chcr_send_wr(skb);
1974 	return 0;
1975 err:
1976 	return error;
1977 }
1978 
1979 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
1980 					  unsigned char *input,
1981 					  int err)
1982 {
1983 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1984 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1985 	int digestsize, updated_digestsize;
1986 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1987 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1988 	struct chcr_dev *dev = h_ctx(tfm)->dev;
1989 
1990 	if (input == NULL)
1991 		goto out;
1992 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
1993 	updated_digestsize = digestsize;
1994 	if (digestsize == SHA224_DIGEST_SIZE)
1995 		updated_digestsize = SHA256_DIGEST_SIZE;
1996 	else if (digestsize == SHA384_DIGEST_SIZE)
1997 		updated_digestsize = SHA512_DIGEST_SIZE;
1998 
1999 	if (hctx_wr->dma_addr) {
2000 		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2001 				 hctx_wr->dma_len, DMA_TO_DEVICE);
2002 		hctx_wr->dma_addr = 0;
2003 	}
2004 	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2005 				 req->nbytes)) {
2006 		if (hctx_wr->result == 1) {
2007 			hctx_wr->result = 0;
2008 			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2009 			       digestsize);
2010 		} else {
2011 			memcpy(reqctx->partial_hash,
2012 			       input + sizeof(struct cpl_fw6_pld),
2013 			       updated_digestsize);
2015 		}
2016 		goto unmap;
2017 	}
2018 	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2019 	       updated_digestsize);
2020 
2021 	err = chcr_ahash_continue(req);
2022 	if (err)
2023 		goto unmap;
2024 	return;
2025 unmap:
2026 	if (hctx_wr->is_sg_map)
2027 		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2028 
2030 out:
2031 	chcr_dec_wrcount(dev);
2032 	req->base.complete(&req->base, err);
2033 }
2034 
2035 /*
2036  *	chcr_handle_resp - Unmap the DMA buffers associated with the request
2037  *	@req: crypto request
2038  */
2039 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2040 			 int err)
2041 {
2042 	struct crypto_tfm *tfm = req->tfm;
2043 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2044 	struct adapter *adap = padap(ctx->dev);
2045 
2046 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2047 	case CRYPTO_ALG_TYPE_AEAD:
2048 		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2049 		break;
2050 
	case CRYPTO_ALG_TYPE_SKCIPHER:
		chcr_handle_cipher_resp(skcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
		break;
	}
2058 	atomic_inc(&adap->chcr_stats.complete);
2059 	return err;
2060 }
2061 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2062 {
2063 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2064 	struct chcr_ahash_req_ctx *state = out;
2065 
2066 	state->reqlen = req_ctx->reqlen;
2067 	state->data_len = req_ctx->data_len;
2068 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2069 	memcpy(state->partial_hash, req_ctx->partial_hash,
2070 	       CHCR_HASH_MAX_DIGEST_SIZE);
2071 	chcr_init_hctx_per_wr(state);
2072 	return 0;
2073 }
2074 
2075 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2076 {
2077 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2078 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2079 
2080 	req_ctx->reqlen = state->reqlen;
2081 	req_ctx->data_len = state->data_len;
2082 	req_ctx->reqbfr = req_ctx->bfr1;
2083 	req_ctx->skbfr = req_ctx->bfr2;
2084 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2085 	memcpy(req_ctx->partial_hash, state->partial_hash,
2086 	       CHCR_HASH_MAX_DIGEST_SIZE);
2087 	chcr_init_hctx_per_wr(req_ctx);
2088 	return 0;
2089 }
2090 
2091 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2092 			     unsigned int keylen)
2093 {
2094 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2095 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2096 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2097 	unsigned int i, err = 0, updated_digestsize;
2098 
2099 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2100 
	/* Use the key to calculate the ipad and opad. The ipad is sent with
	 * the first request's data and the opad with the final hash result;
	 * they live in hmacctx->ipad and hmacctx->opad respectively.
	 */
2105 	shash->tfm = hmacctx->base_hash;
2106 	if (keylen > bs) {
2107 		err = crypto_shash_digest(shash, key, keylen,
2108 					  hmacctx->ipad);
2109 		if (err)
2110 			goto out;
2111 		keylen = digestsize;
2112 	} else {
2113 		memcpy(hmacctx->ipad, key, keylen);
2114 	}
2115 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2116 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2117 
2118 	for (i = 0; i < bs / sizeof(int); i++) {
2119 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2120 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2121 	}
2122 
2123 	updated_digestsize = digestsize;
2124 	if (digestsize == SHA224_DIGEST_SIZE)
2125 		updated_digestsize = SHA256_DIGEST_SIZE;
2126 	else if (digestsize == SHA384_DIGEST_SIZE)
2127 		updated_digestsize = SHA512_DIGEST_SIZE;
2128 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2129 					hmacctx->ipad, digestsize);
2130 	if (err)
2131 		goto out;
2132 	chcr_change_order(hmacctx->ipad, updated_digestsize);
2133 
2134 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2135 					hmacctx->opad, digestsize);
2136 	if (err)
2137 		goto out;
2138 	chcr_change_order(hmacctx->opad, updated_digestsize);
2139 out:
2140 	return err;
2141 }
2142 
2143 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2144 			       unsigned int key_len)
2145 {
2146 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2147 	unsigned short context_size = 0;
2148 	int err;
2149 
2150 	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2151 	if (err)
2152 		goto badkey_err;
2153 
2154 	memcpy(ablkctx->key, key, key_len);
2155 	ablkctx->enckey_len = key_len;
2156 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2157 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2158 	ablkctx->key_ctx_hdr =
2159 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2160 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2161 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2162 				 CHCR_KEYCTX_NO_KEY, 1,
2163 				 0, context_size);
2164 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2165 	return 0;
2166 badkey_err:
2167 	ablkctx->enckey_len = 0;
2168 
2169 	return err;
2170 }
2171 
2172 static int chcr_sha_init(struct ahash_request *areq)
2173 {
2174 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2175 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2176 	int digestsize =  crypto_ahash_digestsize(tfm);
2177 
2178 	req_ctx->data_len = 0;
2179 	req_ctx->reqlen = 0;
2180 	req_ctx->reqbfr = req_ctx->bfr1;
2181 	req_ctx->skbfr = req_ctx->bfr2;
2182 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2183 
2184 	return 0;
2185 }
2186 
2187 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2188 {
2189 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2190 				 sizeof(struct chcr_ahash_req_ctx));
2191 	return chcr_device_init(crypto_tfm_ctx(tfm));
2192 }
2193 
2194 static int chcr_hmac_init(struct ahash_request *areq)
2195 {
2196 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2197 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2198 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2199 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2200 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2201 
2202 	chcr_sha_init(areq);
2203 	req_ctx->data_len = bs;
2204 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2205 		if (digestsize == SHA224_DIGEST_SIZE)
2206 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2207 			       SHA256_DIGEST_SIZE);
2208 		else if (digestsize == SHA384_DIGEST_SIZE)
2209 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2210 			       SHA512_DIGEST_SIZE);
2211 		else
2212 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2213 			       digestsize);
2214 	}
2215 	return 0;
2216 }
2217 
2218 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2219 {
2220 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2221 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2222 	unsigned int digestsize =
2223 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2224 
2225 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2226 				 sizeof(struct chcr_ahash_req_ctx));
2227 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2228 	if (IS_ERR(hmacctx->base_hash))
2229 		return PTR_ERR(hmacctx->base_hash);
2230 	return chcr_device_init(crypto_tfm_ctx(tfm));
2231 }
2232 
2233 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2234 {
2235 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2236 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2237 
2238 	if (hmacctx->base_hash) {
2239 		chcr_free_shash(hmacctx->base_hash);
2240 		hmacctx->base_hash = NULL;
2241 	}
2242 }
2243 
2244 inline void chcr_aead_common_exit(struct aead_request *req)
2245 {
2246 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2247 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2248 	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2249 
2250 	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2251 }
2252 
2253 static int chcr_aead_common_init(struct aead_request *req)
2254 {
2255 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2256 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2257 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2258 	unsigned int authsize = crypto_aead_authsize(tfm);
2259 	int error = -EINVAL;
2260 
2261 	/* validate key size */
2262 	if (aeadctx->enckey_len == 0)
2263 		goto err;
2264 	if (reqctx->op && req->cryptlen < authsize)
2265 		goto err;
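	/* b0_len is non-zero only for CCM, which needs scratch space for
	 * the B0 block and the encoded AAD length.
	 */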
2266 	if (reqctx->b0_len)
2267 		reqctx->scratch_pad = reqctx->iv + IV;
2268 	else
2269 		reqctx->scratch_pad = NULL;
2270 
2271 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2272 				  reqctx->op);
2273 	if (error) {
2274 		error = -ENOMEM;
2275 		goto err;
2276 	}
2277 
2278 	return 0;
2279 err:
2280 	return error;
2281 }
2282 
2283 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2284 				   int aadmax, int wrlen,
2285 				   unsigned short op_type)
2286 {
2287 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2288 
2289 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2290 	    dst_nents > MAX_DSGL_ENT ||
2291 	    (req->assoclen > aadmax) ||
2292 	    (wrlen > SGE_MAX_WR_LEN))
2293 		return 1;
2294 	return 0;
2295 }
2296 
2297 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2298 {
2299 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2300 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2301 	struct aead_request *subreq = aead_request_ctx(req);
2302 
2303 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2304 	aead_request_set_callback(subreq, req->base.flags,
2305 				  req->base.complete, req->base.data);
2306 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2307 				 req->iv);
2308 	aead_request_set_ad(subreq, req->assoclen);
2309 	return op_type ? crypto_aead_decrypt(subreq) :
2310 		crypto_aead_encrypt(subreq);
2311 }
2312 
2313 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2314 					 unsigned short qid,
2315 					 int size)
2316 {
2317 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2318 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2319 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2320 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2321 	struct sk_buff *skb = NULL;
2322 	struct chcr_wr *chcr_req;
2323 	struct cpl_rx_phys_dsgl *phys_cpl;
2324 	struct ulptx_sgl *ulptx;
2325 	unsigned int transhdr_len;
2326 	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2327 	unsigned int   kctx_len = 0, dnents, snents;
2328 	unsigned int  authsize = crypto_aead_authsize(tfm);
2329 	int error = -EINVAL;
2330 	u8 *ivptr;
2331 	int null = 0;
2332 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2333 		GFP_ATOMIC;
2334 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2335 
2336 	if (req->cryptlen == 0)
2337 		return NULL;
2338 
2339 	reqctx->b0_len = 0;
2340 	error = chcr_aead_common_init(req);
2341 	if (error)
2342 		return ERR_PTR(error);
2343 
2344 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2345 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2346 		null = 1;
2347 	}
2348 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2349 		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2350 	dnents += MIN_AUTH_SG; // For IV
2351 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2352 			       CHCR_SRC_SG_SIZE, 0);
2353 	dst_size = get_space_for_phys_dsgl(dnents);
2354 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2355 		- sizeof(chcr_req->key_ctx);
2356 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2357 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2358 			SGE_MAX_WR_LEN;
2359 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2360 			: (sgl_len(snents) * 8);
2361 	transhdr_len += temp;
2362 	transhdr_len = roundup(transhdr_len, 16);
2363 
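	/* Requests too large for one WR (AAD, dst SG count or WR length)
	 * are punted to the software fallback cipher.
	 */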
2364 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2365 				    transhdr_len, reqctx->op)) {
2366 		atomic_inc(&adap->chcr_stats.fallback);
2367 		chcr_aead_common_exit(req);
2368 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2369 	}
2370 	skb = alloc_skb(transhdr_len, flags);
2371 	if (!skb) {
2372 		error = -ENOMEM;
2373 		goto err;
2374 	}
2375 
2376 	chcr_req = __skb_put_zero(skb, transhdr_len);
2377 
2378 	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2379 
2380 	/*
2381 	 * Input order	is AAD,IV and Payload. where IV should be included as
2382 	 * the part of authdata. All other fields should be filled according
2383 	 * to the hardware spec
2384 	 */
2385 	chcr_req->sec_cpl.op_ivinsrtofst =
2386 		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
2387 	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2388 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2389 					null ? 0 : 1 + IV,
2390 					null ? 0 : IV + req->assoclen,
2391 					req->assoclen + IV + 1,
2392 					(temp & 0x1F0) >> 4);
2393 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2394 					temp & 0xF,
2395 					null ? 0 : req->assoclen + IV + 1,
2396 					temp, temp);
2397 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2398 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2399 		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2400 	else
2401 		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2402 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2403 					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2404 					temp,
2405 					actx->auth_mode, aeadctx->hmac_ctrl,
2406 					IV >> 1);
2407 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2408 					 0, 0, dst_size);
2409 
2410 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2411 	if (reqctx->op == CHCR_ENCRYPT_OP ||
2412 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2413 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2414 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2415 		       aeadctx->enckey_len);
2416 	else
2417 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2418 		       aeadctx->enckey_len);
2419 
2420 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2421 	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2422 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2423 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2424 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2425 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2426 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2427 		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2428 		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2429 				CTR_RFC3686_IV_SIZE);
2430 		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2431 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2432 	} else {
2433 		memcpy(ivptr, req->iv, IV);
2434 	}
2435 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2436 	chcr_add_aead_src_ent(req, ulptx);
2437 	atomic_inc(&adap->chcr_stats.cipher_rqst);
2438 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2439 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2440 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2441 		   transhdr_len, temp, 0);
2442 	reqctx->skb = skb;
2443 
2444 	return skb;
2445 err:
2446 	chcr_aead_common_exit(req);
2447 
2448 	return ERR_PTR(error);
2449 }
2450 
2451 int chcr_aead_dma_map(struct device *dev,
2452 		      struct aead_request *req,
2453 		      unsigned short op_type)
2454 {
2455 	int error;
2456 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2457 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2458 	unsigned int authsize = crypto_aead_authsize(tfm);
2459 	int dst_size;
2460 
2461 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2462 				-authsize : authsize);
2463 	if (!req->cryptlen || !dst_size)
2464 		return 0;
2465 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2466 					DMA_BIDIRECTIONAL);
2467 	if (dma_mapping_error(dev, reqctx->iv_dma))
2468 		return -ENOMEM;
2469 	if (reqctx->b0_len)
2470 		reqctx->b0_dma = reqctx->iv_dma + IV;
2471 	else
2472 		reqctx->b0_dma = 0;
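	/* In-place requests need a single bidirectional mapping; otherwise
	 * src and dst are mapped separately.
	 */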
2473 	if (req->src == req->dst) {
2474 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2475 				   DMA_BIDIRECTIONAL);
2476 		if (!error)
2477 			goto err;
2478 	} else {
2479 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2480 				   DMA_TO_DEVICE);
2481 		if (!error)
2482 			goto err;
2483 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2484 				   DMA_FROM_DEVICE);
2485 		if (!error) {
2486 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2487 				   DMA_TO_DEVICE);
2488 			goto err;
2489 		}
2490 	}
2491 
2492 	return 0;
2493 err:
	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
2495 	return -ENOMEM;
2496 }
2497 
2498 void chcr_aead_dma_unmap(struct device *dev,
2499 			 struct aead_request *req,
2500 			 unsigned short op_type)
2501 {
2502 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2503 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2504 	unsigned int authsize = crypto_aead_authsize(tfm);
2505 	int dst_size;
2506 
2507 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2508 					-authsize : authsize);
2509 	if (!req->cryptlen || !dst_size)
2510 		return;
2511 
2512 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2513 					DMA_BIDIRECTIONAL);
2514 	if (req->src == req->dst) {
2515 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2516 				   DMA_BIDIRECTIONAL);
2517 	} else {
2518 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2519 				   DMA_TO_DEVICE);
2520 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2521 				   DMA_FROM_DEVICE);
2522 	}
2523 }
2524 
2525 void chcr_add_aead_src_ent(struct aead_request *req,
2526 			   struct ulptx_sgl *ulptx)
2527 {
2528 	struct ulptx_walk ulp_walk;
2529 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2530 
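	/* Small requests are copied inline into the WR as immediate data;
	 * larger ones are described by a ULPTX scatter-gather list.
	 */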
2531 	if (reqctx->imm) {
2532 		u8 *buf = (u8 *)ulptx;
2533 
2534 		if (reqctx->b0_len) {
2535 			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2536 			buf += reqctx->b0_len;
2537 		}
2538 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2539 				   buf, req->cryptlen + req->assoclen, 0);
2540 	} else {
2541 		ulptx_walk_init(&ulp_walk, ulptx);
2542 		if (reqctx->b0_len)
2543 			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2544 					    reqctx->b0_dma);
2545 		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2546 				  req->assoclen,  0);
2547 		ulptx_walk_end(&ulp_walk);
2548 	}
2549 }
2550 
2551 void chcr_add_aead_dst_ent(struct aead_request *req,
2552 			   struct cpl_rx_phys_dsgl *phys_cpl,
2553 			   unsigned short qid)
2554 {
2555 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2556 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2557 	struct dsgl_walk dsgl_walk;
2558 	unsigned int authsize = crypto_aead_authsize(tfm);
2559 	struct chcr_context *ctx = a_ctx(tfm);
2560 	u32 temp;
2561 
2562 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2563 	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2564 	temp = req->assoclen + req->cryptlen +
2565 		(reqctx->op ? -authsize : authsize);
2566 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2567 	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2568 }
2569 
2570 void chcr_add_cipher_src_ent(struct skcipher_request *req,
2571 			     void *ulptx,
2572 			     struct  cipher_wr_param *wrparam)
2573 {
2574 	struct ulptx_walk ulp_walk;
2575 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2576 	u8 *buf = ulptx;
2577 
2578 	memcpy(buf, reqctx->iv, IV);
2579 	buf += IV;
2580 	if (reqctx->imm) {
2581 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2582 				   buf, wrparam->bytes, reqctx->processed);
2583 	} else {
2584 		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2585 		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2586 				  reqctx->src_ofst);
2587 		reqctx->srcsg = ulp_walk.last_sg;
2588 		reqctx->src_ofst = ulp_walk.last_sg_len;
2589 		ulptx_walk_end(&ulp_walk);
2590 	}
2591 }
2592 
2593 void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2594 			     struct cpl_rx_phys_dsgl *phys_cpl,
2595 			     struct  cipher_wr_param *wrparam,
2596 			     unsigned short qid)
2597 {
2598 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2599 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2600 	struct chcr_context *ctx = c_ctx(tfm);
2601 	struct dsgl_walk dsgl_walk;
2602 
2603 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2604 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2605 			 reqctx->dst_ofst);
2606 	reqctx->dstsg = dsgl_walk.last_sg;
2607 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2608 
2609 	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2610 }
2611 
2612 void chcr_add_hash_src_ent(struct ahash_request *req,
2613 			   struct ulptx_sgl *ulptx,
2614 			   struct hash_wr_param *param)
2615 {
2616 	struct ulptx_walk ulp_walk;
2617 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2618 
2619 	if (reqctx->hctx_wr.imm) {
2620 		u8 *buf = (u8 *)ulptx;
2621 
2622 		if (param->bfr_len) {
2623 			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2624 			buf += param->bfr_len;
2625 		}
2626 
2627 		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2628 				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2629 				   param->sg_len, 0);
2630 	} else {
2631 		ulptx_walk_init(&ulp_walk, ulptx);
2632 		if (param->bfr_len)
2633 			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2634 					    reqctx->hctx_wr.dma_addr);
2635 		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2636 				  param->sg_len, reqctx->hctx_wr.src_ofst);
2637 		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2638 		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2639 		ulptx_walk_end(&ulp_walk);
2640 	}
2641 }
2642 
2643 int chcr_hash_dma_map(struct device *dev,
2644 		      struct ahash_request *req)
2645 {
2646 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2647 	int error = 0;
2648 
2649 	if (!req->nbytes)
2650 		return 0;
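	/* dma_map_sg() returns the number of mapped entries; 0 means failure. */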
2651 	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2652 			   DMA_TO_DEVICE);
2653 	if (!error)
2654 		return -ENOMEM;
2655 	req_ctx->hctx_wr.is_sg_map = 1;
2656 	return 0;
2657 }
2658 
2659 void chcr_hash_dma_unmap(struct device *dev,
2660 			 struct ahash_request *req)
2661 {
2662 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2663 
2664 	if (!req->nbytes)
2665 		return;
2666 
2667 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2668 			   DMA_TO_DEVICE);
2669 	req_ctx->hctx_wr.is_sg_map = 0;
2670 
2671 }
2672 
2673 int chcr_cipher_dma_map(struct device *dev,
2674 			struct skcipher_request *req)
2675 {
2676 	int error;
2677 
2678 	if (req->src == req->dst) {
2679 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2680 				   DMA_BIDIRECTIONAL);
2681 		if (!error)
2682 			goto err;
2683 	} else {
2684 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2685 				   DMA_TO_DEVICE);
2686 		if (!error)
2687 			goto err;
2688 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2689 				   DMA_FROM_DEVICE);
2690 		if (!error) {
2691 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2692 				   DMA_TO_DEVICE);
2693 			goto err;
2694 		}
2695 	}
2696 
2697 	return 0;
2698 err:
2699 	return -ENOMEM;
2700 }
2701 
2702 void chcr_cipher_dma_unmap(struct device *dev,
2703 			   struct skcipher_request *req)
2704 {
2705 	if (req->src == req->dst) {
2706 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2707 				   DMA_BIDIRECTIONAL);
2708 	} else {
2709 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2710 				   DMA_TO_DEVICE);
2711 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2712 				   DMA_FROM_DEVICE);
2713 	}
2714 }
2715 
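/*
 * Encode the CCM message length big-endian into the last csize bytes
 * of the 16-byte block, per RFC 3610; e.g. msglen 0x012345 with
 * csize 3 writes 01 23 45 into the block's last three bytes.
 */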
2716 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2717 {
2718 	__be32 data;
2719 
2720 	memset(block, 0, csize);
2721 	block += csize;
2722 
2723 	if (csize >= 4)
2724 		csize = 4;
2725 	else if (msglen > (unsigned int)(1 << (8 * csize)))
2726 		return -EOVERFLOW;
2727 
2728 	data = cpu_to_be32(msglen);
2729 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2730 
2731 	return 0;
2732 }
2733 
2734 static int generate_b0(struct aead_request *req, u8 *ivptr,
2735 			unsigned short op_type)
2736 {
2737 	unsigned int l, lp, m;
2738 	int rc;
2739 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2740 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2741 	u8 *b0 = reqctx->scratch_pad;
2742 
2743 	m = crypto_aead_authsize(aead);
2744 
2745 	memcpy(b0, ivptr, 16);
2746 
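	/* b0[0] is the B0 flags byte; bits 0-2 carry L' = L - 1. */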
2747 	lp = b0[0];
2748 	l = lp + 1;
2749 
2750 	/* set m, bits 3-5 */
2751 	*b0 |= (8 * ((m - 2) / 2));
2752 
2753 	/* set adata, bit 6, if associated data is used */
2754 	if (req->assoclen)
2755 		*b0 |= 64;
2756 	rc = set_msg_len(b0 + 16 - l,
2757 			 (op_type == CHCR_DECRYPT_OP) ?
2758 			 req->cryptlen - m : req->cryptlen, l);
2759 
2760 	return rc;
2761 }
2762 
2763 static inline int crypto_ccm_check_iv(const u8 *iv)
2764 {
2765 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2766 	if (iv[0] < 1 || iv[0] > 7)
2767 		return -EINVAL;
2768 
2769 	return 0;
2770 }
2771 
2772 static int ccm_format_packet(struct aead_request *req,
2773 			     u8 *ivptr,
2774 			     unsigned int sub_type,
2775 			     unsigned short op_type,
2776 			     unsigned int assoclen)
2777 {
2778 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2779 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2780 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2781 	int rc = 0;
2782 
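	/* RFC 4309 fixes L' = 3 and assembles the IV as
	 * flags | 3-byte salt | 8-byte IV | 4 zero counter bytes;
	 * plain CCM takes the caller's 16-byte IV as-is.
	 */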
2783 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2784 		ivptr[0] = 3;
2785 		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2786 		memcpy(ivptr + 4, req->iv, 8);
2787 		memset(ivptr + 12, 0, 4);
2788 	} else {
2789 		memcpy(ivptr, req->iv, 16);
2790 	}
	if (assoclen)
		*(__be16 *)(reqctx->scratch_pad + 16) = htons(assoclen);
2794 
2795 	rc = generate_b0(req, ivptr, op_type);
2796 	/* zero the ctr value */
2797 	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2798 	return rc;
2799 }
2800 
2801 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2802 				  unsigned int dst_size,
2803 				  struct aead_request *req,
2804 				  unsigned short op_type)
2805 {
2806 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2807 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2808 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2809 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2810 	unsigned int c_id = a_ctx(tfm)->tx_chan_id;
2811 	unsigned int ccm_xtra;
2812 	unsigned char tag_offset = 0, auth_offset = 0;
2813 	unsigned int assoclen;
2814 
2815 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2816 		assoclen = req->assoclen - 8;
2817 	else
2818 		assoclen = req->assoclen;
2819 	ccm_xtra = CCM_B0_SIZE +
2820 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2821 
2822 	auth_offset = req->cryptlen ?
2823 		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2824 	if (op_type == CHCR_DECRYPT_OP) {
2825 		if (crypto_aead_authsize(tfm) != req->cryptlen)
2826 			tag_offset = crypto_aead_authsize(tfm);
2827 		else
2828 			auth_offset = 0;
2829 	}
2830 
2832 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2833 					 2, 1);
2834 	sec_cpl->pldlen =
2835 		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will always be B0, so AAD start is always 1 */
2837 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2838 				1 + IV,	IV + assoclen + ccm_xtra,
2839 				req->assoclen + IV + 1 + ccm_xtra, 0);
2840 
2841 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2842 					auth_offset, tag_offset,
2843 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2844 					crypto_aead_authsize(tfm));
2845 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2846 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2847 					cipher_mode, mac_mode,
2848 					aeadctx->hmac_ctrl, IV >> 1);
2849 
2850 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2851 					0, dst_size);
2852 }
2853 
2854 static int aead_ccm_validate_input(unsigned short op_type,
2855 				   struct aead_request *req,
2856 				   struct chcr_aead_ctx *aeadctx,
2857 				   unsigned int sub_type)
2858 {
2859 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2860 		if (crypto_ccm_check_iv(req->iv)) {
2861 			pr_err("CCM: IV check fails\n");
2862 			return -EINVAL;
2863 		}
2864 	} else {
2865 		if (req->assoclen != 16 && req->assoclen != 20) {
2866 			pr_err("RFC4309: Invalid AAD length %d\n",
2867 			       req->assoclen);
2868 			return -EINVAL;
2869 		}
2870 	}
2871 	return 0;
2872 }
2873 
2874 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2875 					  unsigned short qid,
2876 					  int size)
2877 {
2878 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2879 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2880 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2881 	struct sk_buff *skb = NULL;
2882 	struct chcr_wr *chcr_req;
2883 	struct cpl_rx_phys_dsgl *phys_cpl;
2884 	struct ulptx_sgl *ulptx;
2885 	unsigned int transhdr_len;
2886 	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2887 	unsigned int sub_type, assoclen = req->assoclen;
2888 	unsigned int authsize = crypto_aead_authsize(tfm);
2889 	int error = -EINVAL;
2890 	u8 *ivptr;
2891 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2892 		GFP_ATOMIC;
2893 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2894 
2895 	sub_type = get_aead_subtype(tfm);
2896 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2897 		assoclen -= 8;
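	/* RFC 4309 carries the 8-byte IV inside the AAD; strip it here. */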
2898 	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2899 	error = chcr_aead_common_init(req);
2900 	if (error)
2901 		return ERR_PTR(error);
2902 
2903 	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2904 	if (error)
2905 		goto err;
2906 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
2907 			+ (reqctx->op ? -authsize : authsize),
2908 			CHCR_DST_SG_SIZE, 0);
2909 	dnents += MIN_CCM_SG; // For IV and B0
2910 	dst_size = get_space_for_phys_dsgl(dnents);
2911 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2912 			       CHCR_SRC_SG_SIZE, 0);
	snents += MIN_CCM_SG; // For B0
2914 	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2915 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2916 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2917 		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
2918 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
2919 				     reqctx->b0_len, 16) :
2920 		(sgl_len(snents) *  8);
2921 	transhdr_len += temp;
2922 	transhdr_len = roundup(transhdr_len, 16);
2923 
2924 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2925 				reqctx->b0_len, transhdr_len, reqctx->op)) {
2926 		atomic_inc(&adap->chcr_stats.fallback);
2927 		chcr_aead_common_exit(req);
2928 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2929 	}
2930 	skb = alloc_skb(transhdr_len,  flags);
2931 
2932 	if (!skb) {
2933 		error = -ENOMEM;
2934 		goto err;
2935 	}
2936 
2937 	chcr_req = __skb_put_zero(skb, transhdr_len);
2938 
2939 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2940 
2941 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2942 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2943 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2944 			aeadctx->key, aeadctx->enckey_len);
2945 
2946 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2947 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2948 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2949 	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
2950 	if (error)
2951 		goto dstmap_fail;
2952 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2953 	chcr_add_aead_src_ent(req, ulptx);
2954 
2955 	atomic_inc(&adap->chcr_stats.aead_rqst);
2956 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2957 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
2958 		reqctx->b0_len) : 0);
2959 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2960 		    transhdr_len, temp, 0);
2961 	reqctx->skb = skb;
2962 
2963 	return skb;
2964 dstmap_fail:
2965 	kfree_skb(skb);
2966 err:
2967 	chcr_aead_common_exit(req);
2968 	return ERR_PTR(error);
2969 }
2970 
2971 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2972 				     unsigned short qid,
2973 				     int size)
2974 {
2975 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2976 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2977 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2978 	struct sk_buff *skb = NULL;
2979 	struct chcr_wr *chcr_req;
2980 	struct cpl_rx_phys_dsgl *phys_cpl;
2981 	struct ulptx_sgl *ulptx;
2982 	unsigned int transhdr_len, dnents = 0, snents;
2983 	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2984 	unsigned int authsize = crypto_aead_authsize(tfm);
2985 	int error = -EINVAL;
2986 	u8 *ivptr;
2987 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2988 		GFP_ATOMIC;
2989 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2990 
2991 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2992 		assoclen = req->assoclen - 8;
2993 
2994 	reqctx->b0_len = 0;
2995 	error = chcr_aead_common_init(req);
2996 	if (error)
2997 		return ERR_PTR(error);
2998 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2999 				(reqctx->op ? -authsize : authsize),
3000 				CHCR_DST_SG_SIZE, 0);
3001 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3002 			       CHCR_SRC_SG_SIZE, 0);
3003 	dnents += MIN_GCM_SG; // For IV
3004 	dst_size = get_space_for_phys_dsgl(dnents);
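	/* Key context: AES key rounded up to 16 bytes, then the GHASH key H. */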
3005 	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3006 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3007 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3008 			SGE_MAX_WR_LEN;
3009 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3010 		(sgl_len(snents) * 8);
3011 	transhdr_len += temp;
3012 	transhdr_len = roundup(transhdr_len, 16);
3013 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3014 			    transhdr_len, reqctx->op)) {
3016 		atomic_inc(&adap->chcr_stats.fallback);
3017 		chcr_aead_common_exit(req);
3018 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3019 	}
3020 	skb = alloc_skb(transhdr_len, flags);
3021 	if (!skb) {
3022 		error = -ENOMEM;
3023 		goto err;
3024 	}
3025 
3026 	chcr_req = __skb_put_zero(skb, transhdr_len);
3027 
	// Offset of tag from end
3029 	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3030 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3031 					a_ctx(tfm)->tx_chan_id, 2, 1);
3032 	chcr_req->sec_cpl.pldlen =
3033 		htonl(req->assoclen + IV + req->cryptlen);
3034 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3035 					assoclen ? 1 + IV : 0,
3036 					assoclen ? IV + assoclen : 0,
3037 					req->assoclen + IV + 1, 0);
3038 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3039 			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3040 						temp, temp);
3041 	chcr_req->sec_cpl.seqno_numivs =
3042 			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3043 					CHCR_ENCRYPT_OP) ? 1 : 0,
3044 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3045 					CHCR_SCMD_AUTH_MODE_GHASH,
3046 					aeadctx->hmac_ctrl, IV >> 1);
3047 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3048 					0, 0, dst_size);
3049 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3050 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3051 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3052 	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3053 
3054 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3055 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
	/* Prepare the 16-byte IV: SALT | IV | 0x00000001 */
3058 	if (get_aead_subtype(tfm) ==
3059 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3060 		memcpy(ivptr, aeadctx->salt, 4);
3061 		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3062 	} else {
3063 		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3064 	}
	*(__be32 *)(ivptr + 12) = cpu_to_be32(0x01);
3066 
3067 	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3068 
3069 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3070 	chcr_add_aead_src_ent(req, ulptx);
3071 	atomic_inc(&adap->chcr_stats.aead_rqst);
3072 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3073 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3074 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3075 		    transhdr_len, temp, reqctx->verify);
3076 	reqctx->skb = skb;
3077 	return skb;
3078 
3079 err:
3080 	chcr_aead_common_exit(req);
3081 	return ERR_PTR(error);
3082 }
3083 
3086 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3087 {
3088 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3089 	struct aead_alg *alg = crypto_aead_alg(tfm);
3090 
3091 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3092 					       CRYPTO_ALG_NEED_FALLBACK |
3093 					       CRYPTO_ALG_ASYNC);
3094 	if  (IS_ERR(aeadctx->sw_cipher))
3095 		return PTR_ERR(aeadctx->sw_cipher);
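	/* The request context must be able to hold either the hardware
	 * state or a fallback sub-request for the software cipher.
	 */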
3096 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3097 				 sizeof(struct aead_request) +
3098 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3099 	return chcr_device_init(a_ctx(tfm));
3100 }
3101 
3102 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3103 {
3104 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3105 
3106 	crypto_free_aead(aeadctx->sw_cipher);
3107 }
3108 
3109 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3110 					unsigned int authsize)
3111 {
3112 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3113 
3114 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3115 	aeadctx->mayverify = VERIFY_HW;
3116 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3117 }
3118 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3119 				    unsigned int authsize)
3120 {
3121 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3122 	u32 maxauth = crypto_aead_maxauthsize(tfm);
3123 
	/* For SHA-1 the IPsec authsize is 12 rather than 10, so
	 * maxauthsize / 2 does not hold; the authsize == 12 check must
	 * therefore come before the authsize == (maxauth >> 1) check.
	 */
3128 	if (authsize == ICV_4) {
3129 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3130 		aeadctx->mayverify = VERIFY_HW;
3131 	} else if (authsize == ICV_6) {
3132 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3133 		aeadctx->mayverify = VERIFY_HW;
3134 	} else if (authsize == ICV_10) {
3135 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3136 		aeadctx->mayverify = VERIFY_HW;
3137 	} else if (authsize == ICV_12) {
3138 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3139 		aeadctx->mayverify = VERIFY_HW;
3140 	} else if (authsize == ICV_14) {
3141 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3142 		aeadctx->mayverify = VERIFY_HW;
3143 	} else if (authsize == (maxauth >> 1)) {
3144 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3145 		aeadctx->mayverify = VERIFY_HW;
3146 	} else if (authsize == maxauth) {
3147 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3148 		aeadctx->mayverify = VERIFY_HW;
3149 	} else {
3150 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3151 		aeadctx->mayverify = VERIFY_SW;
3152 	}
3153 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3154 }
3155 
3157 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3158 {
3159 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3160 
3161 	switch (authsize) {
3162 	case ICV_4:
3163 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3164 		aeadctx->mayverify = VERIFY_HW;
3165 		break;
3166 	case ICV_8:
3167 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3168 		aeadctx->mayverify = VERIFY_HW;
3169 		break;
3170 	case ICV_12:
3171 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3172 		aeadctx->mayverify = VERIFY_HW;
3173 		break;
3174 	case ICV_14:
3175 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3176 		aeadctx->mayverify = VERIFY_HW;
3177 		break;
3178 	case ICV_16:
3179 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3180 		aeadctx->mayverify = VERIFY_HW;
3181 		break;
3182 	case ICV_13:
3183 	case ICV_15:
3184 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3185 		aeadctx->mayverify = VERIFY_SW;
3186 		break;
3187 	default:
3188 		return -EINVAL;
3189 	}
3190 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3191 }
3192 
3193 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3194 					  unsigned int authsize)
3195 {
3196 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3197 
3198 	switch (authsize) {
3199 	case ICV_8:
3200 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3201 		aeadctx->mayverify = VERIFY_HW;
3202 		break;
3203 	case ICV_12:
3204 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3205 		aeadctx->mayverify = VERIFY_HW;
3206 		break;
3207 	case ICV_16:
3208 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3209 		aeadctx->mayverify = VERIFY_HW;
3210 		break;
3211 	default:
3212 		return -EINVAL;
3213 	}
3214 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3215 }
3216 
3217 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3218 				unsigned int authsize)
3219 {
3220 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3221 
3222 	switch (authsize) {
3223 	case ICV_4:
3224 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3225 		aeadctx->mayverify = VERIFY_HW;
3226 		break;
3227 	case ICV_6:
3228 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3229 		aeadctx->mayverify = VERIFY_HW;
3230 		break;
3231 	case ICV_8:
3232 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3233 		aeadctx->mayverify = VERIFY_HW;
3234 		break;
3235 	case ICV_10:
3236 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3237 		aeadctx->mayverify = VERIFY_HW;
3238 		break;
3239 	case ICV_12:
3240 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3241 		aeadctx->mayverify = VERIFY_HW;
3242 		break;
3243 	case ICV_14:
3244 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3245 		aeadctx->mayverify = VERIFY_HW;
3246 		break;
3247 	case ICV_16:
3248 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3249 		aeadctx->mayverify = VERIFY_HW;
3250 		break;
3251 	default:
3252 		return -EINVAL;
3253 	}
3254 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3255 }
3256 
3257 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3258 				const u8 *key,
3259 				unsigned int keylen)
3260 {
3261 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3262 	unsigned char ck_size, mk_size;
3263 	int key_ctx_size = 0;
3264 
3265 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3266 	if (keylen == AES_KEYSIZE_128) {
3267 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3268 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3269 	} else if (keylen == AES_KEYSIZE_192) {
3270 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3271 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3272 	} else if (keylen == AES_KEYSIZE_256) {
3273 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3274 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3275 	} else {
3276 		aeadctx->enckey_len = 0;
3277 		return	-EINVAL;
3278 	}
3279 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3280 						key_ctx_size >> 4);
3281 	memcpy(aeadctx->key, key, keylen);
3282 	aeadctx->enckey_len = keylen;
3283 
3284 	return 0;
3285 }
3286 
3287 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3288 				const u8 *key,
3289 				unsigned int keylen)
3290 {
3291 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3292 	int error;
3293 
3294 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3295 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3296 			      CRYPTO_TFM_REQ_MASK);
3297 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3298 	if (error)
3299 		return error;
3300 	return chcr_ccm_common_setkey(aead, key, keylen);
3301 }
3302 
3303 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3304 				    unsigned int keylen)
3305 {
3306 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3307 	int error;
3308 
3309 	if (keylen < 3) {
3310 		aeadctx->enckey_len = 0;
3311 		return	-EINVAL;
3312 	}
3313 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3314 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3315 			      CRYPTO_TFM_REQ_MASK);
3316 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3317 	if (error)
3318 		return error;
3319 	keylen -= 3;
3320 	memcpy(aeadctx->salt, key + keylen, 3);
3321 	return chcr_ccm_common_setkey(aead, key, keylen);
3322 }
3323 
3324 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3325 			   unsigned int keylen)
3326 {
3327 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3328 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3329 	unsigned int ck_size;
3330 	int ret = 0, key_ctx_size = 0;
3331 	struct crypto_aes_ctx aes;
3332 
3333 	aeadctx->enckey_len = 0;
3334 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3335 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3336 			      & CRYPTO_TFM_REQ_MASK);
3337 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3338 	if (ret)
3339 		goto out;
3340 
3341 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3342 	    keylen > 3) {
3343 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3344 		memcpy(aeadctx->salt, key + keylen, 4);
3345 	}
3346 	if (keylen == AES_KEYSIZE_128) {
3347 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3348 	} else if (keylen == AES_KEYSIZE_192) {
3349 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3350 	} else if (keylen == AES_KEYSIZE_256) {
3351 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3352 	} else {
3353 		pr_err("GCM: Invalid key length %d\n", keylen);
3354 		ret = -EINVAL;
3355 		goto out;
3356 	}
3357 
3358 	memcpy(aeadctx->key, key, keylen);
3359 	aeadctx->enckey_len = keylen;
3360 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3361 		AEAD_H_SIZE;
3362 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3363 						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3364 						0, 0,
3365 						key_ctx_size >> 4);
	/* Calculate H = CIPH(K, 0 repeated 16 times); it goes into the
	 * key context.
	 */
3369 	ret = aes_expandkey(&aes, key, keylen);
3370 	if (ret) {
3371 		aeadctx->enckey_len = 0;
3372 		goto out;
3373 	}
3374 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3375 	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3376 	memzero_explicit(&aes, sizeof(aes));
3377 
3378 out:
3379 	return ret;
3380 }
3381 
3382 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3383 				   unsigned int keylen)
3384 {
3385 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3386 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* holds both the authentication and cipher keys */
3388 	struct crypto_authenc_keys keys;
3389 	unsigned int bs, subtype;
3390 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3391 	int err = 0, i, key_ctx_len = 0;
3392 	unsigned char ck_size = 0;
3393 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3394 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3395 	struct algo_param param;
3396 	int align;
3397 	u8 *o_ptr = NULL;
3398 
3399 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3400 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3401 			      & CRYPTO_TFM_REQ_MASK);
3402 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3403 	if (err)
3404 		goto out;
3405 
3406 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3407 		goto out;
3408 
3409 	if (get_alg_config(&param, max_authsize)) {
3410 		pr_err("chcr : Unsupported digest size\n");
3411 		goto out;
3412 	}
3413 	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + keys.enckeylen -
		       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
3422 	if (keys.enckeylen == AES_KEYSIZE_128) {
3423 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3424 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3425 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3426 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3427 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3428 	} else {
		pr_err("Unsupported cipher key length %u\n", keys.enckeylen);
3430 		goto out;
3431 	}
3432 
	/* Copy only the encryption key. The authentication key is used only
	 * to generate h(ipad) and h(opad) below, so it is not stored. At
	 * most a digest-sized authentication key is kept; longer keys are
	 * hashed down to the digest size first.
	 */
3437 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3438 	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("Base driver cannot be loaded\n");
3448 		aeadctx->enckey_len = 0;
3449 		memzero_explicit(&keys, sizeof(keys));
3450 		return -EINVAL;
3451 	}
3452 	{
3453 		SHASH_DESC_ON_STACK(shash, base_hash);
3454 
3455 		shash->tfm = base_hash;
3456 		bs = crypto_shash_blocksize(base_hash);
3457 		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;
3459 
3460 		if (keys.authkeylen > bs) {
3461 			err = crypto_shash_digest(shash, keys.authkey,
3462 						  keys.authkeylen,
3463 						  o_ptr);
			if (err) {
				pr_err("Hashing of the auth key failed\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else {
			memcpy(o_ptr, keys.authkey, keys.authkeylen);
		}
3471 
		/* Compute the ipad digest */
3473 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3474 		memcpy(pad, o_ptr, keys.authkeylen);
3475 		for (i = 0; i < bs >> 2; i++)
3476 			*((unsigned int *)pad + i) ^= IPAD_DATA;
3477 
3478 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3479 					      max_authsize))
3480 			goto out;
		/* Compute the opad digest */
3482 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3483 		memcpy(pad, o_ptr, keys.authkeylen);
3484 		for (i = 0; i < bs >> 2; i++)
3485 			*((unsigned int *)pad + i) ^= OPAD_DATA;
3486 
3487 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3488 			goto out;
3489 
3490 		/* convert the ipad and opad digest to network order */
3491 		chcr_change_order(actx->h_iopad, param.result_size);
3492 		chcr_change_order(o_ptr, param.result_size);
3493 		key_ctx_len = sizeof(struct _key_ctx) +
3494 			roundup(keys.enckeylen, 16) +
3495 			(param.result_size + align) * 2;
3496 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3497 						0, 1, key_ctx_len >> 4);
3498 		actx->auth_mode = param.auth_mode;
3499 		chcr_free_shash(base_hash);
3500 
3501 		memzero_explicit(&keys, sizeof(keys));
3502 		return 0;
3503 	}
3504 out:
3505 	aeadctx->enckey_len = 0;
3506 	memzero_explicit(&keys, sizeof(keys));
3507 	if (!IS_ERR(base_hash))
3508 		chcr_free_shash(base_hash);
3509 	return -EINVAL;
3510 }
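
/*
 * Editorial sketch (not driver code) of the HMAC precomputation done in
 * chcr_authenc_setkey() above: the auth key is zero-padded to the hash
 * block size bs, XORed with the repeated ipad (0x36) and opad (0x5c)
 * bytes of RFC 2104, and a single compression of each padded block yields
 * the partial digests that the hardware later resumes from.
 * hash_one_block() is a hypothetical helper standing in for
 * chcr_compute_partial_hash():
 *
 *	memset(pad, 0, bs);
 *	memcpy(pad, authkey, authkeylen);	// authkeylen <= bs here
 *	for (i = 0; i < bs; i++)
 *		pad[i] ^= 0x36;			// IPAD_DATA byte pattern
 *	hash_one_block(pad, h_ipad);
 *
 *	memset(pad, 0, bs);
 *	memcpy(pad, authkey, authkeylen);
 *	for (i = 0; i < bs; i++)
 *		pad[i] ^= 0x5c;			// OPAD_DATA byte pattern
 *	hash_one_block(pad, h_opad);
 */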
3511 
3512 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3513 					const u8 *key, unsigned int keylen)
3514 {
3515 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3516 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* keys holds both the authentication and the cipher key */
	struct crypto_authenc_keys keys;
	unsigned int subtype;
	int err;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;
3523 
3524 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3525 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3526 			      & CRYPTO_TFM_REQ_MASK);
3527 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3528 	if (err)
3529 		goto out;
3530 
3531 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3532 		goto out;
3533 
3534 	subtype = get_aead_subtype(authenc);
3535 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3536 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3537 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3538 			goto out;
		memcpy(aeadctx->nonce, keys.enckey + keys.enckeylen -
		       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
3541 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3542 	}
3543 	if (keys.enckeylen == AES_KEYSIZE_128) {
3544 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3545 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3546 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3547 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3548 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3549 	} else {
		pr_err("Unsupported cipher key length %u\n", keys.enckeylen);
3551 		goto out;
3552 	}
3553 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3554 	aeadctx->enckey_len = keys.enckeylen;
3555 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3556 	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3557 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3558 				aeadctx->enckey_len << 3);
3559 	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3561 
3562 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3563 						0, key_ctx_len >> 4);
3564 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3565 	memzero_explicit(&keys, sizeof(keys));
3566 	return 0;
3567 out:
3568 	aeadctx->enckey_len = 0;
3569 	memzero_explicit(&keys, sizeof(keys));
3570 	return -EINVAL;
3571 }
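
/*
 * Illustrative note (editorial): in both setkey paths above, the
 * rfc3686(ctr(aes)) key supplied by the caller is the AES key followed by
 * a 4-byte nonce (RFC 3686), which is stripped into aeadctx->nonce before
 * the key-size checks:
 *
 *	| AES key (16/24/32 bytes) | nonce (4 bytes) |
 */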
3572 
3573 static int chcr_aead_op(struct aead_request *req,
3574 			int size,
3575 			create_wr_t create_wr_fn)
3576 {
3577 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3578 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3579 	struct uld_ctx *u_ctx;
3580 	struct sk_buff *skb;
3581 	int isfull = 0;
3582 	struct chcr_dev *cdev;
3583 
3584 	cdev = a_ctx(tfm)->dev;
3585 	if (!cdev) {
		pr_err("%s: No crypto device\n", __func__);
3587 		return -ENXIO;
3588 	}
3589 
	if (chcr_inc_wrcount(cdev)) {
		/* The device is detaching: its lldi/padap has been freed, so
		 * new work requests cannot be submitted to hardware. Punt the
		 * request to the software fallback instead.
		 */
		return chcr_aead_fallback(req, reqctx->op);
	}
3596 
3597 	u_ctx = ULD_CTX(a_ctx(tfm));
3598 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3599 				   a_ctx(tfm)->tx_qidx)) {
3600 		isfull = 1;
3601 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3602 			chcr_dec_wrcount(cdev);
3603 			return -ENOSPC;
3604 		}
3605 	}
3606 
3607 	/* Form a WR from req */
3608 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
3609 
3610 	if (IS_ERR_OR_NULL(skb)) {
3611 		chcr_dec_wrcount(cdev);
3612 		return PTR_ERR_OR_ZERO(skb);
3613 	}
3614 
3615 	skb->dev = u_ctx->lldi.ports[0];
3616 	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3617 	chcr_send_wr(skb);
3618 	return isfull ? -EBUSY : -EINPROGRESS;
3619 }
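
/*
 * Minimal usage sketch (illustration only): a kernel caller typically
 * turns the asynchronous -EINPROGRESS/-EBUSY returns above into a
 * synchronous wait with the standard crypto_wait_req() helper:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 */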
3620 
3621 static int chcr_aead_encrypt(struct aead_request *req)
3622 {
3623 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3624 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3625 
3626 	reqctx->verify = VERIFY_HW;
3627 	reqctx->op = CHCR_ENCRYPT_OP;
3628 
3629 	switch (get_aead_subtype(tfm)) {
3630 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3631 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3632 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3633 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3634 		return chcr_aead_op(req, 0, create_authenc_wr);
3635 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3636 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3637 		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3638 	default:
3639 		return chcr_aead_op(req, 0, create_gcm_wr);
3640 	}
3641 }
3642 
3643 static int chcr_aead_decrypt(struct aead_request *req)
3644 {
3645 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3646 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3647 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3648 	int size;
3649 
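	/*
	 * When the tag will be verified in software, pass maxauthsize as
	 * extra size so the work request leaves room for the computed tag;
	 * hardware verification needs no extra space.
	 */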
3650 	if (aeadctx->mayverify == VERIFY_SW) {
3651 		size = crypto_aead_maxauthsize(tfm);
3652 		reqctx->verify = VERIFY_SW;
3653 	} else {
3654 		size = 0;
3655 		reqctx->verify = VERIFY_HW;
3656 	}
3657 	reqctx->op = CHCR_DECRYPT_OP;
3658 	switch (get_aead_subtype(tfm)) {
3659 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3660 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3661 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3662 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3663 		return chcr_aead_op(req, size, create_authenc_wr);
3664 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3665 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3666 		return chcr_aead_op(req, size, create_aead_ccm_wr);
3667 	default:
3668 		return chcr_aead_op(req, size, create_gcm_wr);
3669 	}
3670 }
3671 
3672 static struct chcr_alg_template driver_algs[] = {
3673 	/* AES-CBC */
3674 	{
3675 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3676 		.is_registered = 0,
3677 		.alg.skcipher = {
3678 			.base.cra_name		= "cbc(aes)",
3679 			.base.cra_driver_name	= "cbc-aes-chcr",
3680 			.base.cra_blocksize	= AES_BLOCK_SIZE,
3681 
3682 			.init			= chcr_init_tfm,
3683 			.exit			= chcr_exit_tfm,
3684 			.min_keysize		= AES_MIN_KEY_SIZE,
3685 			.max_keysize		= AES_MAX_KEY_SIZE,
3686 			.ivsize			= AES_BLOCK_SIZE,
3687 			.setkey			= chcr_aes_cbc_setkey,
3688 			.encrypt		= chcr_aes_encrypt,
3689 			.decrypt		= chcr_aes_decrypt,
3690 			}
3691 	},
3692 	{
3693 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3694 		.is_registered = 0,
3695 		.alg.skcipher = {
3696 			.base.cra_name		= "xts(aes)",
3697 			.base.cra_driver_name	= "xts-aes-chcr",
3698 			.base.cra_blocksize	= AES_BLOCK_SIZE,
3699 
3700 			.init			= chcr_init_tfm,
3701 			.exit			= chcr_exit_tfm,
3702 			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
3703 			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
3704 			.ivsize			= AES_BLOCK_SIZE,
3705 			.setkey			= chcr_aes_xts_setkey,
3706 			.encrypt		= chcr_aes_encrypt,
3707 			.decrypt		= chcr_aes_decrypt,
3708 			}
3709 	},
3710 	{
3711 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3712 		.is_registered = 0,
3713 		.alg.skcipher = {
3714 			.base.cra_name		= "ctr(aes)",
3715 			.base.cra_driver_name	= "ctr-aes-chcr",
3716 			.base.cra_blocksize	= 1,
3717 
3718 			.init			= chcr_init_tfm,
3719 			.exit			= chcr_exit_tfm,
3720 			.min_keysize		= AES_MIN_KEY_SIZE,
3721 			.max_keysize		= AES_MAX_KEY_SIZE,
3722 			.ivsize			= AES_BLOCK_SIZE,
3723 			.setkey			= chcr_aes_ctr_setkey,
3724 			.encrypt		= chcr_aes_encrypt,
3725 			.decrypt		= chcr_aes_decrypt,
3726 		}
3727 	},
3728 	{
3729 		.type = CRYPTO_ALG_TYPE_SKCIPHER |
3730 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3731 		.is_registered = 0,
3732 		.alg.skcipher = {
3733 			.base.cra_name		= "rfc3686(ctr(aes))",
3734 			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3735 			.base.cra_blocksize	= 1,
3736 
3737 			.init			= chcr_rfc3686_init,
3738 			.exit			= chcr_exit_tfm,
3739 			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3740 			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3741 			.ivsize			= CTR_RFC3686_IV_SIZE,
3742 			.setkey			= chcr_aes_rfc3686_setkey,
3743 			.encrypt		= chcr_aes_encrypt,
3744 			.decrypt		= chcr_aes_decrypt,
3745 		}
3746 	},
3747 	/* SHA */
3748 	{
3749 		.type = CRYPTO_ALG_TYPE_AHASH,
3750 		.is_registered = 0,
3751 		.alg.hash = {
3752 			.halg.digestsize = SHA1_DIGEST_SIZE,
3753 			.halg.base = {
3754 				.cra_name = "sha1",
3755 				.cra_driver_name = "sha1-chcr",
3756 				.cra_blocksize = SHA1_BLOCK_SIZE,
3757 			}
3758 		}
3759 	},
3760 	{
3761 		.type = CRYPTO_ALG_TYPE_AHASH,
3762 		.is_registered = 0,
3763 		.alg.hash = {
3764 			.halg.digestsize = SHA256_DIGEST_SIZE,
3765 			.halg.base = {
3766 				.cra_name = "sha256",
3767 				.cra_driver_name = "sha256-chcr",
3768 				.cra_blocksize = SHA256_BLOCK_SIZE,
3769 			}
3770 		}
3771 	},
3772 	{
3773 		.type = CRYPTO_ALG_TYPE_AHASH,
3774 		.is_registered = 0,
3775 		.alg.hash = {
3776 			.halg.digestsize = SHA224_DIGEST_SIZE,
3777 			.halg.base = {
3778 				.cra_name = "sha224",
3779 				.cra_driver_name = "sha224-chcr",
3780 				.cra_blocksize = SHA224_BLOCK_SIZE,
3781 			}
3782 		}
3783 	},
3784 	{
3785 		.type = CRYPTO_ALG_TYPE_AHASH,
3786 		.is_registered = 0,
3787 		.alg.hash = {
3788 			.halg.digestsize = SHA384_DIGEST_SIZE,
3789 			.halg.base = {
3790 				.cra_name = "sha384",
3791 				.cra_driver_name = "sha384-chcr",
3792 				.cra_blocksize = SHA384_BLOCK_SIZE,
3793 			}
3794 		}
3795 	},
3796 	{
3797 		.type = CRYPTO_ALG_TYPE_AHASH,
3798 		.is_registered = 0,
3799 		.alg.hash = {
3800 			.halg.digestsize = SHA512_DIGEST_SIZE,
3801 			.halg.base = {
3802 				.cra_name = "sha512",
3803 				.cra_driver_name = "sha512-chcr",
3804 				.cra_blocksize = SHA512_BLOCK_SIZE,
3805 			}
3806 		}
3807 	},
3808 	/* HMAC */
3809 	{
3810 		.type = CRYPTO_ALG_TYPE_HMAC,
3811 		.is_registered = 0,
3812 		.alg.hash = {
3813 			.halg.digestsize = SHA1_DIGEST_SIZE,
3814 			.halg.base = {
3815 				.cra_name = "hmac(sha1)",
3816 				.cra_driver_name = "hmac-sha1-chcr",
3817 				.cra_blocksize = SHA1_BLOCK_SIZE,
3818 			}
3819 		}
3820 	},
3821 	{
3822 		.type = CRYPTO_ALG_TYPE_HMAC,
3823 		.is_registered = 0,
3824 		.alg.hash = {
3825 			.halg.digestsize = SHA224_DIGEST_SIZE,
3826 			.halg.base = {
3827 				.cra_name = "hmac(sha224)",
3828 				.cra_driver_name = "hmac-sha224-chcr",
3829 				.cra_blocksize = SHA224_BLOCK_SIZE,
3830 			}
3831 		}
3832 	},
3833 	{
3834 		.type = CRYPTO_ALG_TYPE_HMAC,
3835 		.is_registered = 0,
3836 		.alg.hash = {
3837 			.halg.digestsize = SHA256_DIGEST_SIZE,
3838 			.halg.base = {
3839 				.cra_name = "hmac(sha256)",
3840 				.cra_driver_name = "hmac-sha256-chcr",
3841 				.cra_blocksize = SHA256_BLOCK_SIZE,
3842 			}
3843 		}
3844 	},
3845 	{
3846 		.type = CRYPTO_ALG_TYPE_HMAC,
3847 		.is_registered = 0,
3848 		.alg.hash = {
3849 			.halg.digestsize = SHA384_DIGEST_SIZE,
3850 			.halg.base = {
3851 				.cra_name = "hmac(sha384)",
3852 				.cra_driver_name = "hmac-sha384-chcr",
3853 				.cra_blocksize = SHA384_BLOCK_SIZE,
3854 			}
3855 		}
3856 	},
3857 	{
3858 		.type = CRYPTO_ALG_TYPE_HMAC,
3859 		.is_registered = 0,
3860 		.alg.hash = {
3861 			.halg.digestsize = SHA512_DIGEST_SIZE,
3862 			.halg.base = {
3863 				.cra_name = "hmac(sha512)",
3864 				.cra_driver_name = "hmac-sha512-chcr",
3865 				.cra_blocksize = SHA512_BLOCK_SIZE,
3866 			}
3867 		}
3868 	},
3869 	/* Add AEAD Algorithms */
3870 	{
3871 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3872 		.is_registered = 0,
3873 		.alg.aead = {
3874 			.base = {
3875 				.cra_name = "gcm(aes)",
3876 				.cra_driver_name = "gcm-aes-chcr",
3877 				.cra_blocksize	= 1,
3878 				.cra_priority = CHCR_AEAD_PRIORITY,
3879 				.cra_ctxsize =	sizeof(struct chcr_context) +
3880 						sizeof(struct chcr_aead_ctx) +
3881 						sizeof(struct chcr_gcm_ctx),
3882 			},
3883 			.ivsize = GCM_AES_IV_SIZE,
3884 			.maxauthsize = GHASH_DIGEST_SIZE,
3885 			.setkey = chcr_gcm_setkey,
3886 			.setauthsize = chcr_gcm_setauthsize,
3887 		}
3888 	},
3889 	{
3890 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3891 		.is_registered = 0,
3892 		.alg.aead = {
3893 			.base = {
3894 				.cra_name = "rfc4106(gcm(aes))",
3895 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
3896 				.cra_blocksize	 = 1,
3897 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3898 				.cra_ctxsize =	sizeof(struct chcr_context) +
3899 						sizeof(struct chcr_aead_ctx) +
3900 						sizeof(struct chcr_gcm_ctx),
3901 
3902 			},
3903 			.ivsize = GCM_RFC4106_IV_SIZE,
3904 			.maxauthsize	= GHASH_DIGEST_SIZE,
3905 			.setkey = chcr_gcm_setkey,
3906 			.setauthsize	= chcr_4106_4309_setauthsize,
3907 		}
3908 	},
3909 	{
3910 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3911 		.is_registered = 0,
3912 		.alg.aead = {
3913 			.base = {
3914 				.cra_name = "ccm(aes)",
3915 				.cra_driver_name = "ccm-aes-chcr",
3916 				.cra_blocksize	 = 1,
3917 				.cra_priority = CHCR_AEAD_PRIORITY,
3918 				.cra_ctxsize =	sizeof(struct chcr_context) +
3919 						sizeof(struct chcr_aead_ctx),
3920 
3921 			},
3922 			.ivsize = AES_BLOCK_SIZE,
3923 			.maxauthsize	= GHASH_DIGEST_SIZE,
3924 			.setkey = chcr_aead_ccm_setkey,
3925 			.setauthsize	= chcr_ccm_setauthsize,
3926 		}
3927 	},
3928 	{
3929 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3930 		.is_registered = 0,
3931 		.alg.aead = {
3932 			.base = {
3933 				.cra_name = "rfc4309(ccm(aes))",
3934 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
3935 				.cra_blocksize	 = 1,
3936 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3937 				.cra_ctxsize =	sizeof(struct chcr_context) +
3938 						sizeof(struct chcr_aead_ctx),
3939 
3940 			},
3941 			.ivsize = 8,
3942 			.maxauthsize	= GHASH_DIGEST_SIZE,
3943 			.setkey = chcr_aead_rfc4309_setkey,
3944 			.setauthsize = chcr_4106_4309_setauthsize,
3945 		}
3946 	},
3947 	{
3948 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3949 		.is_registered = 0,
3950 		.alg.aead = {
3951 			.base = {
3952 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
3953 				.cra_driver_name =
3954 					"authenc-hmac-sha1-cbc-aes-chcr",
3955 				.cra_blocksize	 = AES_BLOCK_SIZE,
3956 				.cra_priority = CHCR_AEAD_PRIORITY,
3957 				.cra_ctxsize =	sizeof(struct chcr_context) +
3958 						sizeof(struct chcr_aead_ctx) +
3959 						sizeof(struct chcr_authenc_ctx),
3960 
3961 			},
3962 			.ivsize = AES_BLOCK_SIZE,
3963 			.maxauthsize = SHA1_DIGEST_SIZE,
3964 			.setkey = chcr_authenc_setkey,
3965 			.setauthsize = chcr_authenc_setauthsize,
3966 		}
3967 	},
3968 	{
3969 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3970 		.is_registered = 0,
3971 		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
3975 				.cra_driver_name =
3976 					"authenc-hmac-sha256-cbc-aes-chcr",
3977 				.cra_blocksize	 = AES_BLOCK_SIZE,
3978 				.cra_priority = CHCR_AEAD_PRIORITY,
3979 				.cra_ctxsize =	sizeof(struct chcr_context) +
3980 						sizeof(struct chcr_aead_ctx) +
3981 						sizeof(struct chcr_authenc_ctx),
3982 
3983 			},
3984 			.ivsize = AES_BLOCK_SIZE,
3985 			.maxauthsize	= SHA256_DIGEST_SIZE,
3986 			.setkey = chcr_authenc_setkey,
3987 			.setauthsize = chcr_authenc_setauthsize,
3988 		}
3989 	},
3990 	{
3991 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3992 		.is_registered = 0,
3993 		.alg.aead = {
3994 			.base = {
3995 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
3996 				.cra_driver_name =
3997 					"authenc-hmac-sha224-cbc-aes-chcr",
3998 				.cra_blocksize	 = AES_BLOCK_SIZE,
3999 				.cra_priority = CHCR_AEAD_PRIORITY,
4000 				.cra_ctxsize =	sizeof(struct chcr_context) +
4001 						sizeof(struct chcr_aead_ctx) +
4002 						sizeof(struct chcr_authenc_ctx),
4003 			},
4004 			.ivsize = AES_BLOCK_SIZE,
4005 			.maxauthsize = SHA224_DIGEST_SIZE,
4006 			.setkey = chcr_authenc_setkey,
4007 			.setauthsize = chcr_authenc_setauthsize,
4008 		}
4009 	},
4010 	{
4011 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4012 		.is_registered = 0,
4013 		.alg.aead = {
4014 			.base = {
4015 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4016 				.cra_driver_name =
4017 					"authenc-hmac-sha384-cbc-aes-chcr",
4018 				.cra_blocksize	 = AES_BLOCK_SIZE,
4019 				.cra_priority = CHCR_AEAD_PRIORITY,
4020 				.cra_ctxsize =	sizeof(struct chcr_context) +
4021 						sizeof(struct chcr_aead_ctx) +
4022 						sizeof(struct chcr_authenc_ctx),
4023 
4024 			},
4025 			.ivsize = AES_BLOCK_SIZE,
4026 			.maxauthsize = SHA384_DIGEST_SIZE,
4027 			.setkey = chcr_authenc_setkey,
4028 			.setauthsize = chcr_authenc_setauthsize,
4029 		}
4030 	},
4031 	{
4032 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4033 		.is_registered = 0,
4034 		.alg.aead = {
4035 			.base = {
4036 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4037 				.cra_driver_name =
4038 					"authenc-hmac-sha512-cbc-aes-chcr",
4039 				.cra_blocksize	 = AES_BLOCK_SIZE,
4040 				.cra_priority = CHCR_AEAD_PRIORITY,
4041 				.cra_ctxsize =	sizeof(struct chcr_context) +
4042 						sizeof(struct chcr_aead_ctx) +
4043 						sizeof(struct chcr_authenc_ctx),
4044 
4045 			},
4046 			.ivsize = AES_BLOCK_SIZE,
4047 			.maxauthsize = SHA512_DIGEST_SIZE,
4048 			.setkey = chcr_authenc_setkey,
4049 			.setauthsize = chcr_authenc_setauthsize,
4050 		}
4051 	},
4052 	{
4053 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4054 		.is_registered = 0,
4055 		.alg.aead = {
4056 			.base = {
4057 				.cra_name = "authenc(digest_null,cbc(aes))",
4058 				.cra_driver_name =
4059 					"authenc-digest_null-cbc-aes-chcr",
4060 				.cra_blocksize	 = AES_BLOCK_SIZE,
4061 				.cra_priority = CHCR_AEAD_PRIORITY,
4062 				.cra_ctxsize =	sizeof(struct chcr_context) +
4063 						sizeof(struct chcr_aead_ctx) +
4064 						sizeof(struct chcr_authenc_ctx),
4065 
4066 			},
4067 			.ivsize  = AES_BLOCK_SIZE,
4068 			.maxauthsize = 0,
4069 			.setkey  = chcr_aead_digest_null_setkey,
4070 			.setauthsize = chcr_authenc_null_setauthsize,
4071 		}
4072 	},
4073 	{
4074 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4075 		.is_registered = 0,
4076 		.alg.aead = {
4077 			.base = {
4078 				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4079 				.cra_driver_name =
4080 				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4081 				.cra_blocksize	 = 1,
4082 				.cra_priority = CHCR_AEAD_PRIORITY,
4083 				.cra_ctxsize =	sizeof(struct chcr_context) +
4084 						sizeof(struct chcr_aead_ctx) +
4085 						sizeof(struct chcr_authenc_ctx),
4086 
4087 			},
4088 			.ivsize = CTR_RFC3686_IV_SIZE,
4089 			.maxauthsize = SHA1_DIGEST_SIZE,
4090 			.setkey = chcr_authenc_setkey,
4091 			.setauthsize = chcr_authenc_setauthsize,
4092 		}
4093 	},
4094 	{
4095 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4096 		.is_registered = 0,
4097 		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4101 				.cra_driver_name =
4102 				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4103 				.cra_blocksize	 = 1,
4104 				.cra_priority = CHCR_AEAD_PRIORITY,
4105 				.cra_ctxsize =	sizeof(struct chcr_context) +
4106 						sizeof(struct chcr_aead_ctx) +
4107 						sizeof(struct chcr_authenc_ctx),
4108 
4109 			},
4110 			.ivsize = CTR_RFC3686_IV_SIZE,
4111 			.maxauthsize	= SHA256_DIGEST_SIZE,
4112 			.setkey = chcr_authenc_setkey,
4113 			.setauthsize = chcr_authenc_setauthsize,
4114 		}
4115 	},
4116 	{
4117 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4118 		.is_registered = 0,
4119 		.alg.aead = {
4120 			.base = {
4121 				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4122 				.cra_driver_name =
4123 				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4124 				.cra_blocksize	 = 1,
4125 				.cra_priority = CHCR_AEAD_PRIORITY,
4126 				.cra_ctxsize =	sizeof(struct chcr_context) +
4127 						sizeof(struct chcr_aead_ctx) +
4128 						sizeof(struct chcr_authenc_ctx),
4129 			},
4130 			.ivsize = CTR_RFC3686_IV_SIZE,
4131 			.maxauthsize = SHA224_DIGEST_SIZE,
4132 			.setkey = chcr_authenc_setkey,
4133 			.setauthsize = chcr_authenc_setauthsize,
4134 		}
4135 	},
4136 	{
4137 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4138 		.is_registered = 0,
4139 		.alg.aead = {
4140 			.base = {
4141 				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4142 				.cra_driver_name =
4143 				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4144 				.cra_blocksize	 = 1,
4145 				.cra_priority = CHCR_AEAD_PRIORITY,
4146 				.cra_ctxsize =	sizeof(struct chcr_context) +
4147 						sizeof(struct chcr_aead_ctx) +
4148 						sizeof(struct chcr_authenc_ctx),
4149 
4150 			},
4151 			.ivsize = CTR_RFC3686_IV_SIZE,
4152 			.maxauthsize = SHA384_DIGEST_SIZE,
4153 			.setkey = chcr_authenc_setkey,
4154 			.setauthsize = chcr_authenc_setauthsize,
4155 		}
4156 	},
4157 	{
4158 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4159 		.is_registered = 0,
4160 		.alg.aead = {
4161 			.base = {
4162 				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4163 				.cra_driver_name =
4164 				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4165 				.cra_blocksize	 = 1,
4166 				.cra_priority = CHCR_AEAD_PRIORITY,
4167 				.cra_ctxsize =	sizeof(struct chcr_context) +
4168 						sizeof(struct chcr_aead_ctx) +
4169 						sizeof(struct chcr_authenc_ctx),
4170 
4171 			},
4172 			.ivsize = CTR_RFC3686_IV_SIZE,
4173 			.maxauthsize = SHA512_DIGEST_SIZE,
4174 			.setkey = chcr_authenc_setkey,
4175 			.setauthsize = chcr_authenc_setauthsize,
4176 		}
4177 	},
4178 	{
4179 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4180 		.is_registered = 0,
4181 		.alg.aead = {
4182 			.base = {
4183 				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4184 				.cra_driver_name =
4185 				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4186 				.cra_blocksize	 = 1,
4187 				.cra_priority = CHCR_AEAD_PRIORITY,
4188 				.cra_ctxsize =	sizeof(struct chcr_context) +
4189 						sizeof(struct chcr_aead_ctx) +
4190 						sizeof(struct chcr_authenc_ctx),
4191 
4192 			},
4193 			.ivsize  = CTR_RFC3686_IV_SIZE,
4194 			.maxauthsize = 0,
4195 			.setkey  = chcr_aead_digest_null_setkey,
4196 			.setauthsize = chcr_authenc_null_setauthsize,
4197 		}
4198 	},
4199 };
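
/*
 * Illustrative usage (editorial, not driver code): once registered, the
 * algorithms in driver_algs[] are reachable through the normal kernel
 * crypto API, either by generic name (priority-based selection) or by the
 * chcr driver name to request this implementation explicitly:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	// or: crypto_alloc_aead("gcm-aes-chcr", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, GHASH_DIGEST_SIZE);
 *	...
 *	crypto_free_aead(tfm);
 */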
4200 
/*
 *	chcr_unregister_alg - Deregister crypto algorithms from the
 *	kernel crypto framework.
 */
4205 static int chcr_unregister_alg(void)
4206 {
4207 	int i;
4208 
4209 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4210 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4211 		case CRYPTO_ALG_TYPE_SKCIPHER:
4212 			if (driver_algs[i].is_registered)
4213 				crypto_unregister_skcipher(
4214 						&driver_algs[i].alg.skcipher);
4215 			break;
4216 		case CRYPTO_ALG_TYPE_AEAD:
4217 			if (driver_algs[i].is_registered)
4218 				crypto_unregister_aead(
4219 						&driver_algs[i].alg.aead);
4220 			break;
4221 		case CRYPTO_ALG_TYPE_AHASH:
4222 			if (driver_algs[i].is_registered)
4223 				crypto_unregister_ahash(
4224 						&driver_algs[i].alg.hash);
4225 			break;
4226 		}
4227 		driver_algs[i].is_registered = 0;
4228 	}
4229 	return 0;
4230 }
4231 
4232 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4233 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4234 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4235 
/*
 *	chcr_register_alg - Register crypto algorithms with the kernel
 *	crypto framework.
 */
4239 static int chcr_register_alg(void)
4240 {
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;
4245 
4246 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4247 		if (driver_algs[i].is_registered)
4248 			continue;
4249 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4250 		case CRYPTO_ALG_TYPE_SKCIPHER:
4251 			driver_algs[i].alg.skcipher.base.cra_priority =
4252 				CHCR_CRA_PRIORITY;
4253 			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4254 			driver_algs[i].alg.skcipher.base.cra_flags =
4255 				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4256 				CRYPTO_ALG_NEED_FALLBACK;
4257 			driver_algs[i].alg.skcipher.base.cra_ctxsize =
4258 				sizeof(struct chcr_context) +
4259 				sizeof(struct ablk_ctx);
4260 			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4261 
4262 			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4263 			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4264 			break;
4265 		case CRYPTO_ALG_TYPE_AEAD:
4266 			driver_algs[i].alg.aead.base.cra_flags =
4267 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4268 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4269 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4270 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4271 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4272 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4273 			err = crypto_register_aead(&driver_algs[i].alg.aead);
4274 			name = driver_algs[i].alg.aead.base.cra_driver_name;
4275 			break;
4276 		case CRYPTO_ALG_TYPE_AHASH:
4277 			a_hash = &driver_algs[i].alg.hash;
4278 			a_hash->update = chcr_ahash_update;
4279 			a_hash->final = chcr_ahash_final;
4280 			a_hash->finup = chcr_ahash_finup;
4281 			a_hash->digest = chcr_ahash_digest;
4282 			a_hash->export = chcr_ahash_export;
4283 			a_hash->import = chcr_ahash_import;
4284 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4285 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4286 			a_hash->halg.base.cra_module = THIS_MODULE;
4287 			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4288 			a_hash->halg.base.cra_alignmask = 0;
4289 			a_hash->halg.base.cra_exit = NULL;
4290 
4291 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4292 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4293 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4294 				a_hash->init = chcr_hmac_init;
4295 				a_hash->setkey = chcr_ahash_setkey;
4296 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4297 			} else {
4298 				a_hash->init = chcr_sha_init;
4299 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4300 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4301 			}
4302 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			name = driver_algs[i].alg.hash.halg.base.cra_driver_name;
4305 			break;
4306 		}
		if (err) {
			pr_err("%s: Algorithm registration failed\n", name);
			goto register_err;
		}
		driver_algs[i].is_registered = 1;
4314 	}
4315 	return 0;
4316 
4317 register_err:
4318 	chcr_unregister_alg();
4319 	return err;
4320 }
4321 
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once, when the first device comes up. After
 *	this the kernel will start calling the driver for crypto operations.
 */
4327 int start_crypto(void)
4328 {
4329 	return chcr_register_alg();
4330 }
4331 
/*
 *	stop_crypto - Deregister all crypto algorithms from the kernel.
 *	This should be called once, when the last device goes down. After
 *	this the kernel will no longer call the driver for crypto operations.
 */
4337 int stop_crypto(void)
4338 {
4339 	chcr_unregister_alg();
4340 	return 0;
4341 }
4342