1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/gcm.h>
57 #include <crypto/sha.h>
58 #include <crypto/authenc.h>
59 #include <crypto/ctr.h>
60 #include <crypto/gf128mul.h>
61 #include <crypto/internal/aead.h>
62 #include <crypto/null.h>
63 #include <crypto/internal/skcipher.h>
64 #include <crypto/aead.h>
65 #include <crypto/scatterwalk.h>
66 #include <crypto/internal/hash.h>
67 
68 #include "t4fw_api.h"
69 #include "t4_msg.h"
70 #include "chcr_core.h"
71 #include "chcr_algo.h"
72 #include "chcr_crypto.h"
73 
74 #define IV AES_BLOCK_SIZE
75 
76 static unsigned int sgl_ent_len[] = {
77 	0, 0, 16, 24, 40, 48, 64, 72, 88,
78 	96, 112, 120, 136, 144, 160, 168, 184,
79 	192, 208, 216, 232, 240, 256, 264, 280,
80 	288, 304, 312, 328, 336, 352, 360, 376
81 };
82 
83 static unsigned int dsgl_ent_len[] = {
84 	0, 32, 32, 48, 48, 64, 64, 80, 80,
85 	112, 112, 128, 128, 144, 144, 160, 160,
86 	192, 192, 208, 208, 224, 224, 240, 240,
87 	272, 272, 288, 288, 304, 304, 320, 320
88 };
89 
90 static u32 round_constant[11] = {
91 	0x01000000, 0x02000000, 0x04000000, 0x08000000,
92 	0x10000000, 0x20000000, 0x40000000, 0x80000000,
93 	0x1B000000, 0x36000000, 0x6C000000
94 };
95 
96 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
97 				   unsigned char *input, int err);
98 
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
100 {
101 	return ctx->crypto_ctx->aeadctx;
102 }
103 
104 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
105 {
106 	return ctx->crypto_ctx->ablkctx;
107 }
108 
109 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
110 {
111 	return ctx->crypto_ctx->hmacctx;
112 }
113 
114 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
115 {
116 	return gctx->ctx->gcm;
117 }
118 
119 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
120 {
121 	return gctx->ctx->authenc;
122 }
123 
124 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
125 {
126 	return ctx->dev->u_ctx;
127 }
128 
129 static inline int is_ofld_imm(const struct sk_buff *skb)
130 {
131 	return (skb->len <= SGE_MAX_WR_LEN);
132 }
133 
134 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
135 {
136 	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
137 }
138 
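/*
 * Count the number of SGL entries needed to cover @reqlen bytes of @sg
 * when each entry is limited to @entlen bytes, after skipping the first
 * @skip bytes of the list.
 */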
139 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
140 			 unsigned int entlen,
141 			 unsigned int skip)
142 {
143 	int nents = 0;
144 	unsigned int less;
145 	unsigned int skip_len = 0;
146 
147 	while (sg && skip) {
148 		if (sg_dma_len(sg) <= skip) {
149 			skip -= sg_dma_len(sg);
150 			skip_len = 0;
151 			sg = sg_next(sg);
152 		} else {
153 			skip_len = skip;
154 			skip = 0;
155 		}
156 	}
157 
158 	while (sg && reqlen) {
159 		less = min(reqlen, sg_dma_len(sg) - skip_len);
160 		nents += DIV_ROUND_UP(less, entlen);
161 		reqlen -= less;
162 		skip_len = 0;
163 		sg = sg_next(sg);
164 	}
165 	return nents;
166 }
167 
168 static inline int get_aead_subtype(struct crypto_aead *aead)
169 {
170 	struct aead_alg *alg = crypto_aead_alg(aead);
171 	struct chcr_alg_template *chcr_crypto_alg =
172 		container_of(alg, struct chcr_alg_template, alg.aead);
173 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
174 }
175 
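/*
 * Software tag verification for AEAD decryption: compare the expected tag
 * against the tag returned in the CPL_FW6_PLD completion and set @err to
 * -EBADMSG if they differ.
 */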
176 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
177 {
178 	u8 temp[SHA512_DIGEST_SIZE];
179 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
180 	int authsize = crypto_aead_authsize(tfm);
181 	struct cpl_fw6_pld *fw6_pld;
182 	int cmp = 0;
183 
184 	fw6_pld = (struct cpl_fw6_pld *)input;
185 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
186 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
187 		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
190 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
191 				authsize, req->assoclen +
192 				req->cryptlen - authsize);
193 		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
194 	}
195 	if (cmp)
196 		*err = -EBADMSG;
197 	else
198 		*err = 0;
199 }
200 
201 static inline void chcr_handle_aead_resp(struct aead_request *req,
202 					 unsigned char *input,
203 					 int err)
204 {
205 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
206 
207 	chcr_aead_common_exit(req);
208 	if (reqctx->verify == VERIFY_SW) {
209 		chcr_verify_tag(req, input, &err);
210 		reqctx->verify = VERIFY_HW;
211 	}
212 	req->base.complete(&req->base, err);
213 }
214 
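/*
 * Run the AES key expansion on @key and copy the last round-key words of
 * the schedule, in reverse order, into @dec_key.  This reverse-round key
 * is used when building the key context for decryption (see
 * generate_copy_rrkey()).
 */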
215 static void get_aes_decrypt_key(unsigned char *dec_key,
216 				       const unsigned char *key,
217 				       unsigned int keylength)
218 {
219 	u32 temp;
220 	u32 w_ring[MAX_NK];
221 	int i, j, k;
222 	u8  nr, nk;
223 
224 	switch (keylength) {
225 	case AES_KEYLENGTH_128BIT:
226 		nk = KEYLENGTH_4BYTES;
227 		nr = NUMBER_OF_ROUNDS_10;
228 		break;
229 	case AES_KEYLENGTH_192BIT:
230 		nk = KEYLENGTH_6BYTES;
231 		nr = NUMBER_OF_ROUNDS_12;
232 		break;
233 	case AES_KEYLENGTH_256BIT:
234 		nk = KEYLENGTH_8BYTES;
235 		nr = NUMBER_OF_ROUNDS_14;
236 		break;
237 	default:
238 		return;
239 	}
240 	for (i = 0; i < nk; i++)
241 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
242 
243 	i = 0;
244 	temp = w_ring[nk - 1];
245 	while (i + nk < (nr + 1) * 4) {
246 		if (!(i % nk)) {
247 			/* RotWord(temp) */
248 			temp = (temp << 8) | (temp >> 24);
249 			temp = aes_ks_subword(temp);
250 			temp ^= round_constant[i / nk];
251 		} else if (nk == 8 && (i % 4 == 0)) {
252 			temp = aes_ks_subword(temp);
253 		}
254 		w_ring[i % nk] ^= temp;
255 		temp = w_ring[i % nk];
256 		i++;
257 	}
258 	i--;
259 	for (k = 0, j = i % nk; k < nk; k++) {
260 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
261 		j--;
262 		if (j < 0)
263 			j += nk;
264 	}
265 }
266 
267 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
268 {
269 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
270 
271 	switch (ds) {
272 	case SHA1_DIGEST_SIZE:
273 		base_hash = crypto_alloc_shash("sha1", 0, 0);
274 		break;
275 	case SHA224_DIGEST_SIZE:
276 		base_hash = crypto_alloc_shash("sha224", 0, 0);
277 		break;
278 	case SHA256_DIGEST_SIZE:
279 		base_hash = crypto_alloc_shash("sha256", 0, 0);
280 		break;
281 	case SHA384_DIGEST_SIZE:
282 		base_hash = crypto_alloc_shash("sha384", 0, 0);
283 		break;
284 	case SHA512_DIGEST_SIZE:
285 		base_hash = crypto_alloc_shash("sha512", 0, 0);
286 		break;
287 	}
288 
289 	return base_hash;
290 }
291 
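/*
 * Hash a single block (typically the HMAC ipad or opad) with the software
 * shash and export the raw intermediate state into @result_hash; the
 * partial state is later programmed into the hardware key context.
 */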
292 static int chcr_compute_partial_hash(struct shash_desc *desc,
293 				     char *iopad, char *result_hash,
294 				     int digest_size)
295 {
296 	struct sha1_state sha1_st;
297 	struct sha256_state sha256_st;
298 	struct sha512_state sha512_st;
299 	int error;
300 
301 	if (digest_size == SHA1_DIGEST_SIZE) {
302 		error = crypto_shash_init(desc) ?:
303 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
304 			crypto_shash_export(desc, (void *)&sha1_st);
305 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
306 	} else if (digest_size == SHA224_DIGEST_SIZE) {
307 		error = crypto_shash_init(desc) ?:
308 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
309 			crypto_shash_export(desc, (void *)&sha256_st);
310 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
311 
312 	} else if (digest_size == SHA256_DIGEST_SIZE) {
313 		error = crypto_shash_init(desc) ?:
314 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
315 			crypto_shash_export(desc, (void *)&sha256_st);
316 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
317 
318 	} else if (digest_size == SHA384_DIGEST_SIZE) {
319 		error = crypto_shash_init(desc) ?:
320 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
321 			crypto_shash_export(desc, (void *)&sha512_st);
322 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
323 
324 	} else if (digest_size == SHA512_DIGEST_SIZE) {
325 		error = crypto_shash_init(desc) ?:
326 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
327 			crypto_shash_export(desc, (void *)&sha512_st);
328 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
329 	} else {
330 		error = -EINVAL;
331 		pr_err("Unknown digest size %d\n", digest_size);
332 	}
333 	return error;
334 }
335 
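/* Convert the exported hash state words to big-endian byte order. */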
336 static void chcr_change_order(char *buf, int ds)
337 {
338 	int i;
339 
340 	if (ds == SHA512_DIGEST_SIZE) {
341 		for (i = 0; i < (ds / sizeof(u64)); i++)
342 			*((__be64 *)buf + i) =
343 				cpu_to_be64(*((u64 *)buf + i));
344 	} else {
345 		for (i = 0; i < (ds / sizeof(u32)); i++)
346 			*((__be32 *)buf + i) =
347 				cpu_to_be32(*((u32 *)buf + i));
348 	}
349 }
350 
351 static inline int is_hmac(struct crypto_tfm *tfm)
352 {
353 	struct crypto_alg *alg = tfm->__crt_alg;
354 	struct chcr_alg_template *chcr_crypto_alg =
355 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
356 			     alg.hash);
357 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
358 		return 1;
359 	return 0;
360 }
361 
362 static inline void dsgl_walk_init(struct dsgl_walk *walk,
363 				   struct cpl_rx_phys_dsgl *dsgl)
364 {
365 	walk->dsgl = dsgl;
366 	walk->nents = 0;
367 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
368 }
369 
370 static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
371 {
372 	struct cpl_rx_phys_dsgl *phys_cpl;
373 
374 	phys_cpl = walk->dsgl;
375 
376 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
377 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
378 	phys_cpl->pcirlxorder_to_noofsgentr =
379 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
380 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
381 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
382 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
383 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
384 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
385 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
386 	phys_cpl->rss_hdr_int.qid = htons(qid);
387 	phys_cpl->rss_hdr_int.hash_val = 0;
388 }
389 
390 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
391 					size_t size,
392 					dma_addr_t *addr)
393 {
394 	int j;
395 
396 	if (!size)
397 		return;
398 	j = walk->nents;
399 	walk->to->len[j % 8] = htons(size);
400 	walk->to->addr[j % 8] = cpu_to_be64(*addr);
401 	j++;
402 	if ((j % 8) == 0)
403 		walk->to++;
404 	walk->nents = j;
405 }
406 
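/*
 * Add @slen bytes of @sg to the destination PHYS_DSGL, skipping the first
 * @skip bytes and splitting entries larger than CHCR_DST_SG_SIZE.
 */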
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
411 {
412 	int skip_len = 0;
413 	unsigned int left_size = slen, len = 0;
414 	unsigned int j = walk->nents;
415 	int offset, ent_len;
416 
417 	if (!slen)
418 		return;
419 	while (sg && skip) {
420 		if (sg_dma_len(sg) <= skip) {
421 			skip -= sg_dma_len(sg);
422 			skip_len = 0;
423 			sg = sg_next(sg);
424 		} else {
425 			skip_len = skip;
426 			skip = 0;
427 		}
428 	}
429 
430 	while (left_size && sg) {
431 		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
432 		offset = 0;
433 		while (len) {
434 			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
435 			walk->to->len[j % 8] = htons(ent_len);
436 			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
437 						      offset + skip_len);
438 			offset += ent_len;
439 			len -= ent_len;
440 			j++;
441 			if ((j % 8) == 0)
442 				walk->to++;
443 		}
444 		walk->last_sg = sg;
445 		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
446 					  skip_len) + skip_len;
447 		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
448 		skip_len = 0;
449 		sg = sg_next(sg);
450 	}
451 	walk->nents = j;
452 }
453 
454 static inline void ulptx_walk_init(struct ulptx_walk *walk,
455 				   struct ulptx_sgl *ulp)
456 {
457 	walk->sgl = ulp;
458 	walk->nents = 0;
459 	walk->pair_idx = 0;
460 	walk->pair = ulp->sge;
461 	walk->last_sg = NULL;
462 	walk->last_sg_len = 0;
463 }
464 
465 static inline void ulptx_walk_end(struct ulptx_walk *walk)
466 {
467 	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
468 			      ULPTX_NSGE_V(walk->nents));
469 }
470 
471 
472 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
473 					size_t size,
474 					dma_addr_t *addr)
475 {
476 	if (!size)
477 		return;
478 
479 	if (walk->nents == 0) {
480 		walk->sgl->len0 = cpu_to_be32(size);
481 		walk->sgl->addr0 = cpu_to_be64(*addr);
482 	} else {
483 		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
484 		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
485 		walk->pair_idx = !walk->pair_idx;
486 		if (!walk->pair_idx)
487 			walk->pair++;
488 	}
489 	walk->nents++;
490 }
491 
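/*
 * Add @len bytes of @sg to the ULPTX source SGL, skipping the first @skip
 * bytes and splitting entries larger than CHCR_SRC_SG_SIZE.
 */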
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
496 {
497 	int small;
498 	int skip_len = 0;
499 	unsigned int sgmin;
500 
501 	if (!len)
502 		return;
503 	while (sg && skip) {
504 		if (sg_dma_len(sg) <= skip) {
505 			skip -= sg_dma_len(sg);
506 			skip_len = 0;
507 			sg = sg_next(sg);
508 		} else {
509 			skip_len = skip;
510 			skip = 0;
511 		}
512 	}
513 	WARN(!sg, "SG should not be null here\n");
514 	if (sg && (walk->nents == 0)) {
515 		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
516 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
517 		walk->sgl->len0 = cpu_to_be32(sgmin);
518 		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
519 		walk->nents++;
520 		len -= sgmin;
521 		walk->last_sg = sg;
522 		walk->last_sg_len = sgmin + skip_len;
523 		skip_len += sgmin;
524 		if (sg_dma_len(sg) == skip_len) {
525 			sg = sg_next(sg);
526 			skip_len = 0;
527 		}
528 	}
529 
530 	while (sg && len) {
531 		small = min(sg_dma_len(sg) - skip_len, len);
532 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
533 		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
534 		walk->pair->addr[walk->pair_idx] =
535 			cpu_to_be64(sg_dma_address(sg) + skip_len);
536 		walk->pair_idx = !walk->pair_idx;
537 		walk->nents++;
538 		if (!walk->pair_idx)
539 			walk->pair++;
540 		len -= sgmin;
541 		skip_len += sgmin;
542 		walk->last_sg = sg;
543 		walk->last_sg_len = skip_len;
544 		if (sg_dma_len(sg) == skip_len) {
545 			sg = sg_next(sg);
546 			skip_len = 0;
547 		}
548 	}
549 }
550 
551 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
552 {
553 	struct crypto_alg *alg = tfm->__crt_alg;
554 	struct chcr_alg_template *chcr_crypto_alg =
555 		container_of(alg, struct chcr_alg_template, alg.crypto);
556 
557 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
558 }
559 
560 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
561 {
562 	struct adapter *adap = netdev2adap(dev);
563 	struct sge_uld_txq_info *txq_info =
564 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
565 	struct sge_uld_txq *txq;
566 	int ret = 0;
567 
568 	local_bh_disable();
569 	txq = &txq_info->uldtxq[idx];
570 	spin_lock(&txq->sendq.lock);
571 	if (txq->full)
572 		ret = -1;
573 	spin_unlock(&txq->sendq.lock);
574 	local_bh_enable();
575 	return ret;
576 }
577 
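/*
 * Populate the key context for decryption: plain AES-CBC uses the
 * reverse-round key, otherwise the second half of the key is placed
 * first, followed by the reverse-round key.
 */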
578 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
579 			       struct _key_ctx *key_ctx)
580 {
581 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
582 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
583 	} else {
584 		memcpy(key_ctx->key,
585 		       ablkctx->key + (ablkctx->enckey_len >> 1),
586 		       ablkctx->enckey_len >> 1);
587 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
588 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
589 	}
590 	return 0;
591 }
592 
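/*
 * Return how many source bytes fit into the remaining work-request space
 * @space, starting @srcskip bytes into @src, with @minsg SGL entries
 * already accounted for.
 */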
593 static int chcr_hash_ent_in_wr(struct scatterlist *src,
594 			     unsigned int minsg,
595 			     unsigned int space,
596 			     unsigned int srcskip)
597 {
598 	int srclen = 0;
599 	int srcsg = minsg;
600 	int soffset = 0, sless;
601 
602 	if (sg_dma_len(src) == srcskip) {
603 		src = sg_next(src);
604 		srcskip = 0;
605 	}
606 	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
609 		srclen += sless;
610 		soffset += sless;
611 		srcsg++;
612 		if (sg_dma_len(src) == (soffset + srcskip)) {
613 			src = sg_next(src);
614 			soffset = 0;
615 			srcskip = 0;
616 		}
617 	}
618 	return srclen;
619 }
620 
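/*
 * Return how many bytes of @src and @dst fit into the remaining
 * work-request space @space, honouring the per-entry size limits and the
 * @srcskip/@dstskip offsets.  The smaller of the two counts is returned.
 */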
621 static int chcr_sg_ent_in_wr(struct scatterlist *src,
622 			     struct scatterlist *dst,
623 			     unsigned int minsg,
624 			     unsigned int space,
625 			     unsigned int srcskip,
626 			     unsigned int dstskip)
627 {
628 	int srclen = 0, dstlen = 0;
629 	int srcsg = minsg, dstsg = minsg;
630 	int offset = 0, soffset = 0, less, sless = 0;
631 
632 	if (sg_dma_len(src) == srcskip) {
633 		src = sg_next(src);
634 		srcskip = 0;
635 	}
636 	if (sg_dma_len(dst) == dstskip) {
637 		dst = sg_next(dst);
638 		dstskip = 0;
639 	}
640 
641 	while (src && dst &&
642 	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
643 		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
644 				CHCR_SRC_SG_SIZE);
645 		srclen += sless;
646 		srcsg++;
647 		offset = 0;
648 		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
649 		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
650 			if (srclen <= dstlen)
651 				break;
652 			less = min_t(unsigned int, sg_dma_len(dst) - offset -
653 				     dstskip, CHCR_DST_SG_SIZE);
654 			dstlen += less;
655 			offset += less;
656 			if ((offset + dstskip) == sg_dma_len(dst)) {
657 				dst = sg_next(dst);
658 				offset = 0;
659 			}
660 			dstsg++;
661 			dstskip = 0;
662 		}
663 		soffset += sless;
664 		if ((soffset + srcskip) == sg_dma_len(src)) {
665 			src = sg_next(src);
666 			srcskip = 0;
667 			soffset = 0;
668 		}
670 	}
671 	return min(srclen, dstlen);
672 }
673 
674 static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
675 				u32 flags,
676 				struct scatterlist *src,
677 				struct scatterlist *dst,
678 				unsigned int nbytes,
679 				u8 *iv,
680 				unsigned short op_type)
681 {
682 	int err;
683 
684 	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
685 
686 	skcipher_request_set_tfm(subreq, cipher);
687 	skcipher_request_set_callback(subreq, flags, NULL, NULL);
688 	skcipher_request_set_crypt(subreq, src, dst,
689 				   nbytes, iv);
690 
691 	err = op_type ? crypto_skcipher_decrypt(subreq) :
692 		crypto_skcipher_encrypt(subreq);
693 	skcipher_request_zero(subreq);
694 
695 	return err;
697 }
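
/*
 * Fill the common FW_CRYPTO_LOOKASIDE_WR header and the ULPTX command for
 * a crypto work request: opcode, completion cookie, length in 16-byte
 * units, the ingress queue for the response and the size of the immediate
 * SC payload.
 */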
698 static inline void create_wreq(struct chcr_context *ctx,
699 			       struct chcr_wr *chcr_req,
700 			       struct crypto_async_request *req,
701 			       unsigned int imm,
702 			       int hash_sz,
703 			       unsigned int len16,
704 			       unsigned int sc_len,
705 			       unsigned int lcb)
706 {
707 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
708 	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
709 
711 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
712 	chcr_req->wreq.pld_size_hash_size =
713 		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
714 	chcr_req->wreq.len16_pkd =
715 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
716 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
717 	chcr_req->wreq.rx_chid_to_rx_q_id =
718 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
719 				!!lcb, ctx->tx_qidx);
720 
721 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
722 						       qid);
723 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
724 				     ((sizeof(chcr_req->wreq)) >> 4)));
725 
726 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
727 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
728 					   sizeof(chcr_req->key_ctx) + sc_len);
729 }
730 
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: holds the cipher request, the number of bytes to process
 *		  in this WR and the ingress qid on which the response
 *		  should be received.
 */
738 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
739 {
740 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
741 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
742 	struct sk_buff *skb = NULL;
743 	struct chcr_wr *chcr_req;
744 	struct cpl_rx_phys_dsgl *phys_cpl;
745 	struct ulptx_sgl *ulptx;
746 	struct chcr_blkcipher_req_ctx *reqctx =
747 		ablkcipher_request_ctx(wrparam->req);
748 	unsigned int temp = 0, transhdr_len, dst_size;
749 	int error;
750 	int nents;
751 	unsigned int kctx_len;
752 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
753 			GFP_KERNEL : GFP_ATOMIC;
754 	struct adapter *adap = padap(c_ctx(tfm)->dev);
755 
756 	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
757 			      reqctx->dst_ofst);
758 	dst_size = get_space_for_phys_dsgl(nents);
759 	kctx_len = roundup(ablkctx->enckey_len, 16);
760 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
761 	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
762 				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
763 	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
764 				     (sgl_len(nents) * 8);
765 	transhdr_len += temp;
766 	transhdr_len = roundup(transhdr_len, 16);
767 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
768 	if (!skb) {
769 		error = -ENOMEM;
770 		goto err;
771 	}
772 	chcr_req = __skb_put_zero(skb, transhdr_len);
773 	chcr_req->sec_cpl.op_ivinsrtofst =
774 		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
775 
776 	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
777 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
778 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
779 
780 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
781 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
782 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
783 							 ablkctx->ciph_mode,
784 							 0, 0, IV >> 1);
785 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
786 							  0, 1, dst_size);
787 
788 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
789 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
790 	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
791 	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
792 	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
793 	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
794 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
795 	} else {
796 		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
797 		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
798 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
799 			       ablkctx->enckey_len);
800 		} else {
801 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
802 			       (ablkctx->enckey_len >> 1),
803 			       ablkctx->enckey_len >> 1);
804 			memcpy(chcr_req->key_ctx.key +
805 			       (ablkctx->enckey_len >> 1),
806 			       ablkctx->key,
807 			       ablkctx->enckey_len >> 1);
808 		}
809 	}
810 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
811 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
812 	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
813 	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
814 
815 	atomic_inc(&adap->chcr_stats.cipher_rqst);
816 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
817 		+ (reqctx->imm ? (wrparam->bytes) : 0);
818 	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
819 		    transhdr_len, temp,
820 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
821 	reqctx->skb = skb;
822 
823 	if (reqctx->op && (ablkctx->ciph_mode ==
824 			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
825 		sg_pcopy_to_buffer(wrparam->req->src,
826 			sg_nents(wrparam->req->src), wrparam->req->info, 16,
827 			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
828 
829 	return skb;
830 err:
831 	return ERR_PTR(error);
832 }
833 
834 static inline int chcr_keyctx_ck_size(unsigned int keylen)
835 {
836 	int ck_size = 0;
837 
838 	if (keylen == AES_KEYSIZE_128)
839 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
840 	else if (keylen == AES_KEYSIZE_192)
841 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
842 	else if (keylen == AES_KEYSIZE_256)
843 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
844 	else
845 		ck_size = 0;
846 
847 	return ck_size;
848 }
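
/* Program the software fallback cipher with the same key and flags. */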
849 static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
850 				       const u8 *key,
851 				       unsigned int keylen)
852 {
853 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
854 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
855 	int err = 0;
856 
857 	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
858 	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
859 				  CRYPTO_TFM_REQ_MASK);
860 	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
861 	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
862 	tfm->crt_flags |=
863 		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
864 		CRYPTO_TFM_RES_MASK;
865 	return err;
866 }
867 
868 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
869 			       const u8 *key,
870 			       unsigned int keylen)
871 {
872 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
873 	unsigned int ck_size, context_size;
874 	u16 alignment = 0;
875 	int err;
876 
877 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
878 	if (err)
879 		goto badkey_err;
880 
881 	ck_size = chcr_keyctx_ck_size(keylen);
882 	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
883 	memcpy(ablkctx->key, key, keylen);
884 	ablkctx->enckey_len = keylen;
885 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
886 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
887 			keylen + alignment) >> 4;
888 
889 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
890 						0, 0, context_size);
891 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
892 	return 0;
893 badkey_err:
894 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
895 	ablkctx->enckey_len = 0;
896 
897 	return err;
898 }
899 
900 static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
901 				   const u8 *key,
902 				   unsigned int keylen)
903 {
904 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
905 	unsigned int ck_size, context_size;
906 	u16 alignment = 0;
907 	int err;
908 
909 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
910 	if (err)
911 		goto badkey_err;
912 	ck_size = chcr_keyctx_ck_size(keylen);
913 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
914 	memcpy(ablkctx->key, key, keylen);
915 	ablkctx->enckey_len = keylen;
916 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
917 			keylen + alignment) >> 4;
918 
919 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
920 						0, 0, context_size);
921 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
922 
923 	return 0;
924 badkey_err:
925 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
926 	ablkctx->enckey_len = 0;
927 
928 	return err;
929 }
930 
931 static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
932 				   const u8 *key,
933 				   unsigned int keylen)
934 {
935 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
936 	unsigned int ck_size, context_size;
937 	u16 alignment = 0;
938 	int err;
939 
940 	if (keylen < CTR_RFC3686_NONCE_SIZE)
941 		return -EINVAL;
942 	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
943 	       CTR_RFC3686_NONCE_SIZE);
944 
945 	keylen -= CTR_RFC3686_NONCE_SIZE;
946 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
947 	if (err)
948 		goto badkey_err;
949 
950 	ck_size = chcr_keyctx_ck_size(keylen);
951 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
952 	memcpy(ablkctx->key, key, keylen);
953 	ablkctx->enckey_len = keylen;
954 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
955 			keylen + alignment) >> 4;
956 
957 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
958 						0, 0, context_size);
959 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
960 
961 	return 0;
962 badkey_err:
963 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
964 	ablkctx->enckey_len = 0;
965 
966 	return err;
967 }
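
/*
 * Copy @srciv to @dstiv and add @add to the big-endian 128-bit counter,
 * propagating any carry.
 */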
968 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
969 {
970 	unsigned int size = AES_BLOCK_SIZE;
971 	__be32 *b = (__be32 *)(dstiv + size);
972 	u32 c, prev;
973 
974 	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
975 	for (; size >= 4; size -= 4) {
976 		prev = be32_to_cpu(*--b);
977 		c = prev + add;
978 		*b = cpu_to_be32(c);
979 		if (prev < c)
980 			break;
981 		add = 1;
982 	}
984 }
985 
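/*
 * Clamp @bytes so that the low 32-bit word of the big-endian counter in
 * @iv does not wrap within a single request.
 */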
986 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
987 {
988 	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
989 	u64 c;
990 	u32 temp = be32_to_cpu(*--b);
991 
992 	temp = ~temp;
	/* Number of blocks that can be processed without counter overflow */
	c = (u64)temp + 1;
994 	if ((bytes / AES_BLOCK_SIZE) > c)
995 		bytes = c * AES_BLOCK_SIZE;
996 	return bytes;
997 }
998 
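/*
 * Advance the XTS tweak past the data handled by the previous work
 * request: encrypt the saved IV with the second half of the key and
 * multiply by x in GF(2^128) once per processed block; for non-final
 * requests the result is decrypted back into IV form.
 */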
999 static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1000 			     u32 isfinal)
1001 {
1002 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1003 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1004 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1005 	struct crypto_cipher *cipher;
1006 	int ret, i;
1007 	u8 *key;
1008 	unsigned int keylen;
1009 	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1010 	int round8 = round / 8;
1011 
1012 	cipher = ablkctx->aes_generic;
1013 	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1014 
1015 	keylen = ablkctx->enckey_len / 2;
1016 	key = ablkctx->key + keylen;
1017 	ret = crypto_cipher_setkey(cipher, key, keylen);
1018 	if (ret)
1019 		goto out;
1020 	crypto_cipher_encrypt_one(cipher, iv, iv);
1021 	for (i = 0; i < round8; i++)
1022 		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1023 
1024 	for (i = 0; i < (round % 8); i++)
1025 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1026 
1027 	if (!isfinal)
1028 		crypto_cipher_decrypt_one(cipher, iv, iv);
1029 out:
1030 	return ret;
1031 }
1032 
1033 static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1034 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1035 {
1036 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1037 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1038 	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1039 	int ret = 0;
1040 
1041 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1042 		ctr_add_iv(iv, req->info, (reqctx->processed /
1043 			   AES_BLOCK_SIZE));
1044 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1045 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1046 			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1047 						AES_BLOCK_SIZE) + 1);
1048 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1049 		ret = chcr_update_tweak(req, iv, 0);
1050 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1051 		if (reqctx->op)
			/* Updated before sending last WR */
1053 			memcpy(iv, req->info, AES_BLOCK_SIZE);
1054 		else
1055 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1056 	}
1057 
1058 	return ret;
1060 }
1061 
/* We need a separate function for the final IV because in RFC3686 the
 * initial counter value is 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
1066 
1067 static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1068 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1069 {
1070 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1071 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1072 	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1073 	int ret = 0;
1074 
1075 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1076 		ctr_add_iv(iv, req->info, (reqctx->processed /
1077 			   AES_BLOCK_SIZE));
1078 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1079 		ret = chcr_update_tweak(req, iv, 1);
1080 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for decrypt */
1082 		if (!reqctx->op)
1083 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}
1089 
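/*
 * Completion handler for cipher work requests: refresh the IV from the
 * CPL_FW6_PLD response and either complete the request, hand it to the
 * software fallback, or issue the next work request for the remaining
 * bytes.
 */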
1090 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1091 				   unsigned char *input, int err)
1092 {
1093 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1094 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1095 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1096 	struct sk_buff *skb;
1097 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1098 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1099 	struct  cipher_wr_param wrparam;
1100 	int bytes;
1101 
1102 	if (err)
1103 		goto unmap;
1104 	if (req->nbytes == reqctx->processed) {
1105 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1106 				      req);
1107 		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1108 		goto complete;
1109 	}
1110 
1111 	if (!reqctx->imm) {
1112 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1113 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1114 					  reqctx->src_ofst, reqctx->dst_ofst);
1115 		if ((bytes + reqctx->processed) >= req->nbytes)
1116 			bytes  = req->nbytes - reqctx->processed;
1117 		else
1118 			bytes = rounddown(bytes, 16);
1119 	} else {
		/* CTR mode counter overflow */
1121 		bytes  = req->nbytes - reqctx->processed;
1122 	}
1123 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1124 	if (err)
1125 		goto unmap;
1126 
1127 	if (unlikely(bytes == 0)) {
1128 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1129 				      req);
1130 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1131 				     req->base.flags,
1132 				     req->src,
1133 				     req->dst,
1134 				     req->nbytes,
1135 				     req->info,
1136 				     reqctx->op);
1137 		goto complete;
1138 	}
1139 
1140 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1141 	    CRYPTO_ALG_SUB_TYPE_CTR)
1142 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1143 	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
1144 	wrparam.req = req;
1145 	wrparam.bytes = bytes;
1146 	skb = create_cipher_wr(&wrparam);
1147 	if (IS_ERR(skb)) {
1148 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1149 		err = PTR_ERR(skb);
1150 		goto unmap;
1151 	}
1152 	skb->dev = u_ctx->lldi.ports[0];
1153 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1154 	chcr_send_wr(skb);
1155 	reqctx->last_req_len = bytes;
1156 	reqctx->processed += bytes;
1157 	return 0;
1158 unmap:
1159 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1160 complete:
1161 	req->base.complete(&req->base, err);
1162 	return err;
1163 }
1164 
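/*
 * Validate the request, map it for DMA, set up the IV (including the
 * RFC3686 nonce/counter layout) and build the first cipher work request.
 * Requests that cannot make progress in hardware are handed to the
 * software fallback.
 */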
1165 static int process_cipher(struct ablkcipher_request *req,
1166 				  unsigned short qid,
1167 				  struct sk_buff **skb,
1168 				  unsigned short op_type)
1169 {
1170 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1171 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1172 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1173 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1174 	struct	cipher_wr_param wrparam;
1175 	int bytes, err = -EINVAL;
1176 
1177 	reqctx->processed = 0;
1178 	if (!req->info)
1179 		goto error;
1180 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1181 	    (req->nbytes == 0) ||
1182 	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1183 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1184 		       ablkctx->enckey_len, req->nbytes, ivsize);
1185 		goto error;
1186 	}
1187 	chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1188 	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1189 					    AES_MIN_KEY_SIZE +
1190 					    sizeof(struct cpl_rx_phys_dsgl) +
					/* Min dsgl size */
					    32))) {
		/* Can be sent as Imm */
1194 		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1195 
1196 		dnents = sg_nents_xlen(req->dst, req->nbytes,
1197 				       CHCR_DST_SG_SIZE, 0);
1198 		phys_dsgl = get_space_for_phys_dsgl(dnents);
1199 		kctx_len = roundup(ablkctx->enckey_len, 16);
1200 		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1201 		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1202 			SGE_MAX_WR_LEN;
1203 		bytes = IV + req->nbytes;
1204 
1205 	} else {
1206 		reqctx->imm = 0;
1207 	}
1208 
1209 	if (!reqctx->imm) {
1210 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1211 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1212 					  0, 0);
1213 		if ((bytes + reqctx->processed) >= req->nbytes)
1214 			bytes  = req->nbytes - reqctx->processed;
1215 		else
1216 			bytes = rounddown(bytes, 16);
1217 	} else {
1218 		bytes = req->nbytes;
1219 	}
1220 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1221 	    CRYPTO_ALG_SUB_TYPE_CTR) {
1222 		bytes = adjust_ctr_overflow(req->info, bytes);
1223 	}
1224 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1225 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1226 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1227 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1228 				CTR_RFC3686_IV_SIZE);
1229 
1230 		/* initialize counter portion of counter block */
1231 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1232 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1233 
	} else {
1236 		memcpy(reqctx->iv, req->info, IV);
1237 	}
1238 	if (unlikely(bytes == 0)) {
1239 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1240 				      req);
1241 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1242 					   req->base.flags,
1243 					   req->src,
1244 					   req->dst,
1245 					   req->nbytes,
1246 					   reqctx->iv,
1247 					   op_type);
1248 		goto error;
1249 	}
1250 	reqctx->op = op_type;
1251 	reqctx->srcsg = req->src;
1252 	reqctx->dstsg = req->dst;
1253 	reqctx->src_ofst = 0;
1254 	reqctx->dst_ofst = 0;
1255 	wrparam.qid = qid;
1256 	wrparam.req = req;
1257 	wrparam.bytes = bytes;
1258 	*skb = create_cipher_wr(&wrparam);
1259 	if (IS_ERR(*skb)) {
1260 		err = PTR_ERR(*skb);
1261 		goto unmap;
1262 	}
1263 	reqctx->processed = bytes;
1264 	reqctx->last_req_len = bytes;
1265 
1266 	return 0;
1267 unmap:
1268 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1269 error:
1270 	return err;
1271 }
1272 
1273 static int chcr_aes_encrypt(struct ablkcipher_request *req)
1274 {
1275 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1276 	struct sk_buff *skb = NULL;
1277 	int err, isfull = 0;
1278 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1279 
1280 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1281 					    c_ctx(tfm)->tx_qidx))) {
1282 		isfull = 1;
1283 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1284 			return -ENOSPC;
1285 	}
1286 
1287 	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1288 			     &skb, CHCR_ENCRYPT_OP);
1289 	if (err || !skb)
1290 		return  err;
1291 	skb->dev = u_ctx->lldi.ports[0];
1292 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1293 	chcr_send_wr(skb);
1294 	return isfull ? -EBUSY : -EINPROGRESS;
1295 }
1296 
1297 static int chcr_aes_decrypt(struct ablkcipher_request *req)
1298 {
1299 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1300 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1301 	struct sk_buff *skb = NULL;
1302 	int err, isfull = 0;
1303 
1304 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1305 					    c_ctx(tfm)->tx_qidx))) {
1306 		isfull = 1;
1307 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1308 			return -ENOSPC;
1309 	}
1310 
	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_DECRYPT_OP);
1313 	if (err || !skb)
1314 		return err;
1315 	skb->dev = u_ctx->lldi.ports[0];
1316 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1317 	chcr_send_wr(skb);
1318 	return isfull ? -EBUSY : -EINPROGRESS;
1319 }
1320 
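/*
 * Bind the context to a chcr device on first use and select its rx/tx
 * queue indices, spreading contexts across the available channels.
 */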
1321 static int chcr_device_init(struct chcr_context *ctx)
1322 {
1323 	struct uld_ctx *u_ctx = NULL;
1324 	struct adapter *adap;
1325 	unsigned int id;
1326 	int txq_perchan, txq_idx, ntxq;
1327 	int err = 0, rxq_perchan, rxq_idx;
1328 
1329 	id = smp_processor_id();
1330 	if (!ctx->dev) {
1331 		u_ctx = assign_chcr_device();
1332 		if (!u_ctx) {
			pr_err("chcr device assignment failed\n");
1334 			goto out;
1335 		}
1336 		ctx->dev = u_ctx->dev;
1337 		adap = padap(ctx->dev);
1338 		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1339 				    adap->vres.ncrypto_fc);
1340 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1341 		txq_perchan = ntxq / u_ctx->lldi.nchan;
1342 		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
1343 		rxq_idx += id % rxq_perchan;
1344 		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
1345 		txq_idx += id % txq_perchan;
1346 		spin_lock(&ctx->dev->lock_chcr_dev);
1347 		ctx->rx_qidx = rxq_idx;
1348 		ctx->tx_qidx = txq_idx;
1349 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1350 		ctx->dev->rx_channel_id = 0;
1351 		spin_unlock(&ctx->dev->lock_chcr_dev);
1352 	}
1353 out:
1354 	return err;
1355 }
1356 
1357 static int chcr_cra_init(struct crypto_tfm *tfm)
1358 {
1359 	struct crypto_alg *alg = tfm->__crt_alg;
1360 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1361 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1362 
1363 	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1364 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1365 	if (IS_ERR(ablkctx->sw_cipher)) {
1366 		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1367 		return PTR_ERR(ablkctx->sw_cipher);
1368 	}
1369 
1370 	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
		/* To update tweak */
1372 		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1373 		if (IS_ERR(ablkctx->aes_generic)) {
1374 			pr_err("failed to allocate aes cipher for tweak\n");
1375 			return PTR_ERR(ablkctx->aes_generic);
1376 		}
1377 	} else
1378 		ablkctx->aes_generic = NULL;
1379 
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1381 	return chcr_device_init(crypto_tfm_ctx(tfm));
1382 }
1383 
1384 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1385 {
1386 	struct crypto_alg *alg = tfm->__crt_alg;
1387 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1388 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1389 
	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp().
	 */
1393 	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1394 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1395 	if (IS_ERR(ablkctx->sw_cipher)) {
1396 		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1397 		return PTR_ERR(ablkctx->sw_cipher);
1398 	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1400 	return chcr_device_init(crypto_tfm_ctx(tfm));
1401 }
1402 
1404 static void chcr_cra_exit(struct crypto_tfm *tfm)
1405 {
1406 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1407 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1408 
1409 	crypto_free_skcipher(ablkctx->sw_cipher);
1410 	if (ablkctx->aes_generic)
1411 		crypto_free_cipher(ablkctx->aes_generic);
1412 }
1413 
1414 static int get_alg_config(struct algo_param *params,
1415 			  unsigned int auth_size)
1416 {
1417 	switch (auth_size) {
1418 	case SHA1_DIGEST_SIZE:
1419 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1420 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1421 		params->result_size = SHA1_DIGEST_SIZE;
1422 		break;
1423 	case SHA224_DIGEST_SIZE:
1424 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1425 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1426 		params->result_size = SHA256_DIGEST_SIZE;
1427 		break;
1428 	case SHA256_DIGEST_SIZE:
1429 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1430 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1431 		params->result_size = SHA256_DIGEST_SIZE;
1432 		break;
1433 	case SHA384_DIGEST_SIZE:
1434 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1435 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1436 		params->result_size = SHA512_DIGEST_SIZE;
1437 		break;
1438 	case SHA512_DIGEST_SIZE:
1439 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1440 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1441 		params->result_size = SHA512_DIGEST_SIZE;
1442 		break;
1443 	default:
1444 		pr_err("chcr : ERROR, unsupported digest size\n");
1445 		return -EINVAL;
1446 	}
1447 	return 0;
1448 }
1449 
1450 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1451 {
	crypto_free_shash(base_hash);
1453 }
1454 
/**
 *	create_hash_wr - Create hash work request
 *	@req: hash request
 *	@param: hash work request parameters (key context length, buffer and
 *		scatterlist lengths, last/more flags, etc.)
 */
1459 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1460 				      struct hash_wr_param *param)
1461 {
1462 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1463 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1464 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1465 	struct sk_buff *skb = NULL;
1466 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1467 	struct chcr_wr *chcr_req;
1468 	struct ulptx_sgl *ulptx;
1469 	unsigned int nents = 0, transhdr_len;
1470 	unsigned int temp = 0;
1471 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1472 		GFP_ATOMIC;
1473 	struct adapter *adap = padap(h_ctx(tfm)->dev);
1474 	int error = 0;
1475 
1476 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1477 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1478 				param->sg_len) <= SGE_MAX_WR_LEN;
1479 	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1480 		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1481 	nents += param->bfr_len ? 1 : 0;
1482 	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1483 				param->sg_len, 16) : (sgl_len(nents) * 8);
1484 	transhdr_len = roundup(transhdr_len, 16);
1485 
1486 	skb = alloc_skb(transhdr_len, flags);
1487 	if (!skb)
1488 		return ERR_PTR(-ENOMEM);
1489 	chcr_req = __skb_put_zero(skb, transhdr_len);
1490 
1491 	chcr_req->sec_cpl.op_ivinsrtofst =
1492 		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
1493 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1494 
1495 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1496 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1497 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1498 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1499 	chcr_req->sec_cpl.seqno_numivs =
1500 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1501 					 param->opad_needed, 0);
1502 
1503 	chcr_req->sec_cpl.ivgen_hdrlen =
1504 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1505 
1506 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1507 	       param->alg_prm.result_size);
1508 
1509 	if (param->opad_needed)
1510 		memcpy(chcr_req->key_ctx.key +
1511 		       ((param->alg_prm.result_size <= 32) ? 32 :
1512 			CHCR_HASH_MAX_DIGEST_SIZE),
1513 		       hmacctx->opad, param->alg_prm.result_size);
1514 
1515 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1516 					    param->alg_prm.mk_size, 0,
1517 					    param->opad_needed,
1518 					    ((param->kctx_len +
1519 					     sizeof(chcr_req->key_ctx)) >> 4));
1520 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1521 	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1522 				     DUMMY_BYTES);
1523 	if (param->bfr_len != 0) {
1524 		req_ctx->hctx_wr.dma_addr =
1525 			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1526 				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
1529 			error = -ENOMEM;
1530 			goto err;
1531 		}
1532 		req_ctx->hctx_wr.dma_len = param->bfr_len;
1533 	} else {
1534 		req_ctx->hctx_wr.dma_addr = 0;
1535 	}
1536 	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to max WR size */
1538 	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1539 				(param->sg_len + param->bfr_len) : 0);
1540 	atomic_inc(&adap->chcr_stats.digest_rqst);
1541 	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1542 		    param->hash_size, transhdr_len,
1543 		    temp,  0);
1544 	req_ctx->hctx_wr.skb = skb;
1545 	return skb;
1546 err:
1547 	kfree_skb(skb);
1548 	return  ERR_PTR(error);
1549 }
1550 
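/*
 * ahash ->update(): buffer partial blocks and push full blocks to the
 * hardware as an intermediate ("more") work request.
 */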
1551 static int chcr_ahash_update(struct ahash_request *req)
1552 {
1553 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1554 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1555 	struct uld_ctx *u_ctx = NULL;
1556 	struct sk_buff *skb;
1557 	u8 remainder = 0, bs;
1558 	unsigned int nbytes = req->nbytes;
1559 	struct hash_wr_param params;
1560 	int error, isfull = 0;
1561 
1562 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1563 	u_ctx = ULD_CTX(h_ctx(rtfm));
1564 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1565 					    h_ctx(rtfm)->tx_qidx))) {
1566 		isfull = 1;
1567 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1568 			return -ENOSPC;
1569 	}
1570 
1571 	if (nbytes + req_ctx->reqlen >= bs) {
1572 		remainder = (nbytes + req_ctx->reqlen) % bs;
1573 		nbytes = nbytes + req_ctx->reqlen - remainder;
1574 	} else {
1575 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1576 				   + req_ctx->reqlen, nbytes, 0);
1577 		req_ctx->reqlen += nbytes;
1578 		return 0;
1579 	}
1580 	chcr_init_hctx_per_wr(req_ctx);
1581 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1582 	if (error)
1583 		return -ENOMEM;
1584 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1585 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1586 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1587 				     HASH_SPACE_LEFT(params.kctx_len), 0);
1588 	if (params.sg_len > req->nbytes)
1589 		params.sg_len = req->nbytes;
1590 	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1591 			req_ctx->reqlen;
1592 	params.opad_needed = 0;
1593 	params.more = 1;
1594 	params.last = 0;
1595 	params.bfr_len = req_ctx->reqlen;
1596 	params.scmd1 = 0;
1597 	req_ctx->hctx_wr.srcsg = req->src;
1598 
1599 	params.hash_size = params.alg_prm.result_size;
1600 	req_ctx->data_len += params.sg_len + params.bfr_len;
1601 	skb = create_hash_wr(req, &params);
1602 	if (IS_ERR(skb)) {
1603 		error = PTR_ERR(skb);
1604 		goto unmap;
1605 	}
1606 
1607 	req_ctx->hctx_wr.processed += params.sg_len;
1608 	if (remainder) {
1609 		/* Swap buffers */
1610 		swap(req_ctx->reqbfr, req_ctx->skbfr);
1611 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1612 				   req_ctx->reqbfr, remainder, req->nbytes -
1613 				   remainder);
1614 	}
1615 	req_ctx->reqlen = remainder;
1616 	skb->dev = u_ctx->lldi.ports[0];
1617 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1618 	chcr_send_wr(skb);
1619 
1620 	return isfull ? -EBUSY : -EINPROGRESS;
1621 unmap:
1622 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1623 	return error;
1624 }
1625 
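/*
 * Build the final hash padding block by hand: a 0x80 byte followed by
 * zeroes with the message length in bits at the end of the block.
 */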
1626 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1627 {
1628 	memset(bfr_ptr, 0, bs);
1629 	*bfr_ptr = 0x80;
1630 	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1634 }
1635 
1636 static int chcr_ahash_final(struct ahash_request *req)
1637 {
1638 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1639 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1640 	struct hash_wr_param params;
1641 	struct sk_buff *skb;
1642 	struct uld_ctx *u_ctx = NULL;
1643 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1644 
1645 	chcr_init_hctx_per_wr(req_ctx);
1646 	u_ctx = ULD_CTX(h_ctx(rtfm));
1647 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1648 		params.opad_needed = 1;
1649 	else
1650 		params.opad_needed = 0;
1651 	params.sg_len = 0;
1652 	req_ctx->hctx_wr.isfinal = 1;
1653 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1654 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1655 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1656 		params.opad_needed = 1;
1657 		params.kctx_len *= 2;
1658 	} else {
1659 		params.opad_needed = 0;
1660 	}
1661 
1662 	req_ctx->hctx_wr.result = 1;
1663 	params.bfr_len = req_ctx->reqlen;
1664 	req_ctx->data_len += params.bfr_len + params.sg_len;
1665 	req_ctx->hctx_wr.srcsg = req->src;
1666 	if (req_ctx->reqlen == 0) {
1667 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1668 		params.last = 0;
1669 		params.more = 1;
1670 		params.scmd1 = 0;
1671 		params.bfr_len = bs;
1672 
1673 	} else {
1674 		params.scmd1 = req_ctx->data_len;
1675 		params.last = 1;
1676 		params.more = 0;
1677 	}
1678 	params.hash_size = crypto_ahash_digestsize(rtfm);
1679 	skb = create_hash_wr(req, &params);
1680 	if (IS_ERR(skb))
1681 		return PTR_ERR(skb);
1682 	req_ctx->reqlen = 0;
1683 	skb->dev = u_ctx->lldi.ports[0];
1684 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1685 	chcr_send_wr(skb);
1686 	return -EINPROGRESS;
1687 }
1688 
1689 static int chcr_ahash_finup(struct ahash_request *req)
1690 {
1691 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1692 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1693 	struct uld_ctx *u_ctx = NULL;
1694 	struct sk_buff *skb;
1695 	struct hash_wr_param params;
1696 	u8  bs;
1697 	int error, isfull = 0;
1698 
1699 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1700 	u_ctx = ULD_CTX(h_ctx(rtfm));
1701 
1702 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1703 					    h_ctx(rtfm)->tx_qidx))) {
1704 		isfull = 1;
1705 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1706 			return -ENOSPC;
1707 	}
1708 	chcr_init_hctx_per_wr(req_ctx);
1709 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1710 	if (error)
1711 		return -ENOMEM;
1712 
1713 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1714 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1715 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1716 		params.kctx_len *= 2;
1717 		params.opad_needed = 1;
1718 	} else {
1719 		params.opad_needed = 0;
1720 	}
1721 
1722 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1723 				    HASH_SPACE_LEFT(params.kctx_len), 0);
1724 	if (params.sg_len < req->nbytes) {
1725 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1726 			params.kctx_len /= 2;
1727 			params.opad_needed = 0;
1728 		}
1729 		params.last = 0;
1730 		params.more = 1;
1731 		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1732 					- req_ctx->reqlen;
1733 		params.hash_size = params.alg_prm.result_size;
1734 		params.scmd1 = 0;
1735 	} else {
1736 		params.last = 1;
1737 		params.more = 0;
1738 		params.sg_len = req->nbytes;
1739 		params.hash_size = crypto_ahash_digestsize(rtfm);
1740 		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1741 				params.sg_len;
1742 	}
1743 	params.bfr_len = req_ctx->reqlen;
1744 	req_ctx->data_len += params.bfr_len + params.sg_len;
1745 	req_ctx->hctx_wr.result = 1;
1746 	req_ctx->hctx_wr.srcsg = req->src;
1747 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1748 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1749 		params.last = 0;
1750 		params.more = 1;
1751 		params.scmd1 = 0;
1752 		params.bfr_len = bs;
1753 	}
1754 	skb = create_hash_wr(req, &params);
1755 	if (IS_ERR(skb)) {
1756 		error = PTR_ERR(skb);
1757 		goto unmap;
1758 	}
1759 	req_ctx->reqlen = 0;
1760 	req_ctx->hctx_wr.processed += params.sg_len;
1761 	skb->dev = u_ctx->lldi.ports[0];
1762 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1763 	chcr_send_wr(skb);
1764 
1765 	return isfull ? -EBUSY : -EINPROGRESS;
1766 unmap:
1767 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1768 	return error;
1769 }
1770 
1771 static int chcr_ahash_digest(struct ahash_request *req)
1772 {
1773 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1774 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1775 	struct uld_ctx *u_ctx = NULL;
1776 	struct sk_buff *skb;
1777 	struct hash_wr_param params;
1778 	u8  bs;
1779 	int error, isfull = 0;
1780 
1781 	rtfm->init(req);
1782 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1783 
1784 	u_ctx = ULD_CTX(h_ctx(rtfm));
1785 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1786 					    h_ctx(rtfm)->tx_qidx))) {
1787 		isfull = 1;
1788 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1789 			return -ENOSPC;
1790 	}
1791 
1792 	chcr_init_hctx_per_wr(req_ctx);
1793 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1794 	if (error)
1795 		return -ENOMEM;
1796 
1797 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1798 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1799 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1800 		params.kctx_len *= 2;
1801 		params.opad_needed = 1;
1802 	} else {
1803 		params.opad_needed = 0;
1804 	}
1805 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1806 				HASH_SPACE_LEFT(params.kctx_len), 0);
1807 	if (params.sg_len < req->nbytes) {
1808 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1809 			params.kctx_len /= 2;
1810 			params.opad_needed = 0;
1811 		}
1812 		params.last = 0;
1813 		params.more = 1;
1814 		params.scmd1 = 0;
1815 		params.sg_len = rounddown(params.sg_len, bs);
1816 		params.hash_size = params.alg_prm.result_size;
1817 	} else {
1818 		params.sg_len = req->nbytes;
1819 		params.hash_size = crypto_ahash_digestsize(rtfm);
1820 		params.last = 1;
1821 		params.more = 0;
1822 		params.scmd1 = req->nbytes + req_ctx->data_len;
1823 
1824 	}
1825 	params.bfr_len = 0;
1826 	req_ctx->hctx_wr.result = 1;
1827 	req_ctx->hctx_wr.srcsg = req->src;
1828 	req_ctx->data_len += params.bfr_len + params.sg_len;
1829 
1830 	if (req->nbytes == 0) {
1831 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1832 		params.more = 1;
1833 		params.bfr_len = bs;
1834 	}
1835 
1836 	skb = create_hash_wr(req, &params);
1837 	if (IS_ERR(skb)) {
1838 		error = PTR_ERR(skb);
1839 		goto unmap;
1840 	}
1841 	req_ctx->hctx_wr.processed += params.sg_len;
1842 	skb->dev = u_ctx->lldi.ports[0];
1843 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1844 	chcr_send_wr(skb);
1845 	return isfull ? -EBUSY : -EINPROGRESS;
1846 unmap:
1847 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1848 	return error;
1849 }
1850 
1851 static int chcr_ahash_continue(struct ahash_request *req)
1852 {
1853 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1854 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1855 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1856 	struct uld_ctx *u_ctx = NULL;
1857 	struct sk_buff *skb;
1858 	struct hash_wr_param params;
1859 	u8  bs;
1860 	int error;
1861 
1862 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1863 	u_ctx = ULD_CTX(h_ctx(rtfm));
1864 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1865 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1866 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1867 		params.kctx_len *= 2;
1868 		params.opad_needed = 1;
1869 	} else {
1870 		params.opad_needed = 0;
1871 	}
1872 	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1873 					    HASH_SPACE_LEFT(params.kctx_len),
1874 					    hctx_wr->src_ofst);
1875 	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1876 		params.sg_len = req->nbytes - hctx_wr->processed;
1877 	if (!hctx_wr->result ||
1878 	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1879 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1880 			params.kctx_len /= 2;
1881 			params.opad_needed = 0;
1882 		}
1883 		params.last = 0;
1884 		params.more = 1;
1885 		params.sg_len = rounddown(params.sg_len, bs);
1886 		params.hash_size = params.alg_prm.result_size;
1887 		params.scmd1 = 0;
1888 	} else {
1889 		params.last = 1;
1890 		params.more = 0;
1891 		params.hash_size = crypto_ahash_digestsize(rtfm);
1892 		params.scmd1 = reqctx->data_len + params.sg_len;
1893 	}
1894 	params.bfr_len = 0;
1895 	reqctx->data_len += params.sg_len;
1896 	skb = create_hash_wr(req, &params);
1897 	if (IS_ERR(skb)) {
1898 		error = PTR_ERR(skb);
1899 		goto err;
1900 	}
1901 	hctx_wr->processed += params.sg_len;
1902 	skb->dev = u_ctx->lldi.ports[0];
1903 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1904 	chcr_send_wr(skb);
1905 	return 0;
1906 err:
1907 	return error;
1908 }
1909 
1910 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
1911 					  unsigned char *input,
1912 					  int err)
1913 {
1914 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1915 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1916 	int digestsize, updated_digestsize;
1917 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1918 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1919 
1920 	if (input == NULL)
1921 		goto out;
1922 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
1923 	updated_digestsize = digestsize;
1924 	if (digestsize == SHA224_DIGEST_SIZE)
1925 		updated_digestsize = SHA256_DIGEST_SIZE;
1926 	else if (digestsize == SHA384_DIGEST_SIZE)
1927 		updated_digestsize = SHA512_DIGEST_SIZE;
1928 
1929 	if (hctx_wr->dma_addr) {
1930 		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
1931 				 hctx_wr->dma_len, DMA_TO_DEVICE);
1932 		hctx_wr->dma_addr = 0;
1933 	}
1934 	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
1935 				 req->nbytes)) {
1936 		if (hctx_wr->result == 1) {
1937 			hctx_wr->result = 0;
1938 			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
1939 			       digestsize);
1940 		} else {
1941 			memcpy(reqctx->partial_hash,
1942 			       input + sizeof(struct cpl_fw6_pld),
1943 			       updated_digestsize);
1944 
1945 		}
1946 		goto unmap;
1947 	}
1948 	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
1949 	       updated_digestsize);
1950 
1951 	err = chcr_ahash_continue(req);
1952 	if (err)
1953 		goto unmap;
1954 	return;
1955 unmap:
1956 	if (hctx_wr->is_sg_map)
1957 		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1958 
1960 out:
1961 	req->base.complete(&req->base, err);
1962 }
1963 
/*
 *	chcr_handle_resp - Handle a completion: dispatch to the algorithm
 *	specific handler and unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
1968 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
1969 			 int err)
1970 {
1971 	struct crypto_tfm *tfm = req->tfm;
1972 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1973 	struct adapter *adap = padap(ctx->dev);
1974 
1975 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
1976 	case CRYPTO_ALG_TYPE_AEAD:
1977 		chcr_handle_aead_resp(aead_request_cast(req), input, err);
1978 		break;
1979 
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
					      input, err);
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
1988 	atomic_inc(&adap->chcr_stats.complete);
1989 	return err;
}

static int chcr_ahash_export(struct ahash_request *areq, void *out)
1992 {
1993 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1994 	struct chcr_ahash_req_ctx *state = out;
1995 
1996 	state->reqlen = req_ctx->reqlen;
1997 	state->data_len = req_ctx->data_len;
1998 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1999 	memcpy(state->partial_hash, req_ctx->partial_hash,
2000 	       CHCR_HASH_MAX_DIGEST_SIZE);
2001 	chcr_init_hctx_per_wr(state);
	return 0;
2003 }
2004 
2005 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2006 {
2007 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2008 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2009 
2010 	req_ctx->reqlen = state->reqlen;
2011 	req_ctx->data_len = state->data_len;
2012 	req_ctx->reqbfr = req_ctx->bfr1;
2013 	req_ctx->skbfr = req_ctx->bfr2;
2014 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2015 	memcpy(req_ctx->partial_hash, state->partial_hash,
2016 	       CHCR_HASH_MAX_DIGEST_SIZE);
2017 	chcr_init_hctx_per_wr(req_ctx);
2018 	return 0;
2019 }
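/*
 * Illustrative only (not part of the driver): the generic crypto API uses
 * the export/import pair above to checkpoint and resume a partial hash,
 * roughly as follows. The state buffer size comes from the algorithm's
 * statesize (assumed here to be sizeof(struct chcr_ahash_req_ctx)).
 *
 *	char state[sizeof(struct chcr_ahash_req_ctx)];
 *
 *	crypto_ahash_update(req);		// hash some data
 *	crypto_ahash_export(req, state);	// save reqlen/data_len/buffers
 *	...
 *	crypto_ahash_import(req, state);	// restore the saved state
 *	crypto_ahash_final(req);		// finish the digest
 */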
2020 
2021 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2022 			     unsigned int keylen)
2023 {
2024 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2025 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2026 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2027 	unsigned int i, err = 0, updated_digestsize;
2028 
2029 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2030 
	/* Use the key to calculate the ipad and opad. The ipad will be sent
	 * with the first request's data and the opad with the final hash
	 * result. They are kept in hmacctx->ipad and hmacctx->opad
	 * respectively.
	 */
2035 	shash->tfm = hmacctx->base_hash;
2036 	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
2037 	if (keylen > bs) {
2038 		err = crypto_shash_digest(shash, key, keylen,
2039 					  hmacctx->ipad);
2040 		if (err)
2041 			goto out;
2042 		keylen = digestsize;
2043 	} else {
2044 		memcpy(hmacctx->ipad, key, keylen);
2045 	}
2046 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2047 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2048 
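	/*
	 * XOR the block-sized key with the HMAC inner/outer pad constants
	 * (RFC 2104); IPAD_DATA and OPAD_DATA are the 0x36/0x5c pad bytes
	 * replicated across a 32-bit word, so one XOR per word covers the
	 * whole block.
	 */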
2049 	for (i = 0; i < bs / sizeof(int); i++) {
2050 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2051 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2052 	}
2053 
2054 	updated_digestsize = digestsize;
2055 	if (digestsize == SHA224_DIGEST_SIZE)
2056 		updated_digestsize = SHA256_DIGEST_SIZE;
2057 	else if (digestsize == SHA384_DIGEST_SIZE)
2058 		updated_digestsize = SHA512_DIGEST_SIZE;
2059 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2060 					hmacctx->ipad, digestsize);
2061 	if (err)
2062 		goto out;
2063 	chcr_change_order(hmacctx->ipad, updated_digestsize);
2064 
2065 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2066 					hmacctx->opad, digestsize);
2067 	if (err)
2068 		goto out;
2069 	chcr_change_order(hmacctx->opad, updated_digestsize);
2070 out:
2071 	return err;
2072 }
2073 
2074 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2075 			       unsigned int key_len)
2076 {
2077 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2078 	unsigned short context_size = 0;
2079 	int err;
2080 
2081 	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2082 	if (err)
2083 		goto badkey_err;
2084 
2085 	memcpy(ablkctx->key, key, key_len);
2086 	ablkctx->enckey_len = key_len;
2087 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2088 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2089 	ablkctx->key_ctx_hdr =
2090 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2091 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2092 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2093 				 CHCR_KEYCTX_NO_KEY, 1,
2094 				 0, context_size);
2095 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2096 	return 0;
2097 badkey_err:
2098 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2099 	ablkctx->enckey_len = 0;
2100 
2101 	return err;
2102 }
2103 
2104 static int chcr_sha_init(struct ahash_request *areq)
2105 {
2106 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2107 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2108 	int digestsize =  crypto_ahash_digestsize(tfm);
2109 
2110 	req_ctx->data_len = 0;
2111 	req_ctx->reqlen = 0;
2112 	req_ctx->reqbfr = req_ctx->bfr1;
2113 	req_ctx->skbfr = req_ctx->bfr2;
2114 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2115 
2116 	return 0;
2117 }
2118 
2119 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2120 {
2121 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2122 				 sizeof(struct chcr_ahash_req_ctx));
2123 	return chcr_device_init(crypto_tfm_ctx(tfm));
2124 }
2125 
2126 static int chcr_hmac_init(struct ahash_request *areq)
2127 {
2128 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2129 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2130 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2131 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2132 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2133 
2134 	chcr_sha_init(areq);
2135 	req_ctx->data_len = bs;
2136 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2137 		if (digestsize == SHA224_DIGEST_SIZE)
2138 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2139 			       SHA256_DIGEST_SIZE);
2140 		else if (digestsize == SHA384_DIGEST_SIZE)
2141 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2142 			       SHA512_DIGEST_SIZE);
2143 		else
2144 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2145 			       digestsize);
2146 	}
2147 	return 0;
2148 }
2149 
2150 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2151 {
2152 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2153 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2154 	unsigned int digestsize =
2155 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2156 
2157 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2158 				 sizeof(struct chcr_ahash_req_ctx));
2159 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2160 	if (IS_ERR(hmacctx->base_hash))
2161 		return PTR_ERR(hmacctx->base_hash);
2162 	return chcr_device_init(crypto_tfm_ctx(tfm));
2163 }
2164 
2165 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2166 {
2167 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2168 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2169 
2170 	if (hmacctx->base_hash) {
2171 		chcr_free_shash(hmacctx->base_hash);
2172 		hmacctx->base_hash = NULL;
2173 	}
2174 }
2175 
2176 inline void chcr_aead_common_exit(struct aead_request *req)
2177 {
2178 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2179 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2180 	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2181 
2182 	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2183 }
2184 
2185 static int chcr_aead_common_init(struct aead_request *req)
2186 {
2187 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2188 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2189 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2190 	unsigned int authsize = crypto_aead_authsize(tfm);
2191 	int error = -EINVAL;
2192 
2193 	/* validate key size */
2194 	if (aeadctx->enckey_len == 0)
2195 		goto err;
2196 	if (reqctx->op && req->cryptlen < authsize)
2197 		goto err;
2198 	if (reqctx->b0_len)
2199 		reqctx->scratch_pad = reqctx->iv + IV;
2200 	else
2201 		reqctx->scratch_pad = NULL;
2202 
2203 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2204 				  reqctx->op);
2205 	if (error) {
2206 		error = -ENOMEM;
2207 		goto err;
2208 	}
2209 	reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
2210 					  CHCR_SRC_SG_SIZE, 0);
2211 	reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
2212 					  CHCR_SRC_SG_SIZE, req->assoclen);
2213 	return 0;
2214 err:
2215 	return error;
2216 }
2217 
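/*
 * Decide whether the request must be handed to the software fallback:
 * an empty payload, more destination SG entries than a single DSGL can
 * describe, AAD beyond what the hardware accepts, or a work request that
 * would exceed SGE_MAX_WR_LEN cannot be processed inline.
 */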
2218 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2219 				   int aadmax, int wrlen,
2220 				   unsigned short op_type)
2221 {
2222 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2223 
2224 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2225 	    dst_nents > MAX_DSGL_ENT ||
2226 	    (req->assoclen > aadmax) ||
2227 	    (wrlen > SGE_MAX_WR_LEN))
2228 		return 1;
2229 	return 0;
2230 }
2231 
2232 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2233 {
2234 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2235 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2236 	struct aead_request *subreq = aead_request_ctx(req);
2237 
2238 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2239 	aead_request_set_callback(subreq, req->base.flags,
2240 				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
2244 	return op_type ? crypto_aead_decrypt(subreq) :
2245 		crypto_aead_encrypt(subreq);
2246 }
2247 
2248 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2249 					 unsigned short qid,
2250 					 int size)
2251 {
2252 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2253 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2254 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2255 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2256 	struct sk_buff *skb = NULL;
2257 	struct chcr_wr *chcr_req;
2258 	struct cpl_rx_phys_dsgl *phys_cpl;
2259 	struct ulptx_sgl *ulptx;
2260 	unsigned int transhdr_len;
2261 	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2262 	unsigned int   kctx_len = 0, dnents;
2263 	unsigned int  assoclen = req->assoclen;
2264 	unsigned int  authsize = crypto_aead_authsize(tfm);
2265 	int error = -EINVAL;
2266 	int null = 0;
2267 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2268 		GFP_ATOMIC;
2269 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2270 
2271 	if (req->cryptlen == 0)
2272 		return NULL;
2273 
2274 	reqctx->b0_len = 0;
2275 	error = chcr_aead_common_init(req);
2276 	if (error)
2277 		return ERR_PTR(error);
2278 
2279 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2280 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2281 		null = 1;
2282 		assoclen = 0;
2283 		reqctx->aad_nents = 0;
2284 	}
2285 	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2286 	dnents += sg_nents_xlen(req->dst, req->cryptlen +
2287 		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
2288 		req->assoclen);
2289 	dnents += MIN_AUTH_SG; // For IV
2290 
2291 	dst_size = get_space_for_phys_dsgl(dnents);
2292 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2293 		- sizeof(chcr_req->key_ctx);
2294 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2295 	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2296 			SGE_MAX_WR_LEN;
2297 	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
2298 			: (sgl_len(reqctx->src_nents + reqctx->aad_nents
2299 			+ MIN_GCM_SG) * 8);
2300 	transhdr_len += temp;
2301 	transhdr_len = roundup(transhdr_len, 16);
2302 
2303 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2304 				    transhdr_len, reqctx->op)) {
2305 		atomic_inc(&adap->chcr_stats.fallback);
2306 		chcr_aead_common_exit(req);
2307 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2308 	}
2309 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2310 	if (!skb) {
2311 		error = -ENOMEM;
2312 		goto err;
2313 	}
2314 
2315 	chcr_req = __skb_put_zero(skb, transhdr_len);
2316 
2317 	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2318 
	/*
	 * Input order is AAD, IV and Payload, where the IV is counted as
	 * part of the authenticated data. All other fields are filled
	 * according to the hardware spec.
	 */
2324 	chcr_req->sec_cpl.op_ivinsrtofst =
2325 		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
2326 				       assoclen + 1);
2327 	chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
2328 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2329 					assoclen ? 1 : 0, assoclen,
2330 					assoclen + IV + 1,
2331 					(temp & 0x1F0) >> 4);
2332 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2333 					temp & 0xF,
2334 					null ? 0 : assoclen + IV + 1,
2335 					temp, temp);
2336 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2337 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2338 		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2339 	else
2340 		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2341 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2342 					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2343 					temp,
2344 					actx->auth_mode, aeadctx->hmac_ctrl,
2345 					IV >> 1);
2346 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2347 					 0, 0, dst_size);
2348 
2349 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2350 	if (reqctx->op == CHCR_ENCRYPT_OP ||
2351 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2352 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2353 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2354 		       aeadctx->enckey_len);
2355 	else
2356 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2357 		       aeadctx->enckey_len);
2358 
2359 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2360 	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2361 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2362 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2363 		memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2364 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
2365 				CTR_RFC3686_IV_SIZE);
2366 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
2367 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2368 	} else {
2369 		memcpy(reqctx->iv, req->iv, IV);
2370 	}
2371 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2372 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2373 	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
2374 	chcr_add_aead_src_ent(req, ulptx, assoclen);
2375 	atomic_inc(&adap->chcr_stats.cipher_rqst);
2376 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2377 		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2378 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2379 		   transhdr_len, temp, 0);
2380 	reqctx->skb = skb;
2381 
2382 	return skb;
2383 err:
2384 	chcr_aead_common_exit(req);
2385 
2386 	return ERR_PTR(error);
2387 }
2388 
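/*
 * Map the buffers an AEAD work request references: the IV (plus the CCM
 * B0/scratch area when b0_len is set) as one bidirectional buffer, and the
 * source/destination scatterlists either bidirectionally for in-place
 * requests or as separate to-device/from-device mappings otherwise. Note
 * that dma_map_sg() returns the number of mapped entries, so a return
 * value of zero indicates failure.
 */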
2389 int chcr_aead_dma_map(struct device *dev,
2390 		      struct aead_request *req,
2391 		      unsigned short op_type)
2392 {
2393 	int error;
2394 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2395 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2396 	unsigned int authsize = crypto_aead_authsize(tfm);
2397 	int dst_size;
2398 
2399 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2400 				-authsize : authsize);
2401 	if (!req->cryptlen || !dst_size)
2402 		return 0;
2403 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2404 					DMA_BIDIRECTIONAL);
2405 	if (dma_mapping_error(dev, reqctx->iv_dma))
2406 		return -ENOMEM;
2407 	if (reqctx->b0_len)
2408 		reqctx->b0_dma = reqctx->iv_dma + IV;
2409 	else
2410 		reqctx->b0_dma = 0;
2411 	if (req->src == req->dst) {
2412 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2413 				   DMA_BIDIRECTIONAL);
2414 		if (!error)
2415 			goto err;
2416 	} else {
2417 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2418 				   DMA_TO_DEVICE);
2419 		if (!error)
2420 			goto err;
2421 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2422 				   DMA_FROM_DEVICE);
2423 		if (!error) {
2424 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2425 				   DMA_TO_DEVICE);
2426 			goto err;
2427 		}
2428 	}
2429 
2430 	return 0;
2431 err:
	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
2433 	return -ENOMEM;
2434 }
2435 
2436 void chcr_aead_dma_unmap(struct device *dev,
2437 			 struct aead_request *req,
2438 			 unsigned short op_type)
2439 {
2440 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2441 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2442 	unsigned int authsize = crypto_aead_authsize(tfm);
2443 	int dst_size;
2444 
2445 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2446 					-authsize : authsize);
2447 	if (!req->cryptlen || !dst_size)
2448 		return;
2449 
2450 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2451 					DMA_BIDIRECTIONAL);
2452 	if (req->src == req->dst) {
2453 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2454 				   DMA_BIDIRECTIONAL);
2455 	} else {
2456 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2457 				   DMA_TO_DEVICE);
2458 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2459 				   DMA_FROM_DEVICE);
2460 	}
2461 }
2462 
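/*
 * Add the AEAD source to the work request. In immediate mode the B0 block
 * (if any), the AAD, the IV and the payload are copied inline into the WR;
 * otherwise a ULPTX SGL is built that points at the DMA-mapped B0/IV
 * buffers and the source scatterlist.
 */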
2463 void chcr_add_aead_src_ent(struct aead_request *req,
2464 			   struct ulptx_sgl *ulptx,
2465 			   unsigned int assoclen)
2466 {
2467 	struct ulptx_walk ulp_walk;
2468 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2469 
2470 	if (reqctx->imm) {
2471 		u8 *buf = (u8 *)ulptx;
2472 
2473 		if (reqctx->b0_len) {
2474 			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2475 			buf += reqctx->b0_len;
2476 		}
2477 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2478 				   buf, assoclen, 0);
2479 		buf += assoclen;
2480 		memcpy(buf, reqctx->iv, IV);
2481 		buf += IV;
2482 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2483 				   buf, req->cryptlen, req->assoclen);
2484 	} else {
2485 		ulptx_walk_init(&ulp_walk, ulptx);
2486 		if (reqctx->b0_len)
2487 			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2488 					    &reqctx->b0_dma);
2489 		ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
2490 		ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2491 		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
2492 				  req->assoclen);
2493 		ulptx_walk_end(&ulp_walk);
2494 	}
2495 }
2496 
2497 void chcr_add_aead_dst_ent(struct aead_request *req,
2498 			   struct cpl_rx_phys_dsgl *phys_cpl,
2499 			   unsigned int assoclen,
2500 			   unsigned short qid)
2501 {
2502 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2503 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2504 	struct dsgl_walk dsgl_walk;
2505 	unsigned int authsize = crypto_aead_authsize(tfm);
2506 	u32 temp;
2507 
2508 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2509 	if (reqctx->b0_len)
2510 		dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
2511 	dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
2512 	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2513 	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
2514 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2515 	dsgl_walk_end(&dsgl_walk, qid);
2516 }
2517 
2518 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2519 			     void *ulptx,
2520 			     struct  cipher_wr_param *wrparam)
2521 {
2522 	struct ulptx_walk ulp_walk;
2523 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2524 	u8 *buf = ulptx;
2525 
2526 	memcpy(buf, reqctx->iv, IV);
2527 	buf += IV;
2528 	if (reqctx->imm) {
2529 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2530 				   buf, wrparam->bytes, reqctx->processed);
2531 	} else {
2532 		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2533 		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2534 				  reqctx->src_ofst);
2535 		reqctx->srcsg = ulp_walk.last_sg;
2536 		reqctx->src_ofst = ulp_walk.last_sg_len;
2537 		ulptx_walk_end(&ulp_walk);
2538 	}
2539 }
2540 
2541 void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2542 			     struct cpl_rx_phys_dsgl *phys_cpl,
2543 			     struct  cipher_wr_param *wrparam,
2544 			     unsigned short qid)
2545 {
2546 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2547 	struct dsgl_walk dsgl_walk;
2548 
2549 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2550 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2551 			 reqctx->dst_ofst);
2552 	reqctx->dstsg = dsgl_walk.last_sg;
2553 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2554 
2555 	dsgl_walk_end(&dsgl_walk, qid);
2556 }
2557 
2558 void chcr_add_hash_src_ent(struct ahash_request *req,
2559 			   struct ulptx_sgl *ulptx,
2560 			   struct hash_wr_param *param)
2561 {
2562 	struct ulptx_walk ulp_walk;
2563 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2564 
2565 	if (reqctx->hctx_wr.imm) {
2566 		u8 *buf = (u8 *)ulptx;
2567 
2568 		if (param->bfr_len) {
2569 			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2570 			buf += param->bfr_len;
2571 		}
2572 
2573 		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2574 				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2575 				   param->sg_len, 0);
2576 	} else {
2577 		ulptx_walk_init(&ulp_walk, ulptx);
2578 		if (param->bfr_len)
2579 			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2580 					    &reqctx->hctx_wr.dma_addr);
2581 		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2582 				  param->sg_len, reqctx->hctx_wr.src_ofst);
2583 		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2584 		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2585 		ulptx_walk_end(&ulp_walk);
2586 	}
2587 }
2588 
2589 int chcr_hash_dma_map(struct device *dev,
2590 		      struct ahash_request *req)
2591 {
2592 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2593 	int error = 0;
2594 
2595 	if (!req->nbytes)
2596 		return 0;
2597 	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2598 			   DMA_TO_DEVICE);
2599 	if (!error)
2600 		return -ENOMEM;
2601 	req_ctx->hctx_wr.is_sg_map = 1;
2602 	return 0;
2603 }
2604 
2605 void chcr_hash_dma_unmap(struct device *dev,
2606 			 struct ahash_request *req)
2607 {
2608 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2609 
2610 	if (!req->nbytes)
2611 		return;
2612 
2613 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2614 			   DMA_TO_DEVICE);
2615 	req_ctx->hctx_wr.is_sg_map = 0;
2616 
2617 }
2618 
2619 int chcr_cipher_dma_map(struct device *dev,
2620 			struct ablkcipher_request *req)
2621 {
2622 	int error;
2623 
2624 	if (req->src == req->dst) {
2625 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2626 				   DMA_BIDIRECTIONAL);
2627 		if (!error)
2628 			goto err;
2629 	} else {
2630 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2631 				   DMA_TO_DEVICE);
2632 		if (!error)
2633 			goto err;
2634 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2635 				   DMA_FROM_DEVICE);
2636 		if (!error) {
2637 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2638 				   DMA_TO_DEVICE);
2639 			goto err;
2640 		}
2641 	}
2642 
2643 	return 0;
2644 err:
2645 	return -ENOMEM;
2646 }
2647 
2648 void chcr_cipher_dma_unmap(struct device *dev,
2649 			   struct ablkcipher_request *req)
2650 {
2651 	if (req->src == req->dst) {
2652 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2653 				   DMA_BIDIRECTIONAL);
2654 	} else {
2655 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2656 				   DMA_TO_DEVICE);
2657 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2658 				   DMA_FROM_DEVICE);
2659 	}
2660 }
2661 
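/*
 * Write @msglen as a @csize-byte big-endian field at @block; generate_b0()
 * below points @block at the last l bytes of the CCM B0 block. As a small
 * worked example, set_msg_len(block, 0x0102, 4) leaves block[0..3] as
 * 00 00 01 02.
 */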
2662 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2663 {
2664 	__be32 data;
2665 
2666 	memset(block, 0, csize);
2667 	block += csize;
2668 
2669 	if (csize >= 4)
2670 		csize = 4;
2671 	else if (msglen > (unsigned int)(1 << (8 * csize)))
2672 		return -EOVERFLOW;
2673 
2674 	data = cpu_to_be32(msglen);
2675 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2676 
2677 	return 0;
2678 }
2679 
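/*
 * Build the CCM B0 block (RFC 3610) in the scratch pad. The flags byte
 * starts as iv[0] (which already carries L' = L - 1 in bits 0-2), then
 * gets (M - 2) / 2 in bits 3-5 for the tag length and bit 6 set when AAD
 * is present; the remaining bytes are the nonce followed by the l-byte
 * message-length field written by set_msg_len().
 */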
2680 static void generate_b0(struct aead_request *req,
2681 			struct chcr_aead_ctx *aeadctx,
2682 			unsigned short op_type)
2683 {
2684 	unsigned int l, lp, m;
2685 	int rc;
2686 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2687 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2688 	u8 *b0 = reqctx->scratch_pad;
2689 
2690 	m = crypto_aead_authsize(aead);
2691 
2692 	memcpy(b0, reqctx->iv, 16);
2693 
2694 	lp = b0[0];
2695 	l = lp + 1;
2696 
2697 	/* set m, bits 3-5 */
2698 	*b0 |= (8 * ((m - 2) / 2));
2699 
2700 	/* set adata, bit 6, if associated data is used */
2701 	if (req->assoclen)
2702 		*b0 |= 64;
2703 	rc = set_msg_len(b0 + 16 - l,
2704 			 (op_type == CHCR_DECRYPT_OP) ?
2705 			 req->cryptlen - m : req->cryptlen, l);
2706 }
2707 
2708 static inline int crypto_ccm_check_iv(const u8 *iv)
2709 {
2710 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2711 	if (iv[0] < 1 || iv[0] > 7)
2712 		return -EINVAL;
2713 
2714 	return 0;
2715 }
2716 
2717 static int ccm_format_packet(struct aead_request *req,
2718 			     struct chcr_aead_ctx *aeadctx,
2719 			     unsigned int sub_type,
2720 			     unsigned short op_type,
2721 			     unsigned int assoclen)
2722 {
2723 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2724 	int rc = 0;
2725 
2726 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2727 		reqctx->iv[0] = 3;
2728 		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2729 		memcpy(reqctx->iv + 4, req->iv, 8);
2730 		memset(reqctx->iv + 12, 0, 4);
2731 	} else {
2732 		memcpy(reqctx->iv, req->iv, 16);
2733 	}
2734 	if (assoclen)
2735 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2736 				htons(assoclen);
2737 
2738 	generate_b0(req, aeadctx, op_type);
2739 	/* zero the ctr value */
2740 	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2741 	return rc;
2742 }
2743 
2744 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2745 				  unsigned int dst_size,
2746 				  struct aead_request *req,
2747 				  unsigned short op_type)
2748 {
2749 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2750 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2751 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2752 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2753 	unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
2754 	unsigned int ccm_xtra;
2755 	unsigned char tag_offset = 0, auth_offset = 0;
2756 	unsigned int assoclen;
2757 
2758 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2759 		assoclen = req->assoclen - 8;
2760 	else
2761 		assoclen = req->assoclen;
2762 	ccm_xtra = CCM_B0_SIZE +
2763 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2764 
2765 	auth_offset = req->cryptlen ?
2766 		(assoclen + IV + 1 + ccm_xtra) : 0;
2767 	if (op_type == CHCR_DECRYPT_OP) {
2768 		if (crypto_aead_authsize(tfm) != req->cryptlen)
2769 			tag_offset = crypto_aead_authsize(tfm);
2770 		else
2771 			auth_offset = 0;
2772 	}
2773 
2775 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2776 					 2, assoclen + 1 + ccm_xtra);
2777 	sec_cpl->pldlen =
2778 		htonl(assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM the B0 block is always present, so AAD always starts at 1 */
2780 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2781 					1, assoclen + ccm_xtra, assoclen
2782 					+ IV + 1 + ccm_xtra, 0);
2783 
2784 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2785 					auth_offset, tag_offset,
2786 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2787 					crypto_aead_authsize(tfm));
2788 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2789 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2790 					cipher_mode, mac_mode,
2791 					aeadctx->hmac_ctrl, IV >> 1);
2792 
2793 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2794 					0, dst_size);
2795 }
2796 
2797 static int aead_ccm_validate_input(unsigned short op_type,
2798 				   struct aead_request *req,
2799 				   struct chcr_aead_ctx *aeadctx,
2800 				   unsigned int sub_type)
2801 {
2802 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2803 		if (crypto_ccm_check_iv(req->iv)) {
2804 			pr_err("CCM: IV check fails\n");
2805 			return -EINVAL;
2806 		}
2807 	} else {
2808 		if (req->assoclen != 16 && req->assoclen != 20) {
2809 			pr_err("RFC4309: Invalid AAD length %d\n",
2810 			       req->assoclen);
2811 			return -EINVAL;
2812 		}
2813 	}
2814 	return 0;
2815 }
2816 
2817 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2818 					  unsigned short qid,
2819 					  int size)
2820 {
2821 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2822 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2823 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2824 	struct sk_buff *skb = NULL;
2825 	struct chcr_wr *chcr_req;
2826 	struct cpl_rx_phys_dsgl *phys_cpl;
2827 	struct ulptx_sgl *ulptx;
2828 	unsigned int transhdr_len;
2829 	unsigned int dst_size = 0, kctx_len, dnents, temp;
2830 	unsigned int sub_type, assoclen = req->assoclen;
2831 	unsigned int authsize = crypto_aead_authsize(tfm);
2832 	int error = -EINVAL;
2833 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2834 		GFP_ATOMIC;
2835 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2836 
2837 	sub_type = get_aead_subtype(tfm);
2838 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2839 		assoclen -= 8;
2840 	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2841 	error = chcr_aead_common_init(req);
2842 	if (error)
2843 		return ERR_PTR(error);
2844 
2845 	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2846 	if (error)
2847 		goto err;
2848 	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2849 	dnents += sg_nents_xlen(req->dst, req->cryptlen
2850 			+ (reqctx->op ? -authsize : authsize),
2851 			CHCR_DST_SG_SIZE, req->assoclen);
2852 	dnents += MIN_CCM_SG; // For IV and B0
2853 	dst_size = get_space_for_phys_dsgl(dnents);
2854 	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2855 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2856 	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
2857 		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
2858 	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
2859 				     reqctx->b0_len, 16) :
2860 		(sgl_len(reqctx->src_nents + reqctx->aad_nents +
2861 				    MIN_CCM_SG) *  8);
2862 	transhdr_len += temp;
2863 	transhdr_len = roundup(transhdr_len, 16);
2864 
2865 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2866 				    reqctx->b0_len, transhdr_len, reqctx->op)) {
2867 		atomic_inc(&adap->chcr_stats.fallback);
2868 		chcr_aead_common_exit(req);
2869 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2870 	}
2871 	skb = alloc_skb(SGE_MAX_WR_LEN,  flags);
2872 
2873 	if (!skb) {
2874 		error = -ENOMEM;
2875 		goto err;
2876 	}
2877 
	chcr_req = __skb_put_zero(skb, transhdr_len);
2879 
2880 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2881 
2882 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2883 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2884 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2885 			aeadctx->key, aeadctx->enckey_len);
2886 
2887 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2888 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2889 	error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
2890 	if (error)
2891 		goto dstmap_fail;
2892 	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
2893 	chcr_add_aead_src_ent(req, ulptx, assoclen);
2894 
2895 	atomic_inc(&adap->chcr_stats.aead_rqst);
2896 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2897 		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
2898 		reqctx->b0_len) : 0);
2899 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2900 		    transhdr_len, temp, 0);
2901 	reqctx->skb = skb;
2902 
2903 	return skb;
2904 dstmap_fail:
2905 	kfree_skb(skb);
2906 err:
2907 	chcr_aead_common_exit(req);
2908 	return ERR_PTR(error);
2909 }
2910 
2911 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2912 				     unsigned short qid,
2913 				     int size)
2914 {
2915 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2916 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2917 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2918 	struct sk_buff *skb = NULL;
2919 	struct chcr_wr *chcr_req;
2920 	struct cpl_rx_phys_dsgl *phys_cpl;
2921 	struct ulptx_sgl *ulptx;
2922 	unsigned int transhdr_len, dnents = 0;
2923 	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2924 	unsigned int authsize = crypto_aead_authsize(tfm);
2925 	int error = -EINVAL;
2926 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2927 		GFP_ATOMIC;
2928 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2929 
2930 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2931 		assoclen = req->assoclen - 8;
2932 
2933 	reqctx->b0_len = 0;
2934 	error = chcr_aead_common_init(req);
2935 	if (error)
2936 		return ERR_PTR(error);
2937 	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2938 	dnents += sg_nents_xlen(req->dst, req->cryptlen +
2939 				(reqctx->op ? -authsize : authsize),
2940 				CHCR_DST_SG_SIZE, req->assoclen);
2941 	dnents += MIN_GCM_SG; // For IV
2942 	dst_size = get_space_for_phys_dsgl(dnents);
2943 	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
2944 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2945 	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
2946 			SGE_MAX_WR_LEN;
2947 	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
2948 		(sgl_len(reqctx->src_nents +
2949 		reqctx->aad_nents + MIN_GCM_SG) * 8);
2950 	transhdr_len += temp;
2951 	transhdr_len = roundup(transhdr_len, 16);
2952 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2953 			    transhdr_len, reqctx->op)) {
2954 
2955 		atomic_inc(&adap->chcr_stats.fallback);
2956 		chcr_aead_common_exit(req);
2957 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2958 	}
2959 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2960 	if (!skb) {
2961 		error = -ENOMEM;
2962 		goto err;
2963 	}
2964 
2965 	chcr_req = __skb_put_zero(skb, transhdr_len);
2966 
	// Offset of tag from end
2968 	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2969 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2970 					a_ctx(tfm)->dev->rx_channel_id, 2,
2971 					(assoclen + 1));
2972 	chcr_req->sec_cpl.pldlen =
2973 		htonl(assoclen + IV + req->cryptlen);
2974 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2975 					assoclen ? 1 : 0, assoclen,
2976 					assoclen + IV + 1, 0);
2977 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
2978 			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
2979 						temp, temp);
2980 	chcr_req->sec_cpl.seqno_numivs =
2981 			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
2982 					CHCR_ENCRYPT_OP) ? 1 : 0,
2983 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
2984 					CHCR_SCMD_AUTH_MODE_GHASH,
2985 					aeadctx->hmac_ctrl, IV >> 1);
2986 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2987 					0, 0, dst_size);
2988 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2989 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2990 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2991 	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
2992 
	/* Prepare a 16 byte IV: SALT | IV | 0x00000001 */
2995 	if (get_aead_subtype(tfm) ==
2996 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
2997 		memcpy(reqctx->iv, aeadctx->salt, 4);
2998 		memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
2999 	} else {
3000 		memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
3001 	}
3002 	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
3003 
3004 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3005 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
3006 
3007 	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
3008 	chcr_add_aead_src_ent(req, ulptx, assoclen);
3009 	atomic_inc(&adap->chcr_stats.aead_rqst);
3010 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
3011 		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
3012 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3013 		    transhdr_len, temp, reqctx->verify);
3014 	reqctx->skb = skb;
3015 	return skb;
3016 
3017 err:
3018 	chcr_aead_common_exit(req);
3019 	return ERR_PTR(error);
3020 }
3021 
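/*
 * Allocate a software AEAD of the same algorithm to fall back on when a
 * request cannot be handled inline (see chcr_aead_need_fallback()), and
 * size the request context so it can hold either the driver's reqctx or a
 * nested aead_request for that fallback cipher.
 */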
3024 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3025 {
3026 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3027 	struct aead_alg *alg = crypto_aead_alg(tfm);
3028 
3029 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3030 					       CRYPTO_ALG_NEED_FALLBACK |
3031 					       CRYPTO_ALG_ASYNC);
3032 	if  (IS_ERR(aeadctx->sw_cipher))
3033 		return PTR_ERR(aeadctx->sw_cipher);
3034 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3035 				 sizeof(struct aead_request) +
3036 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3037 	return chcr_device_init(a_ctx(tfm));
3038 }
3039 
3040 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3041 {
3042 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3043 
3044 	crypto_free_aead(aeadctx->sw_cipher);
3045 }
3046 
3047 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3048 					unsigned int authsize)
3049 {
3050 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3051 
3052 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3053 	aeadctx->mayverify = VERIFY_HW;
3054 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

3056 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3057 				    unsigned int authsize)
3058 {
3059 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3060 	u32 maxauth = crypto_aead_maxauthsize(tfm);
3061 
	/* The SHA1 authsize in IPsec is 12 instead of 10, i.e. authsize ==
	 * maxauthsize / 2 does not hold for SHA1, so the authsize == 12
	 * check must come before the authsize == (maxauth >> 1) check.
	 */
3066 	if (authsize == ICV_4) {
3067 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3068 		aeadctx->mayverify = VERIFY_HW;
3069 	} else if (authsize == ICV_6) {
3070 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3071 		aeadctx->mayverify = VERIFY_HW;
3072 	} else if (authsize == ICV_10) {
3073 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3074 		aeadctx->mayverify = VERIFY_HW;
3075 	} else if (authsize == ICV_12) {
3076 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3077 		aeadctx->mayverify = VERIFY_HW;
3078 	} else if (authsize == ICV_14) {
3079 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3080 		aeadctx->mayverify = VERIFY_HW;
3081 	} else if (authsize == (maxauth >> 1)) {
3082 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3083 		aeadctx->mayverify = VERIFY_HW;
3084 	} else if (authsize == maxauth) {
3085 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3086 		aeadctx->mayverify = VERIFY_HW;
3087 	} else {
3088 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3089 		aeadctx->mayverify = VERIFY_SW;
3090 	}
3091 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3092 }
3093 
3094 
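/*
 * Map the requested GCM tag length onto the hardware truncation control.
 * Tag sizes the hardware cannot emit directly (13 and 15 bytes here) are
 * produced untruncated and marked VERIFY_SW, which appears to mean the
 * driver, rather than the hardware, trims and checks the tag.
 */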
3095 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3096 {
3097 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3098 
3099 	switch (authsize) {
3100 	case ICV_4:
3101 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3102 		aeadctx->mayverify = VERIFY_HW;
3103 		break;
3104 	case ICV_8:
3105 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3106 		aeadctx->mayverify = VERIFY_HW;
3107 		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
3116 	case ICV_16:
3117 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3118 		aeadctx->mayverify = VERIFY_HW;
3119 		break;
3120 	case ICV_13:
3121 	case ICV_15:
3122 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3123 		aeadctx->mayverify = VERIFY_SW;
3124 		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3129 		return -EINVAL;
3130 	}
3131 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3132 }
3133 
3134 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3135 					  unsigned int authsize)
3136 {
3137 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3138 
3139 	switch (authsize) {
3140 	case ICV_8:
3141 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3142 		aeadctx->mayverify = VERIFY_HW;
3143 		break;
3144 	case ICV_12:
3145 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3146 		aeadctx->mayverify = VERIFY_HW;
3147 		break;
3148 	case ICV_16:
3149 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3150 		aeadctx->mayverify = VERIFY_HW;
3151 		break;
3152 	default:
3153 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3154 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3155 		return -EINVAL;
3156 	}
3157 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3158 }
3159 
3160 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3161 				unsigned int authsize)
3162 {
3163 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3164 
3165 	switch (authsize) {
3166 	case ICV_4:
3167 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3168 		aeadctx->mayverify = VERIFY_HW;
3169 		break;
3170 	case ICV_6:
3171 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3172 		aeadctx->mayverify = VERIFY_HW;
3173 		break;
3174 	case ICV_8:
3175 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3176 		aeadctx->mayverify = VERIFY_HW;
3177 		break;
3178 	case ICV_10:
3179 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3180 		aeadctx->mayverify = VERIFY_HW;
3181 		break;
3182 	case ICV_12:
3183 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3184 		aeadctx->mayverify = VERIFY_HW;
3185 		break;
3186 	case ICV_14:
3187 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3188 		aeadctx->mayverify = VERIFY_HW;
3189 		break;
3190 	case ICV_16:
3191 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3192 		aeadctx->mayverify = VERIFY_HW;
3193 		break;
3194 	default:
3195 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3196 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3197 		return -EINVAL;
3198 	}
3199 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3200 }
3201 
3202 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3203 				const u8 *key,
3204 				unsigned int keylen)
3205 {
3206 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3207 	unsigned char ck_size, mk_size;
3208 	int key_ctx_size = 0;
3209 
3210 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3211 	if (keylen == AES_KEYSIZE_128) {
3212 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3213 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3214 	} else if (keylen == AES_KEYSIZE_192) {
3215 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3216 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3217 	} else if (keylen == AES_KEYSIZE_256) {
3218 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3219 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3220 	} else {
3221 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3222 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3223 		aeadctx->enckey_len = 0;
3224 		return	-EINVAL;
3225 	}
3226 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3227 						key_ctx_size >> 4);
3228 	memcpy(aeadctx->key, key, keylen);
3229 	aeadctx->enckey_len = keylen;
3230 
3231 	return 0;
3232 }
3233 
3234 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3235 				const u8 *key,
3236 				unsigned int keylen)
3237 {
3238 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3239 	int error;
3240 
3241 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3242 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3243 			      CRYPTO_TFM_REQ_MASK);
3244 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3245 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3246 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3247 			      CRYPTO_TFM_RES_MASK);
3248 	if (error)
3249 		return error;
3250 	return chcr_ccm_common_setkey(aead, key, keylen);
3251 }
3252 
3253 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3254 				    unsigned int keylen)
3255 {
3256 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3257 	int error;
3258 
3259 	if (keylen < 3) {
3260 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3261 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3262 		aeadctx->enckey_len = 0;
3263 		return	-EINVAL;
3264 	}
3265 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3266 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3267 			      CRYPTO_TFM_REQ_MASK);
3268 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3269 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3270 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3271 			      CRYPTO_TFM_RES_MASK);
3272 	if (error)
3273 		return error;
3274 	keylen -= 3;
3275 	memcpy(aeadctx->salt, key + keylen, 3);
3276 	return chcr_ccm_common_setkey(aead, key, keylen);
3277 }
3278 
3279 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3280 			   unsigned int keylen)
3281 {
3282 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3283 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3284 	struct crypto_cipher *cipher;
3285 	unsigned int ck_size;
3286 	int ret = 0, key_ctx_size = 0;
3287 
3288 	aeadctx->enckey_len = 0;
3289 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3290 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3291 			      & CRYPTO_TFM_REQ_MASK);
3292 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3293 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3294 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3295 			      CRYPTO_TFM_RES_MASK);
3296 	if (ret)
3297 		goto out;
3298 
3299 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3300 	    keylen > 3) {
3301 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3302 		memcpy(aeadctx->salt, key + keylen, 4);
3303 	}
3304 	if (keylen == AES_KEYSIZE_128) {
3305 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3306 	} else if (keylen == AES_KEYSIZE_192) {
3307 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3308 	} else if (keylen == AES_KEYSIZE_256) {
3309 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3310 	} else {
3311 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3312 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3313 		pr_err("GCM: Invalid key length %d\n", keylen);
3314 		ret = -EINVAL;
3315 		goto out;
3316 	}
3317 
3318 	memcpy(aeadctx->key, key, keylen);
3319 	aeadctx->enckey_len = keylen;
3320 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3321 		AEAD_H_SIZE;
3322 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3323 						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3324 						0, 0,
3325 						key_ctx_size >> 4);
3326 	/* Calculate the H = CIPH(K, 0 repeated 16 times).
3327 	 * It will go in key context
3328 	 */
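	/*
	 * For reference only (standard AES-128 known-answer vector, not
	 * used by this code): with an all-zero 16-byte key,
	 * H = AES(0^128, 0^128) = 66e94bd4 ef8a2c3b 884cfa59 ca342b2e.
	 */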
3329 	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3330 	if (IS_ERR(cipher)) {
3331 		aeadctx->enckey_len = 0;
3332 		ret = -ENOMEM;
3333 		goto out;
3334 	}
3335 
3336 	ret = crypto_cipher_setkey(cipher, key, keylen);
3337 	if (ret) {
3338 		aeadctx->enckey_len = 0;
3339 		goto out1;
3340 	}
3341 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3342 	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3343 
3344 out1:
3345 	crypto_free_cipher(cipher);
3346 out:
3347 	return ret;
3348 }
3349 
3350 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3351 				   unsigned int keylen)
3352 {
3353 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3354 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* keys holds both the authentication and the cipher key */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
3402 		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. The authentication key is only used
	 * here to generate h(ipad) and h(opad), so it is not stored;
	 * authentication keys longer than the block size are first hashed
	 * down to the digest size.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {

		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
			    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
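	/*
	 * Compute h(ipad) and h(opad) of the authentication key with a
	 * software shash; both partial hashes end up in the key context
	 * consumed by the hardware.
	 */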
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		shash->flags = crypto_shash_get_flags(base_hash);
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
3439 				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else {
			memcpy(o_ptr, keys.authkey, keys.authkeylen);
		}

		/* Compute the ipad digest. */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad digest. */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* Convert the ipad and opad digests to network byte order. */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
						0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}

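/*
 * chcr_aead_digest_null_setkey - Set the key for authenc(digest_null,...).
 * Only the cipher key is programmed; the MAC portion of the key context is
 * marked CHCR_KEYCTX_NO_KEY and the auth mode is set to NOP.
 */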
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct crypto_authenc_keys keys;
	int err;
	/* The key blob contains both the authentication and cipher keys. */
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
3528 		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

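/*
 * chcr_aead_op - Common AEAD request path.
 * Builds a work request with the supplied constructor and posts it to the
 * transform's transmit queue, returning -EINPROGRESS (or -EBUSY when the
 * crypto queue is full) on successful submission.
 */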
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;
	int isfull = 0;

	if (!a_ctx(tfm)->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(a_ctx(tfm));
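	/*
	 * If the crypto queue is full, requests that cannot be backlogged
	 * fail with -ENOSPC; otherwise the request is still submitted and
	 * -EBUSY is returned to signal back-pressure.
	 */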
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	/* Build a work request (WR) from the crypto request. */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);

	if (IS_ERR_OR_NULL(skb))
		return skb ? PTR_ERR(skb) : -ENOMEM;

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}

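/*
 * chcr_aead_encrypt - Dispatch an AEAD encrypt request to the work-request
 * constructor that matches the algorithm subtype (authenc, CCM or GCM).
 */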
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

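/*
 * chcr_aead_decrypt - Dispatch an AEAD decrypt request. When software
 * verification of the tag is requested, room for the maximum authentication
 * size is reserved in the work request.
 */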
static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}

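/*
 * Template table of the cipher, hash, HMAC and AEAD algorithms exposed by
 * the driver. Fields shared across entries, such as the module owner,
 * algorithm flags and operation callbacks, are filled in at registration
 * time by chcr_register_alg().
 */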
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "cbc(aes)",
			.cra_driver_name	= "cbc-aes-chcr",
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_init		= chcr_cra_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey			= chcr_aes_cbc_setkey,
				.encrypt		= chcr_aes_encrypt,
				.decrypt		= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "xts(aes)",
			.cra_driver_name	= "xts-aes-chcr",
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_init		= chcr_cra_init,
			.cra_exit		= NULL,
			.cra_u.ablkcipher = {
					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
					.ivsize		= AES_BLOCK_SIZE,
					.setkey		= chcr_aes_xts_setkey,
					.encrypt	= chcr_aes_encrypt,
					.decrypt	= chcr_aes_decrypt,
				}
			}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "ctr(aes)",
			.cra_driver_name	= "ctr-aes-chcr",
			.cra_blocksize		= 1,
			.cra_init		= chcr_cra_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey		= chcr_aes_ctr_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "rfc3686(ctr(aes))",
			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
			.cra_blocksize		= 1,
			.cra_init		= chcr_rfc3686_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.ivsize		= CTR_RFC3686_IV_SIZE,
				.setkey		= chcr_aes_rfc3686_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
				.geniv          = "seqiv",
			}
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_gcm_ctx),

			},
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize	= GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize	= chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx),

			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize	= GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize	= chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx),

			},
			.ivsize = 8,
			.maxauthsize	= GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {

				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize	= SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize	 = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize  = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey  = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {

				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize	= SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
				"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize	 = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),

			},
			.ivsize  = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey  = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},

};

/*
 *	chcr_unregister_alg - Deregister all previously registered crypto
 *	algorithms from the kernel crypto framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}

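/*
 * Transform context and import/export state sizes used when registering the
 * SHA and HMAC ahash algorithms below.
 */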
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

/*
 *	chcr_register_alg - Register crypto algorithms with the kernel crypto
 *	framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once, when the first device comes up. After this
 *	the kernel will start calling the driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all the crypto algorithms from the kernel.
 *	This should be called once, when the last device goes down. After this
 *	the kernel will no longer call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}