1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/gcm.h>
57 #include <crypto/sha.h>
58 #include <crypto/authenc.h>
59 #include <crypto/ctr.h>
60 #include <crypto/gf128mul.h>
61 #include <crypto/internal/aead.h>
62 #include <crypto/null.h>
63 #include <crypto/internal/skcipher.h>
64 #include <crypto/aead.h>
65 #include <crypto/scatterwalk.h>
66 #include <crypto/internal/hash.h>
67 
68 #include "t4fw_api.h"
69 #include "t4_msg.h"
70 #include "chcr_core.h"
71 #include "chcr_algo.h"
72 #include "chcr_crypto.h"
73 
74 #define IV AES_BLOCK_SIZE
75 
76 static unsigned int sgl_ent_len[] = {
77 	0, 0, 16, 24, 40, 48, 64, 72, 88,
78 	96, 112, 120, 136, 144, 160, 168, 184,
79 	192, 208, 216, 232, 240, 256, 264, 280,
80 	288, 304, 312, 328, 336, 352, 360, 376
81 };
82 
83 static unsigned int dsgl_ent_len[] = {
84 	0, 32, 32, 48, 48, 64, 64, 80, 80,
85 	112, 112, 128, 128, 144, 144, 160, 160,
86 	192, 192, 208, 208, 224, 224, 240, 240,
87 	272, 272, 288, 288, 304, 304, 320, 320
88 };
89 
90 static u32 round_constant[11] = {
91 	0x01000000, 0x02000000, 0x04000000, 0x08000000,
92 	0x10000000, 0x20000000, 0x40000000, 0x80000000,
93 	0x1B000000, 0x36000000, 0x6C000000
94 };
95 
96 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
97 				   unsigned char *input, int err);
98 
99 static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
100 {
101 	return ctx->crypto_ctx->aeadctx;
102 }
103 
104 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
105 {
106 	return ctx->crypto_ctx->ablkctx;
107 }
108 
109 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
110 {
111 	return ctx->crypto_ctx->hmacctx;
112 }
113 
114 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
115 {
116 	return gctx->ctx->gcm;
117 }
118 
119 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
120 {
121 	return gctx->ctx->authenc;
122 }
123 
124 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
125 {
126 	return container_of(ctx->dev, struct uld_ctx, dev);
127 }
128 
129 static inline int is_ofld_imm(const struct sk_buff *skb)
130 {
131 	return (skb->len <= SGE_MAX_WR_LEN);
132 }
133 
134 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
135 {
136 	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
137 }
138 
139 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
140 			 unsigned int entlen,
141 			 unsigned int skip)
142 {
143 	int nents = 0;
144 	unsigned int less;
145 	unsigned int skip_len = 0;
146 
147 	while (sg && skip) {
148 		if (sg_dma_len(sg) <= skip) {
149 			skip -= sg_dma_len(sg);
150 			skip_len = 0;
151 			sg = sg_next(sg);
152 		} else {
153 			skip_len = skip;
154 			skip = 0;
155 		}
156 	}
157 
158 	while (sg && reqlen) {
159 		less = min(reqlen, sg_dma_len(sg) - skip_len);
160 		nents += DIV_ROUND_UP(less, entlen);
161 		reqlen -= less;
162 		skip_len = 0;
163 		sg = sg_next(sg);
164 	}
165 	return nents;
166 }
167 
168 static inline int get_aead_subtype(struct crypto_aead *aead)
169 {
170 	struct aead_alg *alg = crypto_aead_alg(aead);
171 	struct chcr_alg_template *chcr_crypto_alg =
172 		container_of(alg, struct chcr_alg_template, alg.aead);
173 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
174 }
175 
176 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
177 {
178 	u8 temp[SHA512_DIGEST_SIZE];
179 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
180 	int authsize = crypto_aead_authsize(tfm);
181 	struct cpl_fw6_pld *fw6_pld;
182 	int cmp = 0;
183 
184 	fw6_pld = (struct cpl_fw6_pld *)input;
185 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
186 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
187 		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
188 	} else {
189 
190 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
191 				authsize, req->assoclen +
192 				req->cryptlen - authsize);
193 		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
194 	}
195 	if (cmp)
196 		*err = -EBADMSG;
197 	else
198 		*err = 0;
199 }
200 
201 static int chcr_inc_wrcount(struct chcr_dev *dev)
202 {
203 	if (dev->state == CHCR_DETACH)
204 		return 1;
205 	atomic_inc(&dev->inflight);
206 	return 0;
207 }
208 
209 static inline void chcr_dec_wrcount(struct chcr_dev *dev)
210 {
211 	atomic_dec(&dev->inflight);
212 }
213 
214 static inline int chcr_handle_aead_resp(struct aead_request *req,
215 					 unsigned char *input,
216 					 int err)
217 {
218 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
219 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
220 	struct chcr_dev *dev = a_ctx(tfm)->dev;
221 
222 	chcr_aead_common_exit(req);
223 	if (reqctx->verify == VERIFY_SW) {
224 		chcr_verify_tag(req, input, &err);
225 		reqctx->verify = VERIFY_HW;
226 	}
227 	chcr_dec_wrcount(dev);
228 	req->base.complete(&req->base, err);
229 
230 	return err;
231 }
232 
233 static void get_aes_decrypt_key(unsigned char *dec_key,
234 				       const unsigned char *key,
235 				       unsigned int keylength)
236 {
237 	u32 temp;
238 	u32 w_ring[MAX_NK];
239 	int i, j, k;
240 	u8  nr, nk;
241 
242 	switch (keylength) {
243 	case AES_KEYLENGTH_128BIT:
244 		nk = KEYLENGTH_4BYTES;
245 		nr = NUMBER_OF_ROUNDS_10;
246 		break;
247 	case AES_KEYLENGTH_192BIT:
248 		nk = KEYLENGTH_6BYTES;
249 		nr = NUMBER_OF_ROUNDS_12;
250 		break;
251 	case AES_KEYLENGTH_256BIT:
252 		nk = KEYLENGTH_8BYTES;
253 		nr = NUMBER_OF_ROUNDS_14;
254 		break;
255 	default:
256 		return;
257 	}
258 	for (i = 0; i < nk; i++)
259 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
260 
261 	i = 0;
262 	temp = w_ring[nk - 1];
263 	while (i + nk < (nr + 1) * 4) {
264 		if (!(i % nk)) {
265 			/* RotWord(temp) */
266 			temp = (temp << 8) | (temp >> 24);
267 			temp = aes_ks_subword(temp);
268 			temp ^= round_constant[i / nk];
269 		} else if (nk == 8 && (i % 4 == 0)) {
270 			temp = aes_ks_subword(temp);
271 		}
272 		w_ring[i % nk] ^= temp;
273 		temp = w_ring[i % nk];
274 		i++;
275 	}
276 	i--;
277 	for (k = 0, j = i % nk; k < nk; k++) {
278 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
279 		j--;
280 		if (j < 0)
281 			j += nk;
282 	}
283 }
284 
285 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
286 {
287 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
288 
289 	switch (ds) {
290 	case SHA1_DIGEST_SIZE:
291 		base_hash = crypto_alloc_shash("sha1", 0, 0);
292 		break;
293 	case SHA224_DIGEST_SIZE:
294 		base_hash = crypto_alloc_shash("sha224", 0, 0);
295 		break;
296 	case SHA256_DIGEST_SIZE:
297 		base_hash = crypto_alloc_shash("sha256", 0, 0);
298 		break;
299 	case SHA384_DIGEST_SIZE:
300 		base_hash = crypto_alloc_shash("sha384", 0, 0);
301 		break;
302 	case SHA512_DIGEST_SIZE:
303 		base_hash = crypto_alloc_shash("sha512", 0, 0);
304 		break;
305 	}
306 
307 	return base_hash;
308 }
309 
310 static int chcr_compute_partial_hash(struct shash_desc *desc,
311 				     char *iopad, char *result_hash,
312 				     int digest_size)
313 {
314 	struct sha1_state sha1_st;
315 	struct sha256_state sha256_st;
316 	struct sha512_state sha512_st;
317 	int error;
318 
319 	if (digest_size == SHA1_DIGEST_SIZE) {
320 		error = crypto_shash_init(desc) ?:
321 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
322 			crypto_shash_export(desc, (void *)&sha1_st);
323 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
324 	} else if (digest_size == SHA224_DIGEST_SIZE) {
325 		error = crypto_shash_init(desc) ?:
326 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
327 			crypto_shash_export(desc, (void *)&sha256_st);
328 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
329 
330 	} else if (digest_size == SHA256_DIGEST_SIZE) {
331 		error = crypto_shash_init(desc) ?:
332 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
333 			crypto_shash_export(desc, (void *)&sha256_st);
334 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
335 
336 	} else if (digest_size == SHA384_DIGEST_SIZE) {
337 		error = crypto_shash_init(desc) ?:
338 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
339 			crypto_shash_export(desc, (void *)&sha512_st);
340 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
341 
342 	} else if (digest_size == SHA512_DIGEST_SIZE) {
343 		error = crypto_shash_init(desc) ?:
344 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
345 			crypto_shash_export(desc, (void *)&sha512_st);
346 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
347 	} else {
348 		error = -EINVAL;
349 		pr_err("Unknown digest size %d\n", digest_size);
350 	}
351 	return error;
352 }
353 
354 static void chcr_change_order(char *buf, int ds)
355 {
356 	int i;
357 
358 	if (ds == SHA512_DIGEST_SIZE) {
359 		for (i = 0; i < (ds / sizeof(u64)); i++)
360 			*((__be64 *)buf + i) =
361 				cpu_to_be64(*((u64 *)buf + i));
362 	} else {
363 		for (i = 0; i < (ds / sizeof(u32)); i++)
364 			*((__be32 *)buf + i) =
365 				cpu_to_be32(*((u32 *)buf + i));
366 	}
367 }
368 
369 static inline int is_hmac(struct crypto_tfm *tfm)
370 {
371 	struct crypto_alg *alg = tfm->__crt_alg;
372 	struct chcr_alg_template *chcr_crypto_alg =
373 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
374 			     alg.hash);
375 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
376 		return 1;
377 	return 0;
378 }
379 
380 static inline void dsgl_walk_init(struct dsgl_walk *walk,
381 				   struct cpl_rx_phys_dsgl *dsgl)
382 {
383 	walk->dsgl = dsgl;
384 	walk->nents = 0;
385 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
386 }
387 
388 static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
389 				 int pci_chan_id)
390 {
391 	struct cpl_rx_phys_dsgl *phys_cpl;
392 
393 	phys_cpl = walk->dsgl;
394 
395 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
396 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
397 	phys_cpl->pcirlxorder_to_noofsgentr =
398 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
399 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
400 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
401 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
402 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
403 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
404 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
405 	phys_cpl->rss_hdr_int.qid = htons(qid);
406 	phys_cpl->rss_hdr_int.hash_val = 0;
407 	phys_cpl->rss_hdr_int.channel = pci_chan_id;
408 }
409 
410 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
411 					size_t size,
412 					dma_addr_t addr)
413 {
414 	int j;
415 
416 	if (!size)
417 		return;
418 	j = walk->nents;
419 	walk->to->len[j % 8] = htons(size);
420 	walk->to->addr[j % 8] = cpu_to_be64(addr);
421 	j++;
422 	if ((j % 8) == 0)
423 		walk->to++;
424 	walk->nents = j;
425 }
426 
427 static void  dsgl_walk_add_sg(struct dsgl_walk *walk,
428 			   struct scatterlist *sg,
429 			      unsigned int slen,
430 			      unsigned int skip)
431 {
432 	int skip_len = 0;
433 	unsigned int left_size = slen, len = 0;
434 	unsigned int j = walk->nents;
435 	int offset, ent_len;
436 
437 	if (!slen)
438 		return;
439 	while (sg && skip) {
440 		if (sg_dma_len(sg) <= skip) {
441 			skip -= sg_dma_len(sg);
442 			skip_len = 0;
443 			sg = sg_next(sg);
444 		} else {
445 			skip_len = skip;
446 			skip = 0;
447 		}
448 	}
449 
450 	while (left_size && sg) {
451 		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
452 		offset = 0;
453 		while (len) {
454 			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
455 			walk->to->len[j % 8] = htons(ent_len);
456 			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
457 						      offset + skip_len);
458 			offset += ent_len;
459 			len -= ent_len;
460 			j++;
461 			if ((j % 8) == 0)
462 				walk->to++;
463 		}
464 		walk->last_sg = sg;
465 		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
466 					  skip_len) + skip_len;
467 		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
468 		skip_len = 0;
469 		sg = sg_next(sg);
470 	}
471 	walk->nents = j;
472 }
473 
474 static inline void ulptx_walk_init(struct ulptx_walk *walk,
475 				   struct ulptx_sgl *ulp)
476 {
477 	walk->sgl = ulp;
478 	walk->nents = 0;
479 	walk->pair_idx = 0;
480 	walk->pair = ulp->sge;
481 	walk->last_sg = NULL;
482 	walk->last_sg_len = 0;
483 }
484 
485 static inline void ulptx_walk_end(struct ulptx_walk *walk)
486 {
487 	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
488 			      ULPTX_NSGE_V(walk->nents));
489 }
490 
491 
492 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
493 					size_t size,
494 					dma_addr_t addr)
495 {
496 	if (!size)
497 		return;
498 
499 	if (walk->nents == 0) {
500 		walk->sgl->len0 = cpu_to_be32(size);
501 		walk->sgl->addr0 = cpu_to_be64(addr);
502 	} else {
503 		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
504 		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
505 		walk->pair_idx = !walk->pair_idx;
506 		if (!walk->pair_idx)
507 			walk->pair++;
508 	}
509 	walk->nents++;
510 }
511 
512 static void  ulptx_walk_add_sg(struct ulptx_walk *walk,
513 					struct scatterlist *sg,
514 			       unsigned int len,
515 			       unsigned int skip)
516 {
517 	int small;
518 	int skip_len = 0;
519 	unsigned int sgmin;
520 
521 	if (!len)
522 		return;
523 	while (sg && skip) {
524 		if (sg_dma_len(sg) <= skip) {
525 			skip -= sg_dma_len(sg);
526 			skip_len = 0;
527 			sg = sg_next(sg);
528 		} else {
529 			skip_len = skip;
530 			skip = 0;
531 		}
532 	}
533 	WARN(!sg, "SG should not be null here\n");
534 	if (sg && (walk->nents == 0)) {
535 		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
536 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
537 		walk->sgl->len0 = cpu_to_be32(sgmin);
538 		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
539 		walk->nents++;
540 		len -= sgmin;
541 		walk->last_sg = sg;
542 		walk->last_sg_len = sgmin + skip_len;
543 		skip_len += sgmin;
544 		if (sg_dma_len(sg) == skip_len) {
545 			sg = sg_next(sg);
546 			skip_len = 0;
547 		}
548 	}
549 
550 	while (sg && len) {
551 		small = min(sg_dma_len(sg) - skip_len, len);
552 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
553 		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
554 		walk->pair->addr[walk->pair_idx] =
555 			cpu_to_be64(sg_dma_address(sg) + skip_len);
556 		walk->pair_idx = !walk->pair_idx;
557 		walk->nents++;
558 		if (!walk->pair_idx)
559 			walk->pair++;
560 		len -= sgmin;
561 		skip_len += sgmin;
562 		walk->last_sg = sg;
563 		walk->last_sg_len = skip_len;
564 		if (sg_dma_len(sg) == skip_len) {
565 			sg = sg_next(sg);
566 			skip_len = 0;
567 		}
568 	}
569 }
570 
571 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
572 {
573 	struct crypto_alg *alg = tfm->__crt_alg;
574 	struct chcr_alg_template *chcr_crypto_alg =
575 		container_of(alg, struct chcr_alg_template, alg.crypto);
576 
577 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
578 }
579 
580 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
581 {
582 	struct adapter *adap = netdev2adap(dev);
583 	struct sge_uld_txq_info *txq_info =
584 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
585 	struct sge_uld_txq *txq;
586 	int ret = 0;
587 
588 	local_bh_disable();
589 	txq = &txq_info->uldtxq[idx];
590 	spin_lock(&txq->sendq.lock);
591 	if (txq->full)
592 		ret = -1;
593 	spin_unlock(&txq->sendq.lock);
594 	local_bh_enable();
595 	return ret;
596 }
597 
598 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
599 			       struct _key_ctx *key_ctx)
600 {
601 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
602 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
603 	} else {
604 		memcpy(key_ctx->key,
605 		       ablkctx->key + (ablkctx->enckey_len >> 1),
606 		       ablkctx->enckey_len >> 1);
607 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
608 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
609 	}
610 	return 0;
611 }
612 
613 static int chcr_hash_ent_in_wr(struct scatterlist *src,
614 			     unsigned int minsg,
615 			     unsigned int space,
616 			     unsigned int srcskip)
617 {
618 	int srclen = 0;
619 	int srcsg = minsg;
620 	int soffset = 0, sless;
621 
622 	if (sg_dma_len(src) == srcskip) {
623 		src = sg_next(src);
624 		srcskip = 0;
625 	}
626 	while (src && space > (sgl_ent_len[srcsg + 1])) {
627 		sless = min_t(unsigned int, sg_dma_len(src) - soffset -	srcskip,
628 							CHCR_SRC_SG_SIZE);
629 		srclen += sless;
630 		soffset += sless;
631 		srcsg++;
632 		if (sg_dma_len(src) == (soffset + srcskip)) {
633 			src = sg_next(src);
634 			soffset = 0;
635 			srcskip = 0;
636 		}
637 	}
638 	return srclen;
639 }
640 
641 static int chcr_sg_ent_in_wr(struct scatterlist *src,
642 			     struct scatterlist *dst,
643 			     unsigned int minsg,
644 			     unsigned int space,
645 			     unsigned int srcskip,
646 			     unsigned int dstskip)
647 {
648 	int srclen = 0, dstlen = 0;
649 	int srcsg = minsg, dstsg = minsg;
650 	int offset = 0, soffset = 0, less, sless = 0;
651 
652 	if (sg_dma_len(src) == srcskip) {
653 		src = sg_next(src);
654 		srcskip = 0;
655 	}
656 	if (sg_dma_len(dst) == dstskip) {
657 		dst = sg_next(dst);
658 		dstskip = 0;
659 	}
660 
661 	while (src && dst &&
662 	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
663 		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
664 				CHCR_SRC_SG_SIZE);
665 		srclen += sless;
666 		srcsg++;
667 		offset = 0;
668 		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
669 		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
670 			if (srclen <= dstlen)
671 				break;
672 			less = min_t(unsigned int, sg_dma_len(dst) - offset -
673 				     dstskip, CHCR_DST_SG_SIZE);
674 			dstlen += less;
675 			offset += less;
676 			if ((offset + dstskip) == sg_dma_len(dst)) {
677 				dst = sg_next(dst);
678 				offset = 0;
679 			}
680 			dstsg++;
681 			dstskip = 0;
682 		}
683 		soffset += sless;
684 		if ((soffset + srcskip) == sg_dma_len(src)) {
685 			src = sg_next(src);
686 			srcskip = 0;
687 			soffset = 0;
688 		}
689 
690 	}
691 	return min(srclen, dstlen);
692 }
693 
694 static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
695 				u32 flags,
696 				struct scatterlist *src,
697 				struct scatterlist *dst,
698 				unsigned int nbytes,
699 				u8 *iv,
700 				unsigned short op_type)
701 {
702 	int err;
703 
704 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
705 
706 	skcipher_request_set_sync_tfm(subreq, cipher);
707 	skcipher_request_set_callback(subreq, flags, NULL, NULL);
708 	skcipher_request_set_crypt(subreq, src, dst,
709 				   nbytes, iv);
710 
711 	err = op_type ? crypto_skcipher_decrypt(subreq) :
712 		crypto_skcipher_encrypt(subreq);
713 	skcipher_request_zero(subreq);
714 
715 	return err;
716 
717 }
718 static inline void create_wreq(struct chcr_context *ctx,
719 			       struct chcr_wr *chcr_req,
720 			       struct crypto_async_request *req,
721 			       unsigned int imm,
722 			       int hash_sz,
723 			       unsigned int len16,
724 			       unsigned int sc_len,
725 			       unsigned int lcb)
726 {
727 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
728 	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
729 
730 
731 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
732 	chcr_req->wreq.pld_size_hash_size =
733 		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
734 	chcr_req->wreq.len16_pkd =
735 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
736 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
737 	chcr_req->wreq.rx_chid_to_rx_q_id =
738 		FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
739 				!!lcb, ctx->tx_qidx);
740 
741 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
742 						       qid);
743 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
744 				     ((sizeof(chcr_req->wreq)) >> 4)));
745 
746 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
747 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
748 					   sizeof(chcr_req->key_ctx) + sc_len);
749 }
750 
751 /**
752  *	create_cipher_wr - form the WR for cipher operations
753  *	@req: cipher req.
754  *	@ctx: crypto driver context of the request.
755  *	@qid: ingress qid where response of this WR should be received.
756  *	@op_type:	encryption or decryption
757  */
758 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
759 {
760 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
761 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
762 	struct sk_buff *skb = NULL;
763 	struct chcr_wr *chcr_req;
764 	struct cpl_rx_phys_dsgl *phys_cpl;
765 	struct ulptx_sgl *ulptx;
766 	struct chcr_blkcipher_req_ctx *reqctx =
767 		ablkcipher_request_ctx(wrparam->req);
768 	unsigned int temp = 0, transhdr_len, dst_size;
769 	int error;
770 	int nents;
771 	unsigned int kctx_len;
772 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
773 			GFP_KERNEL : GFP_ATOMIC;
774 	struct adapter *adap = padap(c_ctx(tfm)->dev);
775 
776 	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
777 			      reqctx->dst_ofst);
778 	dst_size = get_space_for_phys_dsgl(nents);
779 	kctx_len = roundup(ablkctx->enckey_len, 16);
780 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
781 	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
782 				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
783 	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
784 				     (sgl_len(nents) * 8);
785 	transhdr_len += temp;
786 	transhdr_len = roundup(transhdr_len, 16);
787 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
788 	if (!skb) {
789 		error = -ENOMEM;
790 		goto err;
791 	}
792 	chcr_req = __skb_put_zero(skb, transhdr_len);
793 	chcr_req->sec_cpl.op_ivinsrtofst =
794 		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
795 
796 	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
797 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
798 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
799 
800 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
801 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
802 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
803 							 ablkctx->ciph_mode,
804 							 0, 0, IV >> 1);
805 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
806 							  0, 1, dst_size);
807 
808 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
809 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
810 	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
811 	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
812 	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
813 	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
814 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
815 	} else {
816 		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
817 		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
818 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
819 			       ablkctx->enckey_len);
820 		} else {
821 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
822 			       (ablkctx->enckey_len >> 1),
823 			       ablkctx->enckey_len >> 1);
824 			memcpy(chcr_req->key_ctx.key +
825 			       (ablkctx->enckey_len >> 1),
826 			       ablkctx->key,
827 			       ablkctx->enckey_len >> 1);
828 		}
829 	}
830 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
831 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
832 	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
833 	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
834 
835 	atomic_inc(&adap->chcr_stats.cipher_rqst);
836 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
837 		+ (reqctx->imm ? (wrparam->bytes) : 0);
838 	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
839 		    transhdr_len, temp,
840 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
841 	reqctx->skb = skb;
842 
843 	if (reqctx->op && (ablkctx->ciph_mode ==
844 			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
845 		sg_pcopy_to_buffer(wrparam->req->src,
846 			sg_nents(wrparam->req->src), wrparam->req->info, 16,
847 			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
848 
849 	return skb;
850 err:
851 	return ERR_PTR(error);
852 }
853 
854 static inline int chcr_keyctx_ck_size(unsigned int keylen)
855 {
856 	int ck_size = 0;
857 
858 	if (keylen == AES_KEYSIZE_128)
859 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
860 	else if (keylen == AES_KEYSIZE_192)
861 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
862 	else if (keylen == AES_KEYSIZE_256)
863 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
864 	else
865 		ck_size = 0;
866 
867 	return ck_size;
868 }
869 static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
870 				       const u8 *key,
871 				       unsigned int keylen)
872 {
873 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
874 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
875 	int err = 0;
876 
877 	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
878 				CRYPTO_TFM_REQ_MASK);
879 	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
880 				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
881 	err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
882 	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
883 	tfm->crt_flags |=
884 		crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
885 		CRYPTO_TFM_RES_MASK;
886 	return err;
887 }
888 
889 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
890 			       const u8 *key,
891 			       unsigned int keylen)
892 {
893 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
894 	unsigned int ck_size, context_size;
895 	u16 alignment = 0;
896 	int err;
897 
898 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
899 	if (err)
900 		goto badkey_err;
901 
902 	ck_size = chcr_keyctx_ck_size(keylen);
903 	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
904 	memcpy(ablkctx->key, key, keylen);
905 	ablkctx->enckey_len = keylen;
906 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
907 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
908 			keylen + alignment) >> 4;
909 
910 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
911 						0, 0, context_size);
912 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
913 	return 0;
914 badkey_err:
915 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
916 	ablkctx->enckey_len = 0;
917 
918 	return err;
919 }
920 
921 static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
922 				   const u8 *key,
923 				   unsigned int keylen)
924 {
925 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
926 	unsigned int ck_size, context_size;
927 	u16 alignment = 0;
928 	int err;
929 
930 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
931 	if (err)
932 		goto badkey_err;
933 	ck_size = chcr_keyctx_ck_size(keylen);
934 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
935 	memcpy(ablkctx->key, key, keylen);
936 	ablkctx->enckey_len = keylen;
937 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
938 			keylen + alignment) >> 4;
939 
940 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
941 						0, 0, context_size);
942 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
943 
944 	return 0;
945 badkey_err:
946 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
947 	ablkctx->enckey_len = 0;
948 
949 	return err;
950 }
951 
952 static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
953 				   const u8 *key,
954 				   unsigned int keylen)
955 {
956 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
957 	unsigned int ck_size, context_size;
958 	u16 alignment = 0;
959 	int err;
960 
961 	if (keylen < CTR_RFC3686_NONCE_SIZE)
962 		return -EINVAL;
963 	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
964 	       CTR_RFC3686_NONCE_SIZE);
965 
966 	keylen -= CTR_RFC3686_NONCE_SIZE;
967 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
968 	if (err)
969 		goto badkey_err;
970 
971 	ck_size = chcr_keyctx_ck_size(keylen);
972 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
973 	memcpy(ablkctx->key, key, keylen);
974 	ablkctx->enckey_len = keylen;
975 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
976 			keylen + alignment) >> 4;
977 
978 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
979 						0, 0, context_size);
980 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
981 
982 	return 0;
983 badkey_err:
984 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
985 	ablkctx->enckey_len = 0;
986 
987 	return err;
988 }
989 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
990 {
991 	unsigned int size = AES_BLOCK_SIZE;
992 	__be32 *b = (__be32 *)(dstiv + size);
993 	u32 c, prev;
994 
995 	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
996 	for (; size >= 4; size -= 4) {
997 		prev = be32_to_cpu(*--b);
998 		c = prev + add;
999 		*b = cpu_to_be32(c);
1000 		if (prev < c)
1001 			break;
1002 		add = 1;
1003 	}
1004 
1005 }
1006 
1007 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1008 {
1009 	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1010 	u64 c;
1011 	u32 temp = be32_to_cpu(*--b);
1012 
1013 	temp = ~temp;
1014 	c = (u64)temp +  1; // No of block can processed withou overflow
1015 	if ((bytes / AES_BLOCK_SIZE) > c)
1016 		bytes = c * AES_BLOCK_SIZE;
1017 	return bytes;
1018 }
1019 
1020 static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1021 			     u32 isfinal)
1022 {
1023 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1024 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1025 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1026 	struct crypto_cipher *cipher;
1027 	int ret, i;
1028 	u8 *key;
1029 	unsigned int keylen;
1030 	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1031 	int round8 = round / 8;
1032 
1033 	cipher = ablkctx->aes_generic;
1034 	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1035 
1036 	keylen = ablkctx->enckey_len / 2;
1037 	key = ablkctx->key + keylen;
1038 	ret = crypto_cipher_setkey(cipher, key, keylen);
1039 	if (ret)
1040 		goto out;
1041 	crypto_cipher_encrypt_one(cipher, iv, iv);
1042 	for (i = 0; i < round8; i++)
1043 		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1044 
1045 	for (i = 0; i < (round % 8); i++)
1046 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1047 
1048 	if (!isfinal)
1049 		crypto_cipher_decrypt_one(cipher, iv, iv);
1050 out:
1051 	return ret;
1052 }
1053 
1054 static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1055 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1056 {
1057 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1058 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1059 	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1060 	int ret = 0;
1061 
1062 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1063 		ctr_add_iv(iv, req->info, (reqctx->processed /
1064 			   AES_BLOCK_SIZE));
1065 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1066 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1067 			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1068 						AES_BLOCK_SIZE) + 1);
1069 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1070 		ret = chcr_update_tweak(req, iv, 0);
1071 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1072 		if (reqctx->op)
1073 			/*Updated before sending last WR*/
1074 			memcpy(iv, req->info, AES_BLOCK_SIZE);
1075 		else
1076 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1077 	}
1078 
1079 	return ret;
1080 
1081 }
1082 
1083 /* We need separate function for final iv because in rfc3686  Initial counter
1084  * starts from 1 and buffer size of iv is 8 byte only which remains constant
1085  * for subsequent update requests
1086  */
1087 
1088 static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1089 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1090 {
1091 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1092 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1093 	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1094 	int ret = 0;
1095 
1096 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1097 		ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
1098 						       AES_BLOCK_SIZE));
1099 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1100 		ret = chcr_update_tweak(req, iv, 1);
1101 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1102 		/*Already updated for Decrypt*/
1103 		if (!reqctx->op)
1104 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1105 
1106 	}
1107 	return ret;
1108 
1109 }
1110 
1111 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1112 				   unsigned char *input, int err)
1113 {
1114 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1115 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1116 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1117 	struct sk_buff *skb;
1118 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1119 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1120 	struct  cipher_wr_param wrparam;
1121 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1122 	int bytes;
1123 
1124 	if (err)
1125 		goto unmap;
1126 	if (req->nbytes == reqctx->processed) {
1127 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1128 				      req);
1129 		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1130 		goto complete;
1131 	}
1132 
1133 	if (!reqctx->imm) {
1134 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1135 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1136 					  reqctx->src_ofst, reqctx->dst_ofst);
1137 		if ((bytes + reqctx->processed) >= req->nbytes)
1138 			bytes  = req->nbytes - reqctx->processed;
1139 		else
1140 			bytes = rounddown(bytes, 16);
1141 	} else {
1142 		/*CTR mode counter overfloa*/
1143 		bytes  = req->nbytes - reqctx->processed;
1144 	}
1145 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1146 	if (err)
1147 		goto unmap;
1148 
1149 	if (unlikely(bytes == 0)) {
1150 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1151 				      req);
1152 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1153 				     req->base.flags,
1154 				     req->src,
1155 				     req->dst,
1156 				     req->nbytes,
1157 				     req->info,
1158 				     reqctx->op);
1159 		goto complete;
1160 	}
1161 
1162 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1163 	    CRYPTO_ALG_SUB_TYPE_CTR)
1164 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1165 	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
1166 	wrparam.req = req;
1167 	wrparam.bytes = bytes;
1168 	skb = create_cipher_wr(&wrparam);
1169 	if (IS_ERR(skb)) {
1170 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1171 		err = PTR_ERR(skb);
1172 		goto unmap;
1173 	}
1174 	skb->dev = u_ctx->lldi.ports[0];
1175 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1176 	chcr_send_wr(skb);
1177 	reqctx->last_req_len = bytes;
1178 	reqctx->processed += bytes;
1179 	return 0;
1180 unmap:
1181 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1182 complete:
1183 	chcr_dec_wrcount(dev);
1184 	req->base.complete(&req->base, err);
1185 	return err;
1186 }
1187 
1188 static int process_cipher(struct ablkcipher_request *req,
1189 				  unsigned short qid,
1190 				  struct sk_buff **skb,
1191 				  unsigned short op_type)
1192 {
1193 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1194 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1195 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1196 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1197 	struct	cipher_wr_param wrparam;
1198 	int bytes, err = -EINVAL;
1199 
1200 	reqctx->processed = 0;
1201 	if (!req->info)
1202 		goto error;
1203 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1204 	    (req->nbytes == 0) ||
1205 	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1206 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1207 		       ablkctx->enckey_len, req->nbytes, ivsize);
1208 		goto error;
1209 	}
1210 
1211 	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1212 	if (err)
1213 		goto error;
1214 	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1215 					    AES_MIN_KEY_SIZE +
1216 					    sizeof(struct cpl_rx_phys_dsgl) +
1217 					/*Min dsgl size*/
1218 					    32))) {
1219 		/* Can be sent as Imm*/
1220 		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1221 
1222 		dnents = sg_nents_xlen(req->dst, req->nbytes,
1223 				       CHCR_DST_SG_SIZE, 0);
1224 		phys_dsgl = get_space_for_phys_dsgl(dnents);
1225 		kctx_len = roundup(ablkctx->enckey_len, 16);
1226 		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1227 		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1228 			SGE_MAX_WR_LEN;
1229 		bytes = IV + req->nbytes;
1230 
1231 	} else {
1232 		reqctx->imm = 0;
1233 	}
1234 
1235 	if (!reqctx->imm) {
1236 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1237 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1238 					  0, 0);
1239 		if ((bytes + reqctx->processed) >= req->nbytes)
1240 			bytes  = req->nbytes - reqctx->processed;
1241 		else
1242 			bytes = rounddown(bytes, 16);
1243 	} else {
1244 		bytes = req->nbytes;
1245 	}
1246 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1247 	    CRYPTO_ALG_SUB_TYPE_CTR) {
1248 		bytes = adjust_ctr_overflow(req->info, bytes);
1249 	}
1250 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1251 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1252 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1253 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1254 				CTR_RFC3686_IV_SIZE);
1255 
1256 		/* initialize counter portion of counter block */
1257 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1258 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1259 
1260 	} else {
1261 
1262 		memcpy(reqctx->iv, req->info, IV);
1263 	}
1264 	if (unlikely(bytes == 0)) {
1265 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1266 				      req);
1267 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1268 					   req->base.flags,
1269 					   req->src,
1270 					   req->dst,
1271 					   req->nbytes,
1272 					   reqctx->iv,
1273 					   op_type);
1274 		goto error;
1275 	}
1276 	reqctx->op = op_type;
1277 	reqctx->srcsg = req->src;
1278 	reqctx->dstsg = req->dst;
1279 	reqctx->src_ofst = 0;
1280 	reqctx->dst_ofst = 0;
1281 	wrparam.qid = qid;
1282 	wrparam.req = req;
1283 	wrparam.bytes = bytes;
1284 	*skb = create_cipher_wr(&wrparam);
1285 	if (IS_ERR(*skb)) {
1286 		err = PTR_ERR(*skb);
1287 		goto unmap;
1288 	}
1289 	reqctx->processed = bytes;
1290 	reqctx->last_req_len = bytes;
1291 
1292 	return 0;
1293 unmap:
1294 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1295 error:
1296 	return err;
1297 }
1298 
1299 static int chcr_aes_encrypt(struct ablkcipher_request *req)
1300 {
1301 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1302 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1303 	struct sk_buff *skb = NULL;
1304 	int err, isfull = 0;
1305 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1306 
1307 	err = chcr_inc_wrcount(dev);
1308 	if (err)
1309 		return -ENXIO;
1310 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1311 					    c_ctx(tfm)->tx_qidx))) {
1312 		isfull = 1;
1313 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1314 			err = -ENOSPC;
1315 			goto error;
1316 		}
1317 	}
1318 
1319 	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1320 			     &skb, CHCR_ENCRYPT_OP);
1321 	if (err || !skb)
1322 		return  err;
1323 	skb->dev = u_ctx->lldi.ports[0];
1324 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1325 	chcr_send_wr(skb);
1326 	return isfull ? -EBUSY : -EINPROGRESS;
1327 error:
1328 	chcr_dec_wrcount(dev);
1329 	return err;
1330 }
1331 
1332 static int chcr_aes_decrypt(struct ablkcipher_request *req)
1333 {
1334 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1335 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1336 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1337 	struct sk_buff *skb = NULL;
1338 	int err, isfull = 0;
1339 
1340 	err = chcr_inc_wrcount(dev);
1341 	if (err)
1342 		return -ENXIO;
1343 
1344 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1345 					    c_ctx(tfm)->tx_qidx))) {
1346 		isfull = 1;
1347 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1348 			return -ENOSPC;
1349 	}
1350 
1351 	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1352 			     &skb, CHCR_DECRYPT_OP);
1353 	if (err || !skb)
1354 		return err;
1355 	skb->dev = u_ctx->lldi.ports[0];
1356 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1357 	chcr_send_wr(skb);
1358 	return isfull ? -EBUSY : -EINPROGRESS;
1359 }
1360 
1361 static int chcr_device_init(struct chcr_context *ctx)
1362 {
1363 	struct uld_ctx *u_ctx = NULL;
1364 	unsigned int id;
1365 	int txq_perchan, txq_idx, ntxq;
1366 	int err = 0, rxq_perchan, rxq_idx;
1367 
1368 	id = smp_processor_id();
1369 	if (!ctx->dev) {
1370 		u_ctx = assign_chcr_device();
1371 		if (!u_ctx) {
1372 			err = -ENXIO;
1373 			pr_err("chcr device assignment fails\n");
1374 			goto out;
1375 		}
1376 		ctx->dev = &u_ctx->dev;
1377 		ntxq = u_ctx->lldi.ntxq;
1378 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1379 		txq_perchan = ntxq / u_ctx->lldi.nchan;
1380 		spin_lock(&ctx->dev->lock_chcr_dev);
1381 		ctx->tx_chan_id = ctx->dev->tx_channel_id;
1382 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1383 		spin_unlock(&ctx->dev->lock_chcr_dev);
1384 		rxq_idx = ctx->tx_chan_id * rxq_perchan;
1385 		rxq_idx += id % rxq_perchan;
1386 		txq_idx = ctx->tx_chan_id * txq_perchan;
1387 		txq_idx += id % txq_perchan;
1388 		ctx->rx_qidx = rxq_idx;
1389 		ctx->tx_qidx = txq_idx;
1390 		/* Channel Id used by SGE to forward packet to Host.
1391 		 * Same value should be used in cpl_fw6_pld RSS_CH field
1392 		 * by FW. Driver programs PCI channel ID to be used in fw
1393 		 * at the time of queue allocation with value "pi->tx_chan"
1394 		 */
1395 		ctx->pci_chan_id = txq_idx / txq_perchan;
1396 	}
1397 out:
1398 	return err;
1399 }
1400 
1401 static int chcr_cra_init(struct crypto_tfm *tfm)
1402 {
1403 	struct crypto_alg *alg = tfm->__crt_alg;
1404 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1405 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1406 
1407 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
1408 				CRYPTO_ALG_NEED_FALLBACK);
1409 	if (IS_ERR(ablkctx->sw_cipher)) {
1410 		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1411 		return PTR_ERR(ablkctx->sw_cipher);
1412 	}
1413 
1414 	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1415 		/* To update tweak*/
1416 		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1417 		if (IS_ERR(ablkctx->aes_generic)) {
1418 			pr_err("failed to allocate aes cipher for tweak\n");
1419 			return PTR_ERR(ablkctx->aes_generic);
1420 		}
1421 	} else
1422 		ablkctx->aes_generic = NULL;
1423 
1424 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1425 	return chcr_device_init(crypto_tfm_ctx(tfm));
1426 }
1427 
1428 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1429 {
1430 	struct crypto_alg *alg = tfm->__crt_alg;
1431 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1432 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1433 
1434 	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
1435 	 * cannot be used as fallback in chcr_handle_cipher_response
1436 	 */
1437 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1438 				CRYPTO_ALG_NEED_FALLBACK);
1439 	if (IS_ERR(ablkctx->sw_cipher)) {
1440 		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1441 		return PTR_ERR(ablkctx->sw_cipher);
1442 	}
1443 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1444 	return chcr_device_init(crypto_tfm_ctx(tfm));
1445 }
1446 
1447 
1448 static void chcr_cra_exit(struct crypto_tfm *tfm)
1449 {
1450 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1451 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1452 
1453 	crypto_free_sync_skcipher(ablkctx->sw_cipher);
1454 	if (ablkctx->aes_generic)
1455 		crypto_free_cipher(ablkctx->aes_generic);
1456 }
1457 
1458 static int get_alg_config(struct algo_param *params,
1459 			  unsigned int auth_size)
1460 {
1461 	switch (auth_size) {
1462 	case SHA1_DIGEST_SIZE:
1463 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1464 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1465 		params->result_size = SHA1_DIGEST_SIZE;
1466 		break;
1467 	case SHA224_DIGEST_SIZE:
1468 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1469 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1470 		params->result_size = SHA256_DIGEST_SIZE;
1471 		break;
1472 	case SHA256_DIGEST_SIZE:
1473 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1474 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1475 		params->result_size = SHA256_DIGEST_SIZE;
1476 		break;
1477 	case SHA384_DIGEST_SIZE:
1478 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1479 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1480 		params->result_size = SHA512_DIGEST_SIZE;
1481 		break;
1482 	case SHA512_DIGEST_SIZE:
1483 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1484 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1485 		params->result_size = SHA512_DIGEST_SIZE;
1486 		break;
1487 	default:
1488 		pr_err("chcr : ERROR, unsupported digest size\n");
1489 		return -EINVAL;
1490 	}
1491 	return 0;
1492 }
1493 
1494 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1495 {
1496 		crypto_free_shash(base_hash);
1497 }
1498 
1499 /**
1500  *	create_hash_wr - Create hash work request
1501  *	@req - Cipher req base
1502  */
1503 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1504 				      struct hash_wr_param *param)
1505 {
1506 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1507 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1508 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1509 	struct sk_buff *skb = NULL;
1510 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1511 	struct chcr_wr *chcr_req;
1512 	struct ulptx_sgl *ulptx;
1513 	unsigned int nents = 0, transhdr_len;
1514 	unsigned int temp = 0;
1515 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1516 		GFP_ATOMIC;
1517 	struct adapter *adap = padap(h_ctx(tfm)->dev);
1518 	int error = 0;
1519 
1520 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1521 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1522 				param->sg_len) <= SGE_MAX_WR_LEN;
1523 	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1524 		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1525 	nents += param->bfr_len ? 1 : 0;
1526 	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1527 				param->sg_len, 16) : (sgl_len(nents) * 8);
1528 	transhdr_len = roundup(transhdr_len, 16);
1529 
1530 	skb = alloc_skb(transhdr_len, flags);
1531 	if (!skb)
1532 		return ERR_PTR(-ENOMEM);
1533 	chcr_req = __skb_put_zero(skb, transhdr_len);
1534 
1535 	chcr_req->sec_cpl.op_ivinsrtofst =
1536 		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
1537 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1538 
1539 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1540 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1541 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1542 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1543 	chcr_req->sec_cpl.seqno_numivs =
1544 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1545 					 param->opad_needed, 0);
1546 
1547 	chcr_req->sec_cpl.ivgen_hdrlen =
1548 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1549 
1550 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1551 	       param->alg_prm.result_size);
1552 
1553 	if (param->opad_needed)
1554 		memcpy(chcr_req->key_ctx.key +
1555 		       ((param->alg_prm.result_size <= 32) ? 32 :
1556 			CHCR_HASH_MAX_DIGEST_SIZE),
1557 		       hmacctx->opad, param->alg_prm.result_size);
1558 
1559 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1560 					    param->alg_prm.mk_size, 0,
1561 					    param->opad_needed,
1562 					    ((param->kctx_len +
1563 					     sizeof(chcr_req->key_ctx)) >> 4));
1564 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1565 	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1566 				     DUMMY_BYTES);
1567 	if (param->bfr_len != 0) {
1568 		req_ctx->hctx_wr.dma_addr =
1569 			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1570 				       param->bfr_len, DMA_TO_DEVICE);
1571 		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1572 				       req_ctx->hctx_wr. dma_addr)) {
1573 			error = -ENOMEM;
1574 			goto err;
1575 		}
1576 		req_ctx->hctx_wr.dma_len = param->bfr_len;
1577 	} else {
1578 		req_ctx->hctx_wr.dma_addr = 0;
1579 	}
1580 	chcr_add_hash_src_ent(req, ulptx, param);
1581 	/* Request upto max wr size */
1582 	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1583 				(param->sg_len + param->bfr_len) : 0);
1584 	atomic_inc(&adap->chcr_stats.digest_rqst);
1585 	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1586 		    param->hash_size, transhdr_len,
1587 		    temp,  0);
1588 	req_ctx->hctx_wr.skb = skb;
1589 	return skb;
1590 err:
1591 	kfree_skb(skb);
1592 	return  ERR_PTR(error);
1593 }
1594 
1595 static int chcr_ahash_update(struct ahash_request *req)
1596 {
1597 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1598 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1599 	struct uld_ctx *u_ctx = NULL;
1600 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1601 	struct sk_buff *skb;
1602 	u8 remainder = 0, bs;
1603 	unsigned int nbytes = req->nbytes;
1604 	struct hash_wr_param params;
1605 	int error, isfull = 0;
1606 
1607 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1608 	u_ctx = ULD_CTX(h_ctx(rtfm));
1609 
1610 	if (nbytes + req_ctx->reqlen >= bs) {
1611 		remainder = (nbytes + req_ctx->reqlen) % bs;
1612 		nbytes = nbytes + req_ctx->reqlen - remainder;
1613 	} else {
1614 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1615 				   + req_ctx->reqlen, nbytes, 0);
1616 		req_ctx->reqlen += nbytes;
1617 		return 0;
1618 	}
1619 	error = chcr_inc_wrcount(dev);
1620 	if (error)
1621 		return -ENXIO;
1622 	/* Detach state for CHCR means lldi or padap is freed. Increasing
1623 	 * inflight count for dev guarantees that lldi and padap is valid
1624 	 */
1625 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1626 					    h_ctx(rtfm)->tx_qidx))) {
1627 		isfull = 1;
1628 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1629 			error = -ENOSPC;
1630 			goto err;
1631 		}
1632 	}
1633 
1634 	chcr_init_hctx_per_wr(req_ctx);
1635 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1636 	if (error) {
1637 		error = -ENOMEM;
1638 		goto err;
1639 	}
1640 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1641 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1642 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1643 				     HASH_SPACE_LEFT(params.kctx_len), 0);
1644 	if (params.sg_len > req->nbytes)
1645 		params.sg_len = req->nbytes;
1646 	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1647 			req_ctx->reqlen;
1648 	params.opad_needed = 0;
1649 	params.more = 1;
1650 	params.last = 0;
1651 	params.bfr_len = req_ctx->reqlen;
1652 	params.scmd1 = 0;
1653 	req_ctx->hctx_wr.srcsg = req->src;
1654 
1655 	params.hash_size = params.alg_prm.result_size;
1656 	req_ctx->data_len += params.sg_len + params.bfr_len;
1657 	skb = create_hash_wr(req, &params);
1658 	if (IS_ERR(skb)) {
1659 		error = PTR_ERR(skb);
1660 		goto unmap;
1661 	}
1662 
1663 	req_ctx->hctx_wr.processed += params.sg_len;
1664 	if (remainder) {
1665 		/* Swap buffers */
1666 		swap(req_ctx->reqbfr, req_ctx->skbfr);
1667 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1668 				   req_ctx->reqbfr, remainder, req->nbytes -
1669 				   remainder);
1670 	}
1671 	req_ctx->reqlen = remainder;
1672 	skb->dev = u_ctx->lldi.ports[0];
1673 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1674 	chcr_send_wr(skb);
1675 
1676 	return isfull ? -EBUSY : -EINPROGRESS;
1677 unmap:
1678 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1679 err:
1680 	chcr_dec_wrcount(dev);
1681 	return error;
1682 }
1683 
1684 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1685 {
1686 	memset(bfr_ptr, 0, bs);
1687 	*bfr_ptr = 0x80;
1688 	if (bs == 64)
1689 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1690 	else
1691 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1692 }
1693 
1694 static int chcr_ahash_final(struct ahash_request *req)
1695 {
1696 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1697 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1698 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1699 	struct hash_wr_param params;
1700 	struct sk_buff *skb;
1701 	struct uld_ctx *u_ctx = NULL;
1702 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1703 	int error = -EINVAL;
1704 
1705 	error = chcr_inc_wrcount(dev);
1706 	if (error)
1707 		return -ENXIO;
1708 
1709 	chcr_init_hctx_per_wr(req_ctx);
1710 	u_ctx = ULD_CTX(h_ctx(rtfm));
1711 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1712 		params.opad_needed = 1;
1713 	else
1714 		params.opad_needed = 0;
1715 	params.sg_len = 0;
1716 	req_ctx->hctx_wr.isfinal = 1;
1717 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1718 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1719 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1720 		params.opad_needed = 1;
1721 		params.kctx_len *= 2;
1722 	} else {
1723 		params.opad_needed = 0;
1724 	}
1725 
1726 	req_ctx->hctx_wr.result = 1;
1727 	params.bfr_len = req_ctx->reqlen;
1728 	req_ctx->data_len += params.bfr_len + params.sg_len;
1729 	req_ctx->hctx_wr.srcsg = req->src;
1730 	if (req_ctx->reqlen == 0) {
1731 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1732 		params.last = 0;
1733 		params.more = 1;
1734 		params.scmd1 = 0;
1735 		params.bfr_len = bs;
1736 
1737 	} else {
1738 		params.scmd1 = req_ctx->data_len;
1739 		params.last = 1;
1740 		params.more = 0;
1741 	}
1742 	params.hash_size = crypto_ahash_digestsize(rtfm);
1743 	skb = create_hash_wr(req, &params);
1744 	if (IS_ERR(skb)) {
1745 		error = PTR_ERR(skb);
1746 		goto err;
1747 	}
1748 	req_ctx->reqlen = 0;
1749 	skb->dev = u_ctx->lldi.ports[0];
1750 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1751 	chcr_send_wr(skb);
1752 	return -EINPROGRESS;
1753 err:
1754 	chcr_dec_wrcount(dev);
1755 	return error;
1756 }
1757 
1758 static int chcr_ahash_finup(struct ahash_request *req)
1759 {
1760 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1761 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1762 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1763 	struct uld_ctx *u_ctx = NULL;
1764 	struct sk_buff *skb;
1765 	struct hash_wr_param params;
1766 	u8  bs;
1767 	int error, isfull = 0;
1768 
1769 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1770 	u_ctx = ULD_CTX(h_ctx(rtfm));
1771 	error = chcr_inc_wrcount(dev);
1772 	if (error)
1773 		return -ENXIO;
1774 
1775 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1776 					    h_ctx(rtfm)->tx_qidx))) {
1777 		isfull = 1;
1778 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1779 			error = -ENOSPC;
1780 			goto err;
1781 		}
1782 	}
1783 	chcr_init_hctx_per_wr(req_ctx);
1784 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1785 	if (error) {
1786 		error = -ENOMEM;
1787 		goto err;
1788 	}
1789 
1790 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1791 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1792 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1793 		params.kctx_len *= 2;
1794 		params.opad_needed = 1;
1795 	} else {
1796 		params.opad_needed = 0;
1797 	}
1798 
1799 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1800 				    HASH_SPACE_LEFT(params.kctx_len), 0);
1801 	if (params.sg_len < req->nbytes) {
1802 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1803 			params.kctx_len /= 2;
1804 			params.opad_needed = 0;
1805 		}
1806 		params.last = 0;
1807 		params.more = 1;
1808 		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1809 					- req_ctx->reqlen;
1810 		params.hash_size = params.alg_prm.result_size;
1811 		params.scmd1 = 0;
1812 	} else {
1813 		params.last = 1;
1814 		params.more = 0;
1815 		params.sg_len = req->nbytes;
1816 		params.hash_size = crypto_ahash_digestsize(rtfm);
1817 		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1818 				params.sg_len;
1819 	}
1820 	params.bfr_len = req_ctx->reqlen;
1821 	req_ctx->data_len += params.bfr_len + params.sg_len;
1822 	req_ctx->hctx_wr.result = 1;
1823 	req_ctx->hctx_wr.srcsg = req->src;
1824 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1825 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1826 		params.last = 0;
1827 		params.more = 1;
1828 		params.scmd1 = 0;
1829 		params.bfr_len = bs;
1830 	}
1831 	skb = create_hash_wr(req, &params);
1832 	if (IS_ERR(skb)) {
1833 		error = PTR_ERR(skb);
1834 		goto unmap;
1835 	}
1836 	req_ctx->reqlen = 0;
1837 	req_ctx->hctx_wr.processed += params.sg_len;
1838 	skb->dev = u_ctx->lldi.ports[0];
1839 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1840 	chcr_send_wr(skb);
1841 
1842 	return isfull ? -EBUSY : -EINPROGRESS;
1843 unmap:
1844 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1845 err:
1846 	chcr_dec_wrcount(dev);
1847 	return error;
1848 }
1849 
1850 static int chcr_ahash_digest(struct ahash_request *req)
1851 {
1852 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1853 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1854 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1855 	struct uld_ctx *u_ctx = NULL;
1856 	struct sk_buff *skb;
1857 	struct hash_wr_param params;
1858 	u8  bs;
1859 	int error, isfull = 0;
1860 
1861 	rtfm->init(req);
1862 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1863 	error = chcr_inc_wrcount(dev);
1864 	if (error)
1865 		return -ENXIO;
1866 
1867 	u_ctx = ULD_CTX(h_ctx(rtfm));
1868 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1869 					    h_ctx(rtfm)->tx_qidx))) {
1870 		isfull = 1;
1871 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1872 			error = -ENOSPC;
1873 			goto err;
1874 		}
1875 	}
1876 
1877 	chcr_init_hctx_per_wr(req_ctx);
1878 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1879 	if (error) {
1880 		error = -ENOMEM;
1881 		goto err;
1882 	}
1883 
1884 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1885 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1886 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1887 		params.kctx_len *= 2;
1888 		params.opad_needed = 1;
1889 	} else {
1890 		params.opad_needed = 0;
1891 	}
1892 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1893 				HASH_SPACE_LEFT(params.kctx_len), 0);
1894 	if (params.sg_len < req->nbytes) {
1895 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1896 			params.kctx_len /= 2;
1897 			params.opad_needed = 0;
1898 		}
1899 		params.last = 0;
1900 		params.more = 1;
1901 		params.scmd1 = 0;
1902 		params.sg_len = rounddown(params.sg_len, bs);
1903 		params.hash_size = params.alg_prm.result_size;
1904 	} else {
1905 		params.sg_len = req->nbytes;
1906 		params.hash_size = crypto_ahash_digestsize(rtfm);
1907 		params.last = 1;
1908 		params.more = 0;
1909 		params.scmd1 = req->nbytes + req_ctx->data_len;
1910 
1911 	}
1912 	params.bfr_len = 0;
1913 	req_ctx->hctx_wr.result = 1;
1914 	req_ctx->hctx_wr.srcsg = req->src;
1915 	req_ctx->data_len += params.bfr_len + params.sg_len;
1916 
1917 	if (req->nbytes == 0) {
1918 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1919 		params.more = 1;
1920 		params.bfr_len = bs;
1921 	}
1922 
1923 	skb = create_hash_wr(req, &params);
1924 	if (IS_ERR(skb)) {
1925 		error = PTR_ERR(skb);
1926 		goto unmap;
1927 	}
1928 	req_ctx->hctx_wr.processed += params.sg_len;
1929 	skb->dev = u_ctx->lldi.ports[0];
1930 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1931 	chcr_send_wr(skb);
1932 	return isfull ? -EBUSY : -EINPROGRESS;
1933 unmap:
1934 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1935 err:
1936 	chcr_dec_wrcount(dev);
1937 	return error;
1938 }
1939 
1940 static int chcr_ahash_continue(struct ahash_request *req)
1941 {
1942 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1943 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1944 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1945 	struct uld_ctx *u_ctx = NULL;
1946 	struct sk_buff *skb;
1947 	struct hash_wr_param params;
1948 	u8  bs;
1949 	int error;
1950 
1951 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1952 	u_ctx = ULD_CTX(h_ctx(rtfm));
1953 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1954 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1955 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1956 		params.kctx_len *= 2;
1957 		params.opad_needed = 1;
1958 	} else {
1959 		params.opad_needed = 0;
1960 	}
1961 	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1962 					    HASH_SPACE_LEFT(params.kctx_len),
1963 					    hctx_wr->src_ofst);
1964 	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1965 		params.sg_len = req->nbytes - hctx_wr->processed;
1966 	if (!hctx_wr->result ||
1967 	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1968 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1969 			params.kctx_len /= 2;
1970 			params.opad_needed = 0;
1971 		}
1972 		params.last = 0;
1973 		params.more = 1;
1974 		params.sg_len = rounddown(params.sg_len, bs);
1975 		params.hash_size = params.alg_prm.result_size;
1976 		params.scmd1 = 0;
1977 	} else {
1978 		params.last = 1;
1979 		params.more = 0;
1980 		params.hash_size = crypto_ahash_digestsize(rtfm);
1981 		params.scmd1 = reqctx->data_len + params.sg_len;
1982 	}
1983 	params.bfr_len = 0;
1984 	reqctx->data_len += params.sg_len;
1985 	skb = create_hash_wr(req, &params);
1986 	if (IS_ERR(skb)) {
1987 		error = PTR_ERR(skb);
1988 		goto err;
1989 	}
1990 	hctx_wr->processed += params.sg_len;
1991 	skb->dev = u_ctx->lldi.ports[0];
1992 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1993 	chcr_send_wr(skb);
1994 	return 0;
1995 err:
1996 	return error;
1997 }
1998 
1999 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2000 					  unsigned char *input,
2001 					  int err)
2002 {
2003 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2004 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2005 	int digestsize, updated_digestsize;
2006 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2007 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2008 	struct chcr_dev *dev = h_ctx(tfm)->dev;
2009 
2010 	if (input == NULL)
2011 		goto out;
2012 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2013 	updated_digestsize = digestsize;
2014 	if (digestsize == SHA224_DIGEST_SIZE)
2015 		updated_digestsize = SHA256_DIGEST_SIZE;
2016 	else if (digestsize == SHA384_DIGEST_SIZE)
2017 		updated_digestsize = SHA512_DIGEST_SIZE;
2018 
2019 	if (hctx_wr->dma_addr) {
2020 		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2021 				 hctx_wr->dma_len, DMA_TO_DEVICE);
2022 		hctx_wr->dma_addr = 0;
2023 	}
2024 	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2025 				 req->nbytes)) {
2026 		if (hctx_wr->result == 1) {
2027 			hctx_wr->result = 0;
2028 			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2029 			       digestsize);
2030 		} else {
2031 			memcpy(reqctx->partial_hash,
2032 			       input + sizeof(struct cpl_fw6_pld),
2033 			       updated_digestsize);
2034 
2035 		}
2036 		goto unmap;
2037 	}
2038 	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2039 	       updated_digestsize);
2040 
2041 	err = chcr_ahash_continue(req);
2042 	if (err)
2043 		goto unmap;
2044 	return;
2045 unmap:
2046 	if (hctx_wr->is_sg_map)
2047 		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2048 
2049 
2050 out:
2051 	chcr_dec_wrcount(dev);
2052 	req->base.complete(&req->base, err);
2053 }
2054 
2055 /*
2056  *	chcr_handle_resp - Unmap the DMA buffers associated with the request
2057  *	@req: crypto request
2058  */
2059 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2060 			 int err)
2061 {
2062 	struct crypto_tfm *tfm = req->tfm;
2063 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2064 	struct adapter *adap = padap(ctx->dev);
2065 
2066 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2067 	case CRYPTO_ALG_TYPE_AEAD:
2068 		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2069 		break;
2070 
2071 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2072 		 chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2073 					       input, err);
2074 		break;
2075 	case CRYPTO_ALG_TYPE_AHASH:
2076 		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2077 		}
2078 	atomic_inc(&adap->chcr_stats.complete);
2079 	return err;
2080 }
2081 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2082 {
2083 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2084 	struct chcr_ahash_req_ctx *state = out;
2085 
2086 	state->reqlen = req_ctx->reqlen;
2087 	state->data_len = req_ctx->data_len;
2088 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2089 	memcpy(state->partial_hash, req_ctx->partial_hash,
2090 	       CHCR_HASH_MAX_DIGEST_SIZE);
2091 	chcr_init_hctx_per_wr(state);
2092 	return 0;
2093 }
2094 
2095 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2096 {
2097 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2098 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2099 
2100 	req_ctx->reqlen = state->reqlen;
2101 	req_ctx->data_len = state->data_len;
2102 	req_ctx->reqbfr = req_ctx->bfr1;
2103 	req_ctx->skbfr = req_ctx->bfr2;
2104 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2105 	memcpy(req_ctx->partial_hash, state->partial_hash,
2106 	       CHCR_HASH_MAX_DIGEST_SIZE);
2107 	chcr_init_hctx_per_wr(req_ctx);
2108 	return 0;
2109 }
2110 
2111 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2112 			     unsigned int keylen)
2113 {
2114 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2115 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2116 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2117 	unsigned int i, err = 0, updated_digestsize;
2118 
2119 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2120 
2121 	/* use the key to calculate the ipad and opad. ipad will sent with the
2122 	 * first request's data. opad will be sent with the final hash result
2123 	 * ipad in hmacctx->ipad and opad in hmacctx->opad location
2124 	 */
2125 	shash->tfm = hmacctx->base_hash;
2126 	if (keylen > bs) {
2127 		err = crypto_shash_digest(shash, key, keylen,
2128 					  hmacctx->ipad);
2129 		if (err)
2130 			goto out;
2131 		keylen = digestsize;
2132 	} else {
2133 		memcpy(hmacctx->ipad, key, keylen);
2134 	}
2135 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2136 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2137 
2138 	for (i = 0; i < bs / sizeof(int); i++) {
2139 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2140 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2141 	}
2142 
2143 	updated_digestsize = digestsize;
2144 	if (digestsize == SHA224_DIGEST_SIZE)
2145 		updated_digestsize = SHA256_DIGEST_SIZE;
2146 	else if (digestsize == SHA384_DIGEST_SIZE)
2147 		updated_digestsize = SHA512_DIGEST_SIZE;
2148 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2149 					hmacctx->ipad, digestsize);
2150 	if (err)
2151 		goto out;
2152 	chcr_change_order(hmacctx->ipad, updated_digestsize);
2153 
2154 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2155 					hmacctx->opad, digestsize);
2156 	if (err)
2157 		goto out;
2158 	chcr_change_order(hmacctx->opad, updated_digestsize);
2159 out:
2160 	return err;
2161 }
2162 
2163 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2164 			       unsigned int key_len)
2165 {
2166 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2167 	unsigned short context_size = 0;
2168 	int err;
2169 
2170 	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2171 	if (err)
2172 		goto badkey_err;
2173 
2174 	memcpy(ablkctx->key, key, key_len);
2175 	ablkctx->enckey_len = key_len;
2176 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2177 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2178 	ablkctx->key_ctx_hdr =
2179 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2180 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2181 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2182 				 CHCR_KEYCTX_NO_KEY, 1,
2183 				 0, context_size);
2184 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2185 	return 0;
2186 badkey_err:
2187 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2188 	ablkctx->enckey_len = 0;
2189 
2190 	return err;
2191 }
2192 
2193 static int chcr_sha_init(struct ahash_request *areq)
2194 {
2195 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2196 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2197 	int digestsize =  crypto_ahash_digestsize(tfm);
2198 
2199 	req_ctx->data_len = 0;
2200 	req_ctx->reqlen = 0;
2201 	req_ctx->reqbfr = req_ctx->bfr1;
2202 	req_ctx->skbfr = req_ctx->bfr2;
2203 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2204 
2205 	return 0;
2206 }
2207 
2208 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2209 {
2210 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2211 				 sizeof(struct chcr_ahash_req_ctx));
2212 	return chcr_device_init(crypto_tfm_ctx(tfm));
2213 }
2214 
2215 static int chcr_hmac_init(struct ahash_request *areq)
2216 {
2217 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2218 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2219 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2220 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2221 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2222 
2223 	chcr_sha_init(areq);
2224 	req_ctx->data_len = bs;
2225 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2226 		if (digestsize == SHA224_DIGEST_SIZE)
2227 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2228 			       SHA256_DIGEST_SIZE);
2229 		else if (digestsize == SHA384_DIGEST_SIZE)
2230 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2231 			       SHA512_DIGEST_SIZE);
2232 		else
2233 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2234 			       digestsize);
2235 	}
2236 	return 0;
2237 }
2238 
2239 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2240 {
2241 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2242 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2243 	unsigned int digestsize =
2244 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2245 
2246 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2247 				 sizeof(struct chcr_ahash_req_ctx));
2248 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2249 	if (IS_ERR(hmacctx->base_hash))
2250 		return PTR_ERR(hmacctx->base_hash);
2251 	return chcr_device_init(crypto_tfm_ctx(tfm));
2252 }
2253 
2254 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2255 {
2256 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2257 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2258 
2259 	if (hmacctx->base_hash) {
2260 		chcr_free_shash(hmacctx->base_hash);
2261 		hmacctx->base_hash = NULL;
2262 	}
2263 }
2264 
2265 inline void chcr_aead_common_exit(struct aead_request *req)
2266 {
2267 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2268 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2269 	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2270 
2271 	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2272 }
2273 
2274 static int chcr_aead_common_init(struct aead_request *req)
2275 {
2276 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2277 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2278 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2279 	unsigned int authsize = crypto_aead_authsize(tfm);
2280 	int error = -EINVAL;
2281 
2282 	/* validate key size */
2283 	if (aeadctx->enckey_len == 0)
2284 		goto err;
2285 	if (reqctx->op && req->cryptlen < authsize)
2286 		goto err;
2287 	if (reqctx->b0_len)
2288 		reqctx->scratch_pad = reqctx->iv + IV;
2289 	else
2290 		reqctx->scratch_pad = NULL;
2291 
2292 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2293 				  reqctx->op);
2294 	if (error) {
2295 		error = -ENOMEM;
2296 		goto err;
2297 	}
2298 
2299 	return 0;
2300 err:
2301 	return error;
2302 }
2303 
2304 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2305 				   int aadmax, int wrlen,
2306 				   unsigned short op_type)
2307 {
2308 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2309 
2310 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2311 	    dst_nents > MAX_DSGL_ENT ||
2312 	    (req->assoclen > aadmax) ||
2313 	    (wrlen > SGE_MAX_WR_LEN))
2314 		return 1;
2315 	return 0;
2316 }
2317 
2318 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2319 {
2320 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2321 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2322 	struct aead_request *subreq = aead_request_ctx(req);
2323 
2324 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2325 	aead_request_set_callback(subreq, req->base.flags,
2326 				  req->base.complete, req->base.data);
2327 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2328 				 req->iv);
2329 	aead_request_set_ad(subreq, req->assoclen);
2330 	return op_type ? crypto_aead_decrypt(subreq) :
2331 		crypto_aead_encrypt(subreq);
2332 }
2333 
2334 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2335 					 unsigned short qid,
2336 					 int size)
2337 {
2338 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2339 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2340 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2341 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2342 	struct sk_buff *skb = NULL;
2343 	struct chcr_wr *chcr_req;
2344 	struct cpl_rx_phys_dsgl *phys_cpl;
2345 	struct ulptx_sgl *ulptx;
2346 	unsigned int transhdr_len;
2347 	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2348 	unsigned int   kctx_len = 0, dnents, snents;
2349 	unsigned int  authsize = crypto_aead_authsize(tfm);
2350 	int error = -EINVAL;
2351 	u8 *ivptr;
2352 	int null = 0;
2353 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2354 		GFP_ATOMIC;
2355 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2356 
2357 	if (req->cryptlen == 0)
2358 		return NULL;
2359 
2360 	reqctx->b0_len = 0;
2361 	error = chcr_aead_common_init(req);
2362 	if (error)
2363 		return ERR_PTR(error);
2364 
2365 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2366 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2367 		null = 1;
2368 	}
2369 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2370 		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2371 	dnents += MIN_AUTH_SG; // For IV
2372 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2373 			       CHCR_SRC_SG_SIZE, 0);
2374 	dst_size = get_space_for_phys_dsgl(dnents);
2375 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2376 		- sizeof(chcr_req->key_ctx);
2377 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2378 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2379 			SGE_MAX_WR_LEN;
2380 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2381 			: (sgl_len(snents) * 8);
2382 	transhdr_len += temp;
2383 	transhdr_len = roundup(transhdr_len, 16);
2384 
2385 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2386 				    transhdr_len, reqctx->op)) {
2387 		atomic_inc(&adap->chcr_stats.fallback);
2388 		chcr_aead_common_exit(req);
2389 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2390 	}
2391 	skb = alloc_skb(transhdr_len, flags);
2392 	if (!skb) {
2393 		error = -ENOMEM;
2394 		goto err;
2395 	}
2396 
2397 	chcr_req = __skb_put_zero(skb, transhdr_len);
2398 
2399 	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2400 
2401 	/*
2402 	 * Input order	is AAD,IV and Payload. where IV should be included as
2403 	 * the part of authdata. All other fields should be filled according
2404 	 * to the hardware spec
2405 	 */
2406 	chcr_req->sec_cpl.op_ivinsrtofst =
2407 		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
2408 	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2409 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2410 					null ? 0 : 1 + IV,
2411 					null ? 0 : IV + req->assoclen,
2412 					req->assoclen + IV + 1,
2413 					(temp & 0x1F0) >> 4);
2414 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2415 					temp & 0xF,
2416 					null ? 0 : req->assoclen + IV + 1,
2417 					temp, temp);
2418 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2419 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2420 		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2421 	else
2422 		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2423 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2424 					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2425 					temp,
2426 					actx->auth_mode, aeadctx->hmac_ctrl,
2427 					IV >> 1);
2428 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2429 					 0, 0, dst_size);
2430 
2431 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2432 	if (reqctx->op == CHCR_ENCRYPT_OP ||
2433 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2434 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2435 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2436 		       aeadctx->enckey_len);
2437 	else
2438 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2439 		       aeadctx->enckey_len);
2440 
2441 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2442 	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2443 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2444 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2445 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2446 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2447 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2448 		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2449 		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2450 				CTR_RFC3686_IV_SIZE);
2451 		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2452 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2453 	} else {
2454 		memcpy(ivptr, req->iv, IV);
2455 	}
2456 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2457 	chcr_add_aead_src_ent(req, ulptx);
2458 	atomic_inc(&adap->chcr_stats.cipher_rqst);
2459 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2460 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2461 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2462 		   transhdr_len, temp, 0);
2463 	reqctx->skb = skb;
2464 
2465 	return skb;
2466 err:
2467 	chcr_aead_common_exit(req);
2468 
2469 	return ERR_PTR(error);
2470 }
2471 
2472 int chcr_aead_dma_map(struct device *dev,
2473 		      struct aead_request *req,
2474 		      unsigned short op_type)
2475 {
2476 	int error;
2477 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2478 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2479 	unsigned int authsize = crypto_aead_authsize(tfm);
2480 	int dst_size;
2481 
2482 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2483 				-authsize : authsize);
2484 	if (!req->cryptlen || !dst_size)
2485 		return 0;
2486 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2487 					DMA_BIDIRECTIONAL);
2488 	if (dma_mapping_error(dev, reqctx->iv_dma))
2489 		return -ENOMEM;
2490 	if (reqctx->b0_len)
2491 		reqctx->b0_dma = reqctx->iv_dma + IV;
2492 	else
2493 		reqctx->b0_dma = 0;
2494 	if (req->src == req->dst) {
2495 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2496 				   DMA_BIDIRECTIONAL);
2497 		if (!error)
2498 			goto err;
2499 	} else {
2500 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2501 				   DMA_TO_DEVICE);
2502 		if (!error)
2503 			goto err;
2504 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2505 				   DMA_FROM_DEVICE);
2506 		if (!error) {
2507 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2508 				   DMA_TO_DEVICE);
2509 			goto err;
2510 		}
2511 	}
2512 
2513 	return 0;
2514 err:
2515 	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2516 	return -ENOMEM;
2517 }
2518 
2519 void chcr_aead_dma_unmap(struct device *dev,
2520 			 struct aead_request *req,
2521 			 unsigned short op_type)
2522 {
2523 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2524 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2525 	unsigned int authsize = crypto_aead_authsize(tfm);
2526 	int dst_size;
2527 
2528 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2529 					-authsize : authsize);
2530 	if (!req->cryptlen || !dst_size)
2531 		return;
2532 
2533 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2534 					DMA_BIDIRECTIONAL);
2535 	if (req->src == req->dst) {
2536 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2537 				   DMA_BIDIRECTIONAL);
2538 	} else {
2539 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2540 				   DMA_TO_DEVICE);
2541 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2542 				   DMA_FROM_DEVICE);
2543 	}
2544 }
2545 
2546 void chcr_add_aead_src_ent(struct aead_request *req,
2547 			   struct ulptx_sgl *ulptx)
2548 {
2549 	struct ulptx_walk ulp_walk;
2550 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2551 
2552 	if (reqctx->imm) {
2553 		u8 *buf = (u8 *)ulptx;
2554 
2555 		if (reqctx->b0_len) {
2556 			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2557 			buf += reqctx->b0_len;
2558 		}
2559 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2560 				   buf, req->cryptlen + req->assoclen, 0);
2561 	} else {
2562 		ulptx_walk_init(&ulp_walk, ulptx);
2563 		if (reqctx->b0_len)
2564 			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2565 					    reqctx->b0_dma);
2566 		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2567 				  req->assoclen,  0);
2568 		ulptx_walk_end(&ulp_walk);
2569 	}
2570 }
2571 
2572 void chcr_add_aead_dst_ent(struct aead_request *req,
2573 			   struct cpl_rx_phys_dsgl *phys_cpl,
2574 			   unsigned short qid)
2575 {
2576 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2577 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2578 	struct dsgl_walk dsgl_walk;
2579 	unsigned int authsize = crypto_aead_authsize(tfm);
2580 	struct chcr_context *ctx = a_ctx(tfm);
2581 	u32 temp;
2582 
2583 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2584 	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2585 	temp = req->assoclen + req->cryptlen +
2586 		(reqctx->op ? -authsize : authsize);
2587 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2588 	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2589 }
2590 
2591 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2592 			     void *ulptx,
2593 			     struct  cipher_wr_param *wrparam)
2594 {
2595 	struct ulptx_walk ulp_walk;
2596 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2597 	u8 *buf = ulptx;
2598 
2599 	memcpy(buf, reqctx->iv, IV);
2600 	buf += IV;
2601 	if (reqctx->imm) {
2602 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2603 				   buf, wrparam->bytes, reqctx->processed);
2604 	} else {
2605 		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2606 		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2607 				  reqctx->src_ofst);
2608 		reqctx->srcsg = ulp_walk.last_sg;
2609 		reqctx->src_ofst = ulp_walk.last_sg_len;
2610 		ulptx_walk_end(&ulp_walk);
2611 	}
2612 }
2613 
2614 void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2615 			     struct cpl_rx_phys_dsgl *phys_cpl,
2616 			     struct  cipher_wr_param *wrparam,
2617 			     unsigned short qid)
2618 {
2619 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2620 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2621 	struct chcr_context *ctx = c_ctx(tfm);
2622 	struct dsgl_walk dsgl_walk;
2623 
2624 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2625 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2626 			 reqctx->dst_ofst);
2627 	reqctx->dstsg = dsgl_walk.last_sg;
2628 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2629 
2630 	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2631 }
2632 
2633 void chcr_add_hash_src_ent(struct ahash_request *req,
2634 			   struct ulptx_sgl *ulptx,
2635 			   struct hash_wr_param *param)
2636 {
2637 	struct ulptx_walk ulp_walk;
2638 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2639 
2640 	if (reqctx->hctx_wr.imm) {
2641 		u8 *buf = (u8 *)ulptx;
2642 
2643 		if (param->bfr_len) {
2644 			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2645 			buf += param->bfr_len;
2646 		}
2647 
2648 		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2649 				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2650 				   param->sg_len, 0);
2651 	} else {
2652 		ulptx_walk_init(&ulp_walk, ulptx);
2653 		if (param->bfr_len)
2654 			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2655 					    reqctx->hctx_wr.dma_addr);
2656 		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2657 				  param->sg_len, reqctx->hctx_wr.src_ofst);
2658 		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2659 		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2660 		ulptx_walk_end(&ulp_walk);
2661 	}
2662 }
2663 
2664 int chcr_hash_dma_map(struct device *dev,
2665 		      struct ahash_request *req)
2666 {
2667 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2668 	int error = 0;
2669 
2670 	if (!req->nbytes)
2671 		return 0;
2672 	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2673 			   DMA_TO_DEVICE);
2674 	if (!error)
2675 		return -ENOMEM;
2676 	req_ctx->hctx_wr.is_sg_map = 1;
2677 	return 0;
2678 }
2679 
2680 void chcr_hash_dma_unmap(struct device *dev,
2681 			 struct ahash_request *req)
2682 {
2683 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2684 
2685 	if (!req->nbytes)
2686 		return;
2687 
2688 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2689 			   DMA_TO_DEVICE);
2690 	req_ctx->hctx_wr.is_sg_map = 0;
2691 
2692 }
2693 
2694 int chcr_cipher_dma_map(struct device *dev,
2695 			struct ablkcipher_request *req)
2696 {
2697 	int error;
2698 
2699 	if (req->src == req->dst) {
2700 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2701 				   DMA_BIDIRECTIONAL);
2702 		if (!error)
2703 			goto err;
2704 	} else {
2705 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2706 				   DMA_TO_DEVICE);
2707 		if (!error)
2708 			goto err;
2709 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2710 				   DMA_FROM_DEVICE);
2711 		if (!error) {
2712 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2713 				   DMA_TO_DEVICE);
2714 			goto err;
2715 		}
2716 	}
2717 
2718 	return 0;
2719 err:
2720 	return -ENOMEM;
2721 }
2722 
2723 void chcr_cipher_dma_unmap(struct device *dev,
2724 			   struct ablkcipher_request *req)
2725 {
2726 	if (req->src == req->dst) {
2727 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2728 				   DMA_BIDIRECTIONAL);
2729 	} else {
2730 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2731 				   DMA_TO_DEVICE);
2732 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2733 				   DMA_FROM_DEVICE);
2734 	}
2735 }
2736 
2737 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2738 {
2739 	__be32 data;
2740 
2741 	memset(block, 0, csize);
2742 	block += csize;
2743 
2744 	if (csize >= 4)
2745 		csize = 4;
2746 	else if (msglen > (unsigned int)(1 << (8 * csize)))
2747 		return -EOVERFLOW;
2748 
2749 	data = cpu_to_be32(msglen);
2750 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2751 
2752 	return 0;
2753 }
2754 
2755 static int generate_b0(struct aead_request *req, u8 *ivptr,
2756 			unsigned short op_type)
2757 {
2758 	unsigned int l, lp, m;
2759 	int rc;
2760 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2761 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2762 	u8 *b0 = reqctx->scratch_pad;
2763 
2764 	m = crypto_aead_authsize(aead);
2765 
2766 	memcpy(b0, ivptr, 16);
2767 
2768 	lp = b0[0];
2769 	l = lp + 1;
2770 
2771 	/* set m, bits 3-5 */
2772 	*b0 |= (8 * ((m - 2) / 2));
2773 
2774 	/* set adata, bit 6, if associated data is used */
2775 	if (req->assoclen)
2776 		*b0 |= 64;
2777 	rc = set_msg_len(b0 + 16 - l,
2778 			 (op_type == CHCR_DECRYPT_OP) ?
2779 			 req->cryptlen - m : req->cryptlen, l);
2780 
2781 	return rc;
2782 }
2783 
2784 static inline int crypto_ccm_check_iv(const u8 *iv)
2785 {
2786 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2787 	if (iv[0] < 1 || iv[0] > 7)
2788 		return -EINVAL;
2789 
2790 	return 0;
2791 }
2792 
2793 static int ccm_format_packet(struct aead_request *req,
2794 			     u8 *ivptr,
2795 			     unsigned int sub_type,
2796 			     unsigned short op_type,
2797 			     unsigned int assoclen)
2798 {
2799 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2800 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2801 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2802 	int rc = 0;
2803 
2804 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2805 		ivptr[0] = 3;
2806 		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2807 		memcpy(ivptr + 4, req->iv, 8);
2808 		memset(ivptr + 12, 0, 4);
2809 	} else {
2810 		memcpy(ivptr, req->iv, 16);
2811 	}
2812 	if (assoclen)
2813 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2814 				htons(assoclen);
2815 
2816 	rc = generate_b0(req, ivptr, op_type);
2817 	/* zero the ctr value */
2818 	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2819 	return rc;
2820 }
2821 
2822 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2823 				  unsigned int dst_size,
2824 				  struct aead_request *req,
2825 				  unsigned short op_type)
2826 {
2827 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2828 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2829 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2830 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2831 	unsigned int c_id = a_ctx(tfm)->tx_chan_id;
2832 	unsigned int ccm_xtra;
2833 	unsigned char tag_offset = 0, auth_offset = 0;
2834 	unsigned int assoclen;
2835 
2836 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2837 		assoclen = req->assoclen - 8;
2838 	else
2839 		assoclen = req->assoclen;
2840 	ccm_xtra = CCM_B0_SIZE +
2841 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2842 
2843 	auth_offset = req->cryptlen ?
2844 		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2845 	if (op_type == CHCR_DECRYPT_OP) {
2846 		if (crypto_aead_authsize(tfm) != req->cryptlen)
2847 			tag_offset = crypto_aead_authsize(tfm);
2848 		else
2849 			auth_offset = 0;
2850 	}
2851 
2852 
2853 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2854 					 2, 1);
2855 	sec_cpl->pldlen =
2856 		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2857 	/* For CCM there wil be b0 always. So AAD start will be 1 always */
2858 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2859 				1 + IV,	IV + assoclen + ccm_xtra,
2860 				req->assoclen + IV + 1 + ccm_xtra, 0);
2861 
2862 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2863 					auth_offset, tag_offset,
2864 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2865 					crypto_aead_authsize(tfm));
2866 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2867 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2868 					cipher_mode, mac_mode,
2869 					aeadctx->hmac_ctrl, IV >> 1);
2870 
2871 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2872 					0, dst_size);
2873 }
2874 
2875 static int aead_ccm_validate_input(unsigned short op_type,
2876 				   struct aead_request *req,
2877 				   struct chcr_aead_ctx *aeadctx,
2878 				   unsigned int sub_type)
2879 {
2880 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2881 		if (crypto_ccm_check_iv(req->iv)) {
2882 			pr_err("CCM: IV check fails\n");
2883 			return -EINVAL;
2884 		}
2885 	} else {
2886 		if (req->assoclen != 16 && req->assoclen != 20) {
2887 			pr_err("RFC4309: Invalid AAD length %d\n",
2888 			       req->assoclen);
2889 			return -EINVAL;
2890 		}
2891 	}
2892 	return 0;
2893 }
2894 
2895 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2896 					  unsigned short qid,
2897 					  int size)
2898 {
2899 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2900 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2901 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2902 	struct sk_buff *skb = NULL;
2903 	struct chcr_wr *chcr_req;
2904 	struct cpl_rx_phys_dsgl *phys_cpl;
2905 	struct ulptx_sgl *ulptx;
2906 	unsigned int transhdr_len;
2907 	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2908 	unsigned int sub_type, assoclen = req->assoclen;
2909 	unsigned int authsize = crypto_aead_authsize(tfm);
2910 	int error = -EINVAL;
2911 	u8 *ivptr;
2912 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2913 		GFP_ATOMIC;
2914 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2915 
2916 	sub_type = get_aead_subtype(tfm);
2917 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2918 		assoclen -= 8;
2919 	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2920 	error = chcr_aead_common_init(req);
2921 	if (error)
2922 		return ERR_PTR(error);
2923 
2924 	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2925 	if (error)
2926 		goto err;
2927 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
2928 			+ (reqctx->op ? -authsize : authsize),
2929 			CHCR_DST_SG_SIZE, 0);
2930 	dnents += MIN_CCM_SG; // For IV and B0
2931 	dst_size = get_space_for_phys_dsgl(dnents);
2932 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2933 			       CHCR_SRC_SG_SIZE, 0);
2934 	snents += MIN_CCM_SG; //For B0
2935 	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2936 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2937 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2938 		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
2939 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
2940 				     reqctx->b0_len, 16) :
2941 		(sgl_len(snents) *  8);
2942 	transhdr_len += temp;
2943 	transhdr_len = roundup(transhdr_len, 16);
2944 
2945 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2946 				reqctx->b0_len, transhdr_len, reqctx->op)) {
2947 		atomic_inc(&adap->chcr_stats.fallback);
2948 		chcr_aead_common_exit(req);
2949 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2950 	}
2951 	skb = alloc_skb(transhdr_len,  flags);
2952 
2953 	if (!skb) {
2954 		error = -ENOMEM;
2955 		goto err;
2956 	}
2957 
2958 	chcr_req = __skb_put_zero(skb, transhdr_len);
2959 
2960 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2961 
2962 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2963 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2964 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2965 			aeadctx->key, aeadctx->enckey_len);
2966 
2967 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2968 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2969 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2970 	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
2971 	if (error)
2972 		goto dstmap_fail;
2973 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2974 	chcr_add_aead_src_ent(req, ulptx);
2975 
2976 	atomic_inc(&adap->chcr_stats.aead_rqst);
2977 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2978 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
2979 		reqctx->b0_len) : 0);
2980 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2981 		    transhdr_len, temp, 0);
2982 	reqctx->skb = skb;
2983 
2984 	return skb;
2985 dstmap_fail:
2986 	kfree_skb(skb);
2987 err:
2988 	chcr_aead_common_exit(req);
2989 	return ERR_PTR(error);
2990 }
2991 
2992 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2993 				     unsigned short qid,
2994 				     int size)
2995 {
2996 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2997 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2998 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2999 	struct sk_buff *skb = NULL;
3000 	struct chcr_wr *chcr_req;
3001 	struct cpl_rx_phys_dsgl *phys_cpl;
3002 	struct ulptx_sgl *ulptx;
3003 	unsigned int transhdr_len, dnents = 0, snents;
3004 	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3005 	unsigned int authsize = crypto_aead_authsize(tfm);
3006 	int error = -EINVAL;
3007 	u8 *ivptr;
3008 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3009 		GFP_ATOMIC;
3010 	struct adapter *adap = padap(a_ctx(tfm)->dev);
3011 
3012 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3013 		assoclen = req->assoclen - 8;
3014 
3015 	reqctx->b0_len = 0;
3016 	error = chcr_aead_common_init(req);
3017 	if (error)
3018 		return ERR_PTR(error);
3019 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3020 				(reqctx->op ? -authsize : authsize),
3021 				CHCR_DST_SG_SIZE, 0);
3022 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3023 			       CHCR_SRC_SG_SIZE, 0);
3024 	dnents += MIN_GCM_SG; // For IV
3025 	dst_size = get_space_for_phys_dsgl(dnents);
3026 	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3027 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3028 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3029 			SGE_MAX_WR_LEN;
3030 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3031 		(sgl_len(snents) * 8);
3032 	transhdr_len += temp;
3033 	transhdr_len = roundup(transhdr_len, 16);
3034 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3035 			    transhdr_len, reqctx->op)) {
3036 
3037 		atomic_inc(&adap->chcr_stats.fallback);
3038 		chcr_aead_common_exit(req);
3039 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3040 	}
3041 	skb = alloc_skb(transhdr_len, flags);
3042 	if (!skb) {
3043 		error = -ENOMEM;
3044 		goto err;
3045 	}
3046 
3047 	chcr_req = __skb_put_zero(skb, transhdr_len);
3048 
3049 	//Offset of tag from end
3050 	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3051 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3052 					a_ctx(tfm)->tx_chan_id, 2, 1);
3053 	chcr_req->sec_cpl.pldlen =
3054 		htonl(req->assoclen + IV + req->cryptlen);
3055 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3056 					assoclen ? 1 + IV : 0,
3057 					assoclen ? IV + assoclen : 0,
3058 					req->assoclen + IV + 1, 0);
3059 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3060 			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3061 						temp, temp);
3062 	chcr_req->sec_cpl.seqno_numivs =
3063 			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3064 					CHCR_ENCRYPT_OP) ? 1 : 0,
3065 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3066 					CHCR_SCMD_AUTH_MODE_GHASH,
3067 					aeadctx->hmac_ctrl, IV >> 1);
3068 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3069 					0, 0, dst_size);
3070 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3071 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3072 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3073 	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3074 
3075 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3076 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3077 	/* prepare a 16 byte iv */
3078 	/* S   A   L  T |  IV | 0x00000001 */
3079 	if (get_aead_subtype(tfm) ==
3080 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3081 		memcpy(ivptr, aeadctx->salt, 4);
3082 		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3083 	} else {
3084 		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3085 	}
3086 	*((unsigned int *)(ivptr + 12)) = htonl(0x01);
3087 
3088 	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3089 
3090 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3091 	chcr_add_aead_src_ent(req, ulptx);
3092 	atomic_inc(&adap->chcr_stats.aead_rqst);
3093 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3094 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3095 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3096 		    transhdr_len, temp, reqctx->verify);
3097 	reqctx->skb = skb;
3098 	return skb;
3099 
3100 err:
3101 	chcr_aead_common_exit(req);
3102 	return ERR_PTR(error);
3103 }
3104 
3105 
3106 
3107 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3108 {
3109 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3110 	struct aead_alg *alg = crypto_aead_alg(tfm);
3111 
3112 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3113 					       CRYPTO_ALG_NEED_FALLBACK |
3114 					       CRYPTO_ALG_ASYNC);
3115 	if  (IS_ERR(aeadctx->sw_cipher))
3116 		return PTR_ERR(aeadctx->sw_cipher);
3117 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3118 				 sizeof(struct aead_request) +
3119 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3120 	return chcr_device_init(a_ctx(tfm));
3121 }
3122 
3123 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3124 {
3125 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3126 
3127 	crypto_free_aead(aeadctx->sw_cipher);
3128 }
3129 
3130 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3131 					unsigned int authsize)
3132 {
3133 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3134 
3135 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3136 	aeadctx->mayverify = VERIFY_HW;
3137 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3138 }
3139 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3140 				    unsigned int authsize)
3141 {
3142 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3143 	u32 maxauth = crypto_aead_maxauthsize(tfm);
3144 
3145 	/*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
3146 	 * true for sha1. authsize == 12 condition should be before
3147 	 * authsize == (maxauth >> 1)
3148 	 */
3149 	if (authsize == ICV_4) {
3150 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3151 		aeadctx->mayverify = VERIFY_HW;
3152 	} else if (authsize == ICV_6) {
3153 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3154 		aeadctx->mayverify = VERIFY_HW;
3155 	} else if (authsize == ICV_10) {
3156 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3157 		aeadctx->mayverify = VERIFY_HW;
3158 	} else if (authsize == ICV_12) {
3159 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3160 		aeadctx->mayverify = VERIFY_HW;
3161 	} else if (authsize == ICV_14) {
3162 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3163 		aeadctx->mayverify = VERIFY_HW;
3164 	} else if (authsize == (maxauth >> 1)) {
3165 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3166 		aeadctx->mayverify = VERIFY_HW;
3167 	} else if (authsize == maxauth) {
3168 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3169 		aeadctx->mayverify = VERIFY_HW;
3170 	} else {
3171 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3172 		aeadctx->mayverify = VERIFY_SW;
3173 	}
3174 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3175 }
3176 
3177 
3178 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3179 {
3180 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3181 
3182 	switch (authsize) {
3183 	case ICV_4:
3184 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3185 		aeadctx->mayverify = VERIFY_HW;
3186 		break;
3187 	case ICV_8:
3188 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3189 		aeadctx->mayverify = VERIFY_HW;
3190 		break;
3191 	case ICV_12:
3192 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3193 		aeadctx->mayverify = VERIFY_HW;
3194 		break;
3195 	case ICV_14:
3196 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3197 		aeadctx->mayverify = VERIFY_HW;
3198 		break;
3199 	case ICV_16:
3200 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3201 		aeadctx->mayverify = VERIFY_HW;
3202 		break;
3203 	case ICV_13:
3204 	case ICV_15:
3205 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3206 		aeadctx->mayverify = VERIFY_SW;
3207 		break;
3208 	default:
3209 
3210 		  crypto_tfm_set_flags((struct crypto_tfm *) tfm,
3211 			CRYPTO_TFM_RES_BAD_KEY_LEN);
3212 		return -EINVAL;
3213 	}
3214 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3215 }
3216 
3217 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3218 					  unsigned int authsize)
3219 {
3220 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3221 
3222 	switch (authsize) {
3223 	case ICV_8:
3224 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3225 		aeadctx->mayverify = VERIFY_HW;
3226 		break;
3227 	case ICV_12:
3228 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3229 		aeadctx->mayverify = VERIFY_HW;
3230 		break;
3231 	case ICV_16:
3232 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3233 		aeadctx->mayverify = VERIFY_HW;
3234 		break;
3235 	default:
3236 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3237 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3238 		return -EINVAL;
3239 	}
3240 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3241 }
3242 
3243 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3244 				unsigned int authsize)
3245 {
3246 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3247 
3248 	switch (authsize) {
3249 	case ICV_4:
3250 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3251 		aeadctx->mayverify = VERIFY_HW;
3252 		break;
3253 	case ICV_6:
3254 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3255 		aeadctx->mayverify = VERIFY_HW;
3256 		break;
3257 	case ICV_8:
3258 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3259 		aeadctx->mayverify = VERIFY_HW;
3260 		break;
3261 	case ICV_10:
3262 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3263 		aeadctx->mayverify = VERIFY_HW;
3264 		break;
3265 	case ICV_12:
3266 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3267 		aeadctx->mayverify = VERIFY_HW;
3268 		break;
3269 	case ICV_14:
3270 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3271 		aeadctx->mayverify = VERIFY_HW;
3272 		break;
3273 	case ICV_16:
3274 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3275 		aeadctx->mayverify = VERIFY_HW;
3276 		break;
3277 	default:
3278 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3279 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3280 		return -EINVAL;
3281 	}
3282 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3283 }
3284 
3285 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3286 				const u8 *key,
3287 				unsigned int keylen)
3288 {
3289 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3290 	unsigned char ck_size, mk_size;
3291 	int key_ctx_size = 0;
3292 
3293 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3294 	if (keylen == AES_KEYSIZE_128) {
3295 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3296 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3297 	} else if (keylen == AES_KEYSIZE_192) {
3298 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3299 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3300 	} else if (keylen == AES_KEYSIZE_256) {
3301 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3302 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3303 	} else {
3304 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3305 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3306 		aeadctx->enckey_len = 0;
3307 		return	-EINVAL;
3308 	}
3309 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3310 						key_ctx_size >> 4);
3311 	memcpy(aeadctx->key, key, keylen);
3312 	aeadctx->enckey_len = keylen;
3313 
3314 	return 0;
3315 }
3316 
3317 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3318 				const u8 *key,
3319 				unsigned int keylen)
3320 {
3321 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3322 	int error;
3323 
3324 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3325 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3326 			      CRYPTO_TFM_REQ_MASK);
3327 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3328 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3329 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3330 			      CRYPTO_TFM_RES_MASK);
3331 	if (error)
3332 		return error;
3333 	return chcr_ccm_common_setkey(aead, key, keylen);
3334 }
3335 
3336 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3337 				    unsigned int keylen)
3338 {
3339 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3340 	int error;
3341 
3342 	if (keylen < 3) {
3343 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3344 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3345 		aeadctx->enckey_len = 0;
3346 		return	-EINVAL;
3347 	}
3348 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3349 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3350 			      CRYPTO_TFM_REQ_MASK);
3351 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3352 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3353 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3354 			      CRYPTO_TFM_RES_MASK);
3355 	if (error)
3356 		return error;
3357 	keylen -= 3;
3358 	memcpy(aeadctx->salt, key + keylen, 3);
3359 	return chcr_ccm_common_setkey(aead, key, keylen);
3360 }
3361 
3362 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3363 			   unsigned int keylen)
3364 {
3365 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3366 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3367 	struct crypto_cipher *cipher;
3368 	unsigned int ck_size;
3369 	int ret = 0, key_ctx_size = 0;
3370 
3371 	aeadctx->enckey_len = 0;
3372 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3373 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3374 			      & CRYPTO_TFM_REQ_MASK);
3375 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3376 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3377 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3378 			      CRYPTO_TFM_RES_MASK);
3379 	if (ret)
3380 		goto out;
3381 
3382 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3383 	    keylen > 3) {
3384 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3385 		memcpy(aeadctx->salt, key + keylen, 4);
3386 	}
3387 	if (keylen == AES_KEYSIZE_128) {
3388 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3389 	} else if (keylen == AES_KEYSIZE_192) {
3390 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3391 	} else if (keylen == AES_KEYSIZE_256) {
3392 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3393 	} else {
3394 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3395 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3396 		pr_err("GCM: Invalid key length %d\n", keylen);
3397 		ret = -EINVAL;
3398 		goto out;
3399 	}
3400 
3401 	memcpy(aeadctx->key, key, keylen);
3402 	aeadctx->enckey_len = keylen;
3403 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3404 		AEAD_H_SIZE;
3405 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3406 						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3407 						0, 0,
3408 						key_ctx_size >> 4);
3409 	/* Calculate the H = CIPH(K, 0 repeated 16 times).
3410 	 * It will go in key context
3411 	 */
3412 	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3413 	if (IS_ERR(cipher)) {
3414 		aeadctx->enckey_len = 0;
3415 		ret = -ENOMEM;
3416 		goto out;
3417 	}
3418 
3419 	ret = crypto_cipher_setkey(cipher, key, keylen);
3420 	if (ret) {
3421 		aeadctx->enckey_len = 0;
3422 		goto out1;
3423 	}
3424 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3425 	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3426 
3427 out1:
3428 	crypto_free_cipher(cipher);
3429 out:
3430 	return ret;
3431 }
3432 
3433 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3434 				   unsigned int keylen)
3435 {
3436 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3437 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3438 	/* it contains auth and cipher key both*/
3439 	struct crypto_authenc_keys keys;
3440 	unsigned int bs, subtype;
3441 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3442 	int err = 0, i, key_ctx_len = 0;
3443 	unsigned char ck_size = 0;
3444 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3445 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3446 	struct algo_param param;
3447 	int align;
3448 	u8 *o_ptr = NULL;
3449 
3450 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3451 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3452 			      & CRYPTO_TFM_REQ_MASK);
3453 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3454 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3455 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3456 			      & CRYPTO_TFM_RES_MASK);
3457 	if (err)
3458 		goto out;
3459 
3460 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3461 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3462 		goto out;
3463 	}
3464 
3465 	if (get_alg_config(&param, max_authsize)) {
3466 		pr_err("chcr : Unsupported digest size\n");
3467 		goto out;
3468 	}
3469 	subtype = get_aead_subtype(authenc);
3470 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3471 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3472 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3473 			goto out;
3474 		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3475 		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3476 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3477 	}
3478 	if (keys.enckeylen == AES_KEYSIZE_128) {
3479 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3480 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3481 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3482 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3483 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3484 	} else {
3485 		pr_err("chcr : Unsupported cipher key\n");
3486 		goto out;
3487 	}
3488 
3489 	/* Copy only encryption key. We use authkey to generate h(ipad) and
3490 	 * h(opad) so authkey is not needed again. authkeylen size have the
3491 	 * size of the hash digest size.
3492 	 */
3493 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3494 	aeadctx->enckey_len = keys.enckeylen;
3495 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3496 		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3497 
3498 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3499 			    aeadctx->enckey_len << 3);
3500 	}
3501 	base_hash  = chcr_alloc_shash(max_authsize);
3502 	if (IS_ERR(base_hash)) {
3503 		pr_err("chcr : Base driver cannot be loaded\n");
3504 		aeadctx->enckey_len = 0;
3505 		memzero_explicit(&keys, sizeof(keys));
3506 		return -EINVAL;
3507 	}
3508 	{
3509 		SHASH_DESC_ON_STACK(shash, base_hash);
3510 
3511 		shash->tfm = base_hash;
3512 		bs = crypto_shash_blocksize(base_hash);
3513 		align = KEYCTX_ALIGN_PAD(max_authsize);
3514 		o_ptr =  actx->h_iopad + param.result_size + align;
3515 
3516 		if (keys.authkeylen > bs) {
3517 			err = crypto_shash_digest(shash, keys.authkey,
3518 						  keys.authkeylen,
3519 						  o_ptr);
3520 			if (err) {
3521 				pr_err("chcr : Base driver cannot be loaded\n");
3522 				goto out;
3523 			}
3524 			keys.authkeylen = max_authsize;
3525 		} else
3526 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3527 
3528 		/* Compute the ipad-digest*/
3529 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3530 		memcpy(pad, o_ptr, keys.authkeylen);
3531 		for (i = 0; i < bs >> 2; i++)
3532 			*((unsigned int *)pad + i) ^= IPAD_DATA;
3533 
3534 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3535 					      max_authsize))
3536 			goto out;
3537 		/* Compute the opad-digest */
3538 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3539 		memcpy(pad, o_ptr, keys.authkeylen);
3540 		for (i = 0; i < bs >> 2; i++)
3541 			*((unsigned int *)pad + i) ^= OPAD_DATA;
3542 
3543 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3544 			goto out;
3545 
3546 		/* convert the ipad and opad digest to network order */
3547 		chcr_change_order(actx->h_iopad, param.result_size);
3548 		chcr_change_order(o_ptr, param.result_size);
3549 		key_ctx_len = sizeof(struct _key_ctx) +
3550 			roundup(keys.enckeylen, 16) +
3551 			(param.result_size + align) * 2;
3552 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3553 						0, 1, key_ctx_len >> 4);
3554 		actx->auth_mode = param.auth_mode;
3555 		chcr_free_shash(base_hash);
3556 
3557 		memzero_explicit(&keys, sizeof(keys));
3558 		return 0;
3559 	}
3560 out:
3561 	aeadctx->enckey_len = 0;
3562 	memzero_explicit(&keys, sizeof(keys));
3563 	if (!IS_ERR(base_hash))
3564 		chcr_free_shash(base_hash);
3565 	return -EINVAL;
3566 }
3567 
3568 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3569 					const u8 *key, unsigned int keylen)
3570 {
3571 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3572 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3573 	struct crypto_authenc_keys keys;
3574 	int err;
3575 	/* it contains auth and cipher key both*/
3576 	unsigned int subtype;
3577 	int key_ctx_len = 0;
3578 	unsigned char ck_size = 0;
3579 
3580 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3581 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3582 			      & CRYPTO_TFM_REQ_MASK);
3583 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3584 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3585 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3586 			      & CRYPTO_TFM_RES_MASK);
3587 	if (err)
3588 		goto out;
3589 
3590 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3591 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3592 		goto out;
3593 	}
3594 	subtype = get_aead_subtype(authenc);
3595 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3596 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3597 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3598 			goto out;
3599 		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3600 			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3601 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3602 	}
3603 	if (keys.enckeylen == AES_KEYSIZE_128) {
3604 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3605 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3606 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3607 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3608 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3609 	} else {
3610 		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3611 		goto out;
3612 	}
3613 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3614 	aeadctx->enckey_len = keys.enckeylen;
3615 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3616 	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3617 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3618 				aeadctx->enckey_len << 3);
3619 	}
3620 	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3621 
3622 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3623 						0, key_ctx_len >> 4);
3624 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3625 	memzero_explicit(&keys, sizeof(keys));
3626 	return 0;
3627 out:
3628 	aeadctx->enckey_len = 0;
3629 	memzero_explicit(&keys, sizeof(keys));
3630 	return -EINVAL;
3631 }
3632 
3633 static int chcr_aead_op(struct aead_request *req,
3634 			int size,
3635 			create_wr_t create_wr_fn)
3636 {
3637 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3638 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3639 	struct uld_ctx *u_ctx;
3640 	struct sk_buff *skb;
3641 	int isfull = 0;
3642 	struct chcr_dev *cdev;
3643 
3644 	cdev = a_ctx(tfm)->dev;
3645 	if (!cdev) {
3646 		pr_err("chcr : %s : No crypto device.\n", __func__);
3647 		return -ENXIO;
3648 	}
3649 
3650 	if (chcr_inc_wrcount(cdev)) {
3651 	/* Detach state for CHCR means lldi or padap is freed.
3652 	 * We cannot increment fallback here.
3653 	 */
3654 		return chcr_aead_fallback(req, reqctx->op);
3655 	}
3656 
3657 	u_ctx = ULD_CTX(a_ctx(tfm));
3658 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3659 				   a_ctx(tfm)->tx_qidx)) {
3660 		isfull = 1;
3661 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3662 			chcr_dec_wrcount(cdev);
3663 			return -ENOSPC;
3664 		}
3665 	}
3666 
3667 	/* Form a WR from req */
3668 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
3669 
3670 	if (IS_ERR_OR_NULL(skb)) {
3671 		chcr_dec_wrcount(cdev);
3672 		return PTR_ERR_OR_ZERO(skb);
3673 	}
3674 
3675 	skb->dev = u_ctx->lldi.ports[0];
3676 	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3677 	chcr_send_wr(skb);
3678 	return isfull ? -EBUSY : -EINPROGRESS;
3679 }
3680 
3681 static int chcr_aead_encrypt(struct aead_request *req)
3682 {
3683 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3684 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3685 
3686 	reqctx->verify = VERIFY_HW;
3687 	reqctx->op = CHCR_ENCRYPT_OP;
3688 
3689 	switch (get_aead_subtype(tfm)) {
3690 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3691 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3692 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3693 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3694 		return chcr_aead_op(req, 0, create_authenc_wr);
3695 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3696 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3697 		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3698 	default:
3699 		return chcr_aead_op(req, 0, create_gcm_wr);
3700 	}
3701 }
3702 
3703 static int chcr_aead_decrypt(struct aead_request *req)
3704 {
3705 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3706 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3707 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3708 	int size;
3709 
3710 	if (aeadctx->mayverify == VERIFY_SW) {
3711 		size = crypto_aead_maxauthsize(tfm);
3712 		reqctx->verify = VERIFY_SW;
3713 	} else {
3714 		size = 0;
3715 		reqctx->verify = VERIFY_HW;
3716 	}
3717 	reqctx->op = CHCR_DECRYPT_OP;
3718 	switch (get_aead_subtype(tfm)) {
3719 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3720 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3721 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3722 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3723 		return chcr_aead_op(req, size, create_authenc_wr);
3724 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3725 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3726 		return chcr_aead_op(req, size, create_aead_ccm_wr);
3727 	default:
3728 		return chcr_aead_op(req, size, create_gcm_wr);
3729 	}
3730 }
3731 
3732 static struct chcr_alg_template driver_algs[] = {
3733 	/* AES-CBC */
3734 	{
3735 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3736 		.is_registered = 0,
3737 		.alg.crypto = {
3738 			.cra_name		= "cbc(aes)",
3739 			.cra_driver_name	= "cbc-aes-chcr",
3740 			.cra_blocksize		= AES_BLOCK_SIZE,
3741 			.cra_init		= chcr_cra_init,
3742 			.cra_exit		= chcr_cra_exit,
3743 			.cra_u.ablkcipher	= {
3744 				.min_keysize	= AES_MIN_KEY_SIZE,
3745 				.max_keysize	= AES_MAX_KEY_SIZE,
3746 				.ivsize		= AES_BLOCK_SIZE,
3747 				.setkey			= chcr_aes_cbc_setkey,
3748 				.encrypt		= chcr_aes_encrypt,
3749 				.decrypt		= chcr_aes_decrypt,
3750 			}
3751 		}
3752 	},
3753 	{
3754 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3755 		.is_registered = 0,
3756 		.alg.crypto =   {
3757 			.cra_name		= "xts(aes)",
3758 			.cra_driver_name	= "xts-aes-chcr",
3759 			.cra_blocksize		= AES_BLOCK_SIZE,
3760 			.cra_init		= chcr_cra_init,
3761 			.cra_exit		= NULL,
3762 			.cra_u .ablkcipher = {
3763 					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
3764 					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
3765 					.ivsize		= AES_BLOCK_SIZE,
3766 					.setkey		= chcr_aes_xts_setkey,
3767 					.encrypt	= chcr_aes_encrypt,
3768 					.decrypt	= chcr_aes_decrypt,
3769 				}
3770 			}
3771 	},
3772 	{
3773 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3774 		.is_registered = 0,
3775 		.alg.crypto = {
3776 			.cra_name		= "ctr(aes)",
3777 			.cra_driver_name	= "ctr-aes-chcr",
3778 			.cra_blocksize		= 1,
3779 			.cra_init		= chcr_cra_init,
3780 			.cra_exit		= chcr_cra_exit,
3781 			.cra_u.ablkcipher	= {
3782 				.min_keysize	= AES_MIN_KEY_SIZE,
3783 				.max_keysize	= AES_MAX_KEY_SIZE,
3784 				.ivsize		= AES_BLOCK_SIZE,
3785 				.setkey		= chcr_aes_ctr_setkey,
3786 				.encrypt	= chcr_aes_encrypt,
3787 				.decrypt	= chcr_aes_decrypt,
3788 			}
3789 		}
3790 	},
3791 	{
3792 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3793 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3794 		.is_registered = 0,
3795 		.alg.crypto = {
3796 			.cra_name		= "rfc3686(ctr(aes))",
3797 			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3798 			.cra_blocksize		= 1,
3799 			.cra_init		= chcr_rfc3686_init,
3800 			.cra_exit		= chcr_cra_exit,
3801 			.cra_u.ablkcipher	= {
3802 				.min_keysize	= AES_MIN_KEY_SIZE +
3803 					CTR_RFC3686_NONCE_SIZE,
3804 				.max_keysize	= AES_MAX_KEY_SIZE +
3805 					CTR_RFC3686_NONCE_SIZE,
3806 				.ivsize		= CTR_RFC3686_IV_SIZE,
3807 				.setkey		= chcr_aes_rfc3686_setkey,
3808 				.encrypt	= chcr_aes_encrypt,
3809 				.decrypt	= chcr_aes_decrypt,
3810 			}
3811 		}
3812 	},
3813 	/* SHA */
3814 	{
3815 		.type = CRYPTO_ALG_TYPE_AHASH,
3816 		.is_registered = 0,
3817 		.alg.hash = {
3818 			.halg.digestsize = SHA1_DIGEST_SIZE,
3819 			.halg.base = {
3820 				.cra_name = "sha1",
3821 				.cra_driver_name = "sha1-chcr",
3822 				.cra_blocksize = SHA1_BLOCK_SIZE,
3823 			}
3824 		}
3825 	},
3826 	{
3827 		.type = CRYPTO_ALG_TYPE_AHASH,
3828 		.is_registered = 0,
3829 		.alg.hash = {
3830 			.halg.digestsize = SHA256_DIGEST_SIZE,
3831 			.halg.base = {
3832 				.cra_name = "sha256",
3833 				.cra_driver_name = "sha256-chcr",
3834 				.cra_blocksize = SHA256_BLOCK_SIZE,
3835 			}
3836 		}
3837 	},
3838 	{
3839 		.type = CRYPTO_ALG_TYPE_AHASH,
3840 		.is_registered = 0,
3841 		.alg.hash = {
3842 			.halg.digestsize = SHA224_DIGEST_SIZE,
3843 			.halg.base = {
3844 				.cra_name = "sha224",
3845 				.cra_driver_name = "sha224-chcr",
3846 				.cra_blocksize = SHA224_BLOCK_SIZE,
3847 			}
3848 		}
3849 	},
3850 	{
3851 		.type = CRYPTO_ALG_TYPE_AHASH,
3852 		.is_registered = 0,
3853 		.alg.hash = {
3854 			.halg.digestsize = SHA384_DIGEST_SIZE,
3855 			.halg.base = {
3856 				.cra_name = "sha384",
3857 				.cra_driver_name = "sha384-chcr",
3858 				.cra_blocksize = SHA384_BLOCK_SIZE,
3859 			}
3860 		}
3861 	},
3862 	{
3863 		.type = CRYPTO_ALG_TYPE_AHASH,
3864 		.is_registered = 0,
3865 		.alg.hash = {
3866 			.halg.digestsize = SHA512_DIGEST_SIZE,
3867 			.halg.base = {
3868 				.cra_name = "sha512",
3869 				.cra_driver_name = "sha512-chcr",
3870 				.cra_blocksize = SHA512_BLOCK_SIZE,
3871 			}
3872 		}
3873 	},
3874 	/* HMAC */
3875 	{
3876 		.type = CRYPTO_ALG_TYPE_HMAC,
3877 		.is_registered = 0,
3878 		.alg.hash = {
3879 			.halg.digestsize = SHA1_DIGEST_SIZE,
3880 			.halg.base = {
3881 				.cra_name = "hmac(sha1)",
3882 				.cra_driver_name = "hmac-sha1-chcr",
3883 				.cra_blocksize = SHA1_BLOCK_SIZE,
3884 			}
3885 		}
3886 	},
3887 	{
3888 		.type = CRYPTO_ALG_TYPE_HMAC,
3889 		.is_registered = 0,
3890 		.alg.hash = {
3891 			.halg.digestsize = SHA224_DIGEST_SIZE,
3892 			.halg.base = {
3893 				.cra_name = "hmac(sha224)",
3894 				.cra_driver_name = "hmac-sha224-chcr",
3895 				.cra_blocksize = SHA224_BLOCK_SIZE,
3896 			}
3897 		}
3898 	},
3899 	{
3900 		.type = CRYPTO_ALG_TYPE_HMAC,
3901 		.is_registered = 0,
3902 		.alg.hash = {
3903 			.halg.digestsize = SHA256_DIGEST_SIZE,
3904 			.halg.base = {
3905 				.cra_name = "hmac(sha256)",
3906 				.cra_driver_name = "hmac-sha256-chcr",
3907 				.cra_blocksize = SHA256_BLOCK_SIZE,
3908 			}
3909 		}
3910 	},
3911 	{
3912 		.type = CRYPTO_ALG_TYPE_HMAC,
3913 		.is_registered = 0,
3914 		.alg.hash = {
3915 			.halg.digestsize = SHA384_DIGEST_SIZE,
3916 			.halg.base = {
3917 				.cra_name = "hmac(sha384)",
3918 				.cra_driver_name = "hmac-sha384-chcr",
3919 				.cra_blocksize = SHA384_BLOCK_SIZE,
3920 			}
3921 		}
3922 	},
3923 	{
3924 		.type = CRYPTO_ALG_TYPE_HMAC,
3925 		.is_registered = 0,
3926 		.alg.hash = {
3927 			.halg.digestsize = SHA512_DIGEST_SIZE,
3928 			.halg.base = {
3929 				.cra_name = "hmac(sha512)",
3930 				.cra_driver_name = "hmac-sha512-chcr",
3931 				.cra_blocksize = SHA512_BLOCK_SIZE,
3932 			}
3933 		}
3934 	},
3935 	/* Add AEAD Algorithms */
3936 	{
3937 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3938 		.is_registered = 0,
3939 		.alg.aead = {
3940 			.base = {
3941 				.cra_name = "gcm(aes)",
3942 				.cra_driver_name = "gcm-aes-chcr",
3943 				.cra_blocksize	= 1,
3944 				.cra_priority = CHCR_AEAD_PRIORITY,
3945 				.cra_ctxsize =	sizeof(struct chcr_context) +
3946 						sizeof(struct chcr_aead_ctx) +
3947 						sizeof(struct chcr_gcm_ctx),
3948 			},
3949 			.ivsize = GCM_AES_IV_SIZE,
3950 			.maxauthsize = GHASH_DIGEST_SIZE,
3951 			.setkey = chcr_gcm_setkey,
3952 			.setauthsize = chcr_gcm_setauthsize,
3953 		}
3954 	},
3955 	{
3956 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3957 		.is_registered = 0,
3958 		.alg.aead = {
3959 			.base = {
3960 				.cra_name = "rfc4106(gcm(aes))",
3961 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
3962 				.cra_blocksize	 = 1,
3963 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3964 				.cra_ctxsize =	sizeof(struct chcr_context) +
3965 						sizeof(struct chcr_aead_ctx) +
3966 						sizeof(struct chcr_gcm_ctx),
3967 
3968 			},
3969 			.ivsize = GCM_RFC4106_IV_SIZE,
3970 			.maxauthsize	= GHASH_DIGEST_SIZE,
3971 			.setkey = chcr_gcm_setkey,
3972 			.setauthsize	= chcr_4106_4309_setauthsize,
3973 		}
3974 	},
3975 	{
3976 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3977 		.is_registered = 0,
3978 		.alg.aead = {
3979 			.base = {
3980 				.cra_name = "ccm(aes)",
3981 				.cra_driver_name = "ccm-aes-chcr",
3982 				.cra_blocksize	 = 1,
3983 				.cra_priority = CHCR_AEAD_PRIORITY,
3984 				.cra_ctxsize =	sizeof(struct chcr_context) +
3985 						sizeof(struct chcr_aead_ctx),
3986 
3987 			},
3988 			.ivsize = AES_BLOCK_SIZE,
3989 			.maxauthsize	= GHASH_DIGEST_SIZE,
3990 			.setkey = chcr_aead_ccm_setkey,
3991 			.setauthsize	= chcr_ccm_setauthsize,
3992 		}
3993 	},
3994 	{
3995 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3996 		.is_registered = 0,
3997 		.alg.aead = {
3998 			.base = {
3999 				.cra_name = "rfc4309(ccm(aes))",
4000 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
4001 				.cra_blocksize	 = 1,
4002 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4003 				.cra_ctxsize =	sizeof(struct chcr_context) +
4004 						sizeof(struct chcr_aead_ctx),
4005 
4006 			},
4007 			.ivsize = 8,
4008 			.maxauthsize	= GHASH_DIGEST_SIZE,
4009 			.setkey = chcr_aead_rfc4309_setkey,
4010 			.setauthsize = chcr_4106_4309_setauthsize,
4011 		}
4012 	},
4013 	{
4014 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4015 		.is_registered = 0,
4016 		.alg.aead = {
4017 			.base = {
4018 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
4019 				.cra_driver_name =
4020 					"authenc-hmac-sha1-cbc-aes-chcr",
4021 				.cra_blocksize	 = AES_BLOCK_SIZE,
4022 				.cra_priority = CHCR_AEAD_PRIORITY,
4023 				.cra_ctxsize =	sizeof(struct chcr_context) +
4024 						sizeof(struct chcr_aead_ctx) +
4025 						sizeof(struct chcr_authenc_ctx),
4026 
4027 			},
4028 			.ivsize = AES_BLOCK_SIZE,
4029 			.maxauthsize = SHA1_DIGEST_SIZE,
4030 			.setkey = chcr_authenc_setkey,
4031 			.setauthsize = chcr_authenc_setauthsize,
4032 		}
4033 	},
4034 	{
4035 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4036 		.is_registered = 0,
4037 		.alg.aead = {
4038 			.base = {
4039 
4040 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4041 				.cra_driver_name =
4042 					"authenc-hmac-sha256-cbc-aes-chcr",
4043 				.cra_blocksize	 = AES_BLOCK_SIZE,
4044 				.cra_priority = CHCR_AEAD_PRIORITY,
4045 				.cra_ctxsize =	sizeof(struct chcr_context) +
4046 						sizeof(struct chcr_aead_ctx) +
4047 						sizeof(struct chcr_authenc_ctx),
4048 
4049 			},
4050 			.ivsize = AES_BLOCK_SIZE,
4051 			.maxauthsize	= SHA256_DIGEST_SIZE,
4052 			.setkey = chcr_authenc_setkey,
4053 			.setauthsize = chcr_authenc_setauthsize,
4054 		}
4055 	},
4056 	{
4057 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4058 		.is_registered = 0,
4059 		.alg.aead = {
4060 			.base = {
4061 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4062 				.cra_driver_name =
4063 					"authenc-hmac-sha224-cbc-aes-chcr",
4064 				.cra_blocksize	 = AES_BLOCK_SIZE,
4065 				.cra_priority = CHCR_AEAD_PRIORITY,
4066 				.cra_ctxsize =	sizeof(struct chcr_context) +
4067 						sizeof(struct chcr_aead_ctx) +
4068 						sizeof(struct chcr_authenc_ctx),
4069 			},
4070 			.ivsize = AES_BLOCK_SIZE,
4071 			.maxauthsize = SHA224_DIGEST_SIZE,
4072 			.setkey = chcr_authenc_setkey,
4073 			.setauthsize = chcr_authenc_setauthsize,
4074 		}
4075 	},
4076 	{
4077 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4078 		.is_registered = 0,
4079 		.alg.aead = {
4080 			.base = {
4081 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4082 				.cra_driver_name =
4083 					"authenc-hmac-sha384-cbc-aes-chcr",
4084 				.cra_blocksize	 = AES_BLOCK_SIZE,
4085 				.cra_priority = CHCR_AEAD_PRIORITY,
4086 				.cra_ctxsize =	sizeof(struct chcr_context) +
4087 						sizeof(struct chcr_aead_ctx) +
4088 						sizeof(struct chcr_authenc_ctx),
4089 
4090 			},
4091 			.ivsize = AES_BLOCK_SIZE,
4092 			.maxauthsize = SHA384_DIGEST_SIZE,
4093 			.setkey = chcr_authenc_setkey,
4094 			.setauthsize = chcr_authenc_setauthsize,
4095 		}
4096 	},
4097 	{
4098 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4099 		.is_registered = 0,
4100 		.alg.aead = {
4101 			.base = {
4102 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4103 				.cra_driver_name =
4104 					"authenc-hmac-sha512-cbc-aes-chcr",
4105 				.cra_blocksize	 = AES_BLOCK_SIZE,
4106 				.cra_priority = CHCR_AEAD_PRIORITY,
4107 				.cra_ctxsize =	sizeof(struct chcr_context) +
4108 						sizeof(struct chcr_aead_ctx) +
4109 						sizeof(struct chcr_authenc_ctx),
4110 
4111 			},
4112 			.ivsize = AES_BLOCK_SIZE,
4113 			.maxauthsize = SHA512_DIGEST_SIZE,
4114 			.setkey = chcr_authenc_setkey,
4115 			.setauthsize = chcr_authenc_setauthsize,
4116 		}
4117 	},
4118 	{
4119 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4120 		.is_registered = 0,
4121 		.alg.aead = {
4122 			.base = {
4123 				.cra_name = "authenc(digest_null,cbc(aes))",
4124 				.cra_driver_name =
4125 					"authenc-digest_null-cbc-aes-chcr",
4126 				.cra_blocksize	 = AES_BLOCK_SIZE,
4127 				.cra_priority = CHCR_AEAD_PRIORITY,
4128 				.cra_ctxsize =	sizeof(struct chcr_context) +
4129 						sizeof(struct chcr_aead_ctx) +
4130 						sizeof(struct chcr_authenc_ctx),
4131 
4132 			},
4133 			.ivsize  = AES_BLOCK_SIZE,
4134 			.maxauthsize = 0,
4135 			.setkey  = chcr_aead_digest_null_setkey,
4136 			.setauthsize = chcr_authenc_null_setauthsize,
4137 		}
4138 	},
4139 	{
4140 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4141 		.is_registered = 0,
4142 		.alg.aead = {
4143 			.base = {
4144 				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4145 				.cra_driver_name =
4146 				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4147 				.cra_blocksize	 = 1,
4148 				.cra_priority = CHCR_AEAD_PRIORITY,
4149 				.cra_ctxsize =	sizeof(struct chcr_context) +
4150 						sizeof(struct chcr_aead_ctx) +
4151 						sizeof(struct chcr_authenc_ctx),
4152 
4153 			},
4154 			.ivsize = CTR_RFC3686_IV_SIZE,
4155 			.maxauthsize = SHA1_DIGEST_SIZE,
4156 			.setkey = chcr_authenc_setkey,
4157 			.setauthsize = chcr_authenc_setauthsize,
4158 		}
4159 	},
4160 	{
4161 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4162 		.is_registered = 0,
4163 		.alg.aead = {
4164 			.base = {
4165 
4166 				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4167 				.cra_driver_name =
4168 				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4169 				.cra_blocksize	 = 1,
4170 				.cra_priority = CHCR_AEAD_PRIORITY,
4171 				.cra_ctxsize =	sizeof(struct chcr_context) +
4172 						sizeof(struct chcr_aead_ctx) +
4173 						sizeof(struct chcr_authenc_ctx),
4174 
4175 			},
4176 			.ivsize = CTR_RFC3686_IV_SIZE,
4177 			.maxauthsize	= SHA256_DIGEST_SIZE,
4178 			.setkey = chcr_authenc_setkey,
4179 			.setauthsize = chcr_authenc_setauthsize,
4180 		}
4181 	},
4182 	{
4183 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4184 		.is_registered = 0,
4185 		.alg.aead = {
4186 			.base = {
4187 				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4188 				.cra_driver_name =
4189 				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4190 				.cra_blocksize	 = 1,
4191 				.cra_priority = CHCR_AEAD_PRIORITY,
4192 				.cra_ctxsize =	sizeof(struct chcr_context) +
4193 						sizeof(struct chcr_aead_ctx) +
4194 						sizeof(struct chcr_authenc_ctx),
4195 			},
4196 			.ivsize = CTR_RFC3686_IV_SIZE,
4197 			.maxauthsize = SHA224_DIGEST_SIZE,
4198 			.setkey = chcr_authenc_setkey,
4199 			.setauthsize = chcr_authenc_setauthsize,
4200 		}
4201 	},
4202 	{
4203 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4204 		.is_registered = 0,
4205 		.alg.aead = {
4206 			.base = {
4207 				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4208 				.cra_driver_name =
4209 				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4210 				.cra_blocksize	 = 1,
4211 				.cra_priority = CHCR_AEAD_PRIORITY,
4212 				.cra_ctxsize =	sizeof(struct chcr_context) +
4213 						sizeof(struct chcr_aead_ctx) +
4214 						sizeof(struct chcr_authenc_ctx),
4215 
4216 			},
4217 			.ivsize = CTR_RFC3686_IV_SIZE,
4218 			.maxauthsize = SHA384_DIGEST_SIZE,
4219 			.setkey = chcr_authenc_setkey,
4220 			.setauthsize = chcr_authenc_setauthsize,
4221 		}
4222 	},
4223 	{
4224 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4225 		.is_registered = 0,
4226 		.alg.aead = {
4227 			.base = {
4228 				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4229 				.cra_driver_name =
4230 				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4231 				.cra_blocksize	 = 1,
4232 				.cra_priority = CHCR_AEAD_PRIORITY,
4233 				.cra_ctxsize =	sizeof(struct chcr_context) +
4234 						sizeof(struct chcr_aead_ctx) +
4235 						sizeof(struct chcr_authenc_ctx),
4236 
4237 			},
4238 			.ivsize = CTR_RFC3686_IV_SIZE,
4239 			.maxauthsize = SHA512_DIGEST_SIZE,
4240 			.setkey = chcr_authenc_setkey,
4241 			.setauthsize = chcr_authenc_setauthsize,
4242 		}
4243 	},
4244 	{
4245 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4246 		.is_registered = 0,
4247 		.alg.aead = {
4248 			.base = {
4249 				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4250 				.cra_driver_name =
4251 				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4252 				.cra_blocksize	 = 1,
4253 				.cra_priority = CHCR_AEAD_PRIORITY,
4254 				.cra_ctxsize =	sizeof(struct chcr_context) +
4255 						sizeof(struct chcr_aead_ctx) +
4256 						sizeof(struct chcr_authenc_ctx),
4257 
4258 			},
4259 			.ivsize  = CTR_RFC3686_IV_SIZE,
4260 			.maxauthsize = 0,
4261 			.setkey  = chcr_aead_digest_null_setkey,
4262 			.setauthsize = chcr_authenc_null_setauthsize,
4263 		}
4264 	},
4265 };
4266 
4267 /*
4268  *	chcr_unregister_alg - Deregister crypto algorithms with
4269  *	kernel framework.
4270  */
4271 static int chcr_unregister_alg(void)
4272 {
4273 	int i;
4274 
4275 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4276 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4277 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4278 			if (driver_algs[i].is_registered)
4279 				crypto_unregister_alg(
4280 						&driver_algs[i].alg.crypto);
4281 			break;
4282 		case CRYPTO_ALG_TYPE_AEAD:
4283 			if (driver_algs[i].is_registered)
4284 				crypto_unregister_aead(
4285 						&driver_algs[i].alg.aead);
4286 			break;
4287 		case CRYPTO_ALG_TYPE_AHASH:
4288 			if (driver_algs[i].is_registered)
4289 				crypto_unregister_ahash(
4290 						&driver_algs[i].alg.hash);
4291 			break;
4292 		}
4293 		driver_algs[i].is_registered = 0;
4294 	}
4295 	return 0;
4296 }
4297 
4298 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4299 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4300 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4301 
4302 /*
4303  *	chcr_register_alg - Register crypto algorithms with kernel framework.
4304  */
4305 static int chcr_register_alg(void)
4306 {
4307 	struct crypto_alg ai;
4308 	struct ahash_alg *a_hash;
4309 	int err = 0, i;
4310 	char *name = NULL;
4311 
4312 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4313 		if (driver_algs[i].is_registered)
4314 			continue;
4315 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4316 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4317 			driver_algs[i].alg.crypto.cra_priority =
4318 				CHCR_CRA_PRIORITY;
4319 			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4320 			driver_algs[i].alg.crypto.cra_flags =
4321 				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4322 				CRYPTO_ALG_NEED_FALLBACK;
4323 			driver_algs[i].alg.crypto.cra_ctxsize =
4324 				sizeof(struct chcr_context) +
4325 				sizeof(struct ablk_ctx);
4326 			driver_algs[i].alg.crypto.cra_alignmask = 0;
4327 			driver_algs[i].alg.crypto.cra_type =
4328 				&crypto_ablkcipher_type;
4329 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
4330 			name = driver_algs[i].alg.crypto.cra_driver_name;
4331 			break;
4332 		case CRYPTO_ALG_TYPE_AEAD:
4333 			driver_algs[i].alg.aead.base.cra_flags =
4334 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4335 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4336 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4337 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4338 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4339 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4340 			err = crypto_register_aead(&driver_algs[i].alg.aead);
4341 			name = driver_algs[i].alg.aead.base.cra_driver_name;
4342 			break;
4343 		case CRYPTO_ALG_TYPE_AHASH:
4344 			a_hash = &driver_algs[i].alg.hash;
4345 			a_hash->update = chcr_ahash_update;
4346 			a_hash->final = chcr_ahash_final;
4347 			a_hash->finup = chcr_ahash_finup;
4348 			a_hash->digest = chcr_ahash_digest;
4349 			a_hash->export = chcr_ahash_export;
4350 			a_hash->import = chcr_ahash_import;
4351 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4352 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4353 			a_hash->halg.base.cra_module = THIS_MODULE;
4354 			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4355 			a_hash->halg.base.cra_alignmask = 0;
4356 			a_hash->halg.base.cra_exit = NULL;
4357 
4358 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4359 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4360 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4361 				a_hash->init = chcr_hmac_init;
4362 				a_hash->setkey = chcr_ahash_setkey;
4363 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4364 			} else {
4365 				a_hash->init = chcr_sha_init;
4366 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4367 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4368 			}
4369 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4370 			ai = driver_algs[i].alg.hash.halg.base;
4371 			name = ai.cra_driver_name;
4372 			break;
4373 		}
4374 		if (err) {
4375 			pr_err("chcr : %s : Algorithm registration failed\n",
4376 			       name);
4377 			goto register_err;
4378 		} else {
4379 			driver_algs[i].is_registered = 1;
4380 		}
4381 	}
4382 	return 0;
4383 
4384 register_err:
4385 	chcr_unregister_alg();
4386 	return err;
4387 }
4388 
4389 /*
4390  *	start_crypto - Register the crypto algorithms.
4391  *	This should called once when the first device comesup. After this
4392  *	kernel will start calling driver APIs for crypto operations.
4393  */
4394 int start_crypto(void)
4395 {
4396 	return chcr_register_alg();
4397 }
4398 
4399 /*
4400  *	stop_crypto - Deregister all the crypto algorithms with kernel.
4401  *	This should be called once when the last device goes down. After this
4402  *	kernel will not call the driver API for crypto operations.
4403  */
4404 int stop_crypto(void)
4405 {
4406 	chcr_unregister_alg();
4407 	return 0;
4408 }
4409