1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/sha.h>
57 #include <crypto/authenc.h>
58 #include <crypto/internal/aead.h>
59 #include <crypto/null.h>
60 #include <crypto/internal/skcipher.h>
61 #include <crypto/aead.h>
62 #include <crypto/scatterwalk.h>
63 #include <crypto/internal/hash.h>
64 
65 #include "t4fw_api.h"
66 #include "t4_msg.h"
67 #include "chcr_core.h"
68 #include "chcr_algo.h"
69 #include "chcr_crypto.h"
70 
71 static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
72 {
73 	return ctx->crypto_ctx->aeadctx;
74 }
75 
76 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
77 {
78 	return ctx->crypto_ctx->ablkctx;
79 }
80 
81 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
82 {
83 	return ctx->crypto_ctx->hmacctx;
84 }
85 
86 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
87 {
88 	return gctx->ctx->gcm;
89 }
90 
91 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
92 {
93 	return gctx->ctx->authenc;
94 }
95 
96 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
97 {
98 	return ctx->dev->u_ctx;
99 }
100 
101 static inline int is_ofld_imm(const struct sk_buff *skb)
102 {
103 	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
104 }
105 
106 /*
107  *	sgl_len - calculates the size of an SGL of the given capacity
108  *	@n: the number of SGL entries
109  *	Calculates the number of flits needed for a scatter/gather list that
110  *	can hold the given number of entries.
111  */
112 static inline unsigned int sgl_len(unsigned int n)
113 {
114 	n--;
115 	return (3 * n) / 2 + (n & 1) + 2;
116 }
117 
118 static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
119 {
120 	u8 temp[SHA512_DIGEST_SIZE];
121 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
122 	int authsize = crypto_aead_authsize(tfm);
123 	struct cpl_fw6_pld *fw6_pld;
124 	int cmp = 0;
125 
126 	fw6_pld = (struct cpl_fw6_pld *)input;
127 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
128 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
129 		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
130 	} else {
131 
132 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
133 				authsize, req->assoclen +
134 				req->cryptlen - authsize);
135 		cmp = memcmp(temp, (fw6_pld + 1), authsize);
136 	}
137 	if (cmp)
138 		*err = -EBADMSG;
139 	else
140 		*err = 0;
141 }
142 
143 /*
144  *	chcr_handle_resp - Unmap the DMA buffers associated with the request
145  *	@req: crypto request
146  */
147 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
148 			 int err)
149 {
150 	struct crypto_tfm *tfm = req->tfm;
151 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
152 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
153 	struct chcr_req_ctx ctx_req;
154 	struct cpl_fw6_pld *fw6_pld;
155 	unsigned int digestsize, updated_digestsize;
156 
157 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
158 	case CRYPTO_ALG_TYPE_AEAD:
159 		ctx_req.req.aead_req = (struct aead_request *)req;
160 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
161 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
162 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
163 		if (ctx_req.ctx.reqctx->skb) {
164 			kfree_skb(ctx_req.ctx.reqctx->skb);
165 			ctx_req.ctx.reqctx->skb = NULL;
166 		}
167 		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
168 			chcr_verify_tag(ctx_req.req.aead_req, input,
169 					&err);
170 			ctx_req.ctx.reqctx->verify = VERIFY_HW;
171 		}
172 		break;
173 
174 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
175 		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
176 		ctx_req.ctx.ablk_ctx =
177 			ablkcipher_request_ctx(ctx_req.req.ablk_req);
178 		if (!err) {
179 			fw6_pld = (struct cpl_fw6_pld *)input;
180 			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
181 			       AES_BLOCK_SIZE);
182 		}
183 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
184 			     ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
185 		if (ctx_req.ctx.ablk_ctx->skb) {
186 			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
187 			ctx_req.ctx.ablk_ctx->skb = NULL;
188 		}
189 		break;
190 
191 	case CRYPTO_ALG_TYPE_AHASH:
192 		ctx_req.req.ahash_req = (struct ahash_request *)req;
193 		ctx_req.ctx.ahash_ctx =
194 			ahash_request_ctx(ctx_req.req.ahash_req);
195 		digestsize =
196 			crypto_ahash_digestsize(crypto_ahash_reqtfm(
197 							ctx_req.req.ahash_req));
198 		updated_digestsize = digestsize;
199 		if (digestsize == SHA224_DIGEST_SIZE)
200 			updated_digestsize = SHA256_DIGEST_SIZE;
201 		else if (digestsize == SHA384_DIGEST_SIZE)
202 			updated_digestsize = SHA512_DIGEST_SIZE;
203 		if (ctx_req.ctx.ahash_ctx->skb) {
204 			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
205 			ctx_req.ctx.ahash_ctx->skb = NULL;
206 		}
207 		if (ctx_req.ctx.ahash_ctx->result == 1) {
208 			ctx_req.ctx.ahash_ctx->result = 0;
209 			memcpy(ctx_req.req.ahash_req->result, input +
210 			       sizeof(struct cpl_fw6_pld),
211 			       digestsize);
212 		} else {
213 			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
214 			       sizeof(struct cpl_fw6_pld),
215 			       updated_digestsize);
216 		}
217 		break;
218 	}
219 	return err;
220 }
221 
222 /*
223  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
224  *	@skb: the packet
225  *	Returns the number of flits needed for the given offload packet.
226  *	These packets are already fully constructed and no additional headers
227  *	will be added.
228  */
229 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
230 {
231 	unsigned int flits, cnt;
232 
233 	if (is_ofld_imm(skb))
234 		return DIV_ROUND_UP(skb->len, 8);
235 
236 	flits = skb_transport_offset(skb) / 8;   /* headers */
237 	cnt = skb_shinfo(skb)->nr_frags;
238 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
239 		cnt++;
240 	return flits + sgl_len(cnt);
241 }
242 
243 static inline void get_aes_decrypt_key(unsigned char *dec_key,
244 				       const unsigned char *key,
245 				       unsigned int keylength)
246 {
247 	u32 temp;
248 	u32 w_ring[MAX_NK];
249 	int i, j, k;
250 	u8  nr, nk;
251 
252 	switch (keylength) {
253 	case AES_KEYLENGTH_128BIT:
254 		nk = KEYLENGTH_4BYTES;
255 		nr = NUMBER_OF_ROUNDS_10;
256 		break;
257 	case AES_KEYLENGTH_192BIT:
258 		nk = KEYLENGTH_6BYTES;
259 		nr = NUMBER_OF_ROUNDS_12;
260 		break;
261 	case AES_KEYLENGTH_256BIT:
262 		nk = KEYLENGTH_8BYTES;
263 		nr = NUMBER_OF_ROUNDS_14;
264 		break;
265 	default:
266 		return;
267 	}
268 	for (i = 0; i < nk; i++)
269 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
270 
271 	i = 0;
272 	temp = w_ring[nk - 1];
273 	while (i + nk < (nr + 1) * 4) {
274 		if (!(i % nk)) {
275 			/* RotWord(temp) */
276 			temp = (temp << 8) | (temp >> 24);
277 			temp = aes_ks_subword(temp);
278 			temp ^= round_constant[i / nk];
279 		} else if (nk == 8 && (i % 4 == 0)) {
280 			temp = aes_ks_subword(temp);
281 		}
282 		w_ring[i % nk] ^= temp;
283 		temp = w_ring[i % nk];
284 		i++;
285 	}
286 	i--;
287 	for (k = 0, j = i % nk; k < nk; k++) {
288 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
289 		j--;
290 		if (j < 0)
291 			j += nk;
292 	}
293 }
294 
295 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
296 {
297 	struct crypto_shash *base_hash = NULL;
298 
299 	switch (ds) {
300 	case SHA1_DIGEST_SIZE:
301 		base_hash = crypto_alloc_shash("sha1", 0, 0);
302 		break;
303 	case SHA224_DIGEST_SIZE:
304 		base_hash = crypto_alloc_shash("sha224", 0, 0);
305 		break;
306 	case SHA256_DIGEST_SIZE:
307 		base_hash = crypto_alloc_shash("sha256", 0, 0);
308 		break;
309 	case SHA384_DIGEST_SIZE:
310 		base_hash = crypto_alloc_shash("sha384", 0, 0);
311 		break;
312 	case SHA512_DIGEST_SIZE:
313 		base_hash = crypto_alloc_shash("sha512", 0, 0);
314 		break;
315 	}
316 
317 	return base_hash;
318 }
319 
320 static int chcr_compute_partial_hash(struct shash_desc *desc,
321 				     char *iopad, char *result_hash,
322 				     int digest_size)
323 {
324 	struct sha1_state sha1_st;
325 	struct sha256_state sha256_st;
326 	struct sha512_state sha512_st;
327 	int error;
328 
329 	if (digest_size == SHA1_DIGEST_SIZE) {
330 		error = crypto_shash_init(desc) ?:
331 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
332 			crypto_shash_export(desc, (void *)&sha1_st);
333 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
334 	} else if (digest_size == SHA224_DIGEST_SIZE) {
335 		error = crypto_shash_init(desc) ?:
336 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
337 			crypto_shash_export(desc, (void *)&sha256_st);
338 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
339 
340 	} else if (digest_size == SHA256_DIGEST_SIZE) {
341 		error = crypto_shash_init(desc) ?:
342 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
343 			crypto_shash_export(desc, (void *)&sha256_st);
344 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
345 
346 	} else if (digest_size == SHA384_DIGEST_SIZE) {
347 		error = crypto_shash_init(desc) ?:
348 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
349 			crypto_shash_export(desc, (void *)&sha512_st);
350 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
351 
352 	} else if (digest_size == SHA512_DIGEST_SIZE) {
353 		error = crypto_shash_init(desc) ?:
354 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
355 			crypto_shash_export(desc, (void *)&sha512_st);
356 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
357 	} else {
358 		error = -EINVAL;
359 		pr_err("Unknown digest size %d\n", digest_size);
360 	}
361 	return error;
362 }
363 
364 static void chcr_change_order(char *buf, int ds)
365 {
366 	int i;
367 
368 	if (ds == SHA512_DIGEST_SIZE) {
369 		for (i = 0; i < (ds / sizeof(u64)); i++)
370 			*((__be64 *)buf + i) =
371 				cpu_to_be64(*((u64 *)buf + i));
372 	} else {
373 		for (i = 0; i < (ds / sizeof(u32)); i++)
374 			*((__be32 *)buf + i) =
375 				cpu_to_be32(*((u32 *)buf + i));
376 	}
377 }
378 
379 static inline int is_hmac(struct crypto_tfm *tfm)
380 {
381 	struct crypto_alg *alg = tfm->__crt_alg;
382 	struct chcr_alg_template *chcr_crypto_alg =
383 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
384 			     alg.hash);
385 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
386 		return 1;
387 	return 0;
388 }
389 
390 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
391 			   struct scatterlist *sg,
392 			   struct phys_sge_parm *sg_param)
393 {
394 	struct phys_sge_pairs *to;
395 	int out_buf_size = sg_param->obsize;
396 	unsigned int nents = sg_param->nents, i, j = 0;
397 
398 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
399 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
400 	phys_cpl->pcirlxorder_to_noofsgentr =
401 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
402 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
403 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
404 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
405 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
406 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
407 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
408 	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
409 	phys_cpl->rss_hdr_int.hash_val = 0;
410 	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
411 				       sizeof(struct cpl_rx_phys_dsgl));
412 
413 	for (i = 0; nents; to++) {
414 		for (j = 0; j < 8 && nents; j++, nents--) {
415 			out_buf_size -= sg_dma_len(sg);
416 			to->len[j] = htons(sg_dma_len(sg));
417 			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
418 			sg = sg_next(sg);
419 		}
420 	}
421 	if (out_buf_size) {
422 		j--;
423 		to--;
424 		to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
425 	}
426 }
427 
428 static inline int map_writesg_phys_cpl(struct device *dev,
429 					struct cpl_rx_phys_dsgl *phys_cpl,
430 					struct scatterlist *sg,
431 					struct phys_sge_parm *sg_param)
432 {
433 	if (!sg || !sg_param->nents)
434 		return 0;
435 
436 	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
437 	if (sg_param->nents == 0) {
438 		pr_err("CHCR : DMA mapping failed\n");
439 		return -EINVAL;
440 	}
441 	write_phys_cpl(phys_cpl, sg, sg_param);
442 	return 0;
443 }
444 
445 static inline int get_aead_subtype(struct crypto_aead *aead)
446 {
447 	struct aead_alg *alg = crypto_aead_alg(aead);
448 	struct chcr_alg_template *chcr_crypto_alg =
449 		container_of(alg, struct chcr_alg_template, alg.aead);
450 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
451 }
452 
453 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
454 {
455 	struct crypto_alg *alg = tfm->__crt_alg;
456 	struct chcr_alg_template *chcr_crypto_alg =
457 		container_of(alg, struct chcr_alg_template, alg.crypto);
458 
459 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
460 }
461 
462 static inline void write_buffer_to_skb(struct sk_buff *skb,
463 					unsigned int *frags,
464 					char *bfr,
465 					u8 bfr_len)
466 {
467 	skb->len += bfr_len;
468 	skb->data_len += bfr_len;
469 	skb->truesize += bfr_len;
470 	get_page(virt_to_page(bfr));
471 	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
472 			   offset_in_page(bfr), bfr_len);
473 	(*frags)++;
474 }
475 
476 
477 static inline void
478 write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
479 			struct scatterlist *sg, unsigned int count)
480 {
481 	struct page *spage;
482 	unsigned int page_len;
483 
484 	skb->len += count;
485 	skb->data_len += count;
486 	skb->truesize += count;
487 
488 	while (count > 0) {
489 		if (!sg || (!(sg->length)))
490 			break;
491 		spage = sg_page(sg);
492 		get_page(spage);
493 		page_len = min(sg->length, count);
494 		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
495 		(*frags)++;
496 		count -= page_len;
497 		sg = sg_next(sg);
498 	}
499 }
500 
501 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
502 			       struct _key_ctx *key_ctx)
503 {
504 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
505 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
506 	} else {
507 		memcpy(key_ctx->key,
508 		       ablkctx->key + (ablkctx->enckey_len >> 1),
509 		       ablkctx->enckey_len >> 1);
510 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
511 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
512 	}
513 	return 0;
514 }
515 
516 static inline void create_wreq(struct chcr_context *ctx,
517 			       struct chcr_wr *chcr_req,
518 			       void *req, struct sk_buff *skb,
519 			       int kctx_len, int hash_sz,
520 			       int is_iv,
521 			       unsigned int sc_len)
522 {
523 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
524 	int iv_loc = IV_DSGL;
525 	int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
526 	unsigned int immdatalen = 0, nr_frags = 0;
527 
528 	if (is_ofld_imm(skb)) {
529 		immdatalen = skb->data_len;
530 		iv_loc = IV_IMMEDIATE;
531 	} else {
532 		nr_frags = skb_shinfo(skb)->nr_frags;
533 	}
534 
535 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
536 				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
537 	chcr_req->wreq.pld_size_hash_size =
538 		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
539 		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
540 	chcr_req->wreq.len16_pkd =
541 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
542 				    (calc_tx_flits_ofld(skb) * 8), 16)));
543 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
544 	chcr_req->wreq.rx_chid_to_rx_q_id =
545 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
546 				is_iv ? iv_loc : IV_NOP, ctx->tx_channel_id);
547 
548 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
549 						       qid);
550 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
551 					16) - ((sizeof(chcr_req->wreq)) >> 4)));
552 
553 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
554 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
555 				   sizeof(chcr_req->key_ctx) +
556 				   kctx_len + sc_len + immdatalen);
557 }
558 
559 /**
560  *	create_cipher_wr - form the WR for cipher operations
561  *	@req: cipher req.
562  *	@ctx: crypto driver context of the request.
563  *	@qid: ingress qid where response of this WR should be received.
564  *	@op_type:	encryption or decryption
565  */
566 static struct sk_buff
567 *create_cipher_wr(struct ablkcipher_request *req,
568 		  unsigned short qid,
569 		  unsigned short op_type)
570 {
571 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
572 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
573 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
574 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
575 	struct sk_buff *skb = NULL;
576 	struct chcr_wr *chcr_req;
577 	struct cpl_rx_phys_dsgl *phys_cpl;
578 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
579 	struct phys_sge_parm sg_param;
580 	unsigned int frags = 0, transhdr_len, phys_dsgl;
581 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
582 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
583 			GFP_ATOMIC;
584 
585 	if (!req->info)
586 		return ERR_PTR(-EINVAL);
587 	reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
588 	if (reqctx->dst_nents <= 0) {
589 		pr_err("AES:Invalid Destination sg lists\n");
590 		return ERR_PTR(-EINVAL);
591 	}
592 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
593 	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
594 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
595 		       ablkctx->enckey_len, req->nbytes, ivsize);
596 		return ERR_PTR(-EINVAL);
597 	}
598 
599 	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
600 
601 	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
602 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
603 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
604 	if (!skb)
605 		return ERR_PTR(-ENOMEM);
606 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
607 	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
608 	memset(chcr_req, 0, transhdr_len);
609 	chcr_req->sec_cpl.op_ivinsrtofst =
610 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
611 
612 	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
613 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
614 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
615 
616 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
617 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
618 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
619 							 ablkctx->ciph_mode,
620 							 0, 0, ivsize >> 1);
621 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
622 							  0, 1, phys_dsgl);
623 
624 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
625 	if (op_type == CHCR_DECRYPT_OP) {
626 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
627 	} else {
628 		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
629 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
630 			       ablkctx->enckey_len);
631 		} else {
632 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
633 			       (ablkctx->enckey_len >> 1),
634 			       ablkctx->enckey_len >> 1);
635 			memcpy(chcr_req->key_ctx.key +
636 			       (ablkctx->enckey_len >> 1),
637 			       ablkctx->key,
638 			       ablkctx->enckey_len >> 1);
639 		}
640 	}
641 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
642 	sg_param.nents = reqctx->dst_nents;
643 	sg_param.obsize = req->nbytes;
644 	sg_param.qid = qid;
645 	sg_param.align = 1;
646 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
647 				 &sg_param))
648 		goto map_fail1;
649 
650 	skb_set_transport_header(skb, transhdr_len);
651 	memcpy(reqctx->iv, req->info, ivsize);
652 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
653 	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
654 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
655 			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
656 	reqctx->skb = skb;
657 	skb_get(skb);
658 	return skb;
659 map_fail1:
660 	kfree_skb(skb);
661 	return ERR_PTR(-ENOMEM);
662 }
663 
664 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
665 			       unsigned int keylen)
666 {
667 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
668 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
669 	unsigned int ck_size, context_size;
670 	u16 alignment = 0;
671 
672 	if (keylen == AES_KEYSIZE_128) {
673 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
674 	} else if (keylen == AES_KEYSIZE_192) {
675 		alignment = 8;
676 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
677 	} else if (keylen == AES_KEYSIZE_256) {
678 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
679 	} else {
680 		goto badkey_err;
681 	}
682 	memcpy(ablkctx->key, key, keylen);
683 	ablkctx->enckey_len = keylen;
684 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
685 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
686 			keylen + alignment) >> 4;
687 
688 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
689 						0, 0, context_size);
690 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
691 	return 0;
692 badkey_err:
693 	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
694 	ablkctx->enckey_len = 0;
695 	return -EINVAL;
696 }
697 
698 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
699 {
700 	struct adapter *adap = netdev2adap(dev);
701 	struct sge_uld_txq_info *txq_info =
702 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
703 	struct sge_uld_txq *txq;
704 	int ret = 0;
705 
706 	local_bh_disable();
707 	txq = &txq_info->uldtxq[idx];
708 	spin_lock(&txq->sendq.lock);
709 	if (txq->full)
710 		ret = -1;
711 	spin_unlock(&txq->sendq.lock);
712 	local_bh_enable();
713 	return ret;
714 }
715 
716 static int chcr_aes_encrypt(struct ablkcipher_request *req)
717 {
718 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
719 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
720 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
721 	struct sk_buff *skb;
722 
723 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
724 					    ctx->tx_channel_id))) {
725 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
726 			return -EBUSY;
727 	}
728 
729 	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
730 			       CHCR_ENCRYPT_OP);
731 	if (IS_ERR(skb)) {
732 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
733 		return  PTR_ERR(skb);
734 	}
735 	skb->dev = u_ctx->lldi.ports[0];
736 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
737 	chcr_send_wr(skb);
738 	return -EINPROGRESS;
739 }
740 
741 static int chcr_aes_decrypt(struct ablkcipher_request *req)
742 {
743 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
744 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
745 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
746 	struct sk_buff *skb;
747 
748 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
749 					    ctx->tx_channel_id))) {
750 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
751 			return -EBUSY;
752 	}
753 
754 	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
755 			       CHCR_DECRYPT_OP);
756 	if (IS_ERR(skb)) {
757 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
758 		return PTR_ERR(skb);
759 	}
760 	skb->dev = u_ctx->lldi.ports[0];
761 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
762 	chcr_send_wr(skb);
763 	return -EINPROGRESS;
764 }
765 
766 static int chcr_device_init(struct chcr_context *ctx)
767 {
768 	struct uld_ctx *u_ctx;
769 	unsigned int id;
770 	int err = 0, rxq_perchan, rxq_idx;
771 
772 	id = smp_processor_id();
773 	if (!ctx->dev) {
774 		err = assign_chcr_device(&ctx->dev);
775 		if (err) {
776 			pr_err("chcr device assignment fails\n");
777 			goto out;
778 		}
779 		u_ctx = ULD_CTX(ctx);
780 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
781 		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
782 		rxq_idx += id % rxq_perchan;
783 		spin_lock(&ctx->dev->lock_chcr_dev);
784 		ctx->tx_channel_id = rxq_idx;
785 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
786 		ctx->dev->rx_channel_id = 0;
787 		spin_unlock(&ctx->dev->lock_chcr_dev);
788 	}
789 out:
790 	return err;
791 }
792 
793 static int chcr_cra_init(struct crypto_tfm *tfm)
794 {
795 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
796 	return chcr_device_init(crypto_tfm_ctx(tfm));
797 }
798 
799 static int get_alg_config(struct algo_param *params,
800 			  unsigned int auth_size)
801 {
802 	switch (auth_size) {
803 	case SHA1_DIGEST_SIZE:
804 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
805 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
806 		params->result_size = SHA1_DIGEST_SIZE;
807 		break;
808 	case SHA224_DIGEST_SIZE:
809 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
810 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
811 		params->result_size = SHA256_DIGEST_SIZE;
812 		break;
813 	case SHA256_DIGEST_SIZE:
814 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
815 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
816 		params->result_size = SHA256_DIGEST_SIZE;
817 		break;
818 	case SHA384_DIGEST_SIZE:
819 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
820 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
821 		params->result_size = SHA512_DIGEST_SIZE;
822 		break;
823 	case SHA512_DIGEST_SIZE:
824 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
825 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
826 		params->result_size = SHA512_DIGEST_SIZE;
827 		break;
828 	default:
829 		pr_err("chcr : ERROR, unsupported digest size\n");
830 		return -EINVAL;
831 	}
832 	return 0;
833 }
834 
835 static inline void chcr_free_shash(struct crypto_shash *base_hash)
836 {
837 		crypto_free_shash(base_hash);
838 }
839 
840 /**
841  *	create_hash_wr - Create hash work request
842  *	@req - Cipher req base
843  */
844 static struct sk_buff *create_hash_wr(struct ahash_request *req,
845 				      struct hash_wr_param *param)
846 {
847 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
848 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
849 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
850 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
851 	struct sk_buff *skb = NULL;
852 	struct chcr_wr *chcr_req;
853 	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
854 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
855 	unsigned int kctx_len = 0;
856 	u8 hash_size_in_response = 0;
857 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
858 		GFP_ATOMIC;
859 
860 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
861 	kctx_len = param->alg_prm.result_size + iopad_alignment;
862 	if (param->opad_needed)
863 		kctx_len += param->alg_prm.result_size + iopad_alignment;
864 
865 	if (req_ctx->result)
866 		hash_size_in_response = digestsize;
867 	else
868 		hash_size_in_response = param->alg_prm.result_size;
869 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
870 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
871 	if (!skb)
872 		return skb;
873 
874 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
875 	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
876 	memset(chcr_req, 0, transhdr_len);
877 
878 	chcr_req->sec_cpl.op_ivinsrtofst =
879 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
880 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
881 
882 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
883 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
884 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
885 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
886 	chcr_req->sec_cpl.seqno_numivs =
887 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
888 					 param->opad_needed, 0);
889 
890 	chcr_req->sec_cpl.ivgen_hdrlen =
891 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
892 
893 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
894 	       param->alg_prm.result_size);
895 
896 	if (param->opad_needed)
897 		memcpy(chcr_req->key_ctx.key +
898 		       ((param->alg_prm.result_size <= 32) ? 32 :
899 			CHCR_HASH_MAX_DIGEST_SIZE),
900 		       hmacctx->opad, param->alg_prm.result_size);
901 
902 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
903 					    param->alg_prm.mk_size, 0,
904 					    param->opad_needed,
905 					    ((kctx_len +
906 					     sizeof(chcr_req->key_ctx)) >> 4));
907 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
908 
909 	skb_set_transport_header(skb, transhdr_len);
910 	if (param->bfr_len != 0)
911 		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
912 				    param->bfr_len);
913 	if (param->sg_len != 0)
914 		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
915 
916 	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
917 			DUMMY_BYTES);
918 	req_ctx->skb = skb;
919 	skb_get(skb);
920 	return skb;
921 }
922 
923 static int chcr_ahash_update(struct ahash_request *req)
924 {
925 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
926 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
927 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
928 	struct uld_ctx *u_ctx = NULL;
929 	struct sk_buff *skb;
930 	u8 remainder = 0, bs;
931 	unsigned int nbytes = req->nbytes;
932 	struct hash_wr_param params;
933 
934 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
935 
936 	u_ctx = ULD_CTX(ctx);
937 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
938 					    ctx->tx_channel_id))) {
939 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
940 			return -EBUSY;
941 	}
942 
943 	if (nbytes + req_ctx->reqlen >= bs) {
944 		remainder = (nbytes + req_ctx->reqlen) % bs;
945 		nbytes = nbytes + req_ctx->reqlen - remainder;
946 	} else {
947 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
948 				   + req_ctx->reqlen, nbytes, 0);
949 		req_ctx->reqlen += nbytes;
950 		return 0;
951 	}
952 
953 	params.opad_needed = 0;
954 	params.more = 1;
955 	params.last = 0;
956 	params.sg_len = nbytes - req_ctx->reqlen;
957 	params.bfr_len = req_ctx->reqlen;
958 	params.scmd1 = 0;
959 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
960 	req_ctx->result = 0;
961 	req_ctx->data_len += params.sg_len + params.bfr_len;
962 	skb = create_hash_wr(req, &params);
963 	if (!skb)
964 		return -ENOMEM;
965 
966 	if (remainder) {
967 		u8 *temp;
968 		/* Swap buffers */
969 		temp = req_ctx->reqbfr;
970 		req_ctx->reqbfr = req_ctx->skbfr;
971 		req_ctx->skbfr = temp;
972 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
973 				   req_ctx->reqbfr, remainder, req->nbytes -
974 				   remainder);
975 	}
976 	req_ctx->reqlen = remainder;
977 	skb->dev = u_ctx->lldi.ports[0];
978 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
979 	chcr_send_wr(skb);
980 
981 	return -EINPROGRESS;
982 }
983 
984 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
985 {
986 	memset(bfr_ptr, 0, bs);
987 	*bfr_ptr = 0x80;
988 	if (bs == 64)
989 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
990 	else
991 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
992 }
993 
994 static int chcr_ahash_final(struct ahash_request *req)
995 {
996 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
997 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
998 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
999 	struct hash_wr_param params;
1000 	struct sk_buff *skb;
1001 	struct uld_ctx *u_ctx = NULL;
1002 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1003 
1004 	u_ctx = ULD_CTX(ctx);
1005 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1006 		params.opad_needed = 1;
1007 	else
1008 		params.opad_needed = 0;
1009 	params.sg_len = 0;
1010 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1011 	req_ctx->result = 1;
1012 	params.bfr_len = req_ctx->reqlen;
1013 	req_ctx->data_len += params.bfr_len + params.sg_len;
1014 	if (req_ctx->reqlen == 0) {
1015 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1016 		params.last = 0;
1017 		params.more = 1;
1018 		params.scmd1 = 0;
1019 		params.bfr_len = bs;
1020 
1021 	} else {
1022 		params.scmd1 = req_ctx->data_len;
1023 		params.last = 1;
1024 		params.more = 0;
1025 	}
1026 	skb = create_hash_wr(req, &params);
1027 	if (!skb)
1028 		return -ENOMEM;
1029 
1030 	skb->dev = u_ctx->lldi.ports[0];
1031 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
1032 	chcr_send_wr(skb);
1033 	return -EINPROGRESS;
1034 }
1035 
1036 static int chcr_ahash_finup(struct ahash_request *req)
1037 {
1038 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1039 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1040 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1041 	struct uld_ctx *u_ctx = NULL;
1042 	struct sk_buff *skb;
1043 	struct hash_wr_param params;
1044 	u8  bs;
1045 
1046 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1047 	u_ctx = ULD_CTX(ctx);
1048 
1049 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1050 					    ctx->tx_channel_id))) {
1051 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1052 			return -EBUSY;
1053 	}
1054 
1055 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1056 		params.opad_needed = 1;
1057 	else
1058 		params.opad_needed = 0;
1059 
1060 	params.sg_len = req->nbytes;
1061 	params.bfr_len = req_ctx->reqlen;
1062 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1063 	req_ctx->data_len += params.bfr_len + params.sg_len;
1064 	req_ctx->result = 1;
1065 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1066 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1067 		params.last = 0;
1068 		params.more = 1;
1069 		params.scmd1 = 0;
1070 		params.bfr_len = bs;
1071 	} else {
1072 		params.scmd1 = req_ctx->data_len;
1073 		params.last = 1;
1074 		params.more = 0;
1075 	}
1076 
1077 	skb = create_hash_wr(req, &params);
1078 	if (!skb)
1079 		return -ENOMEM;
1080 
1081 	skb->dev = u_ctx->lldi.ports[0];
1082 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
1083 	chcr_send_wr(skb);
1084 
1085 	return -EINPROGRESS;
1086 }
1087 
1088 static int chcr_ahash_digest(struct ahash_request *req)
1089 {
1090 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1091 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1092 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1093 	struct uld_ctx *u_ctx = NULL;
1094 	struct sk_buff *skb;
1095 	struct hash_wr_param params;
1096 	u8  bs;
1097 
1098 	rtfm->init(req);
1099 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1100 
1101 	u_ctx = ULD_CTX(ctx);
1102 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1103 					    ctx->tx_channel_id))) {
1104 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1105 			return -EBUSY;
1106 	}
1107 
1108 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1109 		params.opad_needed = 1;
1110 	else
1111 		params.opad_needed = 0;
1112 
1113 	params.last = 0;
1114 	params.more = 0;
1115 	params.sg_len = req->nbytes;
1116 	params.bfr_len = 0;
1117 	params.scmd1 = 0;
1118 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1119 	req_ctx->result = 1;
1120 	req_ctx->data_len += params.bfr_len + params.sg_len;
1121 
1122 	if (req->nbytes == 0) {
1123 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1124 		params.more = 1;
1125 		params.bfr_len = bs;
1126 	}
1127 
1128 	skb = create_hash_wr(req, &params);
1129 	if (!skb)
1130 		return -ENOMEM;
1131 
1132 	skb->dev = u_ctx->lldi.ports[0];
1133 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
1134 	chcr_send_wr(skb);
1135 	return -EINPROGRESS;
1136 }
1137 
1138 static int chcr_ahash_export(struct ahash_request *areq, void *out)
1139 {
1140 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1141 	struct chcr_ahash_req_ctx *state = out;
1142 
1143 	state->reqlen = req_ctx->reqlen;
1144 	state->data_len = req_ctx->data_len;
1145 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1146 	memcpy(state->partial_hash, req_ctx->partial_hash,
1147 	       CHCR_HASH_MAX_DIGEST_SIZE);
1148 		return 0;
1149 }
1150 
1151 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1152 {
1153 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1154 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1155 
1156 	req_ctx->reqlen = state->reqlen;
1157 	req_ctx->data_len = state->data_len;
1158 	req_ctx->reqbfr = req_ctx->bfr1;
1159 	req_ctx->skbfr = req_ctx->bfr2;
1160 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1161 	memcpy(req_ctx->partial_hash, state->partial_hash,
1162 	       CHCR_HASH_MAX_DIGEST_SIZE);
1163 	return 0;
1164 }
1165 
1166 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1167 			     unsigned int keylen)
1168 {
1169 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1170 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1171 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1172 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1173 	unsigned int i, err = 0, updated_digestsize;
1174 
1175 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1176 
1177 	/* use the key to calculate the ipad and opad. ipad will sent with the
1178 	 * first request's data. opad will be sent with the final hash result
1179 	 * ipad in hmacctx->ipad and opad in hmacctx->opad location
1180 	 */
1181 	shash->tfm = hmacctx->base_hash;
1182 	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
1183 	if (keylen > bs) {
1184 		err = crypto_shash_digest(shash, key, keylen,
1185 					  hmacctx->ipad);
1186 		if (err)
1187 			goto out;
1188 		keylen = digestsize;
1189 	} else {
1190 		memcpy(hmacctx->ipad, key, keylen);
1191 	}
1192 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
1193 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
1194 
1195 	for (i = 0; i < bs / sizeof(int); i++) {
1196 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1197 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1198 	}
1199 
1200 	updated_digestsize = digestsize;
1201 	if (digestsize == SHA224_DIGEST_SIZE)
1202 		updated_digestsize = SHA256_DIGEST_SIZE;
1203 	else if (digestsize == SHA384_DIGEST_SIZE)
1204 		updated_digestsize = SHA512_DIGEST_SIZE;
1205 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
1206 					hmacctx->ipad, digestsize);
1207 	if (err)
1208 		goto out;
1209 	chcr_change_order(hmacctx->ipad, updated_digestsize);
1210 
1211 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
1212 					hmacctx->opad, digestsize);
1213 	if (err)
1214 		goto out;
1215 	chcr_change_order(hmacctx->opad, updated_digestsize);
1216 out:
1217 	return err;
1218 }
1219 
1220 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1221 			       unsigned int key_len)
1222 {
1223 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1224 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1225 	unsigned short context_size = 0;
1226 
1227 	if ((key_len != (AES_KEYSIZE_128 << 1)) &&
1228 	    (key_len != (AES_KEYSIZE_256 << 1))) {
1229 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
1230 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
1231 		ablkctx->enckey_len = 0;
1232 		return -EINVAL;
1233 
1234 	}
1235 
1236 	memcpy(ablkctx->key, key, key_len);
1237 	ablkctx->enckey_len = key_len;
1238 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1239 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1240 	ablkctx->key_ctx_hdr =
1241 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1242 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1243 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1244 				 CHCR_KEYCTX_NO_KEY, 1,
1245 				 0, context_size);
1246 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1247 	return 0;
1248 }
1249 
1250 static int chcr_sha_init(struct ahash_request *areq)
1251 {
1252 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1253 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1254 	int digestsize =  crypto_ahash_digestsize(tfm);
1255 
1256 	req_ctx->data_len = 0;
1257 	req_ctx->reqlen = 0;
1258 	req_ctx->reqbfr = req_ctx->bfr1;
1259 	req_ctx->skbfr = req_ctx->bfr2;
1260 	req_ctx->skb = NULL;
1261 	req_ctx->result = 0;
1262 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
1263 	return 0;
1264 }
1265 
1266 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1267 {
1268 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1269 				 sizeof(struct chcr_ahash_req_ctx));
1270 	return chcr_device_init(crypto_tfm_ctx(tfm));
1271 }
1272 
1273 static int chcr_hmac_init(struct ahash_request *areq)
1274 {
1275 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1276 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1277 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1278 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1279 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1280 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1281 
1282 	chcr_sha_init(areq);
1283 	req_ctx->data_len = bs;
1284 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1285 		if (digestsize == SHA224_DIGEST_SIZE)
1286 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1287 			       SHA256_DIGEST_SIZE);
1288 		else if (digestsize == SHA384_DIGEST_SIZE)
1289 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1290 			       SHA512_DIGEST_SIZE);
1291 		else
1292 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1293 			       digestsize);
1294 	}
1295 	return 0;
1296 }
1297 
1298 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1299 {
1300 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1301 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1302 	unsigned int digestsize =
1303 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1304 
1305 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1306 				 sizeof(struct chcr_ahash_req_ctx));
1307 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
1308 	if (IS_ERR(hmacctx->base_hash))
1309 		return PTR_ERR(hmacctx->base_hash);
1310 	return chcr_device_init(crypto_tfm_ctx(tfm));
1311 }
1312 
1313 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
1314 {
1315 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1316 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1317 
1318 	if (hmacctx->base_hash) {
1319 		chcr_free_shash(hmacctx->base_hash);
1320 		hmacctx->base_hash = NULL;
1321 	}
1322 }
1323 
1324 static int chcr_copy_assoc(struct aead_request *req,
1325 				struct chcr_aead_ctx *ctx)
1326 {
1327 	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
1328 
1329 	skcipher_request_set_tfm(skreq, ctx->null);
1330 	skcipher_request_set_callback(skreq, aead_request_flags(req),
1331 			NULL, NULL);
1332 	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
1333 			NULL);
1334 
1335 	return crypto_skcipher_encrypt(skreq);
1336 }
1337 
1338 static unsigned char get_hmac(unsigned int authsize)
1339 {
1340 	switch (authsize) {
1341 	case ICV_8:
1342 		return CHCR_SCMD_HMAC_CTRL_PL1;
1343 	case ICV_10:
1344 		return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
1345 	case ICV_12:
1346 		return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
1347 	}
1348 	return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
1349 }
1350 
1351 
1352 static struct sk_buff *create_authenc_wr(struct aead_request *req,
1353 					 unsigned short qid,
1354 					 int size,
1355 					 unsigned short op_type)
1356 {
1357 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1358 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1359 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1360 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1361 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
1362 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1363 	struct sk_buff *skb = NULL;
1364 	struct chcr_wr *chcr_req;
1365 	struct cpl_rx_phys_dsgl *phys_cpl;
1366 	struct phys_sge_parm sg_param;
1367 	struct scatterlist *src;
1368 	unsigned int frags = 0, transhdr_len;
1369 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
1370 	unsigned int   kctx_len = 0;
1371 	unsigned short stop_offset = 0;
1372 	unsigned int  assoclen = req->assoclen;
1373 	unsigned int  authsize = crypto_aead_authsize(tfm);
1374 	int err = 0;
1375 	int null = 0;
1376 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1377 		GFP_ATOMIC;
1378 
1379 	if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
1380 		goto err;
1381 
1382 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1383 		goto err;
1384 
1385 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
1386 		goto err;
1387 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1388 	reqctx->dst = src;
1389 
1390 	if (req->src != req->dst) {
1391 		err = chcr_copy_assoc(req, aeadctx);
1392 		if (err)
1393 			return ERR_PTR(err);
1394 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1395 					       req->assoclen);
1396 	}
1397 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
1398 		null = 1;
1399 		assoclen = 0;
1400 	}
1401 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1402 					     (op_type ? -authsize : authsize));
1403 	if (reqctx->dst_nents <= 0) {
1404 		pr_err("AUTHENC:Invalid Destination sg entries\n");
1405 		goto err;
1406 	}
1407 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1408 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
1409 		- sizeof(chcr_req->key_ctx);
1410 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1411 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1412 	if (!skb)
1413 		goto err;
1414 
1415 	/* LLD is going to write the sge hdr. */
1416 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1417 
1418 	/* Write WR */
1419 	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
1420 	memset(chcr_req, 0, transhdr_len);
1421 
1422 	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
1423 
1424 	/*
1425 	 * Input order	is AAD,IV and Payload. where IV should be included as
1426 	 * the part of authdata. All other fields should be filled according
1427 	 * to the hardware spec
1428 	 */
1429 	chcr_req->sec_cpl.op_ivinsrtofst =
1430 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
1431 				       (ivsize ? (assoclen + 1) : 0));
1432 	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
1433 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1434 					assoclen ? 1 : 0, assoclen,
1435 					assoclen + ivsize + 1,
1436 					(stop_offset & 0x1F0) >> 4);
1437 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
1438 					stop_offset & 0xF,
1439 					null ? 0 : assoclen + ivsize + 1,
1440 					stop_offset, stop_offset);
1441 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1442 					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
1443 					CHCR_SCMD_CIPHER_MODE_AES_CBC,
1444 					actx->auth_mode, aeadctx->hmac_ctrl,
1445 					ivsize >> 1);
1446 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
1447 					 0, 1, dst_size);
1448 
1449 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1450 	if (op_type == CHCR_ENCRYPT_OP)
1451 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
1452 		       aeadctx->enckey_len);
1453 	else
1454 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
1455 		       aeadctx->enckey_len);
1456 
1457 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
1458 					4), actx->h_iopad, kctx_len -
1459 				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
1460 
1461 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1462 	sg_param.nents = reqctx->dst_nents;
1463 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1464 	sg_param.qid = qid;
1465 	sg_param.align = 0;
1466 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1467 				  &sg_param))
1468 		goto dstmap_fail;
1469 
1470 	skb_set_transport_header(skb, transhdr_len);
1471 
1472 	if (assoclen) {
1473 		/* AAD buffer in */
1474 		write_sg_to_skb(skb, &frags, req->src, assoclen);
1475 
1476 	}
1477 	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
1478 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
1479 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
1480 		   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1481 	reqctx->skb = skb;
1482 	skb_get(skb);
1483 
1484 	return skb;
1485 dstmap_fail:
1486 	/* ivmap_fail: */
1487 	kfree_skb(skb);
1488 err:
1489 	return ERR_PTR(-EINVAL);
1490 }
1491 
1492 static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
1493 				  unsigned short offset)
1494 {
1495 	struct page *spage;
1496 	unsigned char *addr;
1497 
1498 	spage = sg_page(sg);
1499 	get_page(spage); /* so that it is not freed by NIC */
1500 #ifdef KMAP_ATOMIC_ARGS
1501 	addr = kmap_atomic(spage, KM_SOFTIRQ0);
1502 #else
1503 	addr = kmap_atomic(spage);
1504 #endif
1505 	memset(addr + sg->offset, 0, offset + 1);
1506 
1507 	kunmap_atomic(addr);
1508 }
1509 
1510 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
1511 {
1512 	__be32 data;
1513 
1514 	memset(block, 0, csize);
1515 	block += csize;
1516 
1517 	if (csize >= 4)
1518 		csize = 4;
1519 	else if (msglen > (unsigned int)(1 << (8 * csize)))
1520 		return -EOVERFLOW;
1521 
1522 	data = cpu_to_be32(msglen);
1523 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1524 
1525 	return 0;
1526 }
1527 
1528 static void generate_b0(struct aead_request *req,
1529 			struct chcr_aead_ctx *aeadctx,
1530 			unsigned short op_type)
1531 {
1532 	unsigned int l, lp, m;
1533 	int rc;
1534 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1535 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1536 	u8 *b0 = reqctx->scratch_pad;
1537 
1538 	m = crypto_aead_authsize(aead);
1539 
1540 	memcpy(b0, reqctx->iv, 16);
1541 
1542 	lp = b0[0];
1543 	l = lp + 1;
1544 
1545 	/* set m, bits 3-5 */
1546 	*b0 |= (8 * ((m - 2) / 2));
1547 
1548 	/* set adata, bit 6, if associated data is used */
1549 	if (req->assoclen)
1550 		*b0 |= 64;
1551 	rc = set_msg_len(b0 + 16 - l,
1552 			 (op_type == CHCR_DECRYPT_OP) ?
1553 			 req->cryptlen - m : req->cryptlen, l);
1554 }
1555 
1556 static inline int crypto_ccm_check_iv(const u8 *iv)
1557 {
1558 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
1559 	if (iv[0] < 1 || iv[0] > 7)
1560 		return -EINVAL;
1561 
1562 	return 0;
1563 }
1564 
1565 static int ccm_format_packet(struct aead_request *req,
1566 			     struct chcr_aead_ctx *aeadctx,
1567 			     unsigned int sub_type,
1568 			     unsigned short op_type)
1569 {
1570 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1571 	int rc = 0;
1572 
1573 	if (req->assoclen > T5_MAX_AAD_SIZE) {
1574 		pr_err("CCM: Unsupported AAD data. It should be < %d\n",
1575 		       T5_MAX_AAD_SIZE);
1576 		return -EINVAL;
1577 	}
1578 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
1579 		reqctx->iv[0] = 3;
1580 		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
1581 		memcpy(reqctx->iv + 4, req->iv, 8);
1582 		memset(reqctx->iv + 12, 0, 4);
1583 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
1584 			htons(req->assoclen - 8);
1585 	} else {
1586 		memcpy(reqctx->iv, req->iv, 16);
1587 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
1588 			htons(req->assoclen);
1589 	}
1590 	generate_b0(req, aeadctx, op_type);
1591 	/* zero the ctr value */
1592 	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
1593 	return rc;
1594 }
1595 
1596 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
1597 				  unsigned int dst_size,
1598 				  struct aead_request *req,
1599 				  unsigned short op_type,
1600 					  struct chcr_context *chcrctx)
1601 {
1602 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1603 	unsigned int ivsize = AES_BLOCK_SIZE;
1604 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
1605 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
1606 	unsigned int c_id = chcrctx->dev->rx_channel_id;
1607 	unsigned int ccm_xtra;
1608 	unsigned char tag_offset = 0, auth_offset = 0;
1609 	unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
1610 	unsigned int assoclen;
1611 
1612 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
1613 		assoclen = req->assoclen - 8;
1614 	else
1615 		assoclen = req->assoclen;
1616 	ccm_xtra = CCM_B0_SIZE +
1617 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
1618 
1619 	auth_offset = req->cryptlen ?
1620 		(assoclen + ivsize + 1 + ccm_xtra) : 0;
1621 	if (op_type == CHCR_DECRYPT_OP) {
1622 		if (crypto_aead_authsize(tfm) != req->cryptlen)
1623 			tag_offset = crypto_aead_authsize(tfm);
1624 		else
1625 			auth_offset = 0;
1626 	}
1627 
1628 
1629 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
1630 					 2, (ivsize ?  (assoclen + 1) :  0) +
1631 					 ccm_xtra);
1632 	sec_cpl->pldlen =
1633 		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
1634 	/* For CCM there wil be b0 always. So AAD start will be 1 always */
1635 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1636 					1, assoclen + ccm_xtra, assoclen
1637 					+ ivsize + 1 + ccm_xtra, 0);
1638 
1639 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
1640 					auth_offset, tag_offset,
1641 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
1642 					crypto_aead_authsize(tfm));
1643 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1644 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
1645 					cipher_mode, mac_mode, hmac_ctrl,
1646 					ivsize >> 1);
1647 
1648 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
1649 					1, dst_size);
1650 }
1651 
1652 int aead_ccm_validate_input(unsigned short op_type,
1653 			    struct aead_request *req,
1654 			    struct chcr_aead_ctx *aeadctx,
1655 			    unsigned int sub_type)
1656 {
1657 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
1658 		if (crypto_ccm_check_iv(req->iv)) {
1659 			pr_err("CCM: IV check fails\n");
1660 			return -EINVAL;
1661 		}
1662 	} else {
1663 		if (req->assoclen != 16 && req->assoclen != 20) {
1664 			pr_err("RFC4309: Invalid AAD length %d\n",
1665 			       req->assoclen);
1666 			return -EINVAL;
1667 		}
1668 	}
1669 	if (aeadctx->enckey_len == 0) {
1670 		pr_err("CCM: Encryption key not set\n");
1671 		return -EINVAL;
1672 	}
1673 	return 0;
1674 }
1675 
1676 unsigned int fill_aead_req_fields(struct sk_buff *skb,
1677 				  struct aead_request *req,
1678 				  struct scatterlist *src,
1679 				  unsigned int ivsize,
1680 				  struct chcr_aead_ctx *aeadctx)
1681 {
1682 	unsigned int frags = 0;
1683 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1684 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1685 	/* b0 and aad length(if available) */
1686 
1687 	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
1688 				(req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
1689 	if (req->assoclen) {
1690 		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
1691 			write_sg_to_skb(skb, &frags, req->src,
1692 					req->assoclen - 8);
1693 		else
1694 			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
1695 	}
1696 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
1697 	if (req->cryptlen)
1698 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
1699 
1700 	return frags;
1701 }
1702 
1703 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
1704 					  unsigned short qid,
1705 					  int size,
1706 					  unsigned short op_type)
1707 {
1708 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1709 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1710 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1711 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1712 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1713 	struct sk_buff *skb = NULL;
1714 	struct chcr_wr *chcr_req;
1715 	struct cpl_rx_phys_dsgl *phys_cpl;
1716 	struct phys_sge_parm sg_param;
1717 	struct scatterlist *src;
1718 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
1719 	unsigned int dst_size = 0, kctx_len;
1720 	unsigned int sub_type;
1721 	unsigned int authsize = crypto_aead_authsize(tfm);
1722 	int err = 0;
1723 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1724 		GFP_ATOMIC;
1725 
1726 
1727 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1728 		goto err;
1729 
1730 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
1731 		goto err;
1732 	sub_type = get_aead_subtype(tfm);
1733 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1734 	reqctx->dst = src;
1735 
1736 	if (req->src != req->dst) {
1737 		err = chcr_copy_assoc(req, aeadctx);
1738 		if (err) {
1739 			pr_err("AAD copy to destination buffer fails\n");
1740 			return ERR_PTR(err);
1741 		}
1742 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1743 					       req->assoclen);
1744 	}
1745 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1746 					     (op_type ? -authsize : authsize));
1747 	if (reqctx->dst_nents <= 0) {
1748 		pr_err("CCM:Invalid Destination sg entries\n");
1749 		goto err;
1750 	}
1751 
1752 
1753 	if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
1754 		goto err;
1755 
1756 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1757 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
1758 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1759 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
1760 
1761 	if (!skb)
1762 		goto err;
1763 
1764 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1765 
1766 	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
1767 	memset(chcr_req, 0, transhdr_len);
1768 
1769 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
1770 
1771 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1772 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
1773 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
1774 					16), aeadctx->key, aeadctx->enckey_len);
1775 
1776 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1777 	if (ccm_format_packet(req, aeadctx, sub_type, op_type))
1778 		goto dstmap_fail;
1779 
1780 	sg_param.nents = reqctx->dst_nents;
1781 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1782 	sg_param.qid = qid;
1783 	sg_param.align = 0;
1784 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1785 				  &sg_param))
1786 		goto dstmap_fail;
1787 
1788 	skb_set_transport_header(skb, transhdr_len);
1789 	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
1790 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
1791 		    sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1792 	reqctx->skb = skb;
1793 	skb_get(skb);
1794 	return skb;
1795 dstmap_fail:
1796 	kfree_skb(skb);
1797 	skb = NULL;
1798 err:
1799 	return ERR_PTR(-EINVAL);
1800 }
1801 
1802 static struct sk_buff *create_gcm_wr(struct aead_request *req,
1803 				     unsigned short qid,
1804 				     int size,
1805 				     unsigned short op_type)
1806 {
1807 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1808 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1809 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1810 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1811 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
1812 	struct sk_buff *skb = NULL;
1813 	struct chcr_wr *chcr_req;
1814 	struct cpl_rx_phys_dsgl *phys_cpl;
1815 	struct phys_sge_parm sg_param;
1816 	struct scatterlist *src;
1817 	unsigned int frags = 0, transhdr_len;
1818 	unsigned int ivsize = AES_BLOCK_SIZE;
1819 	unsigned int dst_size = 0, kctx_len;
1820 	unsigned char tag_offset = 0;
1821 	unsigned int crypt_len = 0;
1822 	unsigned int authsize = crypto_aead_authsize(tfm);
1823 	unsigned char hmac_ctrl = get_hmac(authsize);
1824 	int err = 0;
1825 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1826 		GFP_ATOMIC;
1827 
1828 	/* validate key size */
1829 	if (aeadctx->enckey_len == 0)
1830 		goto err;
1831 
1832 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1833 		goto err;
1834 
1835 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
1836 		goto err;
1837 
1838 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1839 	reqctx->dst = src;
1840 	if (req->src != req->dst) {
1841 		err = chcr_copy_assoc(req, aeadctx);
1842 		if (err)
1843 			return	ERR_PTR(err);
1844 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1845 					       req->assoclen);
1846 	}
1847 
1848 	if (!req->cryptlen)
1849 		/* null-payload is not supported in the hardware.
1850 		 * software is sending block size
1851 		 */
1852 		crypt_len = AES_BLOCK_SIZE;
1853 	else
1854 		crypt_len = req->cryptlen;
1855 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1856 					     (op_type ? -authsize : authsize));
1857 	if (reqctx->dst_nents <= 0) {
1858 		pr_err("GCM:Invalid Destination sg entries\n");
1859 		goto err;
1860 	}
1861 
1862 
1863 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1864 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
1865 		AEAD_H_SIZE;
1866 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1867 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1868 	if (!skb)
1869 		goto err;
1870 
1871 	/* NIC driver is going to write the sge hdr. */
1872 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1873 
1874 	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
1875 	memset(chcr_req, 0, transhdr_len);
1876 
1877 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
1878 		req->assoclen -= 8;
1879 
1880 	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
1881 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
1882 					ctx->dev->rx_channel_id, 2, (ivsize ?
1883 					(req->assoclen + 1) : 0));
1884 	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
1885 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1886 					req->assoclen ? 1 : 0, req->assoclen,
1887 					req->assoclen + ivsize + 1, 0);
1888 	if (req->cryptlen) {
1889 		chcr_req->sec_cpl.cipherstop_lo_authinsert =
1890 			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
1891 						tag_offset, tag_offset);
1892 		chcr_req->sec_cpl.seqno_numivs =
1893 			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
1894 					CHCR_ENCRYPT_OP) ? 1 : 0,
1895 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
1896 					CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
1897 					ivsize >> 1);
1898 	} else {
1899 		chcr_req->sec_cpl.cipherstop_lo_authinsert =
1900 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
1901 		chcr_req->sec_cpl.seqno_numivs =
1902 			FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1903 					(op_type ==  CHCR_ENCRYPT_OP) ?
1904 					1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
1905 					0, 0, ivsize >> 1);
1906 	}
1907 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
1908 					0, 1, dst_size);
1909 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1910 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
1911 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
1912 				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
1913 
1914 	/* prepare a 16 byte iv */
1915 	/* S   A   L  T |  IV | 0x00000001 */
1916 	if (get_aead_subtype(tfm) ==
1917 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
1918 		memcpy(reqctx->iv, aeadctx->salt, 4);
1919 		memcpy(reqctx->iv + 4, req->iv, 8);
1920 	} else {
1921 		memcpy(reqctx->iv, req->iv, 12);
1922 	}
1923 	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
1924 
1925 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1926 	sg_param.nents = reqctx->dst_nents;
1927 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1928 	sg_param.qid = qid;
1929 	sg_param.align = 0;
1930 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1931 				  &sg_param))
1932 		goto dstmap_fail;
1933 
1934 	skb_set_transport_header(skb, transhdr_len);
1935 
1936 	write_sg_to_skb(skb, &frags, req->src, req->assoclen);
1937 
1938 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
1939 
1940 	if (req->cryptlen) {
1941 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
1942 	} else {
1943 		aes_gcm_empty_pld_pad(req->dst, authsize - 1);
1944 		write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
1945 
1946 	}
1947 
1948 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
1949 			sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1950 	reqctx->skb = skb;
1951 	skb_get(skb);
1952 	return skb;
1953 
1954 dstmap_fail:
1955 	/* ivmap_fail: */
1956 	kfree_skb(skb);
1957 	skb = NULL;
1958 err:
1959 	return skb;
1960 }
1961 
1962 
1963 
1964 static int chcr_aead_cra_init(struct crypto_aead *tfm)
1965 {
1966 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1967 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1968 
1969 	crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
1970 	aeadctx->null = crypto_get_default_null_skcipher();
1971 	if (IS_ERR(aeadctx->null))
1972 		return PTR_ERR(aeadctx->null);
1973 	return chcr_device_init(ctx);
1974 }
1975 
1976 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
1977 {
1978 	crypto_put_default_null_skcipher();
1979 }
1980 
1981 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
1982 					unsigned int authsize)
1983 {
1984 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
1985 
1986 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
1987 	aeadctx->mayverify = VERIFY_HW;
1988 	return 0;
1989 }
1990 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
1991 				    unsigned int authsize)
1992 {
1993 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
1994 	u32 maxauth = crypto_aead_maxauthsize(tfm);
1995 
1996 	/*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
1997 	 * true for sha1. authsize == 12 condition should be before
1998 	 * authsize == (maxauth >> 1)
1999 	 */
2000 	if (authsize == ICV_4) {
2001 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2002 		aeadctx->mayverify = VERIFY_HW;
2003 	} else if (authsize == ICV_6) {
2004 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2005 		aeadctx->mayverify = VERIFY_HW;
2006 	} else if (authsize == ICV_10) {
2007 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2008 		aeadctx->mayverify = VERIFY_HW;
2009 	} else if (authsize == ICV_12) {
2010 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2011 		aeadctx->mayverify = VERIFY_HW;
2012 	} else if (authsize == ICV_14) {
2013 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2014 		aeadctx->mayverify = VERIFY_HW;
2015 	} else if (authsize == (maxauth >> 1)) {
2016 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2017 		aeadctx->mayverify = VERIFY_HW;
2018 	} else if (authsize == maxauth) {
2019 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2020 		aeadctx->mayverify = VERIFY_HW;
2021 	} else {
2022 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2023 		aeadctx->mayverify = VERIFY_SW;
2024 	}
2025 	return 0;
2026 }
2027 
2028 
2029 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2030 {
2031 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2032 
2033 	switch (authsize) {
2034 	case ICV_4:
2035 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2036 		aeadctx->mayverify = VERIFY_HW;
2037 		break;
2038 	case ICV_8:
2039 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2040 		aeadctx->mayverify = VERIFY_HW;
2041 		break;
2042 	case ICV_12:
2043 		 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2044 		 aeadctx->mayverify = VERIFY_HW;
2045 		break;
2046 	case ICV_14:
2047 		 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2048 		 aeadctx->mayverify = VERIFY_HW;
2049 		break;
2050 	case ICV_16:
2051 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2052 		aeadctx->mayverify = VERIFY_HW;
2053 		break;
2054 	case ICV_13:
2055 	case ICV_15:
2056 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2057 		aeadctx->mayverify = VERIFY_SW;
2058 		break;
2059 	default:
2060 
2061 		  crypto_tfm_set_flags((struct crypto_tfm *) tfm,
2062 			CRYPTO_TFM_RES_BAD_KEY_LEN);
2063 		return -EINVAL;
2064 	}
2065 	return 0;
2066 }
2067 
2068 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2069 					  unsigned int authsize)
2070 {
2071 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2072 
2073 	switch (authsize) {
2074 	case ICV_8:
2075 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2076 		aeadctx->mayverify = VERIFY_HW;
2077 		break;
2078 	case ICV_12:
2079 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2080 		aeadctx->mayverify = VERIFY_HW;
2081 		break;
2082 	case ICV_16:
2083 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2084 		aeadctx->mayverify = VERIFY_HW;
2085 		break;
2086 	default:
2087 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2088 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2089 		return -EINVAL;
2090 	}
2091 	return 0;
2092 }
2093 
2094 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
2095 				unsigned int authsize)
2096 {
2097 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2098 
2099 	switch (authsize) {
2100 	case ICV_4:
2101 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2102 		aeadctx->mayverify = VERIFY_HW;
2103 		break;
2104 	case ICV_6:
2105 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2106 		aeadctx->mayverify = VERIFY_HW;
2107 		break;
2108 	case ICV_8:
2109 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2110 		aeadctx->mayverify = VERIFY_HW;
2111 		break;
2112 	case ICV_10:
2113 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2114 		aeadctx->mayverify = VERIFY_HW;
2115 		break;
2116 	case ICV_12:
2117 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2118 		aeadctx->mayverify = VERIFY_HW;
2119 		break;
2120 	case ICV_14:
2121 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2122 		aeadctx->mayverify = VERIFY_HW;
2123 		break;
2124 	case ICV_16:
2125 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2126 		aeadctx->mayverify = VERIFY_HW;
2127 		break;
2128 	default:
2129 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2130 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2131 		return -EINVAL;
2132 	}
2133 	return 0;
2134 }
2135 
2136 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2137 				const u8 *key,
2138 				unsigned int keylen)
2139 {
2140 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2141 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2142 	unsigned char ck_size, mk_size;
2143 	int key_ctx_size = 0;
2144 
2145 	memcpy(aeadctx->key, key, keylen);
2146 	aeadctx->enckey_len = keylen;
2147 	key_ctx_size = sizeof(struct _key_ctx) +
2148 		((DIV_ROUND_UP(keylen, 16)) << 4)  * 2;
2149 	if (keylen == AES_KEYSIZE_128) {
2150 		mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2151 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2152 	} else if (keylen == AES_KEYSIZE_192) {
2153 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2154 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2155 	} else if (keylen == AES_KEYSIZE_256) {
2156 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2157 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2158 	} else {
2159 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2160 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2161 		aeadctx->enckey_len = 0;
2162 		return	-EINVAL;
2163 	}
2164 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
2165 						key_ctx_size >> 4);
2166 	return 0;
2167 }
2168 
2169 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
2170 				    unsigned int keylen)
2171 {
2172 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2173 	 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2174 
2175 	if (keylen < 3) {
2176 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2177 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2178 		aeadctx->enckey_len = 0;
2179 		return	-EINVAL;
2180 	}
2181 	keylen -= 3;
2182 	memcpy(aeadctx->salt, key + keylen, 3);
2183 	return chcr_aead_ccm_setkey(aead, key, keylen);
2184 }
2185 
2186 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2187 			   unsigned int keylen)
2188 {
2189 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2190 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2191 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
2192 	struct crypto_cipher *cipher;
2193 	unsigned int ck_size;
2194 	int ret = 0, key_ctx_size = 0;
2195 
2196 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2197 	    keylen > 3) {
2198 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
2199 		memcpy(aeadctx->salt, key + keylen, 4);
2200 	}
2201 	if (keylen == AES_KEYSIZE_128) {
2202 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2203 	} else if (keylen == AES_KEYSIZE_192) {
2204 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2205 	} else if (keylen == AES_KEYSIZE_256) {
2206 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2207 	} else {
2208 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2209 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2210 		aeadctx->enckey_len = 0;
2211 		pr_err("GCM: Invalid key length %d", keylen);
2212 		ret = -EINVAL;
2213 		goto out;
2214 	}
2215 
2216 	memcpy(aeadctx->key, key, keylen);
2217 	aeadctx->enckey_len = keylen;
2218 	key_ctx_size = sizeof(struct _key_ctx) +
2219 		((DIV_ROUND_UP(keylen, 16)) << 4) +
2220 		AEAD_H_SIZE;
2221 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
2222 						CHCR_KEYCTX_MAC_KEY_SIZE_128,
2223 						0, 0,
2224 						key_ctx_size >> 4);
2225 	/* Calculate the H = CIPH(K, 0 repeated 16 times).
2226 	 * It will go in key context
2227 	 */
2228 	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
2229 	if (IS_ERR(cipher)) {
2230 		aeadctx->enckey_len = 0;
2231 		ret = -ENOMEM;
2232 		goto out;
2233 	}
2234 
2235 	ret = crypto_cipher_setkey(cipher, key, keylen);
2236 	if (ret) {
2237 		aeadctx->enckey_len = 0;
2238 		goto out1;
2239 	}
2240 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
2241 	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
2242 
2243 out1:
2244 	crypto_free_cipher(cipher);
2245 out:
2246 	return ret;
2247 }
2248 
2249 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2250 				   unsigned int keylen)
2251 {
2252 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2253 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2254 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2255 	/* it contains auth and cipher key both*/
2256 	struct crypto_authenc_keys keys;
2257 	unsigned int bs;
2258 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
2259 	int err = 0, i, key_ctx_len = 0;
2260 	unsigned char ck_size = 0;
2261 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
2262 	struct crypto_shash *base_hash = NULL;
2263 	struct algo_param param;
2264 	int align;
2265 	u8 *o_ptr = NULL;
2266 
2267 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2268 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2269 		goto out;
2270 	}
2271 
2272 	if (get_alg_config(&param, max_authsize)) {
2273 		pr_err("chcr : Unsupported digest size\n");
2274 		goto out;
2275 	}
2276 	if (keys.enckeylen == AES_KEYSIZE_128) {
2277 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2278 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2279 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2280 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2281 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2282 	} else {
2283 		pr_err("chcr : Unsupported cipher key\n");
2284 		goto out;
2285 	}
2286 
2287 	/* Copy only encryption key. We use authkey to generate h(ipad) and
2288 	 * h(opad) so authkey is not needed again. authkeylen size have the
2289 	 * size of the hash digest size.
2290 	 */
2291 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2292 	aeadctx->enckey_len = keys.enckeylen;
2293 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2294 			    aeadctx->enckey_len << 3);
2295 
2296 	base_hash  = chcr_alloc_shash(max_authsize);
2297 	if (IS_ERR(base_hash)) {
2298 		pr_err("chcr : Base driver cannot be loaded\n");
2299 		goto out;
2300 	}
2301 	{
2302 		SHASH_DESC_ON_STACK(shash, base_hash);
2303 		shash->tfm = base_hash;
2304 		shash->flags = crypto_shash_get_flags(base_hash);
2305 		bs = crypto_shash_blocksize(base_hash);
2306 		align = KEYCTX_ALIGN_PAD(max_authsize);
2307 		o_ptr =  actx->h_iopad + param.result_size + align;
2308 
2309 		if (keys.authkeylen > bs) {
2310 			err = crypto_shash_digest(shash, keys.authkey,
2311 						  keys.authkeylen,
2312 						  o_ptr);
2313 			if (err) {
2314 				pr_err("chcr : Base driver cannot be loaded\n");
2315 				goto out;
2316 			}
2317 			keys.authkeylen = max_authsize;
2318 		} else
2319 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
2320 
2321 		/* Compute the ipad-digest*/
2322 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2323 		memcpy(pad, o_ptr, keys.authkeylen);
2324 		for (i = 0; i < bs >> 2; i++)
2325 			*((unsigned int *)pad + i) ^= IPAD_DATA;
2326 
2327 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
2328 					      max_authsize))
2329 			goto out;
2330 		/* Compute the opad-digest */
2331 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2332 		memcpy(pad, o_ptr, keys.authkeylen);
2333 		for (i = 0; i < bs >> 2; i++)
2334 			*((unsigned int *)pad + i) ^= OPAD_DATA;
2335 
2336 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
2337 			goto out;
2338 
2339 		/* convert the ipad and opad digest to network order */
2340 		chcr_change_order(actx->h_iopad, param.result_size);
2341 		chcr_change_order(o_ptr, param.result_size);
2342 		key_ctx_len = sizeof(struct _key_ctx) +
2343 			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
2344 			(param.result_size + align) * 2;
2345 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
2346 						0, 1, key_ctx_len >> 4);
2347 		actx->auth_mode = param.auth_mode;
2348 		chcr_free_shash(base_hash);
2349 
2350 		return 0;
2351 	}
2352 out:
2353 	aeadctx->enckey_len = 0;
2354 	if (base_hash)
2355 		chcr_free_shash(base_hash);
2356 	return -EINVAL;
2357 }
2358 
2359 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
2360 					const u8 *key, unsigned int keylen)
2361 {
2362 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2363 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2364 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2365 	struct crypto_authenc_keys keys;
2366 
2367 	/* it contains auth and cipher key both*/
2368 	int key_ctx_len = 0;
2369 	unsigned char ck_size = 0;
2370 
2371 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2372 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2373 		goto out;
2374 	}
2375 	if (keys.enckeylen == AES_KEYSIZE_128) {
2376 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2377 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2378 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2379 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2380 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2381 	} else {
2382 		pr_err("chcr : Unsupported cipher key\n");
2383 		goto out;
2384 	}
2385 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2386 	aeadctx->enckey_len = keys.enckeylen;
2387 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2388 				    aeadctx->enckey_len << 3);
2389 	key_ctx_len =  sizeof(struct _key_ctx)
2390 		+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
2391 
2392 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
2393 						0, key_ctx_len >> 4);
2394 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
2395 	return 0;
2396 out:
2397 	aeadctx->enckey_len = 0;
2398 	return -EINVAL;
2399 }
2400 static int chcr_aead_encrypt(struct aead_request *req)
2401 {
2402 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2403 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2404 
2405 	reqctx->verify = VERIFY_HW;
2406 
2407 	switch (get_aead_subtype(tfm)) {
2408 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
2409 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
2410 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2411 				    create_authenc_wr);
2412 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
2413 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
2414 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2415 				    create_aead_ccm_wr);
2416 	default:
2417 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2418 				    create_gcm_wr);
2419 	}
2420 }
2421 
2422 static int chcr_aead_decrypt(struct aead_request *req)
2423 {
2424 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2425 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2426 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2427 	int size;
2428 
2429 	if (aeadctx->mayverify == VERIFY_SW) {
2430 		size = crypto_aead_maxauthsize(tfm);
2431 		reqctx->verify = VERIFY_SW;
2432 	} else {
2433 		size = 0;
2434 		reqctx->verify = VERIFY_HW;
2435 	}
2436 
2437 	switch (get_aead_subtype(tfm)) {
2438 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
2439 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
2440 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2441 				    create_authenc_wr);
2442 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
2443 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
2444 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2445 				    create_aead_ccm_wr);
2446 	default:
2447 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2448 				    create_gcm_wr);
2449 	}
2450 }
2451 
2452 static int chcr_aead_op(struct aead_request *req,
2453 			  unsigned short op_type,
2454 			  int size,
2455 			  create_wr_t create_wr_fn)
2456 {
2457 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2458 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2459 	struct uld_ctx *u_ctx;
2460 	struct sk_buff *skb;
2461 
2462 	if (!ctx->dev) {
2463 		pr_err("chcr : %s : No crypto device.\n", __func__);
2464 		return -ENXIO;
2465 	}
2466 	u_ctx = ULD_CTX(ctx);
2467 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2468 				   ctx->tx_channel_id)) {
2469 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
2470 			return -EBUSY;
2471 	}
2472 
2473 	/* Form a WR from req */
2474 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
2475 			   op_type);
2476 
2477 	if (IS_ERR(skb) || skb == NULL) {
2478 		pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
2479 		return PTR_ERR(skb);
2480 	}
2481 
2482 	skb->dev = u_ctx->lldi.ports[0];
2483 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
2484 	chcr_send_wr(skb);
2485 	return -EINPROGRESS;
2486 }
2487 static struct chcr_alg_template driver_algs[] = {
2488 	/* AES-CBC */
2489 	{
2490 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2491 		.is_registered = 0,
2492 		.alg.crypto = {
2493 			.cra_name		= "cbc(aes)",
2494 			.cra_driver_name	= "cbc-aes-chcr",
2495 			.cra_priority		= CHCR_CRA_PRIORITY,
2496 			.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
2497 				CRYPTO_ALG_ASYNC,
2498 			.cra_blocksize		= AES_BLOCK_SIZE,
2499 			.cra_ctxsize		= sizeof(struct chcr_context)
2500 				+ sizeof(struct ablk_ctx),
2501 			.cra_alignmask		= 0,
2502 			.cra_type		= &crypto_ablkcipher_type,
2503 			.cra_module		= THIS_MODULE,
2504 			.cra_init		= chcr_cra_init,
2505 			.cra_exit		= NULL,
2506 			.cra_u.ablkcipher	= {
2507 				.min_keysize	= AES_MIN_KEY_SIZE,
2508 				.max_keysize	= AES_MAX_KEY_SIZE,
2509 				.ivsize		= AES_BLOCK_SIZE,
2510 				.setkey			= chcr_aes_cbc_setkey,
2511 				.encrypt		= chcr_aes_encrypt,
2512 				.decrypt		= chcr_aes_decrypt,
2513 			}
2514 		}
2515 	},
2516 	{
2517 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2518 		.is_registered = 0,
2519 		.alg.crypto =   {
2520 			.cra_name		= "xts(aes)",
2521 			.cra_driver_name	= "xts-aes-chcr",
2522 			.cra_priority		= CHCR_CRA_PRIORITY,
2523 			.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
2524 				CRYPTO_ALG_ASYNC,
2525 			.cra_blocksize		= AES_BLOCK_SIZE,
2526 			.cra_ctxsize		= sizeof(struct chcr_context) +
2527 				sizeof(struct ablk_ctx),
2528 			.cra_alignmask		= 0,
2529 			.cra_type		= &crypto_ablkcipher_type,
2530 			.cra_module		= THIS_MODULE,
2531 			.cra_init		= chcr_cra_init,
2532 			.cra_exit		= NULL,
2533 			.cra_u = {
2534 				.ablkcipher = {
2535 					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
2536 					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
2537 					.ivsize		= AES_BLOCK_SIZE,
2538 					.setkey		= chcr_aes_xts_setkey,
2539 					.encrypt	= chcr_aes_encrypt,
2540 					.decrypt	= chcr_aes_decrypt,
2541 				}
2542 			}
2543 		}
2544 	},
2545 	/* SHA */
2546 	{
2547 		.type = CRYPTO_ALG_TYPE_AHASH,
2548 		.is_registered = 0,
2549 		.alg.hash = {
2550 			.halg.digestsize = SHA1_DIGEST_SIZE,
2551 			.halg.base = {
2552 				.cra_name = "sha1",
2553 				.cra_driver_name = "sha1-chcr",
2554 				.cra_blocksize = SHA1_BLOCK_SIZE,
2555 			}
2556 		}
2557 	},
2558 	{
2559 		.type = CRYPTO_ALG_TYPE_AHASH,
2560 		.is_registered = 0,
2561 		.alg.hash = {
2562 			.halg.digestsize = SHA256_DIGEST_SIZE,
2563 			.halg.base = {
2564 				.cra_name = "sha256",
2565 				.cra_driver_name = "sha256-chcr",
2566 				.cra_blocksize = SHA256_BLOCK_SIZE,
2567 			}
2568 		}
2569 	},
2570 	{
2571 		.type = CRYPTO_ALG_TYPE_AHASH,
2572 		.is_registered = 0,
2573 		.alg.hash = {
2574 			.halg.digestsize = SHA224_DIGEST_SIZE,
2575 			.halg.base = {
2576 				.cra_name = "sha224",
2577 				.cra_driver_name = "sha224-chcr",
2578 				.cra_blocksize = SHA224_BLOCK_SIZE,
2579 			}
2580 		}
2581 	},
2582 	{
2583 		.type = CRYPTO_ALG_TYPE_AHASH,
2584 		.is_registered = 0,
2585 		.alg.hash = {
2586 			.halg.digestsize = SHA384_DIGEST_SIZE,
2587 			.halg.base = {
2588 				.cra_name = "sha384",
2589 				.cra_driver_name = "sha384-chcr",
2590 				.cra_blocksize = SHA384_BLOCK_SIZE,
2591 			}
2592 		}
2593 	},
2594 	{
2595 		.type = CRYPTO_ALG_TYPE_AHASH,
2596 		.is_registered = 0,
2597 		.alg.hash = {
2598 			.halg.digestsize = SHA512_DIGEST_SIZE,
2599 			.halg.base = {
2600 				.cra_name = "sha512",
2601 				.cra_driver_name = "sha512-chcr",
2602 				.cra_blocksize = SHA512_BLOCK_SIZE,
2603 			}
2604 		}
2605 	},
2606 	/* HMAC */
2607 	{
2608 		.type = CRYPTO_ALG_TYPE_HMAC,
2609 		.is_registered = 0,
2610 		.alg.hash = {
2611 			.halg.digestsize = SHA1_DIGEST_SIZE,
2612 			.halg.base = {
2613 				.cra_name = "hmac(sha1)",
2614 				.cra_driver_name = "hmac-sha1-chcr",
2615 				.cra_blocksize = SHA1_BLOCK_SIZE,
2616 			}
2617 		}
2618 	},
2619 	{
2620 		.type = CRYPTO_ALG_TYPE_HMAC,
2621 		.is_registered = 0,
2622 		.alg.hash = {
2623 			.halg.digestsize = SHA224_DIGEST_SIZE,
2624 			.halg.base = {
2625 				.cra_name = "hmac(sha224)",
2626 				.cra_driver_name = "hmac-sha224-chcr",
2627 				.cra_blocksize = SHA224_BLOCK_SIZE,
2628 			}
2629 		}
2630 	},
2631 	{
2632 		.type = CRYPTO_ALG_TYPE_HMAC,
2633 		.is_registered = 0,
2634 		.alg.hash = {
2635 			.halg.digestsize = SHA256_DIGEST_SIZE,
2636 			.halg.base = {
2637 				.cra_name = "hmac(sha256)",
2638 				.cra_driver_name = "hmac-sha256-chcr",
2639 				.cra_blocksize = SHA256_BLOCK_SIZE,
2640 			}
2641 		}
2642 	},
2643 	{
2644 		.type = CRYPTO_ALG_TYPE_HMAC,
2645 		.is_registered = 0,
2646 		.alg.hash = {
2647 			.halg.digestsize = SHA384_DIGEST_SIZE,
2648 			.halg.base = {
2649 				.cra_name = "hmac(sha384)",
2650 				.cra_driver_name = "hmac-sha384-chcr",
2651 				.cra_blocksize = SHA384_BLOCK_SIZE,
2652 			}
2653 		}
2654 	},
2655 	{
2656 		.type = CRYPTO_ALG_TYPE_HMAC,
2657 		.is_registered = 0,
2658 		.alg.hash = {
2659 			.halg.digestsize = SHA512_DIGEST_SIZE,
2660 			.halg.base = {
2661 				.cra_name = "hmac(sha512)",
2662 				.cra_driver_name = "hmac-sha512-chcr",
2663 				.cra_blocksize = SHA512_BLOCK_SIZE,
2664 			}
2665 		}
2666 	},
2667 	/* Add AEAD Algorithms */
2668 	{
2669 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
2670 		.is_registered = 0,
2671 		.alg.aead = {
2672 			.base = {
2673 				.cra_name = "gcm(aes)",
2674 				.cra_driver_name = "gcm-aes-chcr",
2675 				.cra_blocksize	= 1,
2676 				.cra_ctxsize =	sizeof(struct chcr_context) +
2677 						sizeof(struct chcr_aead_ctx) +
2678 						sizeof(struct chcr_gcm_ctx),
2679 			},
2680 			.ivsize = 12,
2681 			.maxauthsize = GHASH_DIGEST_SIZE,
2682 			.setkey = chcr_gcm_setkey,
2683 			.setauthsize = chcr_gcm_setauthsize,
2684 		}
2685 	},
2686 	{
2687 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
2688 		.is_registered = 0,
2689 		.alg.aead = {
2690 			.base = {
2691 				.cra_name = "rfc4106(gcm(aes))",
2692 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
2693 				.cra_blocksize	 = 1,
2694 				.cra_ctxsize =	sizeof(struct chcr_context) +
2695 						sizeof(struct chcr_aead_ctx) +
2696 						sizeof(struct chcr_gcm_ctx),
2697 
2698 			},
2699 			.ivsize = 8,
2700 			.maxauthsize	= GHASH_DIGEST_SIZE,
2701 			.setkey = chcr_gcm_setkey,
2702 			.setauthsize	= chcr_4106_4309_setauthsize,
2703 		}
2704 	},
2705 	{
2706 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
2707 		.is_registered = 0,
2708 		.alg.aead = {
2709 			.base = {
2710 				.cra_name = "ccm(aes)",
2711 				.cra_driver_name = "ccm-aes-chcr",
2712 				.cra_blocksize	 = 1,
2713 				.cra_ctxsize =	sizeof(struct chcr_context) +
2714 						sizeof(struct chcr_aead_ctx),
2715 
2716 			},
2717 			.ivsize = AES_BLOCK_SIZE,
2718 			.maxauthsize	= GHASH_DIGEST_SIZE,
2719 			.setkey = chcr_aead_ccm_setkey,
2720 			.setauthsize	= chcr_ccm_setauthsize,
2721 		}
2722 	},
2723 	{
2724 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
2725 		.is_registered = 0,
2726 		.alg.aead = {
2727 			.base = {
2728 				.cra_name = "rfc4309(ccm(aes))",
2729 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
2730 				.cra_blocksize	 = 1,
2731 				.cra_ctxsize =	sizeof(struct chcr_context) +
2732 						sizeof(struct chcr_aead_ctx),
2733 
2734 			},
2735 			.ivsize = 8,
2736 			.maxauthsize	= GHASH_DIGEST_SIZE,
2737 			.setkey = chcr_aead_rfc4309_setkey,
2738 			.setauthsize = chcr_4106_4309_setauthsize,
2739 		}
2740 	},
2741 	{
2742 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2743 		.is_registered = 0,
2744 		.alg.aead = {
2745 			.base = {
2746 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2747 				.cra_driver_name =
2748 					"authenc-hmac-sha1-cbc-aes-chcr",
2749 				.cra_blocksize	 = AES_BLOCK_SIZE,
2750 				.cra_ctxsize =	sizeof(struct chcr_context) +
2751 						sizeof(struct chcr_aead_ctx) +
2752 						sizeof(struct chcr_authenc_ctx),
2753 
2754 			},
2755 			.ivsize = AES_BLOCK_SIZE,
2756 			.maxauthsize = SHA1_DIGEST_SIZE,
2757 			.setkey = chcr_authenc_setkey,
2758 			.setauthsize = chcr_authenc_setauthsize,
2759 		}
2760 	},
2761 	{
2762 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2763 		.is_registered = 0,
2764 		.alg.aead = {
2765 			.base = {
2766 
2767 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2768 				.cra_driver_name =
2769 					"authenc-hmac-sha256-cbc-aes-chcr",
2770 				.cra_blocksize	 = AES_BLOCK_SIZE,
2771 				.cra_ctxsize =	sizeof(struct chcr_context) +
2772 						sizeof(struct chcr_aead_ctx) +
2773 						sizeof(struct chcr_authenc_ctx),
2774 
2775 			},
2776 			.ivsize = AES_BLOCK_SIZE,
2777 			.maxauthsize	= SHA256_DIGEST_SIZE,
2778 			.setkey = chcr_authenc_setkey,
2779 			.setauthsize = chcr_authenc_setauthsize,
2780 		}
2781 	},
2782 	{
2783 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2784 		.is_registered = 0,
2785 		.alg.aead = {
2786 			.base = {
2787 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2788 				.cra_driver_name =
2789 					"authenc-hmac-sha224-cbc-aes-chcr",
2790 				.cra_blocksize	 = AES_BLOCK_SIZE,
2791 				.cra_ctxsize =	sizeof(struct chcr_context) +
2792 						sizeof(struct chcr_aead_ctx) +
2793 						sizeof(struct chcr_authenc_ctx),
2794 			},
2795 			.ivsize = AES_BLOCK_SIZE,
2796 			.maxauthsize = SHA224_DIGEST_SIZE,
2797 			.setkey = chcr_authenc_setkey,
2798 			.setauthsize = chcr_authenc_setauthsize,
2799 		}
2800 	},
2801 	{
2802 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2803 		.is_registered = 0,
2804 		.alg.aead = {
2805 			.base = {
2806 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2807 				.cra_driver_name =
2808 					"authenc-hmac-sha384-cbc-aes-chcr",
2809 				.cra_blocksize	 = AES_BLOCK_SIZE,
2810 				.cra_ctxsize =	sizeof(struct chcr_context) +
2811 						sizeof(struct chcr_aead_ctx) +
2812 						sizeof(struct chcr_authenc_ctx),
2813 
2814 			},
2815 			.ivsize = AES_BLOCK_SIZE,
2816 			.maxauthsize = SHA384_DIGEST_SIZE,
2817 			.setkey = chcr_authenc_setkey,
2818 			.setauthsize = chcr_authenc_setauthsize,
2819 		}
2820 	},
2821 	{
2822 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2823 		.is_registered = 0,
2824 		.alg.aead = {
2825 			.base = {
2826 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2827 				.cra_driver_name =
2828 					"authenc-hmac-sha512-cbc-aes-chcr",
2829 				.cra_blocksize	 = AES_BLOCK_SIZE,
2830 				.cra_ctxsize =	sizeof(struct chcr_context) +
2831 						sizeof(struct chcr_aead_ctx) +
2832 						sizeof(struct chcr_authenc_ctx),
2833 
2834 			},
2835 			.ivsize = AES_BLOCK_SIZE,
2836 			.maxauthsize = SHA512_DIGEST_SIZE,
2837 			.setkey = chcr_authenc_setkey,
2838 			.setauthsize = chcr_authenc_setauthsize,
2839 		}
2840 	},
2841 	{
2842 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
2843 		.is_registered = 0,
2844 		.alg.aead = {
2845 			.base = {
2846 				.cra_name = "authenc(digest_null,cbc(aes))",
2847 				.cra_driver_name =
2848 					"authenc-digest_null-cbc-aes-chcr",
2849 				.cra_blocksize	 = AES_BLOCK_SIZE,
2850 				.cra_ctxsize =	sizeof(struct chcr_context) +
2851 						sizeof(struct chcr_aead_ctx) +
2852 						sizeof(struct chcr_authenc_ctx),
2853 
2854 			},
2855 			.ivsize  = AES_BLOCK_SIZE,
2856 			.maxauthsize = 0,
2857 			.setkey  = chcr_aead_digest_null_setkey,
2858 			.setauthsize = chcr_authenc_null_setauthsize,
2859 		}
2860 	},
2861 };
2862 
2863 /*
2864  *	chcr_unregister_alg - Deregister crypto algorithms with
2865  *	kernel framework.
2866  */
2867 static int chcr_unregister_alg(void)
2868 {
2869 	int i;
2870 
2871 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2872 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
2873 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2874 			if (driver_algs[i].is_registered)
2875 				crypto_unregister_alg(
2876 						&driver_algs[i].alg.crypto);
2877 			break;
2878 		case CRYPTO_ALG_TYPE_AEAD:
2879 			if (driver_algs[i].is_registered)
2880 				crypto_unregister_aead(
2881 						&driver_algs[i].alg.aead);
2882 			break;
2883 		case CRYPTO_ALG_TYPE_AHASH:
2884 			if (driver_algs[i].is_registered)
2885 				crypto_unregister_ahash(
2886 						&driver_algs[i].alg.hash);
2887 			break;
2888 		}
2889 		driver_algs[i].is_registered = 0;
2890 	}
2891 	return 0;
2892 }
2893 
2894 #define SZ_AHASH_CTX sizeof(struct chcr_context)
2895 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
2896 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
2897 #define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
2898 
2899 /*
2900  *	chcr_register_alg - Register crypto algorithms with kernel framework.
2901  */
2902 static int chcr_register_alg(void)
2903 {
2904 	struct crypto_alg ai;
2905 	struct ahash_alg *a_hash;
2906 	int err = 0, i;
2907 	char *name = NULL;
2908 
2909 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2910 		if (driver_algs[i].is_registered)
2911 			continue;
2912 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
2913 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2914 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
2915 			name = driver_algs[i].alg.crypto.cra_driver_name;
2916 			break;
2917 		case CRYPTO_ALG_TYPE_AEAD:
2918 			driver_algs[i].alg.aead.base.cra_priority =
2919 				CHCR_CRA_PRIORITY;
2920 			driver_algs[i].alg.aead.base.cra_flags =
2921 				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
2922 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
2923 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
2924 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
2925 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
2926 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
2927 			err = crypto_register_aead(&driver_algs[i].alg.aead);
2928 			name = driver_algs[i].alg.aead.base.cra_driver_name;
2929 			break;
2930 		case CRYPTO_ALG_TYPE_AHASH:
2931 			a_hash = &driver_algs[i].alg.hash;
2932 			a_hash->update = chcr_ahash_update;
2933 			a_hash->final = chcr_ahash_final;
2934 			a_hash->finup = chcr_ahash_finup;
2935 			a_hash->digest = chcr_ahash_digest;
2936 			a_hash->export = chcr_ahash_export;
2937 			a_hash->import = chcr_ahash_import;
2938 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
2939 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
2940 			a_hash->halg.base.cra_module = THIS_MODULE;
2941 			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
2942 			a_hash->halg.base.cra_alignmask = 0;
2943 			a_hash->halg.base.cra_exit = NULL;
2944 			a_hash->halg.base.cra_type = &crypto_ahash_type;
2945 
2946 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
2947 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
2948 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
2949 				a_hash->init = chcr_hmac_init;
2950 				a_hash->setkey = chcr_ahash_setkey;
2951 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
2952 			} else {
2953 				a_hash->init = chcr_sha_init;
2954 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
2955 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
2956 			}
2957 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
2958 			ai = driver_algs[i].alg.hash.halg.base;
2959 			name = ai.cra_driver_name;
2960 			break;
2961 		}
2962 		if (err) {
2963 			pr_err("chcr : %s : Algorithm registration failed\n",
2964 			       name);
2965 			goto register_err;
2966 		} else {
2967 			driver_algs[i].is_registered = 1;
2968 		}
2969 	}
2970 	return 0;
2971 
2972 register_err:
2973 	chcr_unregister_alg();
2974 	return err;
2975 }
2976 
2977 /*
2978  *	start_crypto - Register the crypto algorithms.
2979  *	This should called once when the first device comesup. After this
2980  *	kernel will start calling driver APIs for crypto operations.
2981  */
2982 int start_crypto(void)
2983 {
2984 	return chcr_register_alg();
2985 }
2986 
2987 /*
2988  *	stop_crypto - Deregister all the crypto algorithms with kernel.
2989  *	This should be called once when the last device goes down. After this
2990  *	kernel will not call the driver API for crypto operations.
2991  */
2992 int stop_crypto(void)
2993 {
2994 	chcr_unregister_alg();
2995 	return 0;
2996 }
2997