1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
#define pr_fmt(fmt) "chcr: " fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/sha.h>
57 #include <crypto/authenc.h>
58 #include <crypto/internal/aead.h>
59 #include <crypto/null.h>
60 #include <crypto/internal/skcipher.h>
61 #include <crypto/aead.h>
62 #include <crypto/scatterwalk.h>
63 #include <crypto/internal/hash.h>
64 
65 #include "t4fw_api.h"
66 #include "t4_msg.h"
67 #include "chcr_core.h"
68 #include "chcr_algo.h"
69 #include "chcr_crypto.h"
70 
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
72 {
73 	return ctx->crypto_ctx->aeadctx;
74 }
75 
76 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
77 {
78 	return ctx->crypto_ctx->ablkctx;
79 }
80 
81 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
82 {
83 	return ctx->crypto_ctx->hmacctx;
84 }
85 
86 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
87 {
88 	return gctx->ctx->gcm;
89 }
90 
91 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
92 {
93 	return gctx->ctx->authenc;
94 }
95 
96 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
97 {
98 	return ctx->dev->u_ctx;
99 }
100 
101 static inline int is_ofld_imm(const struct sk_buff *skb)
102 {
103 	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
104 }
105 
106 /*
107  *	sgl_len - calculates the size of an SGL of the given capacity
108  *	@n: the number of SGL entries
109  *	Calculates the number of flits needed for a scatter/gather list that
110  *	can hold the given number of entries.
111  */
112 static inline unsigned int sgl_len(unsigned int n)
113 {
114 	n--;
115 	return (3 * n) / 2 + (n & 1) + 2;
116 }
117 
118 static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
119 {
120 	u8 temp[SHA512_DIGEST_SIZE];
121 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
122 	int authsize = crypto_aead_authsize(tfm);
123 	struct cpl_fw6_pld *fw6_pld;
124 	int cmp = 0;
125 
126 	fw6_pld = (struct cpl_fw6_pld *)input;
127 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
128 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
129 		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
132 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
133 				authsize, req->assoclen +
134 				req->cryptlen - authsize);
135 		cmp = memcmp(temp, (fw6_pld + 1), authsize);
136 	}
137 	if (cmp)
138 		*err = -EBADMSG;
139 	else
140 		*err = 0;
141 }
142 
143 /*
144  *	chcr_handle_resp - Unmap the DMA buffers associated with the request
145  *	@req: crypto request
146  */
147 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
148 			 int err)
149 {
150 	struct crypto_tfm *tfm = req->tfm;
151 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
152 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
153 	struct chcr_req_ctx ctx_req;
154 	struct cpl_fw6_pld *fw6_pld;
155 	unsigned int digestsize, updated_digestsize;
156 
157 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
158 	case CRYPTO_ALG_TYPE_AEAD:
159 		ctx_req.req.aead_req = (struct aead_request *)req;
160 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
161 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
162 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
163 		if (ctx_req.ctx.reqctx->skb) {
164 			kfree_skb(ctx_req.ctx.reqctx->skb);
165 			ctx_req.ctx.reqctx->skb = NULL;
166 		}
167 		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
168 			chcr_verify_tag(ctx_req.req.aead_req, input,
169 					&err);
170 			ctx_req.ctx.reqctx->verify = VERIFY_HW;
171 		}
172 		break;
173 
174 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
175 		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
176 		ctx_req.ctx.ablk_ctx =
177 			ablkcipher_request_ctx(ctx_req.req.ablk_req);
178 		if (!err) {
179 			fw6_pld = (struct cpl_fw6_pld *)input;
180 			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
181 			       AES_BLOCK_SIZE);
182 		}
183 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
184 			     ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
185 		if (ctx_req.ctx.ablk_ctx->skb) {
186 			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
187 			ctx_req.ctx.ablk_ctx->skb = NULL;
188 		}
189 		break;
190 
191 	case CRYPTO_ALG_TYPE_AHASH:
192 		ctx_req.req.ahash_req = (struct ahash_request *)req;
193 		ctx_req.ctx.ahash_ctx =
194 			ahash_request_ctx(ctx_req.req.ahash_req);
195 		digestsize =
196 			crypto_ahash_digestsize(crypto_ahash_reqtfm(
197 							ctx_req.req.ahash_req));
198 		updated_digestsize = digestsize;
199 		if (digestsize == SHA224_DIGEST_SIZE)
200 			updated_digestsize = SHA256_DIGEST_SIZE;
201 		else if (digestsize == SHA384_DIGEST_SIZE)
202 			updated_digestsize = SHA512_DIGEST_SIZE;
203 		if (ctx_req.ctx.ahash_ctx->skb) {
204 			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
205 			ctx_req.ctx.ahash_ctx->skb = NULL;
206 		}
207 		if (ctx_req.ctx.ahash_ctx->result == 1) {
208 			ctx_req.ctx.ahash_ctx->result = 0;
209 			memcpy(ctx_req.req.ahash_req->result, input +
210 			       sizeof(struct cpl_fw6_pld),
211 			       digestsize);
212 		} else {
213 			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
214 			       sizeof(struct cpl_fw6_pld),
215 			       updated_digestsize);
216 		}
217 		break;
218 	}
219 	return err;
220 }
221 
222 /*
223  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
224  *	@skb: the packet
225  *	Returns the number of flits needed for the given offload packet.
226  *	These packets are already fully constructed and no additional headers
227  *	will be added.
228  */
229 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
230 {
231 	unsigned int flits, cnt;
232 
233 	if (is_ofld_imm(skb))
234 		return DIV_ROUND_UP(skb->len, 8);
235 
236 	flits = skb_transport_offset(skb) / 8;   /* headers */
237 	cnt = skb_shinfo(skb)->nr_frags;
238 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
239 		cnt++;
240 	return flits + sgl_len(cnt);
241 }
242 
243 static inline void get_aes_decrypt_key(unsigned char *dec_key,
244 				       const unsigned char *key,
245 				       unsigned int keylength)
246 {
247 	u32 temp;
248 	u32 w_ring[MAX_NK];
249 	int i, j, k;
250 	u8  nr, nk;
251 
252 	switch (keylength) {
253 	case AES_KEYLENGTH_128BIT:
254 		nk = KEYLENGTH_4BYTES;
255 		nr = NUMBER_OF_ROUNDS_10;
256 		break;
257 	case AES_KEYLENGTH_192BIT:
258 		nk = KEYLENGTH_6BYTES;
259 		nr = NUMBER_OF_ROUNDS_12;
260 		break;
261 	case AES_KEYLENGTH_256BIT:
262 		nk = KEYLENGTH_8BYTES;
263 		nr = NUMBER_OF_ROUNDS_14;
264 		break;
265 	default:
266 		return;
267 	}
268 	for (i = 0; i < nk; i++)
269 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
270 
271 	i = 0;
272 	temp = w_ring[nk - 1];
273 	while (i + nk < (nr + 1) * 4) {
274 		if (!(i % nk)) {
275 			/* RotWord(temp) */
276 			temp = (temp << 8) | (temp >> 24);
277 			temp = aes_ks_subword(temp);
278 			temp ^= round_constant[i / nk];
279 		} else if (nk == 8 && (i % 4 == 0)) {
280 			temp = aes_ks_subword(temp);
281 		}
282 		w_ring[i % nk] ^= temp;
283 		temp = w_ring[i % nk];
284 		i++;
285 	}
286 	i--;
287 	for (k = 0, j = i % nk; k < nk; k++) {
288 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
289 		j--;
290 		if (j < 0)
291 			j += nk;
292 	}
293 }
294 
295 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
296 {
297 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
298 
299 	switch (ds) {
300 	case SHA1_DIGEST_SIZE:
301 		base_hash = crypto_alloc_shash("sha1", 0, 0);
302 		break;
303 	case SHA224_DIGEST_SIZE:
304 		base_hash = crypto_alloc_shash("sha224", 0, 0);
305 		break;
306 	case SHA256_DIGEST_SIZE:
307 		base_hash = crypto_alloc_shash("sha256", 0, 0);
308 		break;
309 	case SHA384_DIGEST_SIZE:
310 		base_hash = crypto_alloc_shash("sha384", 0, 0);
311 		break;
312 	case SHA512_DIGEST_SIZE:
313 		base_hash = crypto_alloc_shash("sha512", 0, 0);
314 		break;
315 	}
316 
317 	return base_hash;
318 }
319 
320 static int chcr_compute_partial_hash(struct shash_desc *desc,
321 				     char *iopad, char *result_hash,
322 				     int digest_size)
323 {
324 	struct sha1_state sha1_st;
325 	struct sha256_state sha256_st;
326 	struct sha512_state sha512_st;
327 	int error;
328 
329 	if (digest_size == SHA1_DIGEST_SIZE) {
330 		error = crypto_shash_init(desc) ?:
331 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
332 			crypto_shash_export(desc, (void *)&sha1_st);
333 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
334 	} else if (digest_size == SHA224_DIGEST_SIZE) {
335 		error = crypto_shash_init(desc) ?:
336 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
337 			crypto_shash_export(desc, (void *)&sha256_st);
338 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
339 
340 	} else if (digest_size == SHA256_DIGEST_SIZE) {
341 		error = crypto_shash_init(desc) ?:
342 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
343 			crypto_shash_export(desc, (void *)&sha256_st);
344 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
345 
346 	} else if (digest_size == SHA384_DIGEST_SIZE) {
347 		error = crypto_shash_init(desc) ?:
348 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
349 			crypto_shash_export(desc, (void *)&sha512_st);
350 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
351 
352 	} else if (digest_size == SHA512_DIGEST_SIZE) {
353 		error = crypto_shash_init(desc) ?:
354 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
355 			crypto_shash_export(desc, (void *)&sha512_st);
356 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
357 	} else {
358 		error = -EINVAL;
359 		pr_err("Unknown digest size %d\n", digest_size);
360 	}
361 	return error;
362 }
363 
364 static void chcr_change_order(char *buf, int ds)
365 {
366 	int i;
367 
368 	if (ds == SHA512_DIGEST_SIZE) {
369 		for (i = 0; i < (ds / sizeof(u64)); i++)
370 			*((__be64 *)buf + i) =
371 				cpu_to_be64(*((u64 *)buf + i));
372 	} else {
373 		for (i = 0; i < (ds / sizeof(u32)); i++)
374 			*((__be32 *)buf + i) =
375 				cpu_to_be32(*((u32 *)buf + i));
376 	}
377 }
378 
379 static inline int is_hmac(struct crypto_tfm *tfm)
380 {
381 	struct crypto_alg *alg = tfm->__crt_alg;
382 	struct chcr_alg_template *chcr_crypto_alg =
383 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
384 			     alg.hash);
385 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
386 		return 1;
387 	return 0;
388 }
389 
390 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
391 			   struct scatterlist *sg,
392 			   struct phys_sge_parm *sg_param)
393 {
394 	struct phys_sge_pairs *to;
395 	int out_buf_size = sg_param->obsize;
	unsigned int nents = sg_param->nents, j = 0;
397 
398 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
399 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
400 	phys_cpl->pcirlxorder_to_noofsgentr =
401 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
402 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
403 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
404 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
405 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
406 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
407 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
408 	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
409 	phys_cpl->rss_hdr_int.hash_val = 0;
410 	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
411 				       sizeof(struct cpl_rx_phys_dsgl));
412 
	for (; nents; to++) {
414 		for (j = 0; j < 8 && nents; j++, nents--) {
415 			out_buf_size -= sg_dma_len(sg);
416 			to->len[j] = htons(sg_dma_len(sg));
417 			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
418 			sg = sg_next(sg);
419 		}
420 	}
421 	if (out_buf_size) {
422 		j--;
423 		to--;
424 		to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
425 	}
426 }
427 
428 static inline int map_writesg_phys_cpl(struct device *dev,
429 					struct cpl_rx_phys_dsgl *phys_cpl,
430 					struct scatterlist *sg,
431 					struct phys_sge_parm *sg_param)
432 {
433 	if (!sg || !sg_param->nents)
434 		return 0;
435 
436 	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
437 	if (sg_param->nents == 0) {
438 		pr_err("CHCR : DMA mapping failed\n");
439 		return -EINVAL;
440 	}
441 	write_phys_cpl(phys_cpl, sg, sg_param);
442 	return 0;
443 }
444 
445 static inline int get_aead_subtype(struct crypto_aead *aead)
446 {
447 	struct aead_alg *alg = crypto_aead_alg(aead);
448 	struct chcr_alg_template *chcr_crypto_alg =
449 		container_of(alg, struct chcr_alg_template, alg.aead);
450 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
451 }
452 
453 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
454 {
455 	struct crypto_alg *alg = tfm->__crt_alg;
456 	struct chcr_alg_template *chcr_crypto_alg =
457 		container_of(alg, struct chcr_alg_template, alg.crypto);
458 
459 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
460 }
461 
462 static inline void write_buffer_to_skb(struct sk_buff *skb,
463 					unsigned int *frags,
464 					char *bfr,
465 					u8 bfr_len)
466 {
467 	skb->len += bfr_len;
468 	skb->data_len += bfr_len;
469 	skb->truesize += bfr_len;
470 	get_page(virt_to_page(bfr));
471 	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
472 			   offset_in_page(bfr), bfr_len);
473 	(*frags)++;
474 }
475 
476 
477 static inline void
478 write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
479 			struct scatterlist *sg, unsigned int count)
480 {
481 	struct page *spage;
482 	unsigned int page_len;
483 
484 	skb->len += count;
485 	skb->data_len += count;
486 	skb->truesize += count;
487 
488 	while (count > 0) {
489 		if (!sg || (!(sg->length)))
490 			break;
491 		spage = sg_page(sg);
492 		get_page(spage);
493 		page_len = min(sg->length, count);
494 		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
495 		(*frags)++;
496 		count -= page_len;
497 		sg = sg_next(sg);
498 	}
499 }
500 
501 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
502 			       struct _key_ctx *key_ctx)
503 {
504 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
505 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
506 	} else {
507 		memcpy(key_ctx->key,
508 		       ablkctx->key + (ablkctx->enckey_len >> 1),
509 		       ablkctx->enckey_len >> 1);
510 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
511 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
512 	}
513 	return 0;
514 }
515 
516 static inline void create_wreq(struct chcr_context *ctx,
517 			       struct chcr_wr *chcr_req,
518 			       void *req, struct sk_buff *skb,
519 			       int kctx_len, int hash_sz,
520 			       int is_iv,
521 			       unsigned int sc_len)
522 {
523 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
524 	int iv_loc = IV_DSGL;
525 	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
526 	unsigned int immdatalen = 0, nr_frags = 0;
527 
528 	if (is_ofld_imm(skb)) {
529 		immdatalen = skb->data_len;
530 		iv_loc = IV_IMMEDIATE;
531 	} else {
532 		nr_frags = skb_shinfo(skb)->nr_frags;
533 	}
534 
535 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
536 				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
537 	chcr_req->wreq.pld_size_hash_size =
538 		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
539 		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
540 	chcr_req->wreq.len16_pkd =
541 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
542 				    (calc_tx_flits_ofld(skb) * 8), 16)));
543 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
544 	chcr_req->wreq.rx_chid_to_rx_q_id =
545 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
546 				is_iv ? iv_loc : IV_NOP, ctx->tx_qidx);
547 
548 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
549 						       qid);
550 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
551 					16) - ((sizeof(chcr_req->wreq)) >> 4)));
552 
553 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
554 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
555 				   sizeof(chcr_req->key_ctx) +
556 				   kctx_len + sc_len + immdatalen);
557 }
558 
559 /**
560  *	create_cipher_wr - form the WR for cipher operations
561  *	@req: cipher req.
562  *	@ctx: crypto driver context of the request.
563  *	@qid: ingress qid where response of this WR should be received.
564  *	@op_type:	encryption or decryption
565  */
static struct sk_buff *create_cipher_wr(struct ablkcipher_request *req,
					unsigned short qid,
					unsigned short op_type)
570 {
571 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
572 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
573 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
574 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
575 	struct sk_buff *skb = NULL;
576 	struct chcr_wr *chcr_req;
577 	struct cpl_rx_phys_dsgl *phys_cpl;
578 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
579 	struct phys_sge_parm sg_param;
580 	unsigned int frags = 0, transhdr_len, phys_dsgl;
581 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
582 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
583 			GFP_ATOMIC;
584 
585 	if (!req->info)
586 		return ERR_PTR(-EINVAL);
587 	reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
588 	if (reqctx->dst_nents <= 0) {
589 		pr_err("AES:Invalid Destination sg lists\n");
590 		return ERR_PTR(-EINVAL);
591 	}
592 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
593 	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
594 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
595 		       ablkctx->enckey_len, req->nbytes, ivsize);
596 		return ERR_PTR(-EINVAL);
597 	}
598 
599 	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
600 
601 	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
602 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
603 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
604 	if (!skb)
605 		return ERR_PTR(-ENOMEM);
606 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
607 	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
608 	memset(chcr_req, 0, transhdr_len);
609 	chcr_req->sec_cpl.op_ivinsrtofst =
610 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
611 
612 	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
613 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
614 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
615 
616 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
617 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
618 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
619 							 ablkctx->ciph_mode,
620 							 0, 0, ivsize >> 1);
621 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
622 							  0, 1, phys_dsgl);
623 
624 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
625 	if (op_type == CHCR_DECRYPT_OP) {
626 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
627 	} else {
628 		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
629 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
630 			       ablkctx->enckey_len);
631 		} else {
632 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
633 			       (ablkctx->enckey_len >> 1),
634 			       ablkctx->enckey_len >> 1);
635 			memcpy(chcr_req->key_ctx.key +
636 			       (ablkctx->enckey_len >> 1),
637 			       ablkctx->key,
638 			       ablkctx->enckey_len >> 1);
639 		}
640 	}
641 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
642 	sg_param.nents = reqctx->dst_nents;
643 	sg_param.obsize = req->nbytes;
644 	sg_param.qid = qid;
645 	sg_param.align = 1;
646 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
647 				 &sg_param))
648 		goto map_fail1;
649 
650 	skb_set_transport_header(skb, transhdr_len);
651 	memcpy(reqctx->iv, req->info, ivsize);
652 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
653 	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
654 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
655 			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
656 	reqctx->skb = skb;
657 	skb_get(skb);
658 	return skb;
659 map_fail1:
660 	kfree_skb(skb);
661 	return ERR_PTR(-ENOMEM);
662 }
663 
664 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
665 			       unsigned int keylen)
666 {
667 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
668 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
669 	unsigned int ck_size, context_size;
670 	u16 alignment = 0;
671 
672 	if (keylen == AES_KEYSIZE_128) {
673 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
674 	} else if (keylen == AES_KEYSIZE_192) {
675 		alignment = 8;
676 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
677 	} else if (keylen == AES_KEYSIZE_256) {
678 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
679 	} else {
680 		goto badkey_err;
681 	}
682 	memcpy(ablkctx->key, key, keylen);
683 	ablkctx->enckey_len = keylen;
684 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
685 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
686 			keylen + alignment) >> 4;
687 
688 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
689 						0, 0, context_size);
690 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
691 	return 0;
692 badkey_err:
693 	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
694 	ablkctx->enckey_len = 0;
695 	return -EINVAL;
696 }
697 
698 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
699 {
700 	struct adapter *adap = netdev2adap(dev);
701 	struct sge_uld_txq_info *txq_info =
702 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
703 	struct sge_uld_txq *txq;
704 	int ret = 0;
705 
706 	local_bh_disable();
707 	txq = &txq_info->uldtxq[idx];
708 	spin_lock(&txq->sendq.lock);
709 	if (txq->full)
710 		ret = -1;
711 	spin_unlock(&txq->sendq.lock);
712 	local_bh_enable();
713 	return ret;
714 }
715 
716 static int chcr_aes_encrypt(struct ablkcipher_request *req)
717 {
718 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
719 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
720 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
721 	struct sk_buff *skb;
722 
723 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
724 					    ctx->tx_qidx))) {
725 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
726 			return -EBUSY;
727 	}
728 
729 	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
730 			       CHCR_ENCRYPT_OP);
731 	if (IS_ERR(skb)) {
732 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
733 		return  PTR_ERR(skb);
734 	}
735 	skb->dev = u_ctx->lldi.ports[0];
736 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
737 	chcr_send_wr(skb);
738 	return -EINPROGRESS;
739 }
740 
741 static int chcr_aes_decrypt(struct ablkcipher_request *req)
742 {
743 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
744 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
745 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
746 	struct sk_buff *skb;
747 
748 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
749 					    ctx->tx_qidx))) {
750 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
751 			return -EBUSY;
752 	}
753 
754 	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
755 			       CHCR_DECRYPT_OP);
756 	if (IS_ERR(skb)) {
757 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
758 		return PTR_ERR(skb);
759 	}
760 	skb->dev = u_ctx->lldi.ports[0];
761 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
762 	chcr_send_wr(skb);
763 	return -EINPROGRESS;
764 }
765 
766 static int chcr_device_init(struct chcr_context *ctx)
767 {
768 	struct uld_ctx *u_ctx;
769 	struct adapter *adap;
770 	unsigned int id;
771 	int txq_perchan, txq_idx, ntxq;
772 	int err = 0, rxq_perchan, rxq_idx;
773 
774 	id = smp_processor_id();
775 	if (!ctx->dev) {
776 		err = assign_chcr_device(&ctx->dev);
777 		if (err) {
778 			pr_err("chcr device assignment fails\n");
779 			goto out;
780 		}
781 		u_ctx = ULD_CTX(ctx);
782 		adap = padap(ctx->dev);
783 		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
784 				    adap->vres.ncrypto_fc);
785 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
786 		txq_perchan = ntxq / u_ctx->lldi.nchan;
787 		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
788 		rxq_idx += id % rxq_perchan;
789 		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
790 		txq_idx += id % txq_perchan;
791 		spin_lock(&ctx->dev->lock_chcr_dev);
792 		ctx->rx_qidx = rxq_idx;
793 		ctx->tx_qidx = txq_idx;
794 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
795 		ctx->dev->rx_channel_id = 0;
796 		spin_unlock(&ctx->dev->lock_chcr_dev);
797 	}
798 out:
799 	return err;
800 }
801 
802 static int chcr_cra_init(struct crypto_tfm *tfm)
803 {
804 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
805 	return chcr_device_init(crypto_tfm_ctx(tfm));
806 }
807 
808 static int get_alg_config(struct algo_param *params,
809 			  unsigned int auth_size)
810 {
811 	switch (auth_size) {
812 	case SHA1_DIGEST_SIZE:
813 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
814 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
815 		params->result_size = SHA1_DIGEST_SIZE;
816 		break;
817 	case SHA224_DIGEST_SIZE:
818 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
819 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
820 		params->result_size = SHA256_DIGEST_SIZE;
821 		break;
822 	case SHA256_DIGEST_SIZE:
823 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
824 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
825 		params->result_size = SHA256_DIGEST_SIZE;
826 		break;
827 	case SHA384_DIGEST_SIZE:
828 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
829 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
830 		params->result_size = SHA512_DIGEST_SIZE;
831 		break;
832 	case SHA512_DIGEST_SIZE:
833 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
834 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
835 		params->result_size = SHA512_DIGEST_SIZE;
836 		break;
837 	default:
838 		pr_err("chcr : ERROR, unsupported digest size\n");
839 		return -EINVAL;
840 	}
841 	return 0;
842 }
843 
static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}
848 
849 /**
850  *	create_hash_wr - Create hash work request
851  *	@req - Cipher req base
852  */
853 static struct sk_buff *create_hash_wr(struct ahash_request *req,
854 				      struct hash_wr_param *param)
855 {
856 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
857 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
858 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
859 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
860 	struct sk_buff *skb = NULL;
861 	struct chcr_wr *chcr_req;
862 	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
863 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
864 	unsigned int kctx_len = 0;
865 	u8 hash_size_in_response = 0;
866 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
867 		GFP_ATOMIC;
868 
869 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
870 	kctx_len = param->alg_prm.result_size + iopad_alignment;
871 	if (param->opad_needed)
872 		kctx_len += param->alg_prm.result_size + iopad_alignment;
873 
874 	if (req_ctx->result)
875 		hash_size_in_response = digestsize;
876 	else
877 		hash_size_in_response = param->alg_prm.result_size;
878 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
879 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
880 	if (!skb)
881 		return skb;
882 
883 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
884 	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
885 	memset(chcr_req, 0, transhdr_len);
886 
887 	chcr_req->sec_cpl.op_ivinsrtofst =
888 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
889 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
890 
891 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
892 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
893 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
894 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
895 	chcr_req->sec_cpl.seqno_numivs =
896 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
897 					 param->opad_needed, 0);
898 
899 	chcr_req->sec_cpl.ivgen_hdrlen =
900 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
901 
902 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
903 	       param->alg_prm.result_size);
904 
905 	if (param->opad_needed)
906 		memcpy(chcr_req->key_ctx.key +
907 		       ((param->alg_prm.result_size <= 32) ? 32 :
908 			CHCR_HASH_MAX_DIGEST_SIZE),
909 		       hmacctx->opad, param->alg_prm.result_size);
910 
911 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
912 					    param->alg_prm.mk_size, 0,
913 					    param->opad_needed,
914 					    ((kctx_len +
915 					     sizeof(chcr_req->key_ctx)) >> 4));
916 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
917 
918 	skb_set_transport_header(skb, transhdr_len);
919 	if (param->bfr_len != 0)
920 		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
921 				    param->bfr_len);
922 	if (param->sg_len != 0)
923 		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
924 
925 	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
926 			DUMMY_BYTES);
927 	req_ctx->skb = skb;
928 	skb_get(skb);
929 	return skb;
930 }
931 
932 static int chcr_ahash_update(struct ahash_request *req)
933 {
934 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
935 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
936 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
937 	struct uld_ctx *u_ctx = NULL;
938 	struct sk_buff *skb;
939 	u8 remainder = 0, bs;
940 	unsigned int nbytes = req->nbytes;
941 	struct hash_wr_param params;
942 
943 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
944 
945 	u_ctx = ULD_CTX(ctx);
946 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
947 					    ctx->tx_qidx))) {
948 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
949 			return -EBUSY;
950 	}
951 
952 	if (nbytes + req_ctx->reqlen >= bs) {
953 		remainder = (nbytes + req_ctx->reqlen) % bs;
954 		nbytes = nbytes + req_ctx->reqlen - remainder;
955 	} else {
956 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
957 				   + req_ctx->reqlen, nbytes, 0);
958 		req_ctx->reqlen += nbytes;
959 		return 0;
960 	}
961 
962 	params.opad_needed = 0;
963 	params.more = 1;
964 	params.last = 0;
965 	params.sg_len = nbytes - req_ctx->reqlen;
966 	params.bfr_len = req_ctx->reqlen;
967 	params.scmd1 = 0;
968 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
969 	req_ctx->result = 0;
970 	req_ctx->data_len += params.sg_len + params.bfr_len;
971 	skb = create_hash_wr(req, &params);
972 	if (!skb)
973 		return -ENOMEM;
974 
975 	if (remainder) {
976 		u8 *temp;
977 		/* Swap buffers */
978 		temp = req_ctx->reqbfr;
979 		req_ctx->reqbfr = req_ctx->skbfr;
980 		req_ctx->skbfr = temp;
981 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
982 				   req_ctx->reqbfr, remainder, req->nbytes -
983 				   remainder);
984 	}
985 	req_ctx->reqlen = remainder;
986 	skb->dev = u_ctx->lldi.ports[0];
987 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
988 	chcr_send_wr(skb);
989 
990 	return -EINPROGRESS;
991 }
992 
993 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
994 {
995 	memset(bfr_ptr, 0, bs);
996 	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1001 }
1002 
1003 static int chcr_ahash_final(struct ahash_request *req)
1004 {
1005 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1006 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1007 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1008 	struct hash_wr_param params;
1009 	struct sk_buff *skb;
1010 	struct uld_ctx *u_ctx = NULL;
1011 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1012 
1013 	u_ctx = ULD_CTX(ctx);
1014 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1015 		params.opad_needed = 1;
1016 	else
1017 		params.opad_needed = 0;
1018 	params.sg_len = 0;
1019 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1020 	req_ctx->result = 1;
1021 	params.bfr_len = req_ctx->reqlen;
1022 	req_ctx->data_len += params.bfr_len + params.sg_len;
1023 	if (req_ctx->reqlen == 0) {
1024 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1025 		params.last = 0;
1026 		params.more = 1;
1027 		params.scmd1 = 0;
1028 		params.bfr_len = bs;
1029 
1030 	} else {
1031 		params.scmd1 = req_ctx->data_len;
1032 		params.last = 1;
1033 		params.more = 0;
1034 	}
1035 	skb = create_hash_wr(req, &params);
1036 	if (!skb)
1037 		return -ENOMEM;
1038 
1039 	skb->dev = u_ctx->lldi.ports[0];
1040 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1041 	chcr_send_wr(skb);
1042 	return -EINPROGRESS;
1043 }
1044 
1045 static int chcr_ahash_finup(struct ahash_request *req)
1046 {
1047 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1048 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1049 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1050 	struct uld_ctx *u_ctx = NULL;
1051 	struct sk_buff *skb;
1052 	struct hash_wr_param params;
1053 	u8  bs;
1054 
1055 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1056 	u_ctx = ULD_CTX(ctx);
1057 
1058 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1059 					    ctx->tx_qidx))) {
1060 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1061 			return -EBUSY;
1062 	}
1063 
1064 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1065 		params.opad_needed = 1;
1066 	else
1067 		params.opad_needed = 0;
1068 
1069 	params.sg_len = req->nbytes;
1070 	params.bfr_len = req_ctx->reqlen;
1071 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1072 	req_ctx->data_len += params.bfr_len + params.sg_len;
1073 	req_ctx->result = 1;
1074 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1075 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1076 		params.last = 0;
1077 		params.more = 1;
1078 		params.scmd1 = 0;
1079 		params.bfr_len = bs;
1080 	} else {
1081 		params.scmd1 = req_ctx->data_len;
1082 		params.last = 1;
1083 		params.more = 0;
1084 	}
1085 
1086 	skb = create_hash_wr(req, &params);
1087 	if (!skb)
1088 		return -ENOMEM;
1089 
1090 	skb->dev = u_ctx->lldi.ports[0];
1091 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1092 	chcr_send_wr(skb);
1093 
1094 	return -EINPROGRESS;
1095 }
1096 
1097 static int chcr_ahash_digest(struct ahash_request *req)
1098 {
1099 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1100 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1101 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1102 	struct uld_ctx *u_ctx = NULL;
1103 	struct sk_buff *skb;
1104 	struct hash_wr_param params;
1105 	u8  bs;
1106 
1107 	rtfm->init(req);
1108 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1109 
1110 	u_ctx = ULD_CTX(ctx);
1111 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1112 					    ctx->tx_qidx))) {
1113 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1114 			return -EBUSY;
1115 	}
1116 
1117 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1118 		params.opad_needed = 1;
1119 	else
1120 		params.opad_needed = 0;
1121 
1122 	params.last = 0;
1123 	params.more = 0;
1124 	params.sg_len = req->nbytes;
1125 	params.bfr_len = 0;
1126 	params.scmd1 = 0;
1127 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1128 	req_ctx->result = 1;
1129 	req_ctx->data_len += params.bfr_len + params.sg_len;
1130 
1131 	if (req->nbytes == 0) {
1132 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1133 		params.more = 1;
1134 		params.bfr_len = bs;
1135 	}
1136 
1137 	skb = create_hash_wr(req, &params);
1138 	if (!skb)
1139 		return -ENOMEM;
1140 
1141 	skb->dev = u_ctx->lldi.ports[0];
1142 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
1143 	chcr_send_wr(skb);
1144 	return -EINPROGRESS;
1145 }
1146 
1147 static int chcr_ahash_export(struct ahash_request *areq, void *out)
1148 {
1149 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1150 	struct chcr_ahash_req_ctx *state = out;
1151 
1152 	state->reqlen = req_ctx->reqlen;
1153 	state->data_len = req_ctx->data_len;
1154 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
1158 }
1159 
1160 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1161 {
1162 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1163 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1164 
1165 	req_ctx->reqlen = state->reqlen;
1166 	req_ctx->data_len = state->data_len;
1167 	req_ctx->reqbfr = req_ctx->bfr1;
1168 	req_ctx->skbfr = req_ctx->bfr2;
1169 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1170 	memcpy(req_ctx->partial_hash, state->partial_hash,
1171 	       CHCR_HASH_MAX_DIGEST_SIZE);
1172 	return 0;
1173 }
1174 
1175 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1176 			     unsigned int keylen)
1177 {
1178 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1179 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1180 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1181 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1182 	unsigned int i, err = 0, updated_digestsize;
1183 
1184 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1185 
	/* Use the key to calculate the ipad and opad. ipad will be sent with
	 * the first request's data; opad will be sent with the final hash
	 * result. ipad is stored in hmacctx->ipad and opad in hmacctx->opad.
	 */
1190 	shash->tfm = hmacctx->base_hash;
1191 	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
1192 	if (keylen > bs) {
1193 		err = crypto_shash_digest(shash, key, keylen,
1194 					  hmacctx->ipad);
1195 		if (err)
1196 			goto out;
1197 		keylen = digestsize;
1198 	} else {
1199 		memcpy(hmacctx->ipad, key, keylen);
1200 	}
1201 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
1202 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
1203 
1204 	for (i = 0; i < bs / sizeof(int); i++) {
1205 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1206 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1207 	}
1208 
1209 	updated_digestsize = digestsize;
1210 	if (digestsize == SHA224_DIGEST_SIZE)
1211 		updated_digestsize = SHA256_DIGEST_SIZE;
1212 	else if (digestsize == SHA384_DIGEST_SIZE)
1213 		updated_digestsize = SHA512_DIGEST_SIZE;
1214 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
1215 					hmacctx->ipad, digestsize);
1216 	if (err)
1217 		goto out;
1218 	chcr_change_order(hmacctx->ipad, updated_digestsize);
1219 
1220 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
1221 					hmacctx->opad, digestsize);
1222 	if (err)
1223 		goto out;
1224 	chcr_change_order(hmacctx->opad, updated_digestsize);
1225 out:
1226 	return err;
1227 }
1228 
1229 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1230 			       unsigned int key_len)
1231 {
1232 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1233 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1234 	unsigned short context_size = 0;
1235 
1236 	if ((key_len != (AES_KEYSIZE_128 << 1)) &&
1237 	    (key_len != (AES_KEYSIZE_256 << 1))) {
1238 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
1239 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		ablkctx->enckey_len = 0;
		return -EINVAL;
	}
1244 
1245 	memcpy(ablkctx->key, key, key_len);
1246 	ablkctx->enckey_len = key_len;
1247 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1248 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1249 	ablkctx->key_ctx_hdr =
1250 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1251 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1252 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1253 				 CHCR_KEYCTX_NO_KEY, 1,
1254 				 0, context_size);
1255 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1256 	return 0;
1257 }
1258 
1259 static int chcr_sha_init(struct ahash_request *areq)
1260 {
1261 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1262 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1263 	int digestsize =  crypto_ahash_digestsize(tfm);
1264 
1265 	req_ctx->data_len = 0;
1266 	req_ctx->reqlen = 0;
1267 	req_ctx->reqbfr = req_ctx->bfr1;
1268 	req_ctx->skbfr = req_ctx->bfr2;
1269 	req_ctx->skb = NULL;
1270 	req_ctx->result = 0;
1271 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
1272 	return 0;
1273 }
1274 
1275 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1276 {
1277 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1278 				 sizeof(struct chcr_ahash_req_ctx));
1279 	return chcr_device_init(crypto_tfm_ctx(tfm));
1280 }
1281 
1282 static int chcr_hmac_init(struct ahash_request *areq)
1283 {
1284 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1285 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1286 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1287 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1288 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1289 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1290 
1291 	chcr_sha_init(areq);
1292 	req_ctx->data_len = bs;
1293 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1294 		if (digestsize == SHA224_DIGEST_SIZE)
1295 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1296 			       SHA256_DIGEST_SIZE);
1297 		else if (digestsize == SHA384_DIGEST_SIZE)
1298 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1299 			       SHA512_DIGEST_SIZE);
1300 		else
1301 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1302 			       digestsize);
1303 	}
1304 	return 0;
1305 }
1306 
1307 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1308 {
1309 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1310 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1311 	unsigned int digestsize =
1312 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1313 
1314 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1315 				 sizeof(struct chcr_ahash_req_ctx));
1316 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
1317 	if (IS_ERR(hmacctx->base_hash))
1318 		return PTR_ERR(hmacctx->base_hash);
1319 	return chcr_device_init(crypto_tfm_ctx(tfm));
1320 }
1321 
1322 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
1323 {
1324 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1325 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1326 
1327 	if (hmacctx->base_hash) {
1328 		chcr_free_shash(hmacctx->base_hash);
1329 		hmacctx->base_hash = NULL;
1330 	}
1331 }
1332 
1333 static int chcr_copy_assoc(struct aead_request *req,
1334 				struct chcr_aead_ctx *ctx)
1335 {
1336 	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
1337 
1338 	skcipher_request_set_tfm(skreq, ctx->null);
1339 	skcipher_request_set_callback(skreq, aead_request_flags(req),
1340 			NULL, NULL);
1341 	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
1342 			NULL);
1343 
1344 	return crypto_skcipher_encrypt(skreq);
1345 }
1346 static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
1347 				   int aadmax, int wrlen,
1348 				   unsigned short op_type)
1349 {
1350 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
1351 
1352 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
1353 	    (req->assoclen > aadmax) ||
1354 	    (src_nent > MAX_SKB_FRAGS) ||
1355 	    (wrlen > MAX_WR_SIZE))
1356 		return 1;
1357 	return 0;
1358 }
1359 
1360 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
1361 {
1362 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1363 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1364 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1365 	struct aead_request *subreq = aead_request_ctx(req);
1366 
1367 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
1368 	aead_request_set_callback(subreq, req->base.flags,
1369 				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
1373 	return op_type ? crypto_aead_decrypt(subreq) :
1374 		crypto_aead_encrypt(subreq);
1375 }
1376 
1377 static struct sk_buff *create_authenc_wr(struct aead_request *req,
1378 					 unsigned short qid,
1379 					 int size,
1380 					 unsigned short op_type)
1381 {
1382 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1383 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1384 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1385 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1386 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
1387 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1388 	struct sk_buff *skb = NULL;
1389 	struct chcr_wr *chcr_req;
1390 	struct cpl_rx_phys_dsgl *phys_cpl;
1391 	struct phys_sge_parm sg_param;
1392 	struct scatterlist *src;
1393 	unsigned int frags = 0, transhdr_len;
1394 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
1395 	unsigned int   kctx_len = 0;
1396 	unsigned short stop_offset = 0;
1397 	unsigned int  assoclen = req->assoclen;
1398 	unsigned int  authsize = crypto_aead_authsize(tfm);
1399 	int err = -EINVAL, src_nent;
1400 	int null = 0;
1401 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1402 		GFP_ATOMIC;
1403 
1404 	if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
1405 		goto err;
1406 
1407 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1408 		goto err;
1409 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1410 	if (src_nent < 0)
1411 		goto err;
1412 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1413 	reqctx->dst = src;
1414 
1415 	if (req->src != req->dst) {
1416 		err = chcr_copy_assoc(req, aeadctx);
1417 		if (err)
1418 			return ERR_PTR(err);
1419 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1420 					       req->assoclen);
1421 	}
1422 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
1423 		null = 1;
1424 		assoclen = 0;
1425 	}
1426 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1427 					     (op_type ? -authsize : authsize));
1428 	if (reqctx->dst_nents < 0) {
1429 		pr_err("AUTHENC:Invalid Destination sg entries\n");
1430 		goto err;
1431 	}
1432 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1433 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
1434 		- sizeof(chcr_req->key_ctx);
1435 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1436 	if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
1437 			T6_MAX_AAD_SIZE,
1438 			transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
1439 				op_type)) {
1440 		return ERR_PTR(chcr_aead_fallback(req, op_type));
1441 	}
1442 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1443 	if (!skb)
1444 		goto err;
1445 
1446 	/* LLD is going to write the sge hdr. */
1447 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1448 
1449 	/* Write WR */
1450 	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
1451 	memset(chcr_req, 0, transhdr_len);
1452 
1453 	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
1454 
1455 	/*
1456 	 * Input order	is AAD,IV and Payload. where IV should be included as
1457 	 * the part of authdata. All other fields should be filled according
1458 	 * to the hardware spec
1459 	 */
1460 	chcr_req->sec_cpl.op_ivinsrtofst =
1461 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
1462 				       (ivsize ? (assoclen + 1) : 0));
1463 	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
1464 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1465 					assoclen ? 1 : 0, assoclen,
1466 					assoclen + ivsize + 1,
1467 					(stop_offset & 0x1F0) >> 4);
1468 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
1469 					stop_offset & 0xF,
1470 					null ? 0 : assoclen + ivsize + 1,
1471 					stop_offset, stop_offset);
1472 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1473 					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
1474 					CHCR_SCMD_CIPHER_MODE_AES_CBC,
1475 					actx->auth_mode, aeadctx->hmac_ctrl,
1476 					ivsize >> 1);
1477 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
1478 					 0, 1, dst_size);
1479 
1480 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1481 	if (op_type == CHCR_ENCRYPT_OP)
1482 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
1483 		       aeadctx->enckey_len);
1484 	else
1485 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
1486 		       aeadctx->enckey_len);
1487 
1488 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
1489 					4), actx->h_iopad, kctx_len -
1490 				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
1491 
1492 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1493 	sg_param.nents = reqctx->dst_nents;
1494 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1495 	sg_param.qid = qid;
1496 	sg_param.align = 0;
1497 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1498 				  &sg_param))
1499 		goto dstmap_fail;
1500 
1501 	skb_set_transport_header(skb, transhdr_len);
1502 
1503 	if (assoclen) {
1504 		/* AAD buffer in */
1505 		write_sg_to_skb(skb, &frags, req->src, assoclen);
1506 
1507 	}
1508 	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
1509 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
1510 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
1511 		   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1512 	reqctx->skb = skb;
1513 	skb_get(skb);
1514 
1515 	return skb;
1516 dstmap_fail:
1518 	kfree_skb(skb);
1519 err:
1520 	return ERR_PTR(-EINVAL);
1521 }
1522 
1523 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
1524 {
1525 	__be32 data;
1526 
1527 	memset(block, 0, csize);
1528 	block += csize;
1529 
1530 	if (csize >= 4)
1531 		csize = 4;
1532 	else if (msglen > (unsigned int)(1 << (8 * csize)))
1533 		return -EOVERFLOW;
1534 
1535 	data = cpu_to_be32(msglen);
1536 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1537 
1538 	return 0;
1539 }
1540 
1541 static void generate_b0(struct aead_request *req,
1542 			struct chcr_aead_ctx *aeadctx,
1543 			unsigned short op_type)
1544 {
1545 	unsigned int l, lp, m;
1546 	int rc;
1547 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1548 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1549 	u8 *b0 = reqctx->scratch_pad;
1550 
1551 	m = crypto_aead_authsize(aead);
1552 
1553 	memcpy(b0, reqctx->iv, 16);
1554 
1555 	lp = b0[0];
1556 	l = lp + 1;
1557 
1558 	/* set m, bits 3-5 */
1559 	*b0 |= (8 * ((m - 2) / 2));
1560 
1561 	/* set adata, bit 6, if associated data is used */
1562 	if (req->assoclen)
1563 		*b0 |= 64;
1564 	rc = set_msg_len(b0 + 16 - l,
1565 			 (op_type == CHCR_DECRYPT_OP) ?
1566 			 req->cryptlen - m : req->cryptlen, l);
1567 }
1568 
1569 static inline int crypto_ccm_check_iv(const u8 *iv)
1570 {
1571 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
1572 	if (iv[0] < 1 || iv[0] > 7)
1573 		return -EINVAL;
1574 
1575 	return 0;
1576 }
1577 
1578 static int ccm_format_packet(struct aead_request *req,
1579 			     struct chcr_aead_ctx *aeadctx,
1580 			     unsigned int sub_type,
1581 			     unsigned short op_type)
1582 {
1583 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1584 	int rc = 0;
1585 
1586 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
1587 		reqctx->iv[0] = 3;
1588 		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
1589 		memcpy(reqctx->iv + 4, req->iv, 8);
1590 		memset(reqctx->iv + 12, 0, 4);
1591 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
1592 			htons(req->assoclen - 8);
1593 	} else {
1594 		memcpy(reqctx->iv, req->iv, 16);
1595 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
1596 			htons(req->assoclen);
1597 	}
1598 	generate_b0(req, aeadctx, op_type);
1599 	/* zero the ctr value */
1600 	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
1601 	return rc;
1602 }
1603 
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type,
				  struct chcr_context *chcrctx)
1609 {
1610 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1611 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
1612 	unsigned int ivsize = AES_BLOCK_SIZE;
1613 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
1614 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
1615 	unsigned int c_id = chcrctx->dev->rx_channel_id;
1616 	unsigned int ccm_xtra;
1617 	unsigned char tag_offset = 0, auth_offset = 0;
1618 	unsigned int assoclen;
1619 
1620 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
1621 		assoclen = req->assoclen - 8;
1622 	else
1623 		assoclen = req->assoclen;
1624 	ccm_xtra = CCM_B0_SIZE +
1625 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
1626 
1627 	auth_offset = req->cryptlen ?
1628 		(assoclen + ivsize + 1 + ccm_xtra) : 0;
1629 	if (op_type == CHCR_DECRYPT_OP) {
1630 		if (crypto_aead_authsize(tfm) != req->cryptlen)
1631 			tag_offset = crypto_aead_authsize(tfm);
1632 		else
1633 			auth_offset = 0;
1634 	}
1635 
1636 
1637 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
1638 					 2, (ivsize ?  (assoclen + 1) :  0) +
1639 					 ccm_xtra);
1640 	sec_cpl->pldlen =
1641 		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
	/* For CCM there will always be a B0 block, so AAD start is always 1 */
1643 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1644 					1, assoclen + ccm_xtra, assoclen
1645 					+ ivsize + 1 + ccm_xtra, 0);
1646 
1647 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
1648 					auth_offset, tag_offset,
1649 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
1650 					crypto_aead_authsize(tfm));
1651 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
1652 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
1653 					cipher_mode, mac_mode,
1654 					aeadctx->hmac_ctrl, ivsize >> 1);
1655 
1656 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
1657 					1, dst_size);
1658 }
1659 
static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
{
1665 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
1666 		if (crypto_ccm_check_iv(req->iv)) {
1667 			pr_err("CCM: IV check fails\n");
1668 			return -EINVAL;
1669 		}
1670 	} else {
1671 		if (req->assoclen != 16 && req->assoclen != 20) {
1672 			pr_err("RFC4309: Invalid AAD length %d\n",
1673 			       req->assoclen);
1674 			return -EINVAL;
1675 		}
1676 	}
1677 	if (aeadctx->enckey_len == 0) {
1678 		pr_err("CCM: Encryption key not set\n");
1679 		return -EINVAL;
1680 	}
1681 	return 0;
1682 }
1683 
static unsigned int fill_aead_req_fields(struct sk_buff *skb,
					 struct aead_request *req,
					 struct scatterlist *src,
					 unsigned int ivsize,
					 struct chcr_aead_ctx *aeadctx)
1689 {
1690 	unsigned int frags = 0;
1691 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1692 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	/* B0 block, plus the AAD length field when AAD is present */
1695 	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
1696 				(req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
1697 	if (req->assoclen) {
1698 		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
1699 			write_sg_to_skb(skb, &frags, req->src,
1700 					req->assoclen - 8);
1701 		else
1702 			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
1703 	}
1704 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
1705 	if (req->cryptlen)
1706 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
1707 
1708 	return frags;
1709 }
1710 
1711 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
1712 					  unsigned short qid,
1713 					  int size,
1714 					  unsigned short op_type)
1715 {
1716 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1717 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1718 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1719 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1720 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
1721 	struct sk_buff *skb = NULL;
1722 	struct chcr_wr *chcr_req;
1723 	struct cpl_rx_phys_dsgl *phys_cpl;
1724 	struct phys_sge_parm sg_param;
1725 	struct scatterlist *src;
1726 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
1727 	unsigned int dst_size = 0, kctx_len;
1728 	unsigned int sub_type;
1729 	unsigned int authsize = crypto_aead_authsize(tfm);
1730 	int err = -EINVAL, src_nent;
1731 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;

1735 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1736 		goto err;
1737 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1738 	if (src_nent < 0)
1739 		goto err;
1740 
1741 	sub_type = get_aead_subtype(tfm);
1742 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1743 	reqctx->dst = src;
1744 
1745 	if (req->src != req->dst) {
1746 		err = chcr_copy_assoc(req, aeadctx);
1747 		if (err) {
1748 			pr_err("AAD copy to destination buffer fails\n");
1749 			return ERR_PTR(err);
1750 		}
1751 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1752 					       req->assoclen);
1753 	}
1754 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1755 					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents < 0) {
		pr_err("CCM: Invalid destination sg entries\n");
		goto err;
	}

1762 	if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
1763 		goto err;
1764 
1765 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1766 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
1767 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1768 	if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
1769 			    T6_MAX_AAD_SIZE - 18,
1770 			    transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
1771 			    op_type)) {
1772 		return ERR_PTR(chcr_aead_fallback(req, op_type));
1773 	}
1774 
1775 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
1776 
1777 	if (!skb)
1778 		goto err;
1779 
1780 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1781 
1782 	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
1783 	memset(chcr_req, 0, transhdr_len);
1784 
1785 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
1786 
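	/* CCM needs the AES key twice in the key context (kctx_len above
	 * is twice the rounded-up key length): once for the CTR cipher,
	 * once for the CBC-MAC, hence the duplicate copy below.
	 */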
1787 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1788 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
1789 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
1790 					16), aeadctx->key, aeadctx->enckey_len);
1791 
1792 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1793 	if (ccm_format_packet(req, aeadctx, sub_type, op_type))
1794 		goto dstmap_fail;
1795 
1796 	sg_param.nents = reqctx->dst_nents;
1797 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1798 	sg_param.qid = qid;
1799 	sg_param.align = 0;
1800 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1801 				  &sg_param))
1802 		goto dstmap_fail;
1803 
1804 	skb_set_transport_header(skb, transhdr_len);
1805 	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
1806 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
1807 		    sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1808 	reqctx->skb = skb;
1809 	skb_get(skb);
1810 	return skb;
1811 dstmap_fail:
1812 	kfree_skb(skb);
1813 	skb = NULL;
1814 err:
1815 	return ERR_PTR(-EINVAL);
1816 }
1817 
1818 static struct sk_buff *create_gcm_wr(struct aead_request *req,
1819 				     unsigned short qid,
1820 				     int size,
1821 				     unsigned short op_type)
1822 {
1823 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1824 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1825 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1826 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1827 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
1828 	struct sk_buff *skb = NULL;
1829 	struct chcr_wr *chcr_req;
1830 	struct cpl_rx_phys_dsgl *phys_cpl;
1831 	struct phys_sge_parm sg_param;
1832 	struct scatterlist *src;
1833 	unsigned int frags = 0, transhdr_len;
1834 	unsigned int ivsize = AES_BLOCK_SIZE;
1835 	unsigned int dst_size = 0, kctx_len;
1836 	unsigned char tag_offset = 0;
1837 	unsigned int crypt_len = 0;
1838 	unsigned int authsize = crypto_aead_authsize(tfm);
1839 	int err = -EINVAL, src_nent;
1840 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1841 		GFP_ATOMIC;
1842 
	/* the key must be set before any request is issued */
	if (aeadctx->enckey_len == 0)
1845 		goto err;
1846 
1847 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
1848 		goto err;
1849 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
1850 	if (src_nent < 0)
1851 		goto err;
1852 
1853 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1854 	reqctx->dst = src;
1855 	if (req->src != req->dst) {
1856 		err = chcr_copy_assoc(req, aeadctx);
1857 		if (err)
1858 			return	ERR_PTR(err);
1859 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1860 					       req->assoclen);
1861 	}
1862 
	if (!req->cryptlen)
		/* the hardware does not support a null payload, so
		 * software sends one cipher block instead
		 */
		crypt_len = AES_BLOCK_SIZE;
1868 	else
1869 		crypt_len = req->cryptlen;
1870 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1871 					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents < 0) {
		pr_err("GCM: Invalid destination sg entries\n");
		goto err;
	}

1878 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
1879 	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
1880 		AEAD_H_SIZE;
1881 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
1882 	if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
1883 			    T6_MAX_AAD_SIZE,
1884 			    transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
1885 			    op_type)) {
1886 		return ERR_PTR(chcr_aead_fallback(req, op_type));
1887 	}
1888 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
1889 	if (!skb)
1890 		goto err;
1891 
	/* the NIC driver will write the SGE header into the reserved space */
1893 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
1894 
1895 	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
1896 	memset(chcr_req, 0, transhdr_len);
1897 
1898 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
1899 		req->assoclen -= 8;
1900 
1901 	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
1902 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
1903 					ctx->dev->rx_channel_id, 2, (ivsize ?
1904 					(req->assoclen + 1) : 0));
1905 	chcr_req->sec_cpl.pldlen =
1906 		htonl(req->assoclen + ivsize + req->cryptlen);
1907 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1908 					req->assoclen ? 1 : 0, req->assoclen,
1909 					req->assoclen + ivsize + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
					tag_offset, tag_offset);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, ivsize >> 1);
1919 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
1920 					0, 1, dst_size);
1921 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
1922 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
1923 	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
1924 				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
1925 
	/* prepare a 16-byte IV: SALT | IV | 0x00000001 */
1928 	if (get_aead_subtype(tfm) ==
1929 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
1930 		memcpy(reqctx->iv, aeadctx->salt, 4);
1931 		memcpy(reqctx->iv + 4, req->iv, 8);
1932 	} else {
1933 		memcpy(reqctx->iv, req->iv, 12);
1934 	}
1935 	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
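	/* with a 96-bit IV, GCM's initial counter block J0 is simply
	 * IV || 0x00000001 (NIST SP 800-38D), which is the 16-byte value
	 * assembled here
	 */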
1936 
1937 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
1938 	sg_param.nents = reqctx->dst_nents;
1939 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1940 	sg_param.qid = qid;
1941 	sg_param.align = 0;
1942 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1943 				  &sg_param))
1944 		goto dstmap_fail;
1945 
1946 	skb_set_transport_header(skb, transhdr_len);
1947 
1948 	write_sg_to_skb(skb, &frags, req->src, req->assoclen);
1949 
1950 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
1951 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
1952 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
1953 			sizeof(struct cpl_rx_phys_dsgl) + dst_size);
1954 	reqctx->skb = skb;
1955 	skb_get(skb);
1956 	return skb;
1957 
dstmap_fail:
	kfree_skb(skb);
	skb = NULL;
err:
	return ERR_PTR(-EINVAL);
1964 }
1965 
1966 
1967 
1968 static int chcr_aead_cra_init(struct crypto_aead *tfm)
1969 {
1970 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1971 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1972 	struct aead_alg *alg = crypto_aead_alg(tfm);
1973 
1974 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
1975 					       CRYPTO_ALG_NEED_FALLBACK);
1976 	if  (IS_ERR(aeadctx->sw_cipher))
1977 		return PTR_ERR(aeadctx->sw_cipher);
1978 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
1979 				 sizeof(struct aead_request) +
1980 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
1981 	aeadctx->null = crypto_get_default_null_skcipher();
1982 	if (IS_ERR(aeadctx->null))
1983 		return PTR_ERR(aeadctx->null);
1984 	return chcr_device_init(ctx);
1985 }
1986 
1987 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
1988 {
1989 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
1990 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
1991 
1992 	crypto_put_default_null_skcipher();
1993 	crypto_free_aead(aeadctx->sw_cipher);
1994 }
1995 
1996 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
1997 					unsigned int authsize)
1998 {
1999 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2000 
2001 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
2002 	aeadctx->mayverify = VERIFY_HW;
2003 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2004 }

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
2006 				    unsigned int authsize)
2007 {
2008 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2009 	u32 maxauth = crypto_aead_maxauthsize(tfm);
2010 
	/* For SHA1 the IPsec authsize is 12 rather than maxauthsize / 2
	 * (which would be 10), so the authsize == 12 check must come
	 * before the authsize == (maxauth >> 1) check.
	 */
2015 	if (authsize == ICV_4) {
2016 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2017 		aeadctx->mayverify = VERIFY_HW;
2018 	} else if (authsize == ICV_6) {
2019 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2020 		aeadctx->mayverify = VERIFY_HW;
2021 	} else if (authsize == ICV_10) {
2022 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2023 		aeadctx->mayverify = VERIFY_HW;
2024 	} else if (authsize == ICV_12) {
2025 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2026 		aeadctx->mayverify = VERIFY_HW;
2027 	} else if (authsize == ICV_14) {
2028 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2029 		aeadctx->mayverify = VERIFY_HW;
2030 	} else if (authsize == (maxauth >> 1)) {
2031 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2032 		aeadctx->mayverify = VERIFY_HW;
2033 	} else if (authsize == maxauth) {
2034 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2035 		aeadctx->mayverify = VERIFY_HW;
2036 	} else {
2037 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2038 		aeadctx->mayverify = VERIFY_SW;
2039 	}
2040 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2041 }
2042 
2043 
2044 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2045 {
2046 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2047 
2048 	switch (authsize) {
2049 	case ICV_4:
2050 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2051 		aeadctx->mayverify = VERIFY_HW;
2052 		break;
2053 	case ICV_8:
2054 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2055 		aeadctx->mayverify = VERIFY_HW;
2056 		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
2064 		break;
2065 	case ICV_16:
2066 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2067 		aeadctx->mayverify = VERIFY_HW;
2068 		break;
2069 	case ICV_13:
2070 	case ICV_15:
2071 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2072 		aeadctx->mayverify = VERIFY_SW;
2073 		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2078 		return -EINVAL;
2079 	}
2080 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2081 }
2082 
2083 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2084 					  unsigned int authsize)
2085 {
2086 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2087 
2088 	switch (authsize) {
2089 	case ICV_8:
2090 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2091 		aeadctx->mayverify = VERIFY_HW;
2092 		break;
2093 	case ICV_12:
2094 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2095 		aeadctx->mayverify = VERIFY_HW;
2096 		break;
2097 	case ICV_16:
2098 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2099 		aeadctx->mayverify = VERIFY_HW;
2100 		break;
2101 	default:
2102 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2103 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2104 		return -EINVAL;
2105 	}
2106 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2107 }
2108 
2109 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
2110 				unsigned int authsize)
2111 {
2112 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2113 
2114 	switch (authsize) {
2115 	case ICV_4:
2116 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2117 		aeadctx->mayverify = VERIFY_HW;
2118 		break;
2119 	case ICV_6:
2120 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2121 		aeadctx->mayverify = VERIFY_HW;
2122 		break;
2123 	case ICV_8:
2124 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2125 		aeadctx->mayverify = VERIFY_HW;
2126 		break;
2127 	case ICV_10:
2128 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2129 		aeadctx->mayverify = VERIFY_HW;
2130 		break;
2131 	case ICV_12:
2132 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2133 		aeadctx->mayverify = VERIFY_HW;
2134 		break;
2135 	case ICV_14:
2136 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2137 		aeadctx->mayverify = VERIFY_HW;
2138 		break;
2139 	case ICV_16:
2140 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2141 		aeadctx->mayverify = VERIFY_HW;
2142 		break;
2143 	default:
2144 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2145 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2146 		return -EINVAL;
2147 	}
2148 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2149 }
2150 
2151 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
2152 				const u8 *key,
2153 				unsigned int keylen)
2154 {
2155 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2156 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2157 	unsigned char ck_size, mk_size;
2158 	int key_ctx_size = 0;
2159 
2160 	key_ctx_size = sizeof(struct _key_ctx) +
2161 		((DIV_ROUND_UP(keylen, 16)) << 4)  * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2165 	} else if (keylen == AES_KEYSIZE_192) {
2166 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2167 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2168 	} else if (keylen == AES_KEYSIZE_256) {
2169 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2170 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2171 	} else {
2172 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2173 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2174 		aeadctx->enckey_len = 0;
2175 		return	-EINVAL;
2176 	}
2177 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
2178 						key_ctx_size >> 4);
2179 	memcpy(aeadctx->key, key, keylen);
2180 	aeadctx->enckey_len = keylen;
2181 
2182 	return 0;
2183 }
2184 
2185 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
2186 				const u8 *key,
2187 				unsigned int keylen)
2188 {
2189 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2190 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2191 	int error;
2192 
2193 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2194 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
2195 			      CRYPTO_TFM_REQ_MASK);
2196 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2197 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2198 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2199 			      CRYPTO_TFM_RES_MASK);
2200 	if (error)
2201 		return error;
2202 	return chcr_ccm_common_setkey(aead, key, keylen);
2203 }
2204 
2205 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
2206 				    unsigned int keylen)
2207 {
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2210 
2211 	if (keylen < 3) {
2212 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2213 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2214 		aeadctx->enckey_len = 0;
2215 		return	-EINVAL;
2216 	}
2217 	keylen -= 3;
2218 	memcpy(aeadctx->salt, key + keylen, 3);
2219 	return chcr_ccm_common_setkey(aead, key, keylen);
2220 }
2221 
2222 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2223 			   unsigned int keylen)
2224 {
2225 	struct chcr_context *ctx = crypto_aead_ctx(aead);
2226 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2227 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
2228 	struct crypto_cipher *cipher;
2229 	unsigned int ck_size;
2230 	int ret = 0, key_ctx_size = 0;
2231 
2232 	aeadctx->enckey_len = 0;
2233 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2234 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
2235 			      & CRYPTO_TFM_REQ_MASK);
2236 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2237 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
2238 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
2239 			      CRYPTO_TFM_RES_MASK);
2240 	if (ret)
2241 		goto out;
2242 
2243 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2244 	    keylen > 3) {
2245 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
2246 		memcpy(aeadctx->salt, key + keylen, 4);
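		/* this salt becomes the fixed IV prefix in create_gcm_wr() */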
2247 	}
2248 	if (keylen == AES_KEYSIZE_128) {
2249 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2250 	} else if (keylen == AES_KEYSIZE_192) {
2251 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2252 	} else if (keylen == AES_KEYSIZE_256) {
2253 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2254 	} else {
2255 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
2256 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
2257 		pr_err("GCM: Invalid key length %d\n", keylen);
2258 		ret = -EINVAL;
2259 		goto out;
2260 	}
2261 
2262 	memcpy(aeadctx->key, key, keylen);
2263 	aeadctx->enckey_len = keylen;
2264 	key_ctx_size = sizeof(struct _key_ctx) +
2265 		((DIV_ROUND_UP(keylen, 16)) << 4) +
2266 		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Compute the GHASH subkey H = CIPH(K, 0^128); it is stored in
	 * the key context after the cipher key.
	 */
2274 	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
2275 	if (IS_ERR(cipher)) {
2276 		aeadctx->enckey_len = 0;
2277 		ret = -ENOMEM;
2278 		goto out;
2279 	}
2280 
2281 	ret = crypto_cipher_setkey(cipher, key, keylen);
2282 	if (ret) {
2283 		aeadctx->enckey_len = 0;
2284 		goto out1;
2285 	}
2286 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
2287 	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
2288 
2289 out1:
2290 	crypto_free_cipher(cipher);
2291 out:
2292 	return ret;
2293 }
2294 
2295 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
2296 				   unsigned int keylen)
2297 {
2298 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2299 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2300 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* holds both the authentication key and the cipher key */
2302 	struct crypto_authenc_keys keys;
2303 	unsigned int bs;
2304 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
2305 	int err = 0, i, key_ctx_len = 0;
2306 	unsigned char ck_size = 0;
2307 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
2308 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
2309 	struct algo_param param;
2310 	int align;
2311 	u8 *o_ptr = NULL;
2312 
2313 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2314 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
2315 			      & CRYPTO_TFM_REQ_MASK);
2316 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2317 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
2318 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
2319 			      & CRYPTO_TFM_RES_MASK);
2320 	if (err)
2321 		goto out;
2322 
2323 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2324 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2325 		goto out;
2326 	}
2327 
2328 	if (get_alg_config(&param, max_authsize)) {
2329 		pr_err("chcr : Unsupported digest size\n");
2330 		goto out;
2331 	}
2332 	if (keys.enckeylen == AES_KEYSIZE_128) {
2333 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2334 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2335 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2336 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2337 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2338 	} else {
2339 		pr_err("chcr : Unsupported cipher key\n");
2340 		goto out;
2341 	}
2342 
	/* Copy only the encryption key. The auth key is used below solely
	 * to generate h(ipad) and h(opad), so it is not stored; its length
	 * is expected to match the hash digest size.
	 */
2347 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2348 	aeadctx->enckey_len = keys.enckeylen;
2349 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2350 			    aeadctx->enckey_len << 3);
2351 
	base_hash = chcr_alloc_shash(max_authsize);
2353 	if (IS_ERR(base_hash)) {
2354 		pr_err("chcr : Base driver cannot be loaded\n");
2355 		aeadctx->enckey_len = 0;
2356 		return -EINVAL;
2357 	}
2358 	{
2359 		SHASH_DESC_ON_STACK(shash, base_hash);
2360 		shash->tfm = base_hash;
2361 		shash->flags = crypto_shash_get_flags(base_hash);
2362 		bs = crypto_shash_blocksize(base_hash);
2363 		align = KEYCTX_ALIGN_PAD(max_authsize);
2364 		o_ptr =  actx->h_iopad + param.result_size + align;
2365 
2366 		if (keys.authkeylen > bs) {
2367 			err = crypto_shash_digest(shash, keys.authkey,
2368 						  keys.authkeylen,
2369 						  o_ptr);
			if (err) {
				pr_err("chcr : Hashing of the auth key failed\n");
				goto out;
			}
2374 			keys.authkeylen = max_authsize;
2375 		} else
2376 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
2377 
2378 		/* Compute the ipad-digest*/
2379 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2380 		memcpy(pad, o_ptr, keys.authkeylen);
2381 		for (i = 0; i < bs >> 2; i++)
2382 			*((unsigned int *)pad + i) ^= IPAD_DATA;
2383 
2384 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
2385 					      max_authsize))
2386 			goto out;
2387 		/* Compute the opad-digest */
2388 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
2389 		memcpy(pad, o_ptr, keys.authkeylen);
2390 		for (i = 0; i < bs >> 2; i++)
2391 			*((unsigned int *)pad + i) ^= OPAD_DATA;
2392 
2393 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
2394 			goto out;
2395 
2396 		/* convert the ipad and opad digest to network order */
2397 		chcr_change_order(actx->h_iopad, param.result_size);
2398 		chcr_change_order(o_ptr, param.result_size);
2399 		key_ctx_len = sizeof(struct _key_ctx) +
2400 			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
2401 			(param.result_size + align) * 2;
2402 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
2403 						0, 1, key_ctx_len >> 4);
2404 		actx->auth_mode = param.auth_mode;
2405 		chcr_free_shash(base_hash);
2406 
2407 		return 0;
2408 	}
2409 out:
2410 	aeadctx->enckey_len = 0;
2411 	if (!IS_ERR(base_hash))
2412 		chcr_free_shash(base_hash);
2413 	return -EINVAL;
2414 }
2415 
2416 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
2417 					const u8 *key, unsigned int keylen)
2418 {
2419 	struct chcr_context *ctx = crypto_aead_ctx(authenc);
2420 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2421 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2422 	struct crypto_authenc_keys keys;
2423 	int err;
	/* the key blob holds both the authentication and the cipher key */
2425 	int key_ctx_len = 0;
2426 	unsigned char ck_size = 0;
2427 
2428 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
2429 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
2430 			      & CRYPTO_TFM_REQ_MASK);
2431 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
2432 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
2433 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
2434 			      & CRYPTO_TFM_RES_MASK);
2435 	if (err)
2436 		goto out;
2437 
2438 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
2439 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
2440 		goto out;
2441 	}
2442 	if (keys.enckeylen == AES_KEYSIZE_128) {
2443 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2444 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
2445 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2446 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
2447 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2448 	} else {
2449 		pr_err("chcr : Unsupported cipher key\n");
2450 		goto out;
2451 	}
2452 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
2453 	aeadctx->enckey_len = keys.enckeylen;
2454 	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
2455 				    aeadctx->enckey_len << 3);
2456 	key_ctx_len =  sizeof(struct _key_ctx)
2457 		+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
2458 
2459 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
2460 						0, key_ctx_len >> 4);
2461 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
2462 	return 0;
2463 out:
2464 	aeadctx->enckey_len = 0;
2465 	return -EINVAL;
2466 }

static int chcr_aead_encrypt(struct aead_request *req)
2468 {
2469 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2470 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2471 
2472 	reqctx->verify = VERIFY_HW;
2473 
2474 	switch (get_aead_subtype(tfm)) {
2475 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
2476 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
2477 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2478 				    create_authenc_wr);
2479 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
2480 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
2481 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2482 				    create_aead_ccm_wr);
2483 	default:
2484 		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
2485 				    create_gcm_wr);
2486 	}
2487 }
2488 
2489 static int chcr_aead_decrypt(struct aead_request *req)
2490 {
2491 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2492 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
2493 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2494 	int size;
2495 
2496 	if (aeadctx->mayverify == VERIFY_SW) {
2497 		size = crypto_aead_maxauthsize(tfm);
2498 		reqctx->verify = VERIFY_SW;
2499 	} else {
2500 		size = 0;
2501 		reqctx->verify = VERIFY_HW;
2502 	}
2503 
2504 	switch (get_aead_subtype(tfm)) {
2505 	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
2506 	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
2507 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2508 				    create_authenc_wr);
2509 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
2510 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
2511 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2512 				    create_aead_ccm_wr);
2513 	default:
2514 		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
2515 				    create_gcm_wr);
2516 	}
2517 }
2518 
2519 static int chcr_aead_op(struct aead_request *req,
2520 			  unsigned short op_type,
2521 			  int size,
2522 			  create_wr_t create_wr_fn)
2523 {
2524 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2525 	struct chcr_context *ctx = crypto_aead_ctx(tfm);
2526 	struct uld_ctx *u_ctx;
2527 	struct sk_buff *skb;
2528 
2529 	if (!ctx->dev) {
2530 		pr_err("chcr : %s : No crypto device.\n", __func__);
2531 		return -ENXIO;
2532 	}
2533 	u_ctx = ULD_CTX(ctx);
2534 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
2535 				   ctx->tx_qidx)) {
2536 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
2537 			return -EBUSY;
2538 	}
2539 
2540 	/* Form a WR from req */
2541 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
2542 			   op_type);
2543 
	if (IS_ERR_OR_NULL(skb))
		return skb ? PTR_ERR(skb) : -ENOMEM;
2546 
2547 	skb->dev = u_ctx->lldi.ports[0];
2548 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
2549 	chcr_send_wr(skb);
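	/* completion is reported asynchronously via the WR response */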
2550 	return -EINPROGRESS;
2551 }

static struct chcr_alg_template driver_algs[] = {
2553 	/* AES-CBC */
2554 	{
2555 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2556 		.is_registered = 0,
2557 		.alg.crypto = {
2558 			.cra_name		= "cbc(aes)",
2559 			.cra_driver_name	= "cbc-aes-chcr",
2560 			.cra_priority		= CHCR_CRA_PRIORITY,
2561 			.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
2562 				CRYPTO_ALG_ASYNC,
2563 			.cra_blocksize		= AES_BLOCK_SIZE,
2564 			.cra_ctxsize		= sizeof(struct chcr_context)
2565 				+ sizeof(struct ablk_ctx),
2566 			.cra_alignmask		= 0,
2567 			.cra_type		= &crypto_ablkcipher_type,
2568 			.cra_module		= THIS_MODULE,
2569 			.cra_init		= chcr_cra_init,
2570 			.cra_exit		= NULL,
2571 			.cra_u.ablkcipher	= {
2572 				.min_keysize	= AES_MIN_KEY_SIZE,
2573 				.max_keysize	= AES_MAX_KEY_SIZE,
2574 				.ivsize		= AES_BLOCK_SIZE,
2575 				.setkey			= chcr_aes_cbc_setkey,
2576 				.encrypt		= chcr_aes_encrypt,
2577 				.decrypt		= chcr_aes_decrypt,
2578 			}
2579 		}
2580 	},
2581 	{
2582 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2583 		.is_registered = 0,
2584 		.alg.crypto =   {
2585 			.cra_name		= "xts(aes)",
2586 			.cra_driver_name	= "xts-aes-chcr",
2587 			.cra_priority		= CHCR_CRA_PRIORITY,
2588 			.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
2589 				CRYPTO_ALG_ASYNC,
2590 			.cra_blocksize		= AES_BLOCK_SIZE,
2591 			.cra_ctxsize		= sizeof(struct chcr_context) +
2592 				sizeof(struct ablk_ctx),
2593 			.cra_alignmask		= 0,
2594 			.cra_type		= &crypto_ablkcipher_type,
2595 			.cra_module		= THIS_MODULE,
2596 			.cra_init		= chcr_cra_init,
2597 			.cra_exit		= NULL,
2598 			.cra_u = {
2599 				.ablkcipher = {
2600 					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
2601 					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
2602 					.ivsize		= AES_BLOCK_SIZE,
2603 					.setkey		= chcr_aes_xts_setkey,
2604 					.encrypt	= chcr_aes_encrypt,
2605 					.decrypt	= chcr_aes_decrypt,
2606 				}
2607 			}
2608 		}
2609 	},
2610 	/* SHA */
2611 	{
2612 		.type = CRYPTO_ALG_TYPE_AHASH,
2613 		.is_registered = 0,
2614 		.alg.hash = {
2615 			.halg.digestsize = SHA1_DIGEST_SIZE,
2616 			.halg.base = {
2617 				.cra_name = "sha1",
2618 				.cra_driver_name = "sha1-chcr",
2619 				.cra_blocksize = SHA1_BLOCK_SIZE,
2620 			}
2621 		}
2622 	},
2623 	{
2624 		.type = CRYPTO_ALG_TYPE_AHASH,
2625 		.is_registered = 0,
2626 		.alg.hash = {
2627 			.halg.digestsize = SHA256_DIGEST_SIZE,
2628 			.halg.base = {
2629 				.cra_name = "sha256",
2630 				.cra_driver_name = "sha256-chcr",
2631 				.cra_blocksize = SHA256_BLOCK_SIZE,
2632 			}
2633 		}
2634 	},
2635 	{
2636 		.type = CRYPTO_ALG_TYPE_AHASH,
2637 		.is_registered = 0,
2638 		.alg.hash = {
2639 			.halg.digestsize = SHA224_DIGEST_SIZE,
2640 			.halg.base = {
2641 				.cra_name = "sha224",
2642 				.cra_driver_name = "sha224-chcr",
2643 				.cra_blocksize = SHA224_BLOCK_SIZE,
2644 			}
2645 		}
2646 	},
2647 	{
2648 		.type = CRYPTO_ALG_TYPE_AHASH,
2649 		.is_registered = 0,
2650 		.alg.hash = {
2651 			.halg.digestsize = SHA384_DIGEST_SIZE,
2652 			.halg.base = {
2653 				.cra_name = "sha384",
2654 				.cra_driver_name = "sha384-chcr",
2655 				.cra_blocksize = SHA384_BLOCK_SIZE,
2656 			}
2657 		}
2658 	},
2659 	{
2660 		.type = CRYPTO_ALG_TYPE_AHASH,
2661 		.is_registered = 0,
2662 		.alg.hash = {
2663 			.halg.digestsize = SHA512_DIGEST_SIZE,
2664 			.halg.base = {
2665 				.cra_name = "sha512",
2666 				.cra_driver_name = "sha512-chcr",
2667 				.cra_blocksize = SHA512_BLOCK_SIZE,
2668 			}
2669 		}
2670 	},
2671 	/* HMAC */
2672 	{
2673 		.type = CRYPTO_ALG_TYPE_HMAC,
2674 		.is_registered = 0,
2675 		.alg.hash = {
2676 			.halg.digestsize = SHA1_DIGEST_SIZE,
2677 			.halg.base = {
2678 				.cra_name = "hmac(sha1)",
2679 				.cra_driver_name = "hmac-sha1-chcr",
2680 				.cra_blocksize = SHA1_BLOCK_SIZE,
2681 			}
2682 		}
2683 	},
2684 	{
2685 		.type = CRYPTO_ALG_TYPE_HMAC,
2686 		.is_registered = 0,
2687 		.alg.hash = {
2688 			.halg.digestsize = SHA224_DIGEST_SIZE,
2689 			.halg.base = {
2690 				.cra_name = "hmac(sha224)",
2691 				.cra_driver_name = "hmac-sha224-chcr",
2692 				.cra_blocksize = SHA224_BLOCK_SIZE,
2693 			}
2694 		}
2695 	},
2696 	{
2697 		.type = CRYPTO_ALG_TYPE_HMAC,
2698 		.is_registered = 0,
2699 		.alg.hash = {
2700 			.halg.digestsize = SHA256_DIGEST_SIZE,
2701 			.halg.base = {
2702 				.cra_name = "hmac(sha256)",
2703 				.cra_driver_name = "hmac-sha256-chcr",
2704 				.cra_blocksize = SHA256_BLOCK_SIZE,
2705 			}
2706 		}
2707 	},
2708 	{
2709 		.type = CRYPTO_ALG_TYPE_HMAC,
2710 		.is_registered = 0,
2711 		.alg.hash = {
2712 			.halg.digestsize = SHA384_DIGEST_SIZE,
2713 			.halg.base = {
2714 				.cra_name = "hmac(sha384)",
2715 				.cra_driver_name = "hmac-sha384-chcr",
2716 				.cra_blocksize = SHA384_BLOCK_SIZE,
2717 			}
2718 		}
2719 	},
2720 	{
2721 		.type = CRYPTO_ALG_TYPE_HMAC,
2722 		.is_registered = 0,
2723 		.alg.hash = {
2724 			.halg.digestsize = SHA512_DIGEST_SIZE,
2725 			.halg.base = {
2726 				.cra_name = "hmac(sha512)",
2727 				.cra_driver_name = "hmac-sha512-chcr",
2728 				.cra_blocksize = SHA512_BLOCK_SIZE,
2729 			}
2730 		}
2731 	},
2732 	/* Add AEAD Algorithms */
2733 	{
2734 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
2735 		.is_registered = 0,
2736 		.alg.aead = {
2737 			.base = {
2738 				.cra_name = "gcm(aes)",
2739 				.cra_driver_name = "gcm-aes-chcr",
2740 				.cra_blocksize	= 1,
2741 				.cra_priority = CHCR_AEAD_PRIORITY,
2742 				.cra_ctxsize =	sizeof(struct chcr_context) +
2743 						sizeof(struct chcr_aead_ctx) +
2744 						sizeof(struct chcr_gcm_ctx),
2745 			},
2746 			.ivsize = 12,
2747 			.maxauthsize = GHASH_DIGEST_SIZE,
2748 			.setkey = chcr_gcm_setkey,
2749 			.setauthsize = chcr_gcm_setauthsize,
2750 		}
2751 	},
2752 	{
2753 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
2754 		.is_registered = 0,
2755 		.alg.aead = {
2756 			.base = {
2757 				.cra_name = "rfc4106(gcm(aes))",
2758 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
2759 				.cra_blocksize	 = 1,
2760 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
2761 				.cra_ctxsize =	sizeof(struct chcr_context) +
2762 						sizeof(struct chcr_aead_ctx) +
2763 						sizeof(struct chcr_gcm_ctx),
2764 
2765 			},
2766 			.ivsize = 8,
2767 			.maxauthsize	= GHASH_DIGEST_SIZE,
2768 			.setkey = chcr_gcm_setkey,
2769 			.setauthsize	= chcr_4106_4309_setauthsize,
2770 		}
2771 	},
2772 	{
2773 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
2774 		.is_registered = 0,
2775 		.alg.aead = {
2776 			.base = {
2777 				.cra_name = "ccm(aes)",
2778 				.cra_driver_name = "ccm-aes-chcr",
2779 				.cra_blocksize	 = 1,
2780 				.cra_priority = CHCR_AEAD_PRIORITY,
2781 				.cra_ctxsize =	sizeof(struct chcr_context) +
2782 						sizeof(struct chcr_aead_ctx),
2783 
2784 			},
2785 			.ivsize = AES_BLOCK_SIZE,
2786 			.maxauthsize	= GHASH_DIGEST_SIZE,
2787 			.setkey = chcr_aead_ccm_setkey,
2788 			.setauthsize	= chcr_ccm_setauthsize,
2789 		}
2790 	},
2791 	{
2792 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
2793 		.is_registered = 0,
2794 		.alg.aead = {
2795 			.base = {
2796 				.cra_name = "rfc4309(ccm(aes))",
2797 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
2798 				.cra_blocksize	 = 1,
2799 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
2800 				.cra_ctxsize =	sizeof(struct chcr_context) +
2801 						sizeof(struct chcr_aead_ctx),
2802 
2803 			},
2804 			.ivsize = 8,
2805 			.maxauthsize	= GHASH_DIGEST_SIZE,
2806 			.setkey = chcr_aead_rfc4309_setkey,
2807 			.setauthsize = chcr_4106_4309_setauthsize,
2808 		}
2809 	},
2810 	{
2811 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2812 		.is_registered = 0,
2813 		.alg.aead = {
2814 			.base = {
2815 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2816 				.cra_driver_name =
2817 					"authenc-hmac-sha1-cbc-aes-chcr",
2818 				.cra_blocksize	 = AES_BLOCK_SIZE,
2819 				.cra_priority = CHCR_AEAD_PRIORITY,
2820 				.cra_ctxsize =	sizeof(struct chcr_context) +
2821 						sizeof(struct chcr_aead_ctx) +
2822 						sizeof(struct chcr_authenc_ctx),
2823 
2824 			},
2825 			.ivsize = AES_BLOCK_SIZE,
2826 			.maxauthsize = SHA1_DIGEST_SIZE,
2827 			.setkey = chcr_authenc_setkey,
2828 			.setauthsize = chcr_authenc_setauthsize,
2829 		}
2830 	},
2831 	{
2832 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2833 		.is_registered = 0,
2834 		.alg.aead = {
2835 			.base = {
2836 
2837 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2838 				.cra_driver_name =
2839 					"authenc-hmac-sha256-cbc-aes-chcr",
2840 				.cra_blocksize	 = AES_BLOCK_SIZE,
2841 				.cra_priority = CHCR_AEAD_PRIORITY,
2842 				.cra_ctxsize =	sizeof(struct chcr_context) +
2843 						sizeof(struct chcr_aead_ctx) +
2844 						sizeof(struct chcr_authenc_ctx),
2845 
2846 			},
2847 			.ivsize = AES_BLOCK_SIZE,
2848 			.maxauthsize	= SHA256_DIGEST_SIZE,
2849 			.setkey = chcr_authenc_setkey,
2850 			.setauthsize = chcr_authenc_setauthsize,
2851 		}
2852 	},
2853 	{
2854 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2855 		.is_registered = 0,
2856 		.alg.aead = {
2857 			.base = {
2858 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2859 				.cra_driver_name =
2860 					"authenc-hmac-sha224-cbc-aes-chcr",
2861 				.cra_blocksize	 = AES_BLOCK_SIZE,
2862 				.cra_priority = CHCR_AEAD_PRIORITY,
2863 				.cra_ctxsize =	sizeof(struct chcr_context) +
2864 						sizeof(struct chcr_aead_ctx) +
2865 						sizeof(struct chcr_authenc_ctx),
2866 			},
2867 			.ivsize = AES_BLOCK_SIZE,
2868 			.maxauthsize = SHA224_DIGEST_SIZE,
2869 			.setkey = chcr_authenc_setkey,
2870 			.setauthsize = chcr_authenc_setauthsize,
2871 		}
2872 	},
2873 	{
2874 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2875 		.is_registered = 0,
2876 		.alg.aead = {
2877 			.base = {
2878 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2879 				.cra_driver_name =
2880 					"authenc-hmac-sha384-cbc-aes-chcr",
2881 				.cra_blocksize	 = AES_BLOCK_SIZE,
2882 				.cra_priority = CHCR_AEAD_PRIORITY,
2883 				.cra_ctxsize =	sizeof(struct chcr_context) +
2884 						sizeof(struct chcr_aead_ctx) +
2885 						sizeof(struct chcr_authenc_ctx),
2886 
2887 			},
2888 			.ivsize = AES_BLOCK_SIZE,
2889 			.maxauthsize = SHA384_DIGEST_SIZE,
2890 			.setkey = chcr_authenc_setkey,
2891 			.setauthsize = chcr_authenc_setauthsize,
2892 		}
2893 	},
2894 	{
2895 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
2896 		.is_registered = 0,
2897 		.alg.aead = {
2898 			.base = {
2899 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2900 				.cra_driver_name =
2901 					"authenc-hmac-sha512-cbc-aes-chcr",
2902 				.cra_blocksize	 = AES_BLOCK_SIZE,
2903 				.cra_priority = CHCR_AEAD_PRIORITY,
2904 				.cra_ctxsize =	sizeof(struct chcr_context) +
2905 						sizeof(struct chcr_aead_ctx) +
2906 						sizeof(struct chcr_authenc_ctx),
2907 
2908 			},
2909 			.ivsize = AES_BLOCK_SIZE,
2910 			.maxauthsize = SHA512_DIGEST_SIZE,
2911 			.setkey = chcr_authenc_setkey,
2912 			.setauthsize = chcr_authenc_setauthsize,
2913 		}
2914 	},
2915 	{
2916 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
2917 		.is_registered = 0,
2918 		.alg.aead = {
2919 			.base = {
2920 				.cra_name = "authenc(digest_null,cbc(aes))",
2921 				.cra_driver_name =
2922 					"authenc-digest_null-cbc-aes-chcr",
2923 				.cra_blocksize	 = AES_BLOCK_SIZE,
2924 				.cra_priority = CHCR_AEAD_PRIORITY,
2925 				.cra_ctxsize =	sizeof(struct chcr_context) +
2926 						sizeof(struct chcr_aead_ctx) +
2927 						sizeof(struct chcr_authenc_ctx),
2928 
2929 			},
2930 			.ivsize  = AES_BLOCK_SIZE,
2931 			.maxauthsize = 0,
2932 			.setkey  = chcr_aead_digest_null_setkey,
2933 			.setauthsize = chcr_authenc_null_setauthsize,
2934 		}
2935 	},
2936 };
2937 
2938 /*
2939  *	chcr_unregister_alg - Deregister crypto algorithms with
2940  *	kernel framework.
2941  */
2942 static int chcr_unregister_alg(void)
2943 {
2944 	int i;
2945 
2946 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2947 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
2948 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2949 			if (driver_algs[i].is_registered)
2950 				crypto_unregister_alg(
2951 						&driver_algs[i].alg.crypto);
2952 			break;
2953 		case CRYPTO_ALG_TYPE_AEAD:
2954 			if (driver_algs[i].is_registered)
2955 				crypto_unregister_aead(
2956 						&driver_algs[i].alg.aead);
2957 			break;
2958 		case CRYPTO_ALG_TYPE_AHASH:
2959 			if (driver_algs[i].is_registered)
2960 				crypto_unregister_ahash(
2961 						&driver_algs[i].alg.hash);
2962 			break;
2963 		}
2964 		driver_algs[i].is_registered = 0;
2965 	}
2966 	return 0;
2967 }
2968 
2969 #define SZ_AHASH_CTX sizeof(struct chcr_context)
2970 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
2971 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
2972 #define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
2973 
2974 /*
2975  *	chcr_register_alg - Register crypto algorithms with kernel framework.
2976  */
2977 static int chcr_register_alg(void)
2978 {
2979 	struct crypto_alg ai;
2980 	struct ahash_alg *a_hash;
2981 	int err = 0, i;
2982 	char *name = NULL;
2983 
2984 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2985 		if (driver_algs[i].is_registered)
2986 			continue;
2987 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
2988 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2989 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
2990 			name = driver_algs[i].alg.crypto.cra_driver_name;
2991 			break;
2992 		case CRYPTO_ALG_TYPE_AEAD:
2993 			driver_algs[i].alg.aead.base.cra_flags =
2994 				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
2995 				CRYPTO_ALG_NEED_FALLBACK;
2996 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
2997 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
2998 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
2999 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
3000 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
3001 			err = crypto_register_aead(&driver_algs[i].alg.aead);
3002 			name = driver_algs[i].alg.aead.base.cra_driver_name;
3003 			break;
3004 		case CRYPTO_ALG_TYPE_AHASH:
3005 			a_hash = &driver_algs[i].alg.hash;
3006 			a_hash->update = chcr_ahash_update;
3007 			a_hash->final = chcr_ahash_final;
3008 			a_hash->finup = chcr_ahash_finup;
3009 			a_hash->digest = chcr_ahash_digest;
3010 			a_hash->export = chcr_ahash_export;
3011 			a_hash->import = chcr_ahash_import;
3012 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
3013 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
3014 			a_hash->halg.base.cra_module = THIS_MODULE;
3015 			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
3016 			a_hash->halg.base.cra_alignmask = 0;
3017 			a_hash->halg.base.cra_exit = NULL;
3018 			a_hash->halg.base.cra_type = &crypto_ahash_type;
3019 
3020 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
3021 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
3022 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
3023 				a_hash->init = chcr_hmac_init;
3024 				a_hash->setkey = chcr_ahash_setkey;
3025 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
3026 			} else {
3027 				a_hash->init = chcr_sha_init;
3028 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
3029 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
3030 			}
3031 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
3032 			ai = driver_algs[i].alg.hash.halg.base;
3033 			name = ai.cra_driver_name;
3034 			break;
3035 		}
3036 		if (err) {
3037 			pr_err("chcr : %s : Algorithm registration failed\n",
3038 			       name);
3039 			goto register_err;
3040 		} else {
3041 			driver_algs[i].is_registered = 1;
3042 		}
3043 	}
3044 	return 0;
3045 
3046 register_err:
3047 	chcr_unregister_alg();
3048 	return err;
3049 }
3050 
3051 /*
3052  *	start_crypto - Register the crypto algorithms.
3053  *	This should called once when the first device comesup. After this
3054  *	kernel will start calling driver APIs for crypto operations.
3055  */
3056 int start_crypto(void)
3057 {
3058 	return chcr_register_alg();
3059 }
3060 
3061 /*
3062  *	stop_crypto - Deregister all the crypto algorithms with kernel.
3063  *	This should be called once when the last device goes down. After this
3064  *	kernel will not call the driver API for crypto operations.
3065  */
3066 int stop_crypto(void)
3067 {
3068 	chcr_unregister_alg();
3069 	return 0;
3070 }
3071