xref: /openbmc/linux/drivers/crypto/chelsio/chcr_algo.c (revision c51d39010a1bccc9c1294e2d7c00005aefeb2b5c)
1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/sha.h>
57 #include <crypto/internal/hash.h>
58 
59 #include "t4fw_api.h"
60 #include "t4_msg.h"
61 #include "chcr_core.h"
62 #include "chcr_algo.h"
63 #include "chcr_crypto.h"
64 
65 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
66 {
67 	return ctx->crypto_ctx->ablkctx;
68 }
69 
70 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
71 {
72 	return ctx->crypto_ctx->hmacctx;
73 }
74 
75 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
76 {
77 	return ctx->dev->u_ctx;
78 }
79 
80 static inline int is_ofld_imm(const struct sk_buff *skb)
81 {
82 	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
83 }
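/*
 * Note (illustrative, not part of the original source): requests whose skb
 * fits within CRYPTO_MAX_IMM_TX_PKT_LEN are carried as immediate data inside
 * the work request itself; larger requests are described by a gather list
 * instead (see how create_wreq() uses this predicate below).
 */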
84 
85 /*
86  *	sgl_len - calculates the size of an SGL of the given capacity
87  *	@n: the number of SGL entries
88  *	Calculates the number of flits needed for a scatter/gather list that
89  *	can hold the given number of entries.
90  */
91 static inline unsigned int sgl_len(unsigned int n)
92 {
93 	n--;
94 	return (3 * n) / 2 + (n & 1) + 2;
95 }
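/*
 * Illustrative arithmetic (not in the original source): sgl_len(1) = 2 flits
 * (the ULPTX_SGL command word plus the first address/length pair),
 * sgl_len(4) = (3 * 3) / 2 + (3 & 1) + 2 = 7 flits, and sgl_len(5) = 8
 * flits, i.e. each further pair of entries costs three more flits.
 */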
96 
97 /*
98  *	chcr_handle_resp - handle completion: copy back results and unmap the
 *	DMA buffers associated with the request
99  *	@req: crypto request
100  */
101 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
102 		     int error_status)
103 {
104 	struct crypto_tfm *tfm = req->tfm;
105 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
106 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
107 	struct chcr_req_ctx ctx_req;
108 	struct cpl_fw6_pld *fw6_pld;
109 	unsigned int digestsize, updated_digestsize;
110 
111 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
112 	case CRYPTO_ALG_TYPE_BLKCIPHER:
113 		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
114 		ctx_req.ctx.ablk_ctx =
115 			ablkcipher_request_ctx(ctx_req.req.ablk_req);
116 		if (!error_status) {
117 			fw6_pld = (struct cpl_fw6_pld *)input;
118 			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
119 			       AES_BLOCK_SIZE);
120 		}
121 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
122 			     ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
123 		if (ctx_req.ctx.ablk_ctx->skb) {
124 			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
125 			ctx_req.ctx.ablk_ctx->skb = NULL;
126 		}
127 		break;
128 
129 	case CRYPTO_ALG_TYPE_AHASH:
130 		ctx_req.req.ahash_req = (struct ahash_request *)req;
131 		ctx_req.ctx.ahash_ctx =
132 			ahash_request_ctx(ctx_req.req.ahash_req);
133 		digestsize =
134 			crypto_ahash_digestsize(crypto_ahash_reqtfm(
135 							ctx_req.req.ahash_req));
136 		updated_digestsize = digestsize;
137 		if (digestsize == SHA224_DIGEST_SIZE)
138 			updated_digestsize = SHA256_DIGEST_SIZE;
139 		else if (digestsize == SHA384_DIGEST_SIZE)
140 			updated_digestsize = SHA512_DIGEST_SIZE;
141 		if (ctx_req.ctx.ahash_ctx->skb)
142 			ctx_req.ctx.ahash_ctx->skb = NULL;
143 		if (ctx_req.ctx.ahash_ctx->result == 1) {
144 			ctx_req.ctx.ahash_ctx->result = 0;
145 			memcpy(ctx_req.req.ahash_req->result, input +
146 			       sizeof(struct cpl_fw6_pld),
147 			       digestsize);
148 		} else {
149 			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
150 			       sizeof(struct cpl_fw6_pld),
151 			       updated_digestsize);
152 		}
153 		kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
154 		ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
155 		break;
156 	}
157 	return 0;
158 }
159 
160 /*
161  *	calc_tx_flits_ofld - calculate # of flits for an offload packet
162  *	@skb: the packet
163  *	Returns the number of flits needed for the given offload packet.
164  *	These packets are already fully constructed and no additional headers
165  *	will be added.
166  */
167 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
168 {
169 	unsigned int flits, cnt;
170 
171 	if (is_ofld_imm(skb))
172 		return DIV_ROUND_UP(skb->len, 8);
173 
174 	flits = skb_transport_offset(skb) / 8;   /* headers */
175 	cnt = skb_shinfo(skb)->nr_frags;
176 	if (skb_tail_pointer(skb) != skb_transport_header(skb))
177 		cnt++;
178 	return flits + sgl_len(cnt);
179 }
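/*
 * Worked example (illustrative): an immediate packet of skb->len = 100 bytes
 * needs DIV_ROUND_UP(100, 8) = 13 flits.  A non-immediate skb with 40 bytes
 * of headers (5 flits), 4 page fragments and a non-empty linear tail has
 * cnt = 5, so it needs 5 + sgl_len(5) = 5 + 8 = 13 flits as well.
 */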
180 
181 static struct shash_desc *chcr_alloc_shash(unsigned int ds)
182 {
183 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
184 	struct shash_desc *desc;
185 
186 	switch (ds) {
187 	case SHA1_DIGEST_SIZE:
188 		base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
189 		break;
190 	case SHA224_DIGEST_SIZE:
191 		base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
192 		break;
193 	case SHA256_DIGEST_SIZE:
194 		base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
195 		break;
196 	case SHA384_DIGEST_SIZE:
197 		base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
198 		break;
199 	case SHA512_DIGEST_SIZE:
200 		base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
201 		break;
202 	}
203 	if (IS_ERR(base_hash)) {
204 		pr_err("Cannot allocate sha-generic algo.\n");
205 		return (void *)base_hash;
206 	}
207 
208 	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
209 		       GFP_KERNEL);
210 	if (!desc)
211 		return ERR_PTR(-ENOMEM);
212 	desc->tfm = base_hash;
213 	desc->flags = crypto_shash_get_flags(base_hash);
214 	return desc;
215 }
216 
217 static int chcr_compute_partial_hash(struct shash_desc *desc,
218 				     char *iopad, char *result_hash,
219 				     int digest_size)
220 {
221 	struct sha1_state sha1_st;
222 	struct sha256_state sha256_st;
223 	struct sha512_state sha512_st;
224 	int error;
225 
226 	if (digest_size == SHA1_DIGEST_SIZE) {
227 		error = crypto_shash_init(desc) ?:
228 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
229 			crypto_shash_export(desc, (void *)&sha1_st);
230 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
231 	} else if (digest_size == SHA224_DIGEST_SIZE) {
232 		error = crypto_shash_init(desc) ?:
233 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
234 			crypto_shash_export(desc, (void *)&sha256_st);
235 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
236 
237 	} else if (digest_size == SHA256_DIGEST_SIZE) {
238 		error = crypto_shash_init(desc) ?:
239 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
240 			crypto_shash_export(desc, (void *)&sha256_st);
241 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
242 
243 	} else if (digest_size == SHA384_DIGEST_SIZE) {
244 		error = crypto_shash_init(desc) ?:
245 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
246 			crypto_shash_export(desc, (void *)&sha512_st);
247 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
248 
249 	} else if (digest_size == SHA512_DIGEST_SIZE) {
250 		error = crypto_shash_init(desc) ?:
251 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
252 			crypto_shash_export(desc, (void *)&sha512_st);
253 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
254 	} else {
255 		error = -EINVAL;
256 		pr_err("Unknown digest size %d\n", digest_size);
257 	}
258 	return error;
259 }
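/*
 * Note (illustrative): this is the usual HMAC precomputation step.  The
 * block-sized ipad/opad buffer is pushed through a single compression of the
 * software hash, and the exported internal state words (not a finalized
 * digest) are what later get loaded into the hardware so it can continue the
 * hash from that point.
 */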
260 
261 static void chcr_change_order(char *buf, int ds)
262 {
263 	int i;
264 
265 	if (ds == SHA512_DIGEST_SIZE) {
266 		for (i = 0; i < (ds / sizeof(u64)); i++)
267 			*((__be64 *)buf + i) =
268 				cpu_to_be64(*((u64 *)buf + i));
269 	} else {
270 		for (i = 0; i < (ds / sizeof(u32)); i++)
271 			*((__be32 *)buf + i) =
272 				cpu_to_be32(*((u32 *)buf + i));
273 	}
274 }
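/*
 * Illustration: the partial hash words exported by the software shash are
 * host-endian, while the key context expects them big-endian.  On a
 * little-endian host the SHA-256 initial word 0x6a09e667 is stored as
 * 67 e6 09 6a and is rewritten in place to 6a 09 e6 67 here.
 */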
275 
276 static inline int is_hmac(struct crypto_tfm *tfm)
277 {
278 	struct crypto_alg *alg = tfm->__crt_alg;
279 	struct chcr_alg_template *chcr_crypto_alg =
280 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
281 			     alg.hash);
282 	if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
283 	    CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
284 		return 1;
285 	return 0;
286 }
287 
288 static inline unsigned int ch_nents(struct scatterlist *sg,
289 				    unsigned int *total_size)
290 {
291 	unsigned int nents;
292 
293 	for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
294 		nents++;
295 		*total_size += sg->length;
296 	}
297 	return nents;
298 }
299 
300 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
301 			   struct scatterlist *sg,
302 			   struct phys_sge_parm *sg_param)
303 {
304 	struct phys_sge_pairs *to;
305 	unsigned int out_buf_size = sg_param->obsize;
306 	unsigned int nents = sg_param->nents, i, j, tot_len = 0;
307 
308 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
309 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
310 	phys_cpl->pcirlxorder_to_noofsgentr =
311 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
312 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
313 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
314 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
315 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
316 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
317 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
318 	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
319 	phys_cpl->rss_hdr_int.hash_val = 0;
320 	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
321 				       sizeof(struct cpl_rx_phys_dsgl));
322 
323 	for (i = 0; nents; to++) {
324 		for (j = i; (nents && (j < (8 + i))); j++, nents--) {
325 			to->len[j] = htons(sg->length);
326 			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
327 			if (out_buf_size) {
328 				if (tot_len + sg_dma_len(sg) >= out_buf_size) {
329 					to->len[j] = htons(out_buf_size -
330 							   tot_len);
331 					return;
332 				}
333 				tot_len += sg_dma_len(sg);
334 			}
335 			sg = sg_next(sg);
336 		}
337 	}
338 }
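/*
 * Illustrative note: each phys_sge_pairs block following the CPL_RX_PHYS_DSGL
 * holds up to eight (length, address) pairs; the loop above fills one block
 * at a time and clamps the final length so that at most sg_param->obsize
 * bytes are written back by the hardware.
 */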
339 
340 static inline unsigned
341 int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
342 			 struct scatterlist *sg, struct phys_sge_parm *sg_param)
343 {
344 	if (!sg || !sg_param->nents)
345 		return 0;
346 
347 	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
348 	if (sg_param->nents == 0) {
349 		pr_err("CHCR : DMA mapping failed\n");
350 		return -EINVAL;
351 	}
352 	write_phys_cpl(phys_cpl, sg, sg_param);
353 	return 0;
354 }
355 
356 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
357 {
358 	struct crypto_alg *alg = tfm->__crt_alg;
359 	struct chcr_alg_template *chcr_crypto_alg =
360 		container_of(alg, struct chcr_alg_template, alg.crypto);
361 
362 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
363 }
364 
365 static inline void
366 write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
367 			struct scatterlist *sg, unsigned int count)
368 {
369 	struct page *spage;
370 	unsigned int page_len;
371 
372 	skb->len += count;
373 	skb->data_len += count;
374 	skb->truesize += count;
375 	while (count > 0) {
376 		if (sg && (!(sg->length)))
377 			break;
378 		spage = sg_page(sg);
379 		get_page(spage);
380 		page_len = min(sg->length, count);
381 		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
382 		(*frags)++;
383 		count -= page_len;
384 		sg = sg_next(sg);
385 	}
386 }
387 
388 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
389 			       struct _key_ctx *key_ctx)
390 {
391 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
392 		get_aes_decrypt_key(key_ctx->key, ablkctx->key,
393 				    ablkctx->enckey_len << 3);
394 		memset(key_ctx->key + ablkctx->enckey_len, 0,
395 		       CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
396 	} else {
397 		memcpy(key_ctx->key,
398 		       ablkctx->key + (ablkctx->enckey_len >> 1),
399 		       ablkctx->enckey_len >> 1);
400 		get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
401 				    ablkctx->key, ablkctx->enckey_len << 2);
402 	}
403 	return 0;
404 }
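/*
 * Note (interpretation of the code above): for decryption the key copied into
 * the key context is the "reverse round key" form produced by
 * get_aes_decrypt_key().  In the CBC case the whole key is converted; in the
 * XTS case the second key half (the tweak key) is copied first and the
 * reverse-round form of the first (data) half follows it.
 */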
405 
406 static inline void create_wreq(struct chcr_context *ctx,
407 			       struct fw_crypto_lookaside_wr *wreq,
408 			       void *req, struct sk_buff *skb,
409 			       int kctx_len, int hash_sz,
410 			       unsigned int phys_dsgl)
411 {
412 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
413 	struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
414 	struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
415 	int iv_loc = IV_DSGL;
416 	int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
417 	unsigned int immdatalen = 0, nr_frags = 0;
418 
419 	if (is_ofld_imm(skb)) {
420 		immdatalen = skb->data_len;
421 		iv_loc = IV_IMMEDIATE;
422 	} else {
423 		nr_frags = skb_shinfo(skb)->nr_frags;
424 	}
425 
426 	wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
427 						     (kctx_len >> 4));
428 	wreq->pld_size_hash_size =
429 		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
430 		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
431 	wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
432 				    (calc_tx_flits_ofld(skb) * 8), 16)));
433 	wreq->cookie = cpu_to_be64((uintptr_t)req);
434 	wreq->rx_chid_to_rx_q_id =
435 		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
436 				(hash_sz) ? IV_NOP : iv_loc);
437 
438 	ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
439 	ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
440 					 16) - ((sizeof(*wreq)) >> 4)));
441 
442 	sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
443 	sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
444 				  ((hash_sz) ? DUMMY_BYTES :
445 				  (sizeof(struct cpl_rx_phys_dsgl) +
446 				   phys_dsgl)) + immdatalen);
447 }
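/*
 * Rough layout of a finished work request skb as built by the callers of
 * create_wreq() (illustrative, derived from the code rather than a spec):
 *
 *   fw_crypto_lookaside_wr | ulp_txpkt | ulptx_idata | cpl_tx_sec_pdu |
 *   key context | CPL_RX_PHYS_DSGL + DSGL entries (cipher only)
 *
 * create_wreq() itself only fills in the first three headers, deriving their
 * length fields from the rest of the skb that is already in place.
 */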
448 
449 /**
450  *	create_cipher_wr - form the WR for cipher operations
451  *	@req_base: async request embedded in the cipher request.
452  *	@ctx: crypto driver context of the request.
453  *	@qid: ingress qid where response of this WR should be received.
454  *	@op_type:	encryption or decryption
455  */
456 static struct sk_buff
457 *create_cipher_wr(struct crypto_async_request *req_base,
458 		  struct chcr_context *ctx, unsigned short qid,
459 		  unsigned short op_type)
460 {
461 	struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
462 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
463 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
464 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
465 	struct sk_buff *skb = NULL;
466 	struct _key_ctx *key_ctx;
467 	struct fw_crypto_lookaside_wr *wreq;
468 	struct cpl_tx_sec_pdu *sec_cpl;
469 	struct cpl_rx_phys_dsgl *phys_cpl;
470 	struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
471 	struct phys_sge_parm sg_param;
472 	unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
473 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
474 
475 	if (!req->info)
476 		return ERR_PTR(-EINVAL);
477 	ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
478 	ablkctx->enc = op_type;
479 
480 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
481 	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
482 		return ERR_PTR(-EINVAL);
483 
484 	phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
485 
486 	kctx_len = sizeof(*key_ctx) +
487 		(DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
488 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
489 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
490 			GFP_ATOMIC);
491 	if (!skb)
492 		return ERR_PTR(-ENOMEM);
493 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
494 	wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
495 
496 	sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
497 	sec_cpl->op_ivinsrtofst =
498 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
499 
500 	sec_cpl->pldlen = htonl(ivsize + req->nbytes);
501 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
502 								ivsize + 1, 0);
503 
504 	sec_cpl->cipherstop_lo_authinsert =  FILL_SEC_CPL_AUTHINSERT(0, 0,
505 								     0, 0);
506 	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
507 							 ablkctx->ciph_mode,
508 							 0, 0, ivsize >> 1, 1);
509 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
510 							  0, 1, phys_dsgl);
511 
512 	key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
513 	key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
514 	if (op_type == CHCR_DECRYPT_OP) {
515 		if (generate_copy_rrkey(ablkctx, key_ctx))
516 			goto map_fail1;
517 	} else {
518 		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
519 			memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
520 		} else {
521 			memcpy(key_ctx->key, ablkctx->key +
522 			       (ablkctx->enckey_len >> 1),
523 			       ablkctx->enckey_len >> 1);
524 			memcpy(key_ctx->key +
525 			       (ablkctx->enckey_len >> 1),
526 			       ablkctx->key,
527 			       ablkctx->enckey_len >> 1);
528 		}
529 	}
530 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
531 
532 	memcpy(ablkctx->iv, req->info, ivsize);
533 	sg_init_table(&ablkctx->iv_sg, 1);
534 	sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
535 	sg_param.nents = ablkctx->dst_nents;
536 	sg_param.obsize = dst_bufsize;
537 	sg_param.qid = qid;
538 	sg_param.align = 1;
539 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
540 				 &sg_param))
541 		goto map_fail1;
542 
543 	skb_set_transport_header(skb, transhdr_len);
544 	write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
545 	write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
546 	create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
547 	req_ctx->skb = skb;
548 	skb_get(skb);
549 	return skb;
550 map_fail1:
551 	kfree_skb(skb);
552 	return ERR_PTR(-ENOMEM);
553 }
554 
555 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
556 			       unsigned int keylen)
557 {
558 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
559 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
560 	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
561 	unsigned int ck_size, context_size;
562 	u16 alignment = 0;
563 
564 	if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
565 		goto badkey_err;
566 
567 	memcpy(ablkctx->key, key, keylen);
568 	ablkctx->enckey_len = keylen;
569 	if (keylen == AES_KEYSIZE_128) {
570 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
571 	} else if (keylen == AES_KEYSIZE_192) {
572 		alignment = 8;
573 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
574 	} else if (keylen == AES_KEYSIZE_256) {
575 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
576 	} else {
577 		goto badkey_err;
578 	}
579 
580 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
581 			keylen + alignment) >> 4;
582 
583 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
584 						0, 0, context_size);
585 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
586 	return 0;
587 badkey_err:
588 	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
589 	ablkctx->enckey_len = 0;
590 	return -EINVAL;
591 }
592 
593 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
594 {
595 	struct adapter *adap = netdev2adap(dev);
596 	struct sge_uld_txq_info *txq_info =
597 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
598 	struct sge_uld_txq *txq;
599 	int ret = 0;
600 
601 	local_bh_disable();
602 	txq = &txq_info->uldtxq[idx];
603 	spin_lock(&txq->sendq.lock);
604 	if (txq->full)
605 		ret = -1;
606 	spin_unlock(&txq->sendq.lock);
607 	local_bh_enable();
608 	return ret;
609 }
610 
611 static int chcr_aes_encrypt(struct ablkcipher_request *req)
612 {
613 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
614 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
615 	struct crypto_async_request *req_base = &req->base;
616 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
617 	struct sk_buff *skb;
618 
619 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
620 					    ctx->tx_channel_id))) {
621 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
622 			return -EBUSY;
623 	}
624 
625 	skb = create_cipher_wr(req_base, ctx,
626 			       u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
627 			       CHCR_ENCRYPT_OP);
628 	if (IS_ERR(skb)) {
629 		pr_err("chcr : %s : Failed to form WR\n", __func__);
630 		return  PTR_ERR(skb);
631 	}
632 	skb->dev = u_ctx->lldi.ports[0];
633 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
634 	chcr_send_wr(skb);
635 	return -EINPROGRESS;
636 }
637 
638 static int chcr_aes_decrypt(struct ablkcipher_request *req)
639 {
640 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
641 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
642 	struct crypto_async_request *req_base = &req->base;
643 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
644 	struct sk_buff *skb;
645 
646 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
647 					    ctx->tx_channel_id))) {
648 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
649 			return -EBUSY;
650 	}
651 
652 	skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
653 			       CHCR_DECRYPT_OP);
654 	if (IS_ERR(skb)) {
655 		pr_err("chcr : %s : Failed to form WR\n", __func__);
656 		return PTR_ERR(skb);
657 	}
658 	skb->dev = u_ctx->lldi.ports[0];
659 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
660 	chcr_send_wr(skb);
661 	return -EINPROGRESS;
662 }
663 
664 static int chcr_device_init(struct chcr_context *ctx)
665 {
666 	struct uld_ctx *u_ctx;
667 	unsigned int id;
668 	int err = 0, rxq_perchan, rxq_idx;
669 
670 	id = smp_processor_id();
671 	if (!ctx->dev) {
672 		err = assign_chcr_device(&ctx->dev);
673 		if (err) {
674 			pr_err("chcr device assignment fails\n");
675 			goto out;
676 		}
677 		u_ctx = ULD_CTX(ctx);
678 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
679 		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
680 		rxq_idx += id % rxq_perchan;
681 		spin_lock(&ctx->dev->lock_chcr_dev);
682 		ctx->tx_channel_id = rxq_idx;
683 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
684 		spin_unlock(&ctx->dev->lock_chcr_dev);
685 	}
686 out:
687 	return err;
688 }
689 
690 static int chcr_cra_init(struct crypto_tfm *tfm)
691 {
692 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
693 	return chcr_device_init(crypto_tfm_ctx(tfm));
694 }
695 
696 static int get_alg_config(struct algo_param *params,
697 			  unsigned int auth_size)
698 {
699 	switch (auth_size) {
700 	case SHA1_DIGEST_SIZE:
701 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
702 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
703 		params->result_size = SHA1_DIGEST_SIZE;
704 		break;
705 	case SHA224_DIGEST_SIZE:
706 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
707 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
708 		params->result_size = SHA256_DIGEST_SIZE;
709 		break;
710 	case SHA256_DIGEST_SIZE:
711 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
712 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
713 		params->result_size = SHA256_DIGEST_SIZE;
714 		break;
715 	case SHA384_DIGEST_SIZE:
716 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
717 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
718 		params->result_size = SHA512_DIGEST_SIZE;
719 		break;
720 	case SHA512_DIGEST_SIZE:
721 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
722 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
723 		params->result_size = SHA512_DIGEST_SIZE;
724 		break;
725 	default:
726 		pr_err("chcr : ERROR, unsupported digest size\n");
727 		return -EINVAL;
728 	}
729 	return 0;
730 }
731 
732 static inline int
733 write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
734 			    struct sk_buff *skb, unsigned int *frags, char *bfr,
735 			    u8 bfr_len)
736 {
737 	void *page_ptr = NULL;
738 
739 	skb->len += bfr_len;
740 	skb->data_len += bfr_len;
741 	skb->truesize += bfr_len;
742 	page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
743 	if (!page_ptr)
744 		return -ENOMEM;
745 	get_page(virt_to_page(page_ptr));
746 	req_ctx->dummy_payload_ptr = page_ptr;
747 	memcpy(page_ptr, bfr, bfr_len);
748 	skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
749 			   offset_in_page(page_ptr), bfr_len);
750 	(*frags)++;
751 	return 0;
752 }
753 
754 /**
755  *	create_final_hash_wr - Create hash work request
756  *	@req: hash request
757  */
758 static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
759 					    struct hash_wr_param *param)
760 {
761 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
762 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
763 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
764 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
765 	struct sk_buff *skb = NULL;
766 	struct _key_ctx *key_ctx;
767 	struct fw_crypto_lookaside_wr *wreq;
768 	struct cpl_tx_sec_pdu *sec_cpl;
769 	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
770 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
771 	unsigned int kctx_len = sizeof(*key_ctx);
772 	u8 hash_size_in_response = 0;
773 
774 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
775 	kctx_len += param->alg_prm.result_size + iopad_alignment;
776 	if (param->opad_needed)
777 		kctx_len += param->alg_prm.result_size + iopad_alignment;
778 
779 	if (req_ctx->result)
780 		hash_size_in_response = digestsize;
781 	else
782 		hash_size_in_response = param->alg_prm.result_size;
783 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
784 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
785 			GFP_ATOMIC);
786 	if (!skb)
787 		return skb;
788 
789 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
790 	wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
791 	memset(wreq, 0, transhdr_len);
792 
793 	sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
794 	sec_cpl->op_ivinsrtofst =
795 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
796 	sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
797 
798 	sec_cpl->aadstart_cipherstop_hi =
799 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
800 	sec_cpl->cipherstop_lo_authinsert =
801 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
802 	sec_cpl->seqno_numivs =
803 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
804 					 param->opad_needed, 0, 0);
805 
806 	sec_cpl->ivgen_hdrlen =
807 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
808 
809 	key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
810 	memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
811 
812 	if (param->opad_needed)
813 		memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
814 				       CHCR_HASH_MAX_DIGEST_SIZE),
815 		       hmacctx->opad, param->alg_prm.result_size);
816 
817 	key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
818 					    param->alg_prm.mk_size, 0,
819 					    param->opad_needed,
820 					    (kctx_len >> 4));
821 	sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
822 
823 	skb_set_transport_header(skb, transhdr_len);
824 	if (param->bfr_len != 0)
825 		write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
826 					    param->bfr_len);
827 	if (param->sg_len != 0)
828 		write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
829 
830 	create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
831 		    0);
832 	req_ctx->skb = skb;
833 	skb_get(skb);
834 	return skb;
835 }
836 
837 static int chcr_ahash_update(struct ahash_request *req)
838 {
839 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
840 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
841 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
842 	struct uld_ctx *u_ctx = NULL;
843 	struct sk_buff *skb;
844 	u8 remainder = 0, bs;
845 	unsigned int nbytes = req->nbytes;
846 	struct hash_wr_param params;
847 
848 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
849 
850 	u_ctx = ULD_CTX(ctx);
851 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
852 					    ctx->tx_channel_id))) {
853 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
854 			return -EBUSY;
855 	}
856 
857 	if (nbytes + req_ctx->bfr_len >= bs) {
858 		remainder = (nbytes + req_ctx->bfr_len) % bs;
859 		nbytes = nbytes + req_ctx->bfr_len - remainder;
860 	} else {
861 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
862 				   req_ctx->bfr_len, nbytes, 0);
863 		req_ctx->bfr_len += nbytes;
864 		return 0;
865 	}
866 
867 	params.opad_needed = 0;
868 	params.more = 1;
869 	params.last = 0;
870 	params.sg_len = nbytes - req_ctx->bfr_len;
871 	params.bfr_len = req_ctx->bfr_len;
872 	params.scmd1 = 0;
873 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
874 	req_ctx->result = 0;
875 	req_ctx->data_len += params.sg_len + params.bfr_len;
876 	skb = create_final_hash_wr(req, &params);
877 	if (!skb)
878 		return -ENOMEM;
879 
880 	req_ctx->bfr_len = remainder;
881 	if (remainder)
882 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
883 				   req_ctx->bfr, remainder, req->nbytes -
884 				   remainder);
885 	skb->dev = u_ctx->lldi.ports[0];
886 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
887 	chcr_send_wr(skb);
888 
889 	return -EINPROGRESS;
890 }
891 
892 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
893 {
894 	memset(bfr_ptr, 0, bs);
895 	*bfr_ptr = 0x80;
896 	if (bs == 64)
897 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
898 	else
899 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
900 }
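/*
 * Illustration: this hand-builds the standard Merkle-Damgard final block: a
 * 0x80 terminator byte, zero padding, and the total message length in bits
 * in the last eight bytes of the block (offset 56 for 64-byte blocks,
 * offset 120 for 128-byte blocks).  scmd1 << 3 converts bytes to bits.
 */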
901 
902 static int chcr_ahash_final(struct ahash_request *req)
903 {
904 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
905 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
906 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
907 	struct hash_wr_param params;
908 	struct sk_buff *skb;
909 	struct uld_ctx *u_ctx = NULL;
910 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
911 
912 	u_ctx = ULD_CTX(ctx);
913 	if (is_hmac(crypto_ahash_tfm(rtfm)))
914 		params.opad_needed = 1;
915 	else
916 		params.opad_needed = 0;
917 	params.sg_len = 0;
918 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
919 	req_ctx->result = 1;
920 	params.bfr_len = req_ctx->bfr_len;
921 	req_ctx->data_len += params.bfr_len + params.sg_len;
922 	if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
923 		create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
924 		params.last = 0;
925 		params.more = 1;
926 		params.scmd1 = 0;
927 		params.bfr_len = bs;
928 
929 	} else {
930 		params.scmd1 = req_ctx->data_len;
931 		params.last = 1;
932 		params.more = 0;
933 	}
934 	skb = create_final_hash_wr(req, &params);
	if (!skb)
		return -ENOMEM;
935 	skb->dev = u_ctx->lldi.ports[0];
936 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
937 	chcr_send_wr(skb);
938 	return -EINPROGRESS;
939 }
940 
941 static int chcr_ahash_finup(struct ahash_request *req)
942 {
943 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
944 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
945 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
946 	struct uld_ctx *u_ctx = NULL;
947 	struct sk_buff *skb;
948 	struct hash_wr_param params;
949 	u8  bs;
950 
951 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
952 	u_ctx = ULD_CTX(ctx);
953 
954 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
955 					    ctx->tx_channel_id))) {
956 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
957 			return -EBUSY;
958 	}
959 
960 	if (is_hmac(crypto_ahash_tfm(rtfm)))
961 		params.opad_needed = 1;
962 	else
963 		params.opad_needed = 0;
964 
965 	params.sg_len = req->nbytes;
966 	params.bfr_len = req_ctx->bfr_len;
967 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
968 	req_ctx->data_len += params.bfr_len + params.sg_len;
969 	req_ctx->result = 1;
970 	if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
971 		create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
972 		params.last = 0;
973 		params.more = 1;
974 		params.scmd1 = 0;
975 		params.bfr_len = bs;
976 	} else {
977 		params.scmd1 = req_ctx->data_len;
978 		params.last = 1;
979 		params.more = 0;
980 	}
981 
982 	skb = create_final_hash_wr(req, &params);
983 	if (!skb)
984 		return -ENOMEM;
985 	skb->dev = u_ctx->lldi.ports[0];
986 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
987 	chcr_send_wr(skb);
988 
989 	return -EINPROGRESS;
990 }
991 
992 static int chcr_ahash_digest(struct ahash_request *req)
993 {
994 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
995 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
996 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
997 	struct uld_ctx *u_ctx = NULL;
998 	struct sk_buff *skb;
999 	struct hash_wr_param params;
1000 	u8  bs;
1001 
1002 	rtfm->init(req);
1003 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1004 
1005 	u_ctx = ULD_CTX(ctx);
1006 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1007 					    ctx->tx_channel_id))) {
1008 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1009 			return -EBUSY;
1010 	}
1011 
1012 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1013 		params.opad_needed = 1;
1014 	else
1015 		params.opad_needed = 0;
1016 
1017 	params.last = 0;
1018 	params.more = 0;
1019 	params.sg_len = req->nbytes;
1020 	params.bfr_len = 0;
1021 	params.scmd1 = 0;
1022 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1023 	req_ctx->result = 1;
1024 	req_ctx->data_len += params.bfr_len + params.sg_len;
1025 
1026 	if (req_ctx->bfr && req->nbytes == 0) {
1027 		create_last_hash_block(req_ctx->bfr, bs, 0);
1028 		params.more = 1;
1029 		params.bfr_len = bs;
1030 	}
1031 
1032 	skb = create_final_hash_wr(req, &params);
1033 	if (!skb)
1034 		return -ENOMEM;
1035 
1036 	skb->dev = u_ctx->lldi.ports[0];
1037 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
1038 	chcr_send_wr(skb);
1039 	return -EINPROGRESS;
1040 }
1041 
1042 static int chcr_ahash_export(struct ahash_request *areq, void *out)
1043 {
1044 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1045 	struct chcr_ahash_req_ctx *state = out;
1046 
1047 	state->bfr_len = req_ctx->bfr_len;
1048 	state->data_len = req_ctx->data_len;
1049 	memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
1050 	memcpy(state->partial_hash, req_ctx->partial_hash,
1051 	       CHCR_HASH_MAX_DIGEST_SIZE);
1052 	return 0;
1053 }
1054 
1055 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1056 {
1057 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1058 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1059 
1060 	req_ctx->bfr_len = state->bfr_len;
1061 	req_ctx->data_len = state->data_len;
1062 	req_ctx->dummy_payload_ptr = NULL;
1063 	memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
1064 	memcpy(req_ctx->partial_hash, state->partial_hash,
1065 	       CHCR_HASH_MAX_DIGEST_SIZE);
1066 	return 0;
1067 }
1068 
1069 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1070 			     unsigned int keylen)
1071 {
1072 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1073 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1074 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1075 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1076 	unsigned int i, err = 0, updated_digestsize;
1077 
1078 	/*
1079 	 * use the key to calculate the ipad and opad. ipad will be sent with
1080 	 * the first request's data. opad will be sent with the final hash
1081 	 * result. ipad is kept in hmacctx->ipad and opad in hmacctx->opad.
1082 	 */
1083 	if (!hmacctx->desc)
1084 		return -EINVAL;
1085 	if (keylen > bs) {
1086 		err = crypto_shash_digest(hmacctx->desc, key, keylen,
1087 					  hmacctx->ipad);
1088 		if (err)
1089 			goto out;
1090 		keylen = digestsize;
1091 	} else {
1092 		memcpy(hmacctx->ipad, key, keylen);
1093 	}
1094 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
1095 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
1096 
1097 	for (i = 0; i < bs / sizeof(int); i++) {
1098 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1099 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1100 	}
1101 
1102 	updated_digestsize = digestsize;
1103 	if (digestsize == SHA224_DIGEST_SIZE)
1104 		updated_digestsize = SHA256_DIGEST_SIZE;
1105 	else if (digestsize == SHA384_DIGEST_SIZE)
1106 		updated_digestsize = SHA512_DIGEST_SIZE;
1107 	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
1108 					hmacctx->ipad, digestsize);
1109 	if (err)
1110 		goto out;
1111 	chcr_change_order(hmacctx->ipad, updated_digestsize);
1112 
1113 	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
1114 					hmacctx->opad, digestsize);
1115 	if (err)
1116 		goto out;
1117 	chcr_change_order(hmacctx->opad, updated_digestsize);
1118 out:
1119 	return err;
1120 }
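/*
 * Illustrative summary of the key schedule above, which follows the usual
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)) construction:
 *
 *	K' = H(K) if K is longer than a block, else K zero-padded to a block
 *	ipad = K' ^ 0x36..36, opad = K' ^ 0x5c..5c
 *
 * Only the first compression of each pad is precomputed here; the hardware
 * continues the inner hash from the ipad state and finishes with the opad
 * state on the final request.
 */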
1121 
1122 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1123 			       unsigned int key_len)
1124 {
1125 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
1126 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1127 	int status = 0;
1128 	unsigned short context_size = 0;
1129 
1130 	if ((key_len == (AES_KEYSIZE_128 << 1)) ||
1131 	    (key_len == (AES_KEYSIZE_256 << 1))) {
1132 		memcpy(ablkctx->key, key, key_len);
1133 		ablkctx->enckey_len = key_len;
1134 		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1135 		ablkctx->key_ctx_hdr =
1136 			FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1137 					 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1138 					 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1139 					 CHCR_KEYCTX_NO_KEY, 1,
1140 					 0, context_size);
1141 		ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1142 	} else {
1143 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
1144 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
1145 		ablkctx->enckey_len = 0;
1146 		status = -EINVAL;
1147 	}
1148 	return status;
1149 }
1150 
1151 static int chcr_sha_init(struct ahash_request *areq)
1152 {
1153 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1154 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1155 	int digestsize =  crypto_ahash_digestsize(tfm);
1156 
1157 	req_ctx->data_len = 0;
1158 	req_ctx->dummy_payload_ptr = NULL;
1159 	req_ctx->bfr_len = 0;
1160 	req_ctx->skb = NULL;
1161 	req_ctx->result = 0;
1162 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
1163 	return 0;
1164 }
1165 
1166 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1167 {
1168 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1169 				 sizeof(struct chcr_ahash_req_ctx));
1170 	return chcr_device_init(crypto_tfm_ctx(tfm));
1171 }
1172 
1173 static int chcr_hmac_init(struct ahash_request *areq)
1174 {
1175 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1176 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1177 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
1178 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1179 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1180 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1181 
1182 	chcr_sha_init(areq);
1183 	req_ctx->data_len = bs;
1184 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1185 		if (digestsize == SHA224_DIGEST_SIZE)
1186 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1187 			       SHA256_DIGEST_SIZE);
1188 		else if (digestsize == SHA384_DIGEST_SIZE)
1189 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1190 			       SHA512_DIGEST_SIZE);
1191 		else
1192 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
1193 			       digestsize);
1194 	}
1195 	return 0;
1196 }
1197 
1198 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1199 {
1200 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1201 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1202 	unsigned int digestsize =
1203 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1204 
1205 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1206 				 sizeof(struct chcr_ahash_req_ctx));
1207 	hmacctx->desc = chcr_alloc_shash(digestsize);
1208 	if (IS_ERR(hmacctx->desc))
1209 		return PTR_ERR(hmacctx->desc);
1210 	return chcr_device_init(crypto_tfm_ctx(tfm));
1211 }
1212 
1213 static void chcr_free_shash(struct shash_desc *desc)
1214 {
1215 	crypto_free_shash(desc->tfm);
1216 	kfree(desc);
1217 }
1218 
1219 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
1220 {
1221 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1222 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1223 
1224 	if (hmacctx->desc) {
1225 		chcr_free_shash(hmacctx->desc);
1226 		hmacctx->desc = NULL;
1227 	}
1228 }
1229 
1230 static struct chcr_alg_template driver_algs[] = {
1231 	/* AES-CBC */
1232 	{
1233 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1234 		.is_registered = 0,
1235 		.alg.crypto = {
1236 			.cra_name		= "cbc(aes)",
1237 			.cra_driver_name	= "cbc(aes-chcr)",
1238 			.cra_priority		= CHCR_CRA_PRIORITY,
1239 			.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
1240 				CRYPTO_ALG_ASYNC,
1241 			.cra_blocksize		= AES_BLOCK_SIZE,
1242 			.cra_ctxsize		= sizeof(struct chcr_context)
1243 				+ sizeof(struct ablk_ctx),
1244 			.cra_alignmask		= 0,
1245 			.cra_type		= &crypto_ablkcipher_type,
1246 			.cra_module		= THIS_MODULE,
1247 			.cra_init		= chcr_cra_init,
1248 			.cra_exit		= NULL,
1249 			.cra_u.ablkcipher	= {
1250 				.min_keysize	= AES_MIN_KEY_SIZE,
1251 				.max_keysize	= AES_MAX_KEY_SIZE,
1252 				.ivsize		= AES_BLOCK_SIZE,
1253 				.setkey			= chcr_aes_cbc_setkey,
1254 				.encrypt		= chcr_aes_encrypt,
1255 				.decrypt		= chcr_aes_decrypt,
1256 			}
1257 		}
1258 	},
1259 	{
1260 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
1261 		.is_registered = 0,
1262 		.alg.crypto =   {
1263 			.cra_name		= "xts(aes)",
1264 			.cra_driver_name	= "xts(aes-chcr)",
1265 			.cra_priority		= CHCR_CRA_PRIORITY,
1266 			.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
1267 				CRYPTO_ALG_ASYNC,
1268 			.cra_blocksize		= AES_BLOCK_SIZE,
1269 			.cra_ctxsize		= sizeof(struct chcr_context) +
1270 				sizeof(struct ablk_ctx),
1271 			.cra_alignmask		= 0,
1272 			.cra_type		= &crypto_ablkcipher_type,
1273 			.cra_module		= THIS_MODULE,
1274 			.cra_init		= chcr_cra_init,
1275 			.cra_exit		= NULL,
1276 			.cra_u = {
1277 				.ablkcipher = {
1278 					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
1279 					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
1280 					.ivsize		= AES_BLOCK_SIZE,
1281 					.setkey		= chcr_aes_xts_setkey,
1282 					.encrypt	= chcr_aes_encrypt,
1283 					.decrypt	= chcr_aes_decrypt,
1284 				}
1285 			}
1286 		}
1287 	},
1288 	/* SHA */
1289 	{
1290 		.type = CRYPTO_ALG_TYPE_AHASH,
1291 		.is_registered = 0,
1292 		.alg.hash = {
1293 			.halg.digestsize = SHA1_DIGEST_SIZE,
1294 			.halg.base = {
1295 				.cra_name = "sha1",
1296 				.cra_driver_name = "sha1-chcr",
1297 				.cra_blocksize = SHA1_BLOCK_SIZE,
1298 			}
1299 		}
1300 	},
1301 	{
1302 		.type = CRYPTO_ALG_TYPE_AHASH,
1303 		.is_registered = 0,
1304 		.alg.hash = {
1305 			.halg.digestsize = SHA256_DIGEST_SIZE,
1306 			.halg.base = {
1307 				.cra_name = "sha256",
1308 				.cra_driver_name = "sha256-chcr",
1309 				.cra_blocksize = SHA256_BLOCK_SIZE,
1310 			}
1311 		}
1312 	},
1313 	{
1314 		.type = CRYPTO_ALG_TYPE_AHASH,
1315 		.is_registered = 0,
1316 		.alg.hash = {
1317 			.halg.digestsize = SHA224_DIGEST_SIZE,
1318 			.halg.base = {
1319 				.cra_name = "sha224",
1320 				.cra_driver_name = "sha224-chcr",
1321 				.cra_blocksize = SHA224_BLOCK_SIZE,
1322 			}
1323 		}
1324 	},
1325 	{
1326 		.type = CRYPTO_ALG_TYPE_AHASH,
1327 		.is_registered = 0,
1328 		.alg.hash = {
1329 			.halg.digestsize = SHA384_DIGEST_SIZE,
1330 			.halg.base = {
1331 				.cra_name = "sha384",
1332 				.cra_driver_name = "sha384-chcr",
1333 				.cra_blocksize = SHA384_BLOCK_SIZE,
1334 			}
1335 		}
1336 	},
1337 	{
1338 		.type = CRYPTO_ALG_TYPE_AHASH,
1339 		.is_registered = 0,
1340 		.alg.hash = {
1341 			.halg.digestsize = SHA512_DIGEST_SIZE,
1342 			.halg.base = {
1343 				.cra_name = "sha512",
1344 				.cra_driver_name = "sha512-chcr",
1345 				.cra_blocksize = SHA512_BLOCK_SIZE,
1346 			}
1347 		}
1348 	},
1349 	/* HMAC */
1350 	{
1351 		.type = CRYPTO_ALG_TYPE_HMAC,
1352 		.is_registered = 0,
1353 		.alg.hash = {
1354 			.halg.digestsize = SHA1_DIGEST_SIZE,
1355 			.halg.base = {
1356 				.cra_name = "hmac(sha1)",
1357 				.cra_driver_name = "hmac(sha1-chcr)",
1358 				.cra_blocksize = SHA1_BLOCK_SIZE,
1359 			}
1360 		}
1361 	},
1362 	{
1363 		.type = CRYPTO_ALG_TYPE_HMAC,
1364 		.is_registered = 0,
1365 		.alg.hash = {
1366 			.halg.digestsize = SHA224_DIGEST_SIZE,
1367 			.halg.base = {
1368 				.cra_name = "hmac(sha224)",
1369 				.cra_driver_name = "hmac(sha224-chcr)",
1370 				.cra_blocksize = SHA224_BLOCK_SIZE,
1371 			}
1372 		}
1373 	},
1374 	{
1375 		.type = CRYPTO_ALG_TYPE_HMAC,
1376 		.is_registered = 0,
1377 		.alg.hash = {
1378 			.halg.digestsize = SHA256_DIGEST_SIZE,
1379 			.halg.base = {
1380 				.cra_name = "hmac(sha256)",
1381 				.cra_driver_name = "hmac(sha256-chcr)",
1382 				.cra_blocksize = SHA256_BLOCK_SIZE,
1383 			}
1384 		}
1385 	},
1386 	{
1387 		.type = CRYPTO_ALG_TYPE_HMAC,
1388 		.is_registered = 0,
1389 		.alg.hash = {
1390 			.halg.digestsize = SHA384_DIGEST_SIZE,
1391 			.halg.base = {
1392 				.cra_name = "hmac(sha384)",
1393 				.cra_driver_name = "hmac(sha384-chcr)",
1394 				.cra_blocksize = SHA384_BLOCK_SIZE,
1395 			}
1396 		}
1397 	},
1398 	{
1399 		.type = CRYPTO_ALG_TYPE_HMAC,
1400 		.is_registered = 0,
1401 		.alg.hash = {
1402 			.halg.digestsize = SHA512_DIGEST_SIZE,
1403 			.halg.base = {
1404 				.cra_name = "hmac(sha512)",
1405 				.cra_driver_name = "hmac(sha512-chcr)",
1406 				.cra_blocksize = SHA512_BLOCK_SIZE,
1407 			}
1408 		}
1409 	},
1410 };
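/*
 * Usage note (illustrative, not part of the original source): the cra_name
 * strings above are what crypto API consumers request, e.g.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha1)", 0, 0);
 *
 * picks the highest-priority registered "hmac(sha1)" implementation, so
 * CHCR_CRA_PRIORITY determines whether this driver is preferred over the
 * generic software ones.
 */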
1411 
1412 /*
1413  *	chcr_unregister_alg - Deregister crypto algorithms from the
1414  *	kernel crypto framework.
1415  */
1416 static int chcr_unregister_alg(void)
1417 {
1418 	int i;
1419 
1420 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1421 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
1422 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
1423 			if (driver_algs[i].is_registered)
1424 				crypto_unregister_alg(
1425 						&driver_algs[i].alg.crypto);
1426 			break;
1427 		case CRYPTO_ALG_TYPE_AHASH:
1428 			if (driver_algs[i].is_registered)
1429 				crypto_unregister_ahash(
1430 						&driver_algs[i].alg.hash);
1431 			break;
1432 		}
1433 		driver_algs[i].is_registered = 0;
1434 	}
1435 	return 0;
1436 }
1437 
1438 #define SZ_AHASH_CTX sizeof(struct chcr_context)
1439 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
1440 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
1441 #define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
1442 
1443 /*
1444  *	chcr_register_alg - Register crypto algorithms with the kernel framework.
1445  */
1446 static int chcr_register_alg(void)
1447 {
1448 	struct crypto_alg ai;
1449 	struct ahash_alg *a_hash;
1450 	int err = 0, i;
1451 	char *name = NULL;
1452 
1453 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
1454 		if (driver_algs[i].is_registered)
1455 			continue;
1456 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
1457 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
1458 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
1459 			name = driver_algs[i].alg.crypto.cra_driver_name;
1460 			break;
1461 		case CRYPTO_ALG_TYPE_AHASH:
1462 			a_hash = &driver_algs[i].alg.hash;
1463 			a_hash->update = chcr_ahash_update;
1464 			a_hash->final = chcr_ahash_final;
1465 			a_hash->finup = chcr_ahash_finup;
1466 			a_hash->digest = chcr_ahash_digest;
1467 			a_hash->export = chcr_ahash_export;
1468 			a_hash->import = chcr_ahash_import;
1469 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
1470 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
1471 			a_hash->halg.base.cra_module = THIS_MODULE;
1472 			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
1473 			a_hash->halg.base.cra_alignmask = 0;
1474 			a_hash->halg.base.cra_exit = NULL;
1475 			a_hash->halg.base.cra_type = &crypto_ahash_type;
1476 
1477 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
1478 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
1479 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
1480 				a_hash->init = chcr_hmac_init;
1481 				a_hash->setkey = chcr_ahash_setkey;
1482 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
1483 			} else {
1484 				a_hash->init = chcr_sha_init;
1485 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
1486 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
1487 			}
1488 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
1489 			ai = driver_algs[i].alg.hash.halg.base;
1490 			name = ai.cra_driver_name;
1491 			break;
1492 		}
1493 		if (err) {
1494 			pr_err("chcr : %s : Algorithm registration failed\n",
1495 			       name);
1496 			goto register_err;
1497 		} else {
1498 			driver_algs[i].is_registered = 1;
1499 		}
1500 	}
1501 	return 0;
1502 
1503 register_err:
1504 	chcr_unregister_alg();
1505 	return err;
1506 }
1507 
1508 /*
1509  *	start_crypto - Register the crypto algorithms.
1510  *	This should be called once when the first device comes up. After this
1511  *	the kernel will start calling driver APIs for crypto operations.
1512  */
1513 int start_crypto(void)
1514 {
1515 	return chcr_register_alg();
1516 }
1517 
1518 /*
1519  *	stop_crypto - Deregister all the crypto algorithms from the kernel.
1520  *	This should be called once when the last device goes down. After this
1521  *	the kernel will not call the driver API for crypto operations.
1522  */
1523 int stop_crypto(void)
1524 {
1525 	chcr_unregister_alg();
1526 	return 0;
1527 }
1528