xref: /openbmc/linux/drivers/crypto/bcm/cipher.c (revision dea54fba)
1 /*
2  * Copyright 2016 Broadcom
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License, version 2, as
6  * published by the Free Software Foundation (the "GPL").
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License version 2 (GPLv2) for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * version 2 (GPLv2) along with this source code.
15  */
16 
17 #include <linux/err.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/errno.h>
21 #include <linux/kernel.h>
22 #include <linux/interrupt.h>
23 #include <linux/platform_device.h>
24 #include <linux/scatterlist.h>
25 #include <linux/crypto.h>
26 #include <linux/kthread.h>
27 #include <linux/rtnetlink.h>
28 #include <linux/sched.h>
29 #include <linux/of_address.h>
30 #include <linux/of_device.h>
31 #include <linux/io.h>
32 #include <linux/bitops.h>
33 
34 #include <crypto/algapi.h>
35 #include <crypto/aead.h>
36 #include <crypto/internal/aead.h>
37 #include <crypto/aes.h>
38 #include <crypto/des.h>
39 #include <crypto/hmac.h>
40 #include <crypto/sha.h>
41 #include <crypto/md5.h>
42 #include <crypto/authenc.h>
43 #include <crypto/skcipher.h>
44 #include <crypto/hash.h>
46 #include <crypto/sha3.h>
47 
48 #include "util.h"
49 #include "cipher.h"
50 #include "spu.h"
51 #include "spum.h"
52 #include "spu2.h"
53 
54 /* ================= Device Structure ================== */
55 
56 struct device_private iproc_priv;
57 
58 /* ==================== Parameters ===================== */
59 
60 int flow_debug_logging;
61 module_param(flow_debug_logging, int, 0644);
62 MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");
63 
64 int packet_debug_logging;
65 module_param(packet_debug_logging, int, 0644);
66 MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");
67 
68 int debug_logging_sleep;
69 module_param(debug_logging_sleep, int, 0644);
70 MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
71 
72 /*
73  * The value of these module parameters is used to set the priority for each
74  * algo type when this driver registers algos with the kernel crypto API.
75  * To use a priority other than the default, set the parameter on the insmod or
76  * modprobe command line. Changing a priority after init time has no effect.
77  *
78  * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
79  * algos, but more preferred than generic software algos.
80  */
81 static int cipher_pri = 150;
82 module_param(cipher_pri, int, 0644);
83 MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");
84 
85 static int hash_pri = 100;
86 module_param(hash_pri, int, 0644);
87 MODULE_PARM_DESC(hash_pri, "Priority for hash algos");
88 
89 static int aead_pri = 150;
90 module_param(aead_pri, int, 0644);
91 MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
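
/*
 * For example, to prefer this driver's algos over other implementations,
 * the priorities can be raised at module load time. A sketch, assuming the
 * driver is built as the bcm_crypto_spu module (adjust the name to match
 * the build):
 *
 *	modprobe bcm_crypto_spu cipher_pri=300 hash_pri=300 aead_pri=300
 */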
92 
93 #define MAX_SPUS 16
94 
95 /* A type 3 BCM header, expected to precede the SPU header for SPU-M.
96  * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
97  * 0x60 - ring 0
98  * 0x68 - ring 1
99  * 0x70 - ring 2
100  * 0x78 - ring 3
101  */
102 char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
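
/*
 * Illustration only: the ring encoded in byte 0 above can be recovered as
 * ((BCMHEADER[0] >> 3) & 0x3), which maps 0x60/0x68/0x70/0x78 to rings
 * 0/1/2/3 respectively.
 */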
103 /*
104  * Some SPU hardware does not use a BCM header on SPU messages, so BCM_HDR_LEN
105  * is set dynamically after reading the SPU type from the device tree.
106  */
107 #define BCM_HDR_LEN  iproc_priv.bcm_hdr_len
108 
109 /* min and max time (usec) to sleep before retrying when the mbox queue is full */
110 #define MBOX_SLEEP_MIN  800
111 #define MBOX_SLEEP_MAX 1000
112 
113 /**
114  * select_channel() - Select a SPU channel to handle a crypto request. Selects
115  * channel in round robin order.
116  *
117  * Return:  channel index
118  */
119 static u8 select_channel(void)
120 {
121 	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
122 
123 	return chan_idx % iproc_priv.spu.num_spu;
124 }
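
/*
 * For example, with four SPU channels and next_chan assumed to start at
 * zero, successive calls return 1, 2, 3, 0, 1, ... because
 * atomic_inc_return() yields the value after the increment, before the
 * modulo is applied.
 */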
125 
126 /**
127  * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
128  * receive a SPU response message for an ablkcipher request. Includes buffers to
129  * catch SPU message headers and the response data.
130  * @mssg:	mailbox message containing the receive sg
131  * @rctx:	crypto request context
132  * @rx_frag_num: number of scatterlist elements required to hold the
133  *		SPU response message
134  * @chunksize:	Number of bytes of response data expected
135  * @stat_pad_len: Number of bytes required to pad the STAT field to
136  *		a 4-byte boundary
137  *
138  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
139  * when the request completes, whether the request is handled successfully or
140  * there is an error.
141  *
142  * Return:
143  *   0 if successful
144  *   < 0 if an error
145  */
146 static int
147 spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
148 			    struct iproc_reqctx_s *rctx,
149 			    u8 rx_frag_num,
150 			    unsigned int chunksize, u32 stat_pad_len)
151 {
152 	struct spu_hw *spu = &iproc_priv.spu;
153 	struct scatterlist *sg;	/* used to build sgs in mbox message */
154 	struct iproc_ctx_s *ctx = rctx->ctx;
155 	u32 datalen;		/* Number of bytes of response data expected */
156 
157 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
158 				rctx->gfp);
159 	if (!mssg->spu.dst)
160 		return -ENOMEM;
161 
162 	sg = mssg->spu.dst;
163 	sg_init_table(sg, rx_frag_num);
164 	/* Space for SPU message header */
165 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
166 
167 	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
168 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
169 	    spu->spu_xts_tweak_in_payload())
170 		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
171 			   SPU_XTS_TWEAK_SIZE);
172 
173 	/* Copy in each dst sg entry from request, up to chunksize */
174 	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
175 				 rctx->dst_nents, chunksize);
176 	if (datalen < chunksize) {
177 		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u\n",
178 		       __func__, chunksize, datalen);
179 		return -EFAULT;
180 	}
181 
182 	if (ctx->cipher.alg == CIPHER_ALG_RC4)
183 		/* Add buffer to catch 260-byte SUPDT field for RC4 */
184 		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
185 
186 	if (stat_pad_len)
187 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
188 
189 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
190 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
191 
192 	return 0;
193 }
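
/*
 * For reference, the receive scatterlist built above is laid out as
 * follows (optional entries in brackets):
 *
 *	[ SPU response header ] [ XTS tweak ] [ dst data ... ]
 *	[ RC4 SUPDT ] [ STAT padding ] [ STATUS ]
 */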
194 
195 /**
196  * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
197  * send a SPU request message for an ablkcipher request. Includes SPU message
198  * headers and the request data.
199  * @mssg:	mailbox message containing the transmit sg
200  * @rctx:	crypto request context
201  * @tx_frag_num: number of scatterlist elements required to construct the
202  *		SPU request message
203  * @chunksize:	Number of bytes of request data
204  * @pad_len:	Number of pad bytes
205  *
206  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
207  * when the request completes, whether the request is handled successfully or
208  * there is an error.
209  *
210  * Return:
211  *   0 if successful
212  *   < 0 if an error
213  */
214 static int
215 spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
216 			    struct iproc_reqctx_s *rctx,
217 			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
218 {
219 	struct spu_hw *spu = &iproc_priv.spu;
220 	struct scatterlist *sg;	/* used to build sgs in mbox message */
221 	struct iproc_ctx_s *ctx = rctx->ctx;
222 	u32 datalen;		/* Number of bytes of request data added to mbox msg */
223 	u32 stat_len;
224 
225 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
226 				rctx->gfp);
227 	if (unlikely(!mssg->spu.src))
228 		return -ENOMEM;
229 
230 	sg = mssg->spu.src;
231 	sg_init_table(sg, tx_frag_num);
232 
233 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
234 		   BCM_HDR_LEN + ctx->spu_req_hdr_len);
235 
236 	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
237 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
238 	    spu->spu_xts_tweak_in_payload())
239 		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
240 
241 	/* Copy in each src sg entry from request, up to chunksize */
242 	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
243 				 rctx->src_nents, chunksize);
244 	if (unlikely(datalen < chunksize)) {
245 		pr_err("%s(): failed to copy src sg to mbox msg\n",
246 		       __func__);
247 		return -EFAULT;
248 	}
249 
250 	if (pad_len)
251 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
252 
253 	stat_len = spu->spu_tx_status_len();
254 	if (stat_len) {
255 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
256 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
257 	}
258 	return 0;
259 }
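
/*
 * Similarly, the transmit scatterlist built above is laid out as follows
 * (optional entries in brackets):
 *
 *	[ BCM + SPU request header ] [ XTS tweak from IV ] [ src data ... ]
 *	[ request padding ] [ TX STATUS ]
 */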
260 
261 /**
262  * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
263  * a single SPU request message, starting at the current position in the request
264  * data.
265  * @rctx:	Crypto request context
266  *
267  * This may be called on the crypto API thread, or, when a request is so large
268  * it must be broken into multiple SPU messages, on the thread used to invoke
269  * the response callback. When requests are broken into multiple SPU
270  * messages, we assume subsequent messages depend on previous results, and
271  * thus always wait for previous results before submitting the next message.
272  * Because requests are submitted in lock step like this, there is no need
273  * to synchronize access to request data structures.
274  *
275  * Return: -EINPROGRESS: request has been accepted and result will be returned
276  *			 asynchronously
277  *         Any other value indicates an error
278  */
279 static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
280 {
281 	struct spu_hw *spu = &iproc_priv.spu;
282 	struct crypto_async_request *areq = rctx->parent;
283 	struct ablkcipher_request *req =
284 	    container_of(areq, struct ablkcipher_request, base);
285 	struct iproc_ctx_s *ctx = rctx->ctx;
286 	struct spu_cipher_parms cipher_parms;
287 	int err = 0;
288 	unsigned int chunksize = 0;	/* Num bytes of request to submit */
289 	int remaining = 0;	/* Bytes of request still to process */
290 	int chunk_start;	/* Beginning of data for current SPU msg */
291 
292 	/* IV or ctr value to use in this SPU msg */
293 	u8 local_iv_ctr[MAX_IV_SIZE];
294 	u32 stat_pad_len;	/* num bytes to align status field */
295 	u32 pad_len;		/* total length of all padding */
296 	bool update_key = false;
297 	struct brcm_message *mssg;	/* mailbox message */
298 	int retry_cnt = 0;
299 
300 	/* number of entries in src and dst sg in mailbox message. */
301 	u8 rx_frag_num = 2;	/* response header and STATUS */
302 	u8 tx_frag_num = 1;	/* request header */
303 
304 	flow_log("%s\n", __func__);
305 
306 	cipher_parms.alg = ctx->cipher.alg;
307 	cipher_parms.mode = ctx->cipher.mode;
308 	cipher_parms.type = ctx->cipher_type;
309 	cipher_parms.key_len = ctx->enckeylen;
310 	cipher_parms.key_buf = ctx->enckey;
311 	cipher_parms.iv_buf = local_iv_ctr;
312 	cipher_parms.iv_len = rctx->iv_ctr_len;
313 
314 	mssg = &rctx->mb_mssg;
315 	chunk_start = rctx->src_sent;
316 	remaining = rctx->total_todo - chunk_start;
317 
318 	/* determine the chunk we are breaking off and update the indexes */
319 	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
320 	    (remaining > ctx->max_payload))
321 		chunksize = ctx->max_payload;
322 	else
323 		chunksize = remaining;
324 
325 	rctx->src_sent += chunksize;
326 	rctx->total_sent = rctx->src_sent;
327 
328 	/* Count number of sg entries to be included in this request */
329 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
330 	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
331 
332 	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
333 	    rctx->is_encrypt && chunk_start)
334 		/*
335 		 * Encrypting a chunk that is not the first. Copy the last
336 		 * block of the previous result to the IV for this chunk.
337 		 */
338 		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
339 				    rctx->iv_ctr_len,
340 				    chunk_start - rctx->iv_ctr_len);
341 
342 	if (rctx->iv_ctr_len) {
343 		/* get our local copy of the iv */
344 		__builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
345 				 rctx->iv_ctr_len);
346 
347 		/* generate the next IV if possible */
348 		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
349 		    !rctx->is_encrypt) {
350 			/*
351 			 * CBC Decrypt: next IV is the last ciphertext block in
352 			 * this chunk
353 			 */
354 			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
355 					    rctx->iv_ctr_len,
356 					    rctx->src_sent - rctx->iv_ctr_len);
357 		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
358 			/*
359 			 * The SPU hardware increments the counter once for
360 			 * each AES block of 16 bytes. So update the counter
361 			 * for the next chunk, if there is one. Note that for
362 			 * this chunk, the counter has already been copied to
363 			 * local_iv_ctr. We can assume a block size of 16,
364 			 * because we only support CTR mode for AES, not for
365 			 * any other cipher alg.
366 			 */
367 			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
368 		}
369 	}
370 
371 	if (ctx->cipher.alg == CIPHER_ALG_RC4) {
372 		rx_frag_num++;
373 		if (chunk_start) {
374 			/*
375 			 * for non-first RC4 chunks, use SUPDT from previous
376 			 * response as key for this chunk.
377 			 */
378 			cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
379 			update_key = true;
380 			cipher_parms.type = CIPHER_TYPE_UPDT;
381 		} else if (!rctx->is_encrypt) {
382 			/*
383 			 * First RC4 chunk. For decrypt, key in pre-built msg
384 			 * header may have been changed if encrypt required
385 			 * multiple chunks. So revert the key to the
386 			 * ctx->enckey value.
387 			 */
388 			update_key = true;
389 			cipher_parms.type = CIPHER_TYPE_INIT;
390 		}
391 	}
392 
393 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
394 		flow_log("max_payload infinite\n");
395 	else
396 		flow_log("max_payload %u\n", ctx->max_payload);
397 
398 	flow_log("sent:%u start:%u remains:%u size:%u\n",
399 		 rctx->src_sent, chunk_start, remaining, chunksize);
400 
401 	/* Copy SPU header template created at setkey time */
402 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
403 	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));
404 
405 	/*
406 	 * Pass SUPDT field as key. Key field in finish() call is only used
407 	 * when update_key has been set above for RC4. Will be ignored in
408 	 * all other cases.
409 	 */
410 	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
411 				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
412 				   &cipher_parms, update_key, chunksize);
413 
414 	atomic64_add(chunksize, &iproc_priv.bytes_out);
415 
416 	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
417 	if (stat_pad_len)
418 		rx_frag_num++;
419 	pad_len = stat_pad_len;
420 	if (pad_len) {
421 		tx_frag_num++;
422 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
423 				     0, ctx->auth.alg, ctx->auth.mode,
424 				     rctx->total_sent, stat_pad_len);
425 	}
426 
427 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
428 			      ctx->spu_req_hdr_len);
429 	packet_log("payload:\n");
430 	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
431 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
432 
433 	/*
434 	 * Build mailbox message containing SPU request msg and rx buffers
435 	 * to catch response message
436 	 */
437 	memset(mssg, 0, sizeof(*mssg));
438 	mssg->type = BRCM_MESSAGE_SPU;
439 	mssg->ctx = rctx;	/* Will be returned in response */
440 
441 	/* Create rx scatterlist to catch result */
442 	rx_frag_num += rctx->dst_nents;
443 
444 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
445 	    spu->spu_xts_tweak_in_payload())
446 		rx_frag_num++;	/* extra sg to insert tweak */
447 
448 	err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
449 					  stat_pad_len);
450 	if (err)
451 		return err;
452 
453 	/* Create tx scatterlist containing SPU request message */
454 	tx_frag_num += rctx->src_nents;
455 	if (spu->spu_tx_status_len())
456 		tx_frag_num++;
457 
458 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
459 	    spu->spu_xts_tweak_in_payload())
460 		tx_frag_num++;	/* extra sg to insert tweak */
461 
462 	err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
463 					  pad_len);
464 	if (err)
465 		return err;
466 
467 	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
468 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
469 		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
470 			/*
471 			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
472 			 * not in atomic context and we can wait and try again.
473 			 */
474 			retry_cnt++;
475 			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
476 			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
477 						mssg);
478 			atomic_inc(&iproc_priv.mb_no_spc);
479 		}
480 	}
481 	if (unlikely(err < 0)) {
482 		atomic_inc(&iproc_priv.mb_send_fail);
483 		return err;
484 	}
485 
486 	return -EINPROGRESS;
487 }
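
/*
 * The submit-and-retry sequence above is repeated verbatim in
 * handle_ahash_req() and handle_aead_req(). A minimal sketch of how it
 * could be factored into a shared helper (the helper is hypothetical and
 * not part of this driver):
 *
 *	static int spu_mb_send(struct iproc_reqctx_s *rctx, bool may_sleep)
 *	{
 *		int err, retry_cnt = 0;
 *
 *		err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
 *					&rctx->mb_mssg);
 *		while (may_sleep && err == -ENOBUFS &&
 *		       retry_cnt < SPU_MB_RETRY_MAX) {
 *			retry_cnt++;
 *			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
 *			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
 *						&rctx->mb_mssg);
 *			atomic_inc(&iproc_priv.mb_no_spc);
 *		}
 *		if (err < 0)
 *			atomic_inc(&iproc_priv.mb_send_fail);
 *		return err;
 *	}
 */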
488 
489 /**
490  * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
491  * total received count for the request and updates global stats.
492  * @rctx:	Crypto request context
493  */
494 static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
495 {
496 	struct spu_hw *spu = &iproc_priv.spu;
497 #ifdef DEBUG
498 	struct crypto_async_request *areq = rctx->parent;
499 	struct ablkcipher_request *req = ablkcipher_request_cast(areq);
500 #endif
501 	struct iproc_ctx_s *ctx = rctx->ctx;
502 	u32 payload_len;
503 
504 	/* See how much data was returned */
505 	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
506 
507 	/*
508 	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
509 	 * encrypted tweak ("i") value; we don't count those.
510 	 */
511 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
512 	    spu->spu_xts_tweak_in_payload() &&
513 	    (payload_len >= SPU_XTS_TWEAK_SIZE))
514 		payload_len -= SPU_XTS_TWEAK_SIZE;
515 
516 	atomic64_add(payload_len, &iproc_priv.bytes_in);
517 
518 	flow_log("%s() offset: %u, bd_len: %u BD:\n",
519 		 __func__, rctx->total_received, payload_len);
520 
521 	dump_sg(req->dst, rctx->total_received, payload_len);
522 	if (ctx->cipher.alg == CIPHER_ALG_RC4)
523 		packet_dump("  supdt ", rctx->msg_buf.c.supdt_tweak,
524 			    SPU_SUPDT_LEN);
525 
526 	rctx->total_received += payload_len;
527 	if (rctx->total_received == rctx->total_todo) {
528 		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
529 		atomic_inc(
530 		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
531 	}
532 }
533 
534 /**
535  * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
536  * receive a SPU response message for an ahash request.
537  * @mssg:	mailbox message containing the receive sg
538  * @rctx:	crypto request context
539  * @rx_frag_num: number of scatterlist elements required to hold the
540  *		SPU response message
541  * @digestsize: length of hash digest, in bytes
542  * @stat_pad_len: Number of bytes required to pad the STAT field to
543  *		a 4-byte boundary
544  *
545  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
546  * when the request completes, whether the request is handled successfully or
547  * there is an error.
548  *
549  * Return:
550  *   0 if successful
551  *   < 0 if an error
552  */
553 static int
554 spu_ahash_rx_sg_create(struct brcm_message *mssg,
555 		       struct iproc_reqctx_s *rctx,
556 		       u8 rx_frag_num, unsigned int digestsize,
557 		       u32 stat_pad_len)
558 {
559 	struct spu_hw *spu = &iproc_priv.spu;
560 	struct scatterlist *sg;	/* used to build sgs in mbox message */
561 	struct iproc_ctx_s *ctx = rctx->ctx;
562 
563 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
564 				rctx->gfp);
565 	if (!mssg->spu.dst)
566 		return -ENOMEM;
567 
568 	sg = mssg->spu.dst;
569 	sg_init_table(sg, rx_frag_num);
570 	/* Space for SPU message header */
571 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
572 
573 	/* Space for digest */
574 	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
575 
576 	if (stat_pad_len)
577 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
578 
579 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
580 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
581 	return 0;
582 }
583 
584 /**
585  * spu_ahash_tx_sg_create() -  Build up the scatterlist of buffers used to send
586  * a SPU request message for an ahash request. Includes SPU message headers and
587  * the request data.
588  * @mssg:	mailbox message containing the transmit sg
589  * @rctx:	crypto request context
590  * @tx_frag_num: number of scatterlist elements required to construct the
591  *		SPU request message
592  * @spu_hdr_len: length in bytes of SPU message header
593  * @hash_carry_len: Number of bytes of data carried over from previous req
594  * @new_data_len: Number of bytes of new request data
595  * @pad_len:	Number of pad bytes
596  *
597  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
598  * when the request completes, whether the request is handled successfully or
599  * there is an error.
600  *
601  * Return:
602  *   0 if successful
603  *   < 0 if an error
604  */
605 static int
606 spu_ahash_tx_sg_create(struct brcm_message *mssg,
607 		       struct iproc_reqctx_s *rctx,
608 		       u8 tx_frag_num,
609 		       u32 spu_hdr_len,
610 		       unsigned int hash_carry_len,
611 		       unsigned int new_data_len, u32 pad_len)
612 {
613 	struct spu_hw *spu = &iproc_priv.spu;
614 	struct scatterlist *sg;	/* used to build sgs in mbox message */
615 	u32 datalen;		/* Number of bytes of request data added to mbox msg */
616 	u32 stat_len;
617 
618 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
619 				rctx->gfp);
620 	if (!mssg->spu.src)
621 		return -ENOMEM;
622 
623 	sg = mssg->spu.src;
624 	sg_init_table(sg, tx_frag_num);
625 
626 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
627 		   BCM_HDR_LEN + spu_hdr_len);
628 
629 	if (hash_carry_len)
630 		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
631 
632 	if (new_data_len) {
633 		/* Copy in each src sg entry from request, up to chunksize */
634 		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
635 					 rctx->src_nents, new_data_len);
636 		if (datalen < new_data_len) {
637 			pr_err("%s(): failed to copy src sg to mbox msg\n",
638 			       __func__);
639 			return -EFAULT;
640 		}
641 	}
642 
643 	if (pad_len)
644 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
645 
646 	stat_len = spu->spu_tx_status_len();
647 	if (stat_len) {
648 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
649 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
650 	}
651 
652 	return 0;
653 }
654 
655 /**
656  * handle_ahash_req() - Process an asynchronous hash request from the crypto
657  * API.
658  * @rctx:  Crypto request context
659  *
660  * Builds a SPU request message embedded in a mailbox message and submits the
661  * mailbox message on a selected mailbox channel. The SPU request message is
662  * constructed as a scatterlist, including entries from the crypto API's
663  * src scatterlist to avoid copying the data to be hashed. This function is
664  * called either on the thread from the crypto API, or, in the case that the
665  * crypto API request is too large to fit in a single SPU request message,
666  * on the thread that invokes the receive callback with a response message.
667  * Because some operations require the response from one chunk before the next
668  * chunk can be submitted, we always wait for the response for the previous
669  * chunk before submitting the next chunk. Because requests are submitted in
670  * lock step like this, there is no need to synchronize access to request data
671  * structures.
672  *
673  * Return:
674  *   -EINPROGRESS: request has been submitted to SPU and response will be
675  *		   returned asynchronously
676  *   -EAGAIN:      non-final request included a small amount of data, which for
677  *		   efficiency we did not submit to the SPU, but instead stored
678  *		   to be submitted to the SPU with the next part of the request
679  *   other:        an error code
680  */
681 static int handle_ahash_req(struct iproc_reqctx_s *rctx)
682 {
683 	struct spu_hw *spu = &iproc_priv.spu;
684 	struct crypto_async_request *areq = rctx->parent;
685 	struct ahash_request *req = ahash_request_cast(areq);
686 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
687 	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
688 	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
689 	struct iproc_ctx_s *ctx = rctx->ctx;
690 
691 	/* number of bytes still to be hashed in this req */
692 	unsigned int nbytes_to_hash = 0;
693 	int err = 0;
694 	unsigned int chunksize = 0;	/* length of hash carry + new data */
695 	/*
696 	 * length of new data, not from hash carry, to be submitted in
697 	 * this hw request
698 	 */
699 	unsigned int new_data_len;
700 
701 	unsigned int chunk_start = 0;
702 	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
703 	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
704 	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
705 	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
706 	struct brcm_message *mssg;	/* mailbox message */
707 	struct spu_request_opts req_opts;
708 	struct spu_cipher_parms cipher_parms;
709 	struct spu_hash_parms hash_parms;
710 	struct spu_aead_parms aead_parms;
711 	unsigned int local_nbuf;
712 	u32 spu_hdr_len;
713 	unsigned int digestsize;
714 	u16 rem = 0;
715 	int retry_cnt = 0;
716 
717 	/*
718 	 * number of entries in src and dst sg. Always includes SPU msg header.
719 	 * rx always includes a buffer to catch digest and STATUS.
720 	 */
721 	u8 rx_frag_num = 3;
722 	u8 tx_frag_num = 1;
723 
724 	flow_log("total_todo %u, total_sent %u\n",
725 		 rctx->total_todo, rctx->total_sent);
726 
727 	memset(&req_opts, 0, sizeof(req_opts));
728 	memset(&cipher_parms, 0, sizeof(cipher_parms));
729 	memset(&hash_parms, 0, sizeof(hash_parms));
730 	memset(&aead_parms, 0, sizeof(aead_parms));
731 
732 	req_opts.bd_suppress = true;
733 	hash_parms.alg = ctx->auth.alg;
734 	hash_parms.mode = ctx->auth.mode;
735 	hash_parms.type = HASH_TYPE_NONE;
736 	hash_parms.key_buf = (u8 *)ctx->authkey;
737 	hash_parms.key_len = ctx->authkeylen;
738 
739 	/*
740 	 * For hash algorithms, the assignment below looks a bit odd, but it
741 	 * is needed for the AES-XCBC and AES-CMAC hash algorithms to
742 	 * differentiate between 128-, 192-, and 256-bit key values.
743 	 * The hash algorithm is selected based on the key size.
744 	 * For example, for a 128-bit key, the hash algorithm is AES-128.
745 	 */
746 	cipher_parms.type = ctx->cipher_type;
747 
748 	mssg = &rctx->mb_mssg;
749 	chunk_start = rctx->src_sent;
750 
751 	/*
752 	 * Compute the amount remaining to hash. This may include data
753 	 * carried over from previous requests.
754 	 */
755 	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
756 	chunksize = nbytes_to_hash;
757 	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
758 	    (chunksize > ctx->max_payload))
759 		chunksize = ctx->max_payload;
760 
761 	/*
762 	 * If this is not a final request and the request data is not a multiple
763 	 * of a full block, then simply park the extra data and prefix it to the
764 	 * data for the next request.
765 	 */
766 	if (!rctx->is_final) {
767 		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
768 		u16 new_len;  /* len of data to add to hash carry */
769 
770 		rem = chunksize % blocksize;   /* remainder */
771 		if (rem) {
772 			/* chunksize not a multiple of blocksize */
773 			chunksize -= rem;
774 			if (chunksize == 0) {
775 				/* Don't have a full block to submit to hw */
776 				new_len = rem - rctx->hash_carry_len;
777 				sg_copy_part_to_buf(req->src, dest, new_len,
778 						    rctx->src_sent);
779 				rctx->hash_carry_len = rem;
780 				flow_log("Exiting with hash carry len: %u\n",
781 					 rctx->hash_carry_len);
782 				packet_dump("  buf: ",
783 					    rctx->hash_carry,
784 					    rctx->hash_carry_len);
785 				return -EAGAIN;
786 			}
787 		}
788 	}
789 
790 	/* if we have hash carry, then prefix it to the data in this request */
791 	local_nbuf = rctx->hash_carry_len;
792 	rctx->hash_carry_len = 0;
793 	if (local_nbuf)
794 		tx_frag_num++;
795 	new_data_len = chunksize - local_nbuf;
796 
797 	/* Count number of sg entries to be used in this request */
798 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
799 				       new_data_len);
800 
801 	/* AES hashing keeps key size in type field, so need to copy it here */
802 	if (hash_parms.alg == HASH_ALG_AES)
803 		hash_parms.type = cipher_parms.type;
804 	else
805 		hash_parms.type = spu->spu_hash_type(rctx->total_sent);
806 
807 	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
808 					  hash_parms.type);
809 	hash_parms.digestsize =	digestsize;
810 
811 	/* update the indexes */
812 	rctx->total_sent += chunksize;
813 	/* data sent from the prebuf (hash carry) did not come from this req->src */
814 	rctx->src_sent += new_data_len;
815 
816 	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
817 		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
818 							   hash_parms.mode,
819 							   chunksize,
820 							   blocksize);
821 
822 	/*
823 	 * If a non-first chunk, then include the digest returned from the
824 	 * previous chunk so that hw can add to it (except for AES types).
825 	 */
826 	if ((hash_parms.type == HASH_TYPE_UPDT) &&
827 	    (hash_parms.alg != HASH_ALG_AES)) {
828 		hash_parms.key_buf = rctx->incr_hash;
829 		hash_parms.key_len = digestsize;
830 	}
831 
832 	atomic64_add(chunksize, &iproc_priv.bytes_out);
833 
834 	flow_log("%s() final: %u nbuf: %u ",
835 		 __func__, rctx->is_final, local_nbuf);
836 
837 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
838 		flow_log("max_payload infinite\n");
839 	else
840 		flow_log("max_payload %u\n", ctx->max_payload);
841 
842 	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
843 
844 	/* Prepend SPU header with type 3 BCM header */
845 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
846 
847 	hash_parms.prebuf_len = local_nbuf;
848 	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
849 					      BCM_HDR_LEN,
850 					      &req_opts, &cipher_parms,
851 					      &hash_parms, &aead_parms,
852 					      new_data_len);
853 
854 	if (spu_hdr_len == 0) {
855 		pr_err("Failed to create SPU request header\n");
856 		return -EFAULT;
857 	}
858 
859 	/*
860 	 * Determine total length of padding required. Put all padding in one
861 	 * buffer.
862 	 */
863 	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
864 	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
865 				   0, 0, hash_parms.pad_len);
866 	if (spu->spu_tx_status_len())
867 		stat_pad_len = spu->spu_wordalign_padlen(db_size);
868 	if (stat_pad_len)
869 		rx_frag_num++;
870 	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
871 	if (pad_len) {
872 		tx_frag_num++;
873 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
874 				     hash_parms.pad_len, ctx->auth.alg,
875 				     ctx->auth.mode, rctx->total_sent,
876 				     stat_pad_len);
877 	}
878 
879 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
880 			      spu_hdr_len);
881 	packet_dump("    prebuf: ", rctx->hash_carry, local_nbuf);
882 	flow_log("Data:\n");
883 	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
884 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
885 
886 	/*
887 	 * Build mailbox message containing SPU request msg and rx buffers
888 	 * to catch response message
889 	 */
890 	memset(mssg, 0, sizeof(*mssg));
891 	mssg->type = BRCM_MESSAGE_SPU;
892 	mssg->ctx = rctx;	/* Will be returned in response */
893 
894 	/* Create rx scatterlist to catch result */
895 	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
896 				     stat_pad_len);
897 	if (err)
898 		return err;
899 
900 	/* Create tx scatterlist containing SPU request message */
901 	tx_frag_num += rctx->src_nents;
902 	if (spu->spu_tx_status_len())
903 		tx_frag_num++;
904 	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
905 				     local_nbuf, new_data_len, pad_len);
906 	if (err)
907 		return err;
908 
909 	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
910 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
911 		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
912 			/*
913 			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
914 			 * not in atomic context and we can wait and try again.
915 			 */
916 			retry_cnt++;
917 			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
918 			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
919 						mssg);
920 			atomic_inc(&iproc_priv.mb_no_spc);
921 		}
922 	}
923 	if (err < 0) {
924 		atomic_inc(&iproc_priv.mb_send_fail);
925 		return err;
926 	}
927 	return -EINPROGRESS;
928 }
929 
930 /**
931  * spu_hmac_outer_hash() - Request synchronous software computation of the
932  * outer hash for an HMAC request.
933  * @req:  The HMAC request from the crypto API
934  * @ctx:  The session context
935  *
936  * Return: 0 if synchronous hash operation successful
937  *         -EINVAL if the hash algo is unrecognized
938  *         any other value indicates an error
939  */
940 static int spu_hmac_outer_hash(struct ahash_request *req,
941 			       struct iproc_ctx_s *ctx)
942 {
943 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
944 	unsigned int blocksize =
945 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
946 	int rc;
947 
948 	switch (ctx->auth.alg) {
949 	case HASH_ALG_MD5:
950 		rc = do_shash("md5", req->result, ctx->opad, blocksize,
951 			      req->result, ctx->digestsize, NULL, 0);
952 		break;
953 	case HASH_ALG_SHA1:
954 		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
955 			      req->result, ctx->digestsize, NULL, 0);
956 		break;
957 	case HASH_ALG_SHA224:
958 		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
959 			      req->result, ctx->digestsize, NULL, 0);
960 		break;
961 	case HASH_ALG_SHA256:
962 		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
963 			      req->result, ctx->digestsize, NULL, 0);
964 		break;
965 	case HASH_ALG_SHA384:
966 		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
967 			      req->result, ctx->digestsize, NULL, 0);
968 		break;
969 	case HASH_ALG_SHA512:
970 		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
971 			      req->result, ctx->digestsize, NULL, 0);
972 		break;
973 	default:
974 		pr_err("%s() Error : unknown hmac type\n", __func__);
975 		rc = -EINVAL;
976 	}
977 	return rc;
978 }
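
/*
 * Note: in the is_sw_hmac case, the SPU computes the inner hash
 * H((K ^ ipad) || msg) and spu_hmac_outer_hash() finishes
 * HMAC(K, msg) = H((K ^ opad) || inner_hash) in software; ctx->opad is
 * expected to already hold the block-sized key XORed with the opad pattern,
 * prepared at setkey time.
 */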
979 
980 /**
981  * ahash_req_done() - Process a hash result from the SPU hardware.
982  * @rctx: Crypto request context
983  *
984  * Return: 0 if successful
985  *         < 0 if an error
986  */
987 static int ahash_req_done(struct iproc_reqctx_s *rctx)
988 {
989 	struct spu_hw *spu = &iproc_priv.spu;
990 	struct crypto_async_request *areq = rctx->parent;
991 	struct ahash_request *req = ahash_request_cast(areq);
992 	struct iproc_ctx_s *ctx = rctx->ctx;
993 	int err;
994 
995 	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
996 
997 	if (spu->spu_type == SPU_TYPE_SPUM) {
998 		/* byte swap the output from the UPDT function to network byte
999 		 * order
1000 		 */
1001 		if (ctx->auth.alg == HASH_ALG_MD5) {
1002 			__swab32s((u32 *)req->result);
1003 			__swab32s(((u32 *)req->result) + 1);
1004 			__swab32s(((u32 *)req->result) + 2);
1005 			__swab32s(((u32 *)req->result) + 3);
1006 			__swab32s(((u32 *)req->result) + 4);
1007 		}
1008 	}
1009 
1010 	flow_dump("  digest ", req->result, ctx->digestsize);
1011 
1012 	/* if this is an HMAC, then do the outer hash */
1013 	if (rctx->is_sw_hmac) {
1014 		err = spu_hmac_outer_hash(req, ctx);
1015 		if (err < 0)
1016 			return err;
1017 		flow_dump("  hmac: ", req->result, ctx->digestsize);
1018 	}
1019 
1020 	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
1021 		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
1022 		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
1023 	} else {
1024 		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
1025 		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
1026 	}
1027 
1028 	return 0;
1029 }
1030 
1031 /**
1032  * handle_ahash_resp() - Process a SPU response message for a hash request.
1033  * Checks if the entire crypto API request has been processed, and if so,
1034  * invokes post processing on the result.
1035  * @rctx: Crypto request context
1036  */
1037 static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
1038 {
1039 	struct iproc_ctx_s *ctx = rctx->ctx;
1040 #ifdef DEBUG
1041 	struct crypto_async_request *areq = rctx->parent;
1042 	struct ahash_request *req = ahash_request_cast(areq);
1043 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1044 	unsigned int blocksize =
1045 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
1046 #endif
1047 	/*
1048 	 * Save hash to use as input to next op if incremental. Might be copying
1049 	 * too much, but that's easier than figuring out actual digest size here
1050 	 */
1051 	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1052 
1053 	flow_log("%s() blocksize:%u digestsize:%u\n",
1054 		 __func__, blocksize, ctx->digestsize);
1055 
1056 	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
1057 
1058 	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1059 		ahash_req_done(rctx);
1060 }
1061 
1062 /**
1063  * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
1064  * a SPU response message for an AEAD request. Includes buffers to catch SPU
1065  * message headers and the response data.
1066  * @mssg:	mailbox message containing the receive sg
 * @req:	Crypto API AEAD request
1067  * @rctx:	crypto request context
1068  * @rx_frag_num: number of scatterlist elements required to hold the
1069  *		SPU response message
1070  * @assoc_len:	Length of associated data included in the crypto request
1071  * @ret_iv_len: Length of IV returned in response
1072  * @resp_len:	Number of bytes of response data expected to be written to
1073  *              dst buffer from crypto API
1074  * @digestsize: Length of hash digest, in bytes
1075  * @stat_pad_len: Number of bytes required to pad the STAT field to
1076  *		a 4-byte boundary
1077  *
1078  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1079  * when the request completes, whether the request is handled successfully or
1080  * there is an error.
1081  *
1082  * Return:
1083  *   0 if successful
1084  *   < 0 if an error
1085  */
1086 static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1087 				 struct aead_request *req,
1088 				 struct iproc_reqctx_s *rctx,
1089 				 u8 rx_frag_num,
1090 				 unsigned int assoc_len,
1091 				 u32 ret_iv_len, unsigned int resp_len,
1092 				 unsigned int digestsize, u32 stat_pad_len)
1093 {
1094 	struct spu_hw *spu = &iproc_priv.spu;
1095 	struct scatterlist *sg;	/* used to build sgs in mbox message */
1096 	struct iproc_ctx_s *ctx = rctx->ctx;
1097 	u32 datalen;		/* Number of bytes of response data expected */
1098 	u32 assoc_buf_len;
1099 	u8 data_padlen = 0;
1100 
1101 	if (ctx->is_rfc4543) {
1102 		/* RFC4543: only pad after data, not after AAD */
1103 		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1104 							  assoc_len + resp_len);
1105 		assoc_buf_len = assoc_len;
1106 	} else {
1107 		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1108 							  resp_len);
1109 		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1110 						assoc_len, ret_iv_len,
1111 						rctx->is_encrypt);
1112 	}
1113 
1114 	if (ctx->cipher.mode == CIPHER_MODE_CCM)
1115 		/* ICV (after data) must be in the next 32-bit word for CCM */
1116 		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1117 							 resp_len +
1118 							 data_padlen);
1119 
1120 	if (data_padlen)
1121 		/* have to catch gcm pad in separate buffer */
1122 		rx_frag_num++;
1123 
1124 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1125 				rctx->gfp);
1126 	if (!mssg->spu.dst)
1127 		return -ENOMEM;
1128 
1129 	sg = mssg->spu.dst;
1130 	sg_init_table(sg, rx_frag_num);
1131 
1132 	/* Space for SPU message header */
1133 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1134 
1135 	if (assoc_buf_len) {
1136 		/*
1137 		 * Don't write directly to req->dst, because SPU may pad the
1138 		 * assoc data in the response
1139 		 */
1140 		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1141 		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1142 	}
1143 
1144 	if (resp_len) {
1145 		/*
1146 		 * Copy in each dst sg entry from request, up to chunksize.
1147 		 * dst sg catches just the data. digest caught in separate buf.
1148 		 */
1149 		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1150 					 rctx->dst_nents, resp_len);
1151 		if (datalen < (resp_len)) {
1152 			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u\n",
1153 			       __func__, resp_len, datalen);
1154 			return -EFAULT;
1155 		}
1156 	}
1157 
1158 	/* If GCM/CCM data is padded, catch padding in separate buffer */
1159 	if (data_padlen) {
1160 		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1161 		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1162 	}
1163 
1164 	/* Always catch ICV in separate buffer */
1165 	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1166 
1167 	flow_log("stat_pad_len %u\n", stat_pad_len);
1168 	if (stat_pad_len) {
1169 		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1170 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1171 	}
1172 
1173 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1174 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1175 
1176 	return 0;
1177 }
1178 
1179 /**
1180  * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
1181  * SPU request message for an AEAD request. Includes SPU message headers and the
1182  * request data.
1183  * @mssg:	mailbox message containing the transmit sg
1184  * @rctx:	crypto request context
1185  * @tx_frag_num: number of scatterlist elements required to construct the
1186  *		SPU request message
1187  * @spu_hdr_len: length of SPU message header in bytes
1188  * @assoc:	crypto API associated data scatterlist
1189  * @assoc_len:	length of associated data
1190  * @assoc_nents: number of scatterlist entries containing assoc data
1191  * @aead_iv_len: length of AEAD IV, if included
1192  * @chunksize:	Number of bytes of request data
1193  * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
1194  * @pad_len:	Number of pad bytes
1195  * @incl_icv:	If true, write separate ICV buffer after data and
1196  *              any padding
1197  *
1198  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1199  * when the request completes, whether the request is handled successfully or
1200  * there is an error.
1201  *
1202  * Return:
1203  *   0 if successful
1204  *   < 0 if an error
1205  */
1206 static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1207 				 struct iproc_reqctx_s *rctx,
1208 				 u8 tx_frag_num,
1209 				 u32 spu_hdr_len,
1210 				 struct scatterlist *assoc,
1211 				 unsigned int assoc_len,
1212 				 int assoc_nents,
1213 				 unsigned int aead_iv_len,
1214 				 unsigned int chunksize,
1215 				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
1216 {
1217 	struct spu_hw *spu = &iproc_priv.spu;
1218 	struct scatterlist *sg;	/* used to build sgs in mbox message */
1219 	struct scatterlist *assoc_sg = assoc;
1220 	struct iproc_ctx_s *ctx = rctx->ctx;
1221 	u32 datalen;		/* Number of bytes of data to write */
1222 	u32 written;		/* Number of bytes of data written */
1223 	u32 assoc_offset = 0;
1224 	u32 stat_len;
1225 
1226 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1227 				rctx->gfp);
1228 	if (!mssg->spu.src)
1229 		return -ENOMEM;
1230 
1231 	sg = mssg->spu.src;
1232 	sg_init_table(sg, tx_frag_num);
1233 
1234 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1235 		   BCM_HDR_LEN + spu_hdr_len);
1236 
1237 	if (assoc_len) {
1238 		/* Copy in each associated data sg entry from request */
1239 		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1240 					 assoc_nents, assoc_len);
1241 		if (written < assoc_len) {
1242 			pr_err("%s(): failed to copy assoc sg to mbox msg\n",
1243 			       __func__);
1244 			return -EFAULT;
1245 		}
1246 	}
1247 
1248 	if (aead_iv_len)
1249 		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1250 
1251 	if (aad_pad_len) {
1252 		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1253 		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1254 	}
1255 
1256 	datalen = chunksize;
1257 	if ((chunksize > ctx->digestsize) && incl_icv)
1258 		datalen -= ctx->digestsize;
1259 	if (datalen) {
1260 		/* For aead, a single msg should consume the entire src sg */
1261 		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1262 					 rctx->src_nents, datalen);
1263 		if (written < datalen) {
1264 			pr_err("%s(): failed to copy src sg to mbox msg\n",
1265 			       __func__);
1266 			return -EFAULT;
1267 		}
1268 	}
1269 
1270 	if (pad_len) {
1271 		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1272 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1273 	}
1274 
1275 	if (incl_icv)
1276 		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1277 
1278 	stat_len = spu->spu_tx_status_len();
1279 	if (stat_len) {
1280 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
1281 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1282 	}
1283 	return 0;
1284 }
1285 
1286 /**
1287  * handle_aead_req() - Submit a SPU request message for the next chunk of the
1288  * current AEAD request.
1289  * @rctx:  Crypto request context
1290  *
1291  * Unlike other operation types, we assume the length of the request fits in
1292  * a single SPU request message. aead_enqueue() makes sure this is true.
1293  * The comments for other op types regarding threads apply here as well.
1294  *
1295  * Unlike incremental hash ops, where the SPU returns the entire hash for
1296  * truncated algs like SHA-224, the SPU returns just the truncated hash in
1297  * response to aead requests. So digestsize is always ctx->digestsize here.
1298  *
1299  * Return: -EINPROGRESS: crypto request has been accepted and result will be
1300  *			 returned asynchronously
1301  *         Any other value indicates an error
1302  */
1303 static int handle_aead_req(struct iproc_reqctx_s *rctx)
1304 {
1305 	struct spu_hw *spu = &iproc_priv.spu;
1306 	struct crypto_async_request *areq = rctx->parent;
1307 	struct aead_request *req = container_of(areq,
1308 						struct aead_request, base);
1309 	struct iproc_ctx_s *ctx = rctx->ctx;
1310 	int err;
1311 	unsigned int chunksize;
1312 	unsigned int resp_len;
1313 	u32 spu_hdr_len;
1314 	u32 db_size;
1315 	u32 stat_pad_len;
1316 	u32 pad_len;
1317 	struct brcm_message *mssg;	/* mailbox message */
1318 	struct spu_request_opts req_opts;
1319 	struct spu_cipher_parms cipher_parms;
1320 	struct spu_hash_parms hash_parms;
1321 	struct spu_aead_parms aead_parms;
1322 	int assoc_nents = 0;
1323 	bool incl_icv = false;
1324 	unsigned int digestsize = ctx->digestsize;
1325 	int retry_cnt = 0;
1326 
1327 	/* number of entries in src and dst sg. Always includes SPU msg header.
1328 	 */
1329 	u8 rx_frag_num = 2;	/* and STATUS */
1330 	u8 tx_frag_num = 1;
1331 
1332 	/* doing the whole thing at once */
1333 	chunksize = rctx->total_todo;
1334 
1335 	flow_log("%s: chunksize %u\n", __func__, chunksize);
1336 
1337 	memset(&req_opts, 0, sizeof(req_opts));
1338 	memset(&hash_parms, 0, sizeof(hash_parms));
1339 	memset(&aead_parms, 0, sizeof(aead_parms));
1340 
1341 	req_opts.is_inbound = !(rctx->is_encrypt);
1342 	req_opts.auth_first = ctx->auth_first;
1343 	req_opts.is_aead = true;
1344 	req_opts.is_esp = ctx->is_esp;
1345 
1346 	cipher_parms.alg = ctx->cipher.alg;
1347 	cipher_parms.mode = ctx->cipher.mode;
1348 	cipher_parms.type = ctx->cipher_type;
1349 	cipher_parms.key_buf = ctx->enckey;
1350 	cipher_parms.key_len = ctx->enckeylen;
1351 	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1352 	cipher_parms.iv_len = rctx->iv_ctr_len;
1353 
1354 	hash_parms.alg = ctx->auth.alg;
1355 	hash_parms.mode = ctx->auth.mode;
1356 	hash_parms.type = HASH_TYPE_NONE;
1357 	hash_parms.key_buf = (u8 *)ctx->authkey;
1358 	hash_parms.key_len = ctx->authkeylen;
1359 	hash_parms.digestsize = digestsize;
1360 
1361 	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1362 	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
1363 		hash_parms.key_len = SHA224_DIGEST_SIZE;
1364 
1365 	aead_parms.assoc_size = req->assoclen;
1366 	if (ctx->is_esp && !ctx->is_rfc4543) {
1367 		/*
1368 		 * The 8-byte IV is included in the assoc data of the request.
1369 		 * SPU2 expects the AAD to include just the SPI and seqno, so
1370 		 * subtract off the IV len.
1371 		 */
1372 		aead_parms.assoc_size -= GCM_ESP_IV_SIZE;
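		/*
		 * For example, with rfc4106(gcm(aes)) as used for IPsec ESP,
		 * the crypto API assoclen is typically 16 (SPI + sequence
		 * number + 8-byte IV), so assoc_size becomes 8 here.
		 */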
1373 
1374 		if (rctx->is_encrypt) {
1375 			aead_parms.return_iv = true;
1376 			aead_parms.ret_iv_len = GCM_ESP_IV_SIZE;
1377 			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1378 		}
1379 	} else {
1380 		aead_parms.ret_iv_len = 0;
1381 	}
1382 
1383 	/*
1384 	 * Count number of sg entries from the crypto API request that are to
1385 	 * be included in this mailbox message. For dst sg, don't count space
1386 	 * for digest. Digest gets caught in a separate buffer and copied back
1387 	 * to dst sg when processing response.
1388 	 */
1389 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1390 	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1391 	if (aead_parms.assoc_size)
1392 		assoc_nents = spu_sg_count(rctx->assoc, 0,
1393 					   aead_parms.assoc_size);
1394 
1395 	mssg = &rctx->mb_mssg;
1396 
1397 	rctx->total_sent = chunksize;
1398 	rctx->src_sent = chunksize;
1399 	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1400 				    aead_parms.assoc_size,
1401 				    aead_parms.ret_iv_len,
1402 				    rctx->is_encrypt))
1403 		rx_frag_num++;
1404 
1405 	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1406 						rctx->iv_ctr_len);
1407 
1408 	if (ctx->auth.alg == HASH_ALG_AES)
1409 		hash_parms.type = ctx->cipher_type;
1410 
1411 	/* General case AAD padding (CCM and RFC4543 special cases below) */
1412 	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1413 						 aead_parms.assoc_size);
1414 
1415 	/* General case data padding (CCM decrypt special case below) */
1416 	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1417 							   chunksize);
1418 
1419 	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1420 		/*
1421 		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
1422 		 * 128-bit aligned
1423 		 */
1424 		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1425 					 ctx->cipher.mode,
1426 					 aead_parms.assoc_size + 2);
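
		/*
		 * For example, with a 16-byte GCM/CCM alignment (as on
		 * SPU-M), an assoc_size of 20 gives 22 bytes to align, so
		 * aad_pad_len would be 10 here.
		 */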
1427 
1428 		/*
1429 		 * And when decrypting CCM, need to pad without including
1430 		 * size of ICV which is tacked on to end of chunk
1431 		 */
1432 		if (!rctx->is_encrypt)
1433 			aead_parms.data_pad_len =
1434 				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1435 							chunksize - digestsize);
1436 
1437 		/* CCM also requires software to rewrite portions of IV: */
1438 		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1439 				       chunksize, rctx->is_encrypt,
1440 				       ctx->is_esp);
1441 	}
1442 
1443 	if (ctx->is_rfc4543) {
1444 		/*
1445 		 * RFC4543: data is included in AAD, so don't pad after AAD
1446 		 * and pad data based on both AAD + data size
1447 		 */
1448 		aead_parms.aad_pad_len = 0;
1449 		if (!rctx->is_encrypt)
1450 			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1451 					ctx->cipher.mode,
1452 					aead_parms.assoc_size + chunksize -
1453 					digestsize);
1454 		else
1455 			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1456 					ctx->cipher.mode,
1457 					aead_parms.assoc_size + chunksize);
1458 
1459 		req_opts.is_rfc4543 = true;
1460 	}
1461 
1462 	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1463 		incl_icv = true;
1464 		tx_frag_num++;
1465 		/* Copy ICV from end of src scatterlist to digest buf */
1466 		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1467 				    req->assoclen + rctx->total_sent -
1468 				    digestsize);
1469 	}
1470 
1471 	atomic64_add(chunksize, &iproc_priv.bytes_out);
1472 
1473 	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1474 
1475 	/* Prepend SPU header with type 3 BCM header */
1476 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1477 
1478 	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1479 					      BCM_HDR_LEN, &req_opts,
1480 					      &cipher_parms, &hash_parms,
1481 					      &aead_parms, chunksize);
1482 
1483 	/* Determine total length of padding. Put all padding in one buffer. */
1484 	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1485 				   chunksize, aead_parms.aad_pad_len,
1486 				   aead_parms.data_pad_len, 0);
1487 
1488 	stat_pad_len = spu->spu_wordalign_padlen(db_size);
1489 
1490 	if (stat_pad_len)
1491 		rx_frag_num++;
1492 	pad_len = aead_parms.data_pad_len + stat_pad_len;
1493 	if (pad_len) {
1494 		tx_frag_num++;
1495 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1496 				     aead_parms.data_pad_len, 0,
1497 				     ctx->auth.alg, ctx->auth.mode,
1498 				     rctx->total_sent, stat_pad_len);
1499 	}
1500 
1501 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1502 			      spu_hdr_len);
1503 	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1504 	packet_dump("    aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1505 	packet_log("BD:\n");
1506 	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1507 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1508 
1509 	/*
1510 	 * Build mailbox message containing SPU request msg and rx buffers
1511 	 * to catch response message
1512 	 */
1513 	memset(mssg, 0, sizeof(*mssg));
1514 	mssg->type = BRCM_MESSAGE_SPU;
1515 	mssg->ctx = rctx;	/* Will be returned in response */
1516 
1517 	/* Create rx scatterlist to catch result */
1518 	rx_frag_num += rctx->dst_nents;
1519 	resp_len = chunksize;
1520 
1521 	/*
1522 	 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
1523 	 * padding. Have to for SHA-224 and other truncated SHAs because SPU
1524 	 * sends entire digest back.
1525 	 */
1526 	rx_frag_num++;
1527 
1528 	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1529 	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1530 		/*
1531 		 * Input is ciphertext plus ICV, but the ICV is not
1532 		 * included in the output.
1533 		 */
1534 		resp_len -= ctx->digestsize;
1535 		if (resp_len == 0)
1536 			/* no rx frags to catch output data */
1537 			rx_frag_num -= rctx->dst_nents;
1538 	}
1539 
1540 	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1541 				    aead_parms.assoc_size,
1542 				    aead_parms.ret_iv_len, resp_len, digestsize,
1543 				    stat_pad_len);
1544 	if (err)
1545 		return err;
1546 
1547 	/* Create tx scatterlist containing SPU request message */
1548 	tx_frag_num += rctx->src_nents;
1549 	tx_frag_num += assoc_nents;
1550 	if (aead_parms.aad_pad_len)
1551 		tx_frag_num++;
1552 	if (aead_parms.iv_len)
1553 		tx_frag_num++;
1554 	if (spu->spu_tx_status_len())
1555 		tx_frag_num++;
1556 	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1557 				    rctx->assoc, aead_parms.assoc_size,
1558 				    assoc_nents, aead_parms.iv_len, chunksize,
1559 				    aead_parms.aad_pad_len, pad_len, incl_icv);
1560 	if (err)
1561 		return err;
1562 
1563 	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
1564 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
1565 		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
1566 			/*
1567 			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
1568 			 * we're not in atomic context, so wait and try again.
1569 			 */
1570 			retry_cnt++;
1571 			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
1572 			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
1573 						mssg);
1574 			atomic_inc(&iproc_priv.mb_no_spc);
1575 		}
1576 	}
1577 	if (err < 0) {
1578 		atomic_inc(&iproc_priv.mb_send_fail);
1579 		return err;
1580 	}
1581 
1582 	return -EINPROGRESS;
1583 }
1584 
1585 /**
1586  * handle_aead_resp() - Process a SPU response message for an AEAD request.
1587  * @rctx:  Crypto request context
1588  */
1589 static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1590 {
1591 	struct spu_hw *spu = &iproc_priv.spu;
1592 	struct crypto_async_request *areq = rctx->parent;
1593 	struct aead_request *req = container_of(areq,
1594 						struct aead_request, base);
1595 	struct iproc_ctx_s *ctx = rctx->ctx;
1596 	u32 payload_len;
1597 	unsigned int icv_offset;
1598 	u32 result_len;
1599 
1600 	/* See how much data was returned */
1601 	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1602 	flow_log("payload_len %u\n", payload_len);
1603 
1604 	/* only count payload */
1605 	atomic64_add(payload_len, &iproc_priv.bytes_in);
1606 
1607 	if (req->assoclen)
1608 		packet_dump("  assoc_data ", rctx->msg_buf.a.resp_aad,
1609 			    req->assoclen);
1610 
1611 	/*
1612 	 * If encrypting, copy the ICV back to the destination buffer. In
1613 	 * the decrypt case, the SPU returns the digest, but the crypto
1614 	 * API does not expect the ICV in the dst buffer.
1615 	 */
1616 	result_len = req->cryptlen;
1617 	if (rctx->is_encrypt) {
1618 		icv_offset = req->assoclen + rctx->total_sent;
1619 		packet_dump("  ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1620 		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1621 		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1622 				      ctx->digestsize, icv_offset);
1623 		result_len += ctx->digestsize;
1624 	}
1625 
1626 	packet_log("response data:  ");
1627 	dump_sg(req->dst, req->assoclen, result_len);
1628 
1629 	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1630 	if (ctx->cipher.alg == CIPHER_ALG_AES) {
1631 		if (ctx->cipher.mode == CIPHER_MODE_CCM)
1632 			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1633 		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1634 			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1635 		else
1636 			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1637 	} else {
1638 		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1639 	}
1640 }
1641 
1642 /**
1643  * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
1644  * @rctx:  request context
1645  *
1646  * Mailbox scatterlists are allocated for each chunk. So free them after
1647  * processing each chunk.
1648  */
1649 static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1650 {
1651 	/* mailbox message used to tx request */
1652 	struct brcm_message *mssg = &rctx->mb_mssg;
1653 
1654 	kfree(mssg->spu.src);
1655 	kfree(mssg->spu.dst);
1656 	memset(mssg, 0, sizeof(struct brcm_message));
1657 }
1658 
1659 /**
1660  * finish_req() - Used to invoke the complete callback from the requester when
1661  * a request has been handled asynchronously.
1662  * @rctx:  Request context
1663  * @err:   Indicates whether the request was successful or not
1664  *
1665  * Ensures that cleanup has been done for request
1666  */
1667 static void finish_req(struct iproc_reqctx_s *rctx, int err)
1668 {
1669 	struct crypto_async_request *areq = rctx->parent;
1670 
1671 	flow_log("%s() err:%d\n\n", __func__, err);
1672 
1673 	/* No harm done if already called */
1674 	spu_chunk_cleanup(rctx);
1675 
1676 	if (areq)
1677 		areq->complete(areq, err);
1678 }
1679 
1680 /**
1681  * spu_rx_callback() - Callback from mailbox framework with a SPU response.
1682  * @cl:		mailbox client structure for SPU driver
1683  * @msg:	mailbox message containing SPU response
1684  */
1685 static void spu_rx_callback(struct mbox_client *cl, void *msg)
1686 {
1687 	struct spu_hw *spu = &iproc_priv.spu;
1688 	struct brcm_message *mssg = msg;
1689 	struct iproc_reqctx_s *rctx;
1690 	struct iproc_ctx_s *ctx;
1691 	struct crypto_async_request *areq;
1692 	int err = 0;
1693 
1694 	rctx = mssg->ctx;
1695 	if (unlikely(!rctx)) {
1696 		/* This is fatal */
1697 		pr_err("%s(): no request context", __func__);
1698 		err = -EFAULT;
1699 		goto cb_finish;
1700 	}
1701 	areq = rctx->parent;
1702 	ctx = rctx->ctx;
1703 
1704 	/* process the SPU status */
1705 	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1706 	if (err != 0) {
1707 		if (err == SPU_INVALID_ICV)
1708 			atomic_inc(&iproc_priv.bad_icv);
1709 		err = -EBADMSG;
1710 		goto cb_finish;
1711 	}
1712 
1713 	/* Process the SPU response message */
1714 	switch (rctx->ctx->alg->type) {
1715 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
1716 		handle_ablkcipher_resp(rctx);
1717 		break;
1718 	case CRYPTO_ALG_TYPE_AHASH:
1719 		handle_ahash_resp(rctx);
1720 		break;
1721 	case CRYPTO_ALG_TYPE_AEAD:
1722 		handle_aead_resp(rctx);
1723 		break;
1724 	default:
1725 		err = -EINVAL;
1726 		goto cb_finish;
1727 	}
1728 
1729 	/*
1730 	 * If this response does not complete the request, then send the next
1731 	 * request chunk.
1732 	 */
1733 	if (rctx->total_sent < rctx->total_todo) {
1734 		/* Deallocate anything specific to previous chunk */
1735 		spu_chunk_cleanup(rctx);
1736 
1737 		switch (rctx->ctx->alg->type) {
1738 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
1739 			err = handle_ablkcipher_req(rctx);
1740 			break;
1741 		case CRYPTO_ALG_TYPE_AHASH:
1742 			err = handle_ahash_req(rctx);
1743 			if (err == -EAGAIN)
1744 				/*
1745 				 * we saved data in hash carry, but tell crypto
1746 				 * API we successfully completed request.
1747 				 */
1748 				err = 0;
1749 			break;
1750 		case CRYPTO_ALG_TYPE_AEAD:
1751 			err = handle_aead_req(rctx);
1752 			break;
1753 		default:
1754 			err = -EINVAL;
1755 		}
1756 
1757 		if (err == -EINPROGRESS)
1758 			/* Successfully submitted request for next chunk */
1759 			return;
1760 	}
1761 
1762 cb_finish:
1763 	finish_req(rctx, err);
1764 }
1765 
1766 /* ==================== Kernel Cryptographic API ==================== */
1767 
1768 /**
1769  * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request.
1770  * @req:	Crypto API request
1771  * @encrypt:	true if encrypting; false if decrypting
1772  *
1773  * Return: -EINPROGRESS if request accepted and result will be returned
1774  *			asynchronously
1775  *	   < 0 if an error
1776  */
1777 static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
1778 {
1779 	struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
1780 	struct iproc_ctx_s *ctx =
1781 	    crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
1782 	int err;
1783 
1784 	flow_log("%s() enc:%u\n", __func__, encrypt);
1785 
1786 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1787 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1788 	rctx->parent = &req->base;
1789 	rctx->is_encrypt = encrypt;
1790 	rctx->bd_suppress = false;
1791 	rctx->total_todo = req->nbytes;
1792 	rctx->src_sent = 0;
1793 	rctx->total_sent = 0;
1794 	rctx->total_received = 0;
1795 	rctx->ctx = ctx;
1796 
1797 	/* Initialize current position in src and dst scatterlists */
1798 	rctx->src_sg = req->src;
1799 	rctx->src_nents = 0;
1800 	rctx->src_skip = 0;
1801 	rctx->dst_sg = req->dst;
1802 	rctx->dst_nents = 0;
1803 	rctx->dst_skip = 0;
1804 
1805 	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
1806 	    ctx->cipher.mode == CIPHER_MODE_CTR ||
1807 	    ctx->cipher.mode == CIPHER_MODE_OFB ||
1808 	    ctx->cipher.mode == CIPHER_MODE_XTS ||
1809 	    ctx->cipher.mode == CIPHER_MODE_GCM ||
1810 	    ctx->cipher.mode == CIPHER_MODE_CCM) {
1811 		rctx->iv_ctr_len =
1812 		    crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
1813 		memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
1814 	} else {
1815 		rctx->iv_ctr_len = 0;
1816 	}
1817 
1818 	/* Choose a SPU to process this request */
1819 	rctx->chan_idx = select_channel();
1820 	err = handle_ablkcipher_req(rctx);
1821 	if (err != -EINPROGRESS)
1822 		/* synchronous result */
1823 		spu_chunk_cleanup(rctx);
1824 
1825 	return err;
1826 }
1827 
1828 static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1829 		      unsigned int keylen)
1830 {
1831 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1832 	u32 tmp[DES_EXPKEY_WORDS];
1833 
1834 	if (keylen == DES_KEY_SIZE) {
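		/*
		 * des_ekey() returns 0 for a weak key; reject it only if the
		 * caller asked for weak-key checking.
		 */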
1835 		if (des_ekey(tmp, key) == 0) {
1836 			if (crypto_ablkcipher_get_flags(cipher) &
1837 			    CRYPTO_TFM_REQ_WEAK_KEY) {
1838 				u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
1839 
1840 				crypto_ablkcipher_set_flags(cipher, flags);
1841 				return -EINVAL;
1842 			}
1843 		}
1844 
1845 		ctx->cipher_type = CIPHER_TYPE_DES;
1846 	} else {
1847 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1848 		return -EINVAL;
1849 	}
1850 	return 0;
1851 }
1852 
1853 static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1854 			   unsigned int keylen)
1855 {
1856 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1857 
1858 	if (keylen == (DES_KEY_SIZE * 3)) {
1859 		const u32 *K = (const u32 *)key;
1860 		u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
1861 
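		/*
		 * Reject 3DES keys where K1 == K2 or K2 == K3, since those
		 * degenerate to single DES.
		 */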
1862 		if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
1863 		    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
1864 			crypto_ablkcipher_set_flags(cipher, flags);
1865 			return -EINVAL;
1866 		}
1867 
1868 		ctx->cipher_type = CIPHER_TYPE_3DES;
1869 	} else {
1870 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1871 		return -EINVAL;
1872 	}
1873 	return 0;
1874 }
1875 
1876 static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1877 		      unsigned int keylen)
1878 {
1879 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1880 
1881 	if (ctx->cipher.mode == CIPHER_MODE_XTS)
1882 		/* XTS includes two keys of equal length */
1883 		keylen = keylen / 2;
1884 
1885 	switch (keylen) {
1886 	case AES_KEYSIZE_128:
1887 		ctx->cipher_type = CIPHER_TYPE_AES128;
1888 		break;
1889 	case AES_KEYSIZE_192:
1890 		ctx->cipher_type = CIPHER_TYPE_AES192;
1891 		break;
1892 	case AES_KEYSIZE_256:
1893 		ctx->cipher_type = CIPHER_TYPE_AES256;
1894 		break;
1895 	default:
1896 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1897 		return -EINVAL;
1898 	}
1899 	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1900 		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1901 	return 0;
1902 }
1903 
1904 static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1905 		      unsigned int keylen)
1906 {
1907 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1908 	int i;
1909 
1910 	ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
1911 
1912 	ctx->enckey[0] = 0x00;	/* 0x00 */
1913 	ctx->enckey[1] = 0x00;	/* i    */
1914 	ctx->enckey[2] = 0x00;	/* 0x00 */
1915 	ctx->enckey[3] = 0x00;	/* j    */
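	/*
	 * Repeat the key material to fill the fixed-size key area that
	 * follows the 4-byte state header above.
	 */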
1916 	for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
1917 		ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
1918 
1919 	ctx->cipher_type = CIPHER_TYPE_INIT;
1920 
1921 	return 0;
1922 }
1923 
1924 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1925 			     unsigned int keylen)
1926 {
1927 	struct spu_hw *spu = &iproc_priv.spu;
1928 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1929 	struct spu_cipher_parms cipher_parms;
1930 	u32 alloc_len = 0;
1931 	int err;
1932 
1933 	flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
1934 	flow_dump("  key: ", key, keylen);
1935 
1936 	switch (ctx->cipher.alg) {
1937 	case CIPHER_ALG_DES:
1938 		err = des_setkey(cipher, key, keylen);
1939 		break;
1940 	case CIPHER_ALG_3DES:
1941 		err = threedes_setkey(cipher, key, keylen);
1942 		break;
1943 	case CIPHER_ALG_AES:
1944 		err = aes_setkey(cipher, key, keylen);
1945 		break;
1946 	case CIPHER_ALG_RC4:
1947 		err = rc4_setkey(cipher, key, keylen);
1948 		break;
1949 	default:
1950 		pr_err("%s() Error: unknown cipher alg\n", __func__);
1951 		err = -EINVAL;
1952 	}
1953 	if (err)
1954 		return err;
1955 
1956 	/* RC4 already populated ctx->enkey */
1957 	/* RC4 setkey already populated ctx->enckey */
1958 		memcpy(ctx->enckey, key, keylen);
1959 		ctx->enckeylen = keylen;
1960 	}
1961 	/* SPU needs XTS keys in the reverse order the crypto API presents */
1962 	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
1963 	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
1964 		unsigned int xts_keylen = keylen / 2;
1965 
1966 		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
1967 		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
1968 	}
1969 
1970 	if (spu->spu_type == SPU_TYPE_SPUM)
1971 		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
1972 	else if (spu->spu_type == SPU_TYPE_SPU2)
1973 		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
1974 	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
1975 	cipher_parms.iv_buf = NULL;
1976 	cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
1977 	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1978 
1979 	cipher_parms.alg = ctx->cipher.alg;
1980 	cipher_parms.mode = ctx->cipher.mode;
1981 	cipher_parms.type = ctx->cipher_type;
1982 	cipher_parms.key_buf = ctx->enckey;
1983 	cipher_parms.key_len = ctx->enckeylen;
1984 
1985 	/* Prepend SPU request message with BCM header */
1986 	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1987 	ctx->spu_req_hdr_len =
1988 	    spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
1989 				     &cipher_parms);
1990 
1991 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
1992 							  ctx->enckeylen,
1993 							  false);
1994 
1995 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1996 
1997 	return 0;
1998 }
1999 
2000 static int ablkcipher_encrypt(struct ablkcipher_request *req)
2001 {
2002 	flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
2003 
2004 	return ablkcipher_enqueue(req, true);
2005 }
2006 
2007 static int ablkcipher_decrypt(struct ablkcipher_request *req)
2008 {
2009 	flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
2010 	return ablkcipher_enqueue(req, false);
2011 }
2012 
2013 static int ahash_enqueue(struct ahash_request *req)
2014 {
2015 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2016 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2017 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2018 	int err = 0;
2019 	const char *alg_name;
2020 
2021 	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
2022 
2023 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2024 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2025 	rctx->parent = &req->base;
2026 	rctx->ctx = ctx;
2027 	rctx->bd_suppress = true;
2028 	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2029 
2030 	/* Initialize position in src scatterlist */
2031 	rctx->src_sg = req->src;
2032 	rctx->src_skip = 0;
2033 	rctx->src_nents = 0;
2034 	rctx->dst_sg = NULL;
2035 	rctx->dst_skip = 0;
2036 	rctx->dst_nents = 0;
2037 
2038 	/* SPU2 hardware does not compute hash of zero length data */
2039 	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
2040 	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
2041 		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2042 		flow_log("Doing %sfinal %s zero-len hash request in software\n",
2043 			 rctx->is_final ? "" : "non-", alg_name);
2044 		err = do_shash((unsigned char *)alg_name, req->result,
2045 			       NULL, 0, NULL, 0, ctx->authkey,
2046 			       ctx->authkeylen);
2047 		if (err < 0)
2048 			flow_log("Hash request failed with error %d\n", err);
2049 		return err;
2050 	}
2051 	/* Choose a SPU to process this request */
2052 	rctx->chan_idx = select_channel();
2053 
2054 	err = handle_ahash_req(rctx);
2055 	if (err != -EINPROGRESS)
2056 		/* synchronous result */
2057 		spu_chunk_cleanup(rctx);
2058 
2059 	if (err == -EAGAIN)
2060 		/*
2061 		 * we saved data in hash carry, but tell crypto API
2062 		 * we successfully completed request.
2063 		 */
2064 		err = 0;
2065 
2066 	return err;
2067 }
2068 
2069 static int __ahash_init(struct ahash_request *req)
2070 {
2071 	struct spu_hw *spu = &iproc_priv.spu;
2072 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2073 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2074 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2075 
2076 	flow_log("%s()\n", __func__);
2077 
2078 	/* Initialize the context */
2079 	rctx->hash_carry_len = 0;
2080 	rctx->is_final = 0;
2081 
2082 	rctx->total_todo = 0;
2083 	rctx->src_sent = 0;
2084 	rctx->total_sent = 0;
2085 	rctx->total_received = 0;
2086 
2087 	ctx->digestsize = crypto_ahash_digestsize(tfm);
2088 	/* If we add a hash whose digest is larger, catch it here. */
2089 	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
2090 
2091 	rctx->is_sw_hmac = false;
2092 
2093 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
2094 							  true);
2095 
2096 	return 0;
2097 }
2098 
2099 /**
2100  * spu_no_incr_hash() - Determine whether incremental hashing is supported.
2101  * @ctx:  Crypto session context
2102  *
2103  * SPU-2 does not support incremental hashing. (Revisit and condition
2104  * on chip revision or a device tree property if future versions add
2105  * support.)
2106  *
2107  * SPU-M also doesn't support incremental hashing of AES-XCBC
2108  *
2109  * Return: true if incremental hashing is not supported
2110  *         false otherwise
2111  */
2112 bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2113 {
2114 	struct spu_hw *spu = &iproc_priv.spu;
2115 
2116 	if (spu->spu_type == SPU_TYPE_SPU2)
2117 		return true;
2118 
2119 	if ((ctx->auth.alg == HASH_ALG_AES) &&
2120 	    (ctx->auth.mode == HASH_MODE_XCBC))
2121 		return true;
2122 
2123 	/* Otherwise, incremental hashing is supported */
2124 	return false;
2125 }
2126 
2127 static int ahash_init(struct ahash_request *req)
2128 {
2129 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2130 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2131 	const char *alg_name;
2132 	struct crypto_shash *hash;
2133 	int ret;
2134 	gfp_t gfp;
2135 
2136 	if (spu_no_incr_hash(ctx)) {
2137 		/*
2138 		 * If we get an incremental hashing request and it's not
2139 		 * supported by the hardware, we need to handle it in software
2140 		 * by calling synchronous hash functions.
2141 		 */
2142 		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2143 		hash = crypto_alloc_shash(alg_name, 0, 0);
2144 		if (IS_ERR(hash)) {
2145 			ret = PTR_ERR(hash);
2146 			goto err;
2147 		}
2148 
2149 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2150 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
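		/* Allocate a shash_desc plus the tfm-specific state behind it */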
2151 		ctx->shash = kmalloc(sizeof(*ctx->shash) +
2152 				     crypto_shash_descsize(hash), gfp);
2153 		if (!ctx->shash) {
2154 			ret = -ENOMEM;
2155 			goto err_hash;
2156 		}
2157 		ctx->shash->tfm = hash;
2158 		ctx->shash->flags = 0;
2159 
2160 		/* Set the key using data we already have from setkey */
2161 		if (ctx->authkeylen > 0) {
2162 			ret = crypto_shash_setkey(hash, ctx->authkey,
2163 						  ctx->authkeylen);
2164 			if (ret)
2165 				goto err_shash;
2166 		}
2167 
2168 		/* Initialize hash w/ this key and other params */
2169 		ret = crypto_shash_init(ctx->shash);
2170 		if (ret)
2171 			goto err_shash;
2172 	} else {
2173 		/* Otherwise call the internal function which uses SPU hw */
2174 		ret = __ahash_init(req);
2175 	}
2176 
2177 	return ret;
2178 
2179 err_shash:
2180 	kfree(ctx->shash);
2181 err_hash:
2182 	crypto_free_shash(hash);
2183 err:
2184 	return ret;
2185 }
2186 
2187 static int __ahash_update(struct ahash_request *req)
2188 {
2189 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2190 
2191 	flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2192 
2193 	if (!req->nbytes)
2194 		return 0;
2195 	rctx->total_todo += req->nbytes;
2196 	rctx->src_sent = 0;
2197 
2198 	return ahash_enqueue(req);
2199 }
2200 
2201 static int ahash_update(struct ahash_request *req)
2202 {
2203 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2204 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2205 	u8 *tmpbuf;
2206 	int ret;
2207 	int nents;
2208 	gfp_t gfp;
2209 
2210 	if (spu_no_incr_hash(ctx)) {
2211 		/*
2212 		 * If we get an incremental hashing request and it's not
2213 		 * supported by the hardware, we need to handle it in software
2214 		 * by calling synchronous hash functions.
2215 		 */
2216 		if (req->src)
2217 			nents = sg_nents(req->src);
2218 		else
2219 			return -EINVAL;
2220 
2221 		/* Copy data from req scatterlist to tmp buffer */
2222 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2223 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2224 		tmpbuf = kmalloc(req->nbytes, gfp);
2225 		if (!tmpbuf)
2226 			return -ENOMEM;
2227 
2228 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2229 				req->nbytes) {
2230 			kfree(tmpbuf);
2231 			return -EINVAL;
2232 		}
2233 
2234 		/* Call synchronous update */
2235 		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2236 		kfree(tmpbuf);
2237 	} else {
2238 		/* Otherwise call the internal function which uses SPU hw */
2239 		ret = __ahash_update(req);
2240 	}
2241 
2242 	return ret;
2243 }
2244 
2245 static int __ahash_final(struct ahash_request *req)
2246 {
2247 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2248 
2249 	flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2250 
2251 	rctx->is_final = 1;
2252 
2253 	return ahash_enqueue(req);
2254 }
2255 
2256 static int ahash_final(struct ahash_request *req)
2257 {
2258 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2259 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2260 	int ret;
2261 
2262 	if (spu_no_incr_hash(ctx)) {
2263 		/*
2264 		 * If we get an incremental hashing request and it's not
2265 		 * supported by the hardware, we need to handle it in software
2266 		 * by calling synchronous hash functions.
2267 		 */
2268 		ret = crypto_shash_final(ctx->shash, req->result);
2269 
2270 		/* Done with hash, can deallocate it now */
2271 		crypto_free_shash(ctx->shash->tfm);
2272 		kfree(ctx->shash);
2273 
2274 	} else {
2275 		/* Otherwise call the internal function which uses SPU hw */
2276 		ret = __ahash_final(req);
2277 	}
2278 
2279 	return ret;
2280 }
2281 
2282 static int __ahash_finup(struct ahash_request *req)
2283 {
2284 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2285 
2286 	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2287 
2288 	rctx->total_todo += req->nbytes;
2289 	rctx->src_sent = 0;
2290 	rctx->is_final = 1;
2291 
2292 	return ahash_enqueue(req);
2293 }
2294 
2295 static int ahash_finup(struct ahash_request *req)
2296 {
2297 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2298 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2299 	u8 *tmpbuf;
2300 	int ret;
2301 	int nents;
2302 	gfp_t gfp;
2303 
2304 	if (spu_no_incr_hash(ctx)) {
2305 		/*
2306 		 * If we get an incremental hashing request and it's not
2307 		 * supported by the hardware, we need to handle it in software
2308 		 * by calling synchronous hash functions.
2309 		 */
2310 		if (req->src) {
2311 			nents = sg_nents(req->src);
2312 		} else {
2313 			ret = -EINVAL;
2314 			goto ahash_finup_exit;
2315 		}
2316 
2317 		/* Copy data from req scatterlist to tmp buffer */
2318 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2319 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2320 		tmpbuf = kmalloc(req->nbytes, gfp);
2321 		if (!tmpbuf) {
2322 			ret = -ENOMEM;
2323 			goto ahash_finup_exit;
2324 		}
2325 
2326 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2327 				req->nbytes) {
2328 			ret = -EINVAL;
2329 			goto ahash_finup_free;
2330 		}
2331 
2332 		/* Call synchronous finup */
2333 		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2334 					 req->result);
2335 	} else {
2336 		/* Otherwise call the internal function which uses SPU hw */
2337 		return __ahash_finup(req);
2338 	}
2339 ahash_finup_free:
2340 	kfree(tmpbuf);
2341 
2342 ahash_finup_exit:
2343 	/* Done with hash, can deallocate it now */
2344 	crypto_free_shash(ctx->shash->tfm);
2345 	kfree(ctx->shash);
2346 	return ret;
2347 }
2348 
2349 static int ahash_digest(struct ahash_request *req)
2350 {
2351 	int err = 0;
2352 
2353 	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2354 
2355 	/* whole thing at once */
2356 	err = __ahash_init(req);
2357 	if (!err)
2358 		err = __ahash_finup(req);
2359 
2360 	return err;
2361 }
2362 
2363 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2364 			unsigned int keylen)
2365 {
2366 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2367 
2368 	flow_log("%s() ahash:%p key:%p keylen:%u\n",
2369 		 __func__, ahash, key, keylen);
2370 	flow_dump("  key: ", key, keylen);
2371 
2372 	if (ctx->auth.alg == HASH_ALG_AES) {
2373 		switch (keylen) {
2374 		case AES_KEYSIZE_128:
2375 			ctx->cipher_type = CIPHER_TYPE_AES128;
2376 			break;
2377 		case AES_KEYSIZE_192:
2378 			ctx->cipher_type = CIPHER_TYPE_AES192;
2379 			break;
2380 		case AES_KEYSIZE_256:
2381 			ctx->cipher_type = CIPHER_TYPE_AES256;
2382 			break;
2383 		default:
2384 			pr_err("%s() Error: Invalid key length\n", __func__);
2385 			return -EINVAL;
2386 		}
2387 	} else {
2388 		pr_err("%s() Error: unknown hash alg\n", __func__);
2389 		return -EINVAL;
2390 	}
2391 	memcpy(ctx->authkey, key, keylen);
2392 	ctx->authkeylen = keylen;
2393 
2394 	return 0;
2395 }
2396 
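/**
 * ahash_export() - Save the partial hash state of a request so that it can
 * later be restored with ahash_import() and the hash resumed.
 * @req:  Crypto API hash request
 * @out:  Buffer to hold the exported state; must be at least
 *	  sizeof(struct spu_hash_export_s) bytes
 *
 * A minimal caller sketch through the generic ahash API (hypothetical
 * illustration, not taken from this driver):
 *
 *	char state[sizeof(struct spu_hash_export_s)];
 *
 *	crypto_ahash_export(req, state);
 *	...
 *	crypto_ahash_import(req, state);
 *
 * Return: 0 always
 */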
2397 static int ahash_export(struct ahash_request *req, void *out)
2398 {
2399 	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2400 	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2401 
2402 	spu_exp->total_todo = rctx->total_todo;
2403 	spu_exp->total_sent = rctx->total_sent;
2404 	spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2405 	memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2406 	spu_exp->hash_carry_len = rctx->hash_carry_len;
2407 	memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2408 
2409 	return 0;
2410 }
2411 
2412 static int ahash_import(struct ahash_request *req, const void *in)
2413 {
2414 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2415 	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2416 
2417 	rctx->total_todo = spu_exp->total_todo;
2418 	rctx->total_sent = spu_exp->total_sent;
2419 	rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2420 	memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2421 	rctx->hash_carry_len = spu_exp->hash_carry_len;
2422 	memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2423 
2424 	return 0;
2425 }
2426 
2427 static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2428 			     unsigned int keylen)
2429 {
2430 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2431 	unsigned int blocksize =
2432 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2433 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
2434 	unsigned int index;
2435 	int rc;
2436 
2437 	flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2438 		 __func__, ahash, key, keylen, blocksize, digestsize);
2439 	flow_dump("  key: ", key, keylen);
2440 
2441 	if (keylen > blocksize) {
2442 		switch (ctx->auth.alg) {
2443 		case HASH_ALG_MD5:
2444 			rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2445 				      0, NULL, 0);
2446 			break;
2447 		case HASH_ALG_SHA1:
2448 			rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2449 				      0, NULL, 0);
2450 			break;
2451 		case HASH_ALG_SHA224:
2452 			rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2453 				      0, NULL, 0);
2454 			break;
2455 		case HASH_ALG_SHA256:
2456 			rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2457 				      0, NULL, 0);
2458 			break;
2459 		case HASH_ALG_SHA384:
2460 			rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2461 				      0, NULL, 0);
2462 			break;
2463 		case HASH_ALG_SHA512:
2464 			rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2465 				      0, NULL, 0);
2466 			break;
2467 		case HASH_ALG_SHA3_224:
2468 			rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2469 				      NULL, 0, NULL, 0);
2470 			break;
2471 		case HASH_ALG_SHA3_256:
2472 			rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2473 				      NULL, 0, NULL, 0);
2474 			break;
2475 		case HASH_ALG_SHA3_384:
2476 			rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2477 				      NULL, 0, NULL, 0);
2478 			break;
2479 		case HASH_ALG_SHA3_512:
2480 			rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2481 				      NULL, 0, NULL, 0);
2482 			break;
2483 		default:
2484 			pr_err("%s() Error: unknown hash alg\n", __func__);
2485 			return -EINVAL;
2486 		}
2487 		if (rc < 0) {
2488 			pr_err("%s() Error %d computing shash for %s\n",
2489 			       __func__, rc, hash_alg_name[ctx->auth.alg]);
2490 			return rc;
2491 		}
2492 		ctx->authkeylen = digestsize;
2493 
2494 		flow_log("  keylen > blocksize... hashed\n");
2495 		flow_dump("  newkey: ", ctx->authkey, ctx->authkeylen);
2496 	} else {
2497 		memcpy(ctx->authkey, key, keylen);
2498 		ctx->authkeylen = keylen;
2499 	}
2500 
2501 	/*
2502 	 * The full HMAC operation on SPU-M is not verified, so keep
2503 	 * the generation of ipad, opad and the outer hashing
2504 	 * in software.
2505 	 */
2506 	if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2507 		memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2508 		memset(ctx->ipad + ctx->authkeylen, 0,
2509 		       blocksize - ctx->authkeylen);
2510 		ctx->authkeylen = 0;
2511 		memcpy(ctx->opad, ctx->ipad, blocksize);
2512 
2513 		for (index = 0; index < blocksize; index++) {
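		/*
		 * XOR in the standard HMAC pad bytes (0x36 for ipad, 0x5c for
		 * opad), as defined in RFC 2104.
		 */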
2514 			ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2515 			ctx->opad[index] ^= HMAC_OPAD_VALUE;
2516 		}
2517 
2518 		flow_dump("  ipad: ", ctx->ipad, blocksize);
2519 		flow_dump("  opad: ", ctx->opad, blocksize);
2520 	}
2521 	ctx->digestsize = digestsize;
2522 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2523 
2524 	return 0;
2525 }
2526 
2527 static int ahash_hmac_init(struct ahash_request *req)
2528 {
2529 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2530 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2531 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2532 	unsigned int blocksize =
2533 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2534 
2535 	flow_log("ahash_hmac_init()\n");
2536 
2537 	/* init the context as a hash */
2538 	ahash_init(req);
2539 
2540 	if (!spu_no_incr_hash(ctx)) {
2541 		/* SPU-M can do incr hashing but needs sw for outer HMAC */
2542 		rctx->is_sw_hmac = true;
2543 		ctx->auth.mode = HASH_MODE_HASH;
2544 		/* start with a prepended ipad */
2545 		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2546 		rctx->hash_carry_len = blocksize;
2547 		rctx->total_todo += blocksize;
2548 	}
2549 
2550 	return 0;
2551 }
2552 
2553 static int ahash_hmac_update(struct ahash_request *req)
2554 {
2555 	flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2556 
2557 	if (!req->nbytes)
2558 		return 0;
2559 
2560 	return ahash_update(req);
2561 }
2562 
2563 static int ahash_hmac_final(struct ahash_request *req)
2564 {
2565 	flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2566 
2567 	return ahash_final(req);
2568 }
2569 
2570 static int ahash_hmac_finup(struct ahash_request *req)
2571 {
2572 	flow_log("ahash_hmac_finupl() nbytes:%u\n", req->nbytes);
2573 	flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2574 	return ahash_finup(req);
2575 }
2576 
2577 static int ahash_hmac_digest(struct ahash_request *req)
2578 {
2579 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2580 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2581 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2582 	unsigned int blocksize =
2583 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2584 
2585 	flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2586 
2587 	/* Perform initialization and then call finup */
2588 	__ahash_init(req);
2589 
2590 	if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2591 		/*
2592 		 * SPU2 supports the full HMAC implementation in
2593 		 * hardware, so there is no need to generate ipad, opad
2594 		 * and the outer hash in software. Only when the hash
2595 		 * key is longer than the hash block size must it first
2596 		 * be hashed down to the digest size and fed to SPU2 as
2597 		 * the hash key.
2598 		 */
2599 		rctx->is_sw_hmac = false;
2600 		ctx->auth.mode = HASH_MODE_HMAC;
2601 	} else {
2602 		rctx->is_sw_hmac = true;
2603 		ctx->auth.mode = HASH_MODE_HASH;
2604 		/* start with a prepended ipad */
2605 		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2606 		rctx->hash_carry_len = blocksize;
2607 		rctx->total_todo += blocksize;
2608 	}
2609 
2610 	return __ahash_finup(req);
2611 }
2612 
2613 /* aead helpers */
2614 
2615 static int aead_need_fallback(struct aead_request *req)
2616 {
2617 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2618 	struct spu_hw *spu = &iproc_priv.spu;
2619 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2620 	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2621 	u32 payload_len;
2622 
2623 	/*
2624 	 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
2625 	 * and AAD are both 0 bytes long. So use fallback in this case.
2626 	 */
2627 	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2628 	     (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2629 	    (req->assoclen == 0)) {
2630 		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2631 		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2632 			flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2633 			return 1;
2634 		}
2635 	}
2636 
2637 	/* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
2638 	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2639 	    (spu->spu_type == SPU_TYPE_SPUM) &&
2640 	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2641 	    (ctx->digestsize != 16)) {
2642 		flow_log("%s() AES CCM needs fallback for digest size %d\n",
2643 			 __func__, ctx->digestsize);
2644 		return 1;
2645 	}
2646 
2647 	/*
2648 	 * SPU-M on NSP has an issue where AES-CCM hash is not correct
2649 	 * when AAD size is 0
2650 	 */
2651 	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2652 	    (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2653 	    (req->assoclen == 0)) {
2654 		flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2655 			 __func__);
2656 		return 1;
2657 	}
2658 
2659 	payload_len = req->cryptlen;
2660 	if (spu->spu_type == SPU_TYPE_SPUM)
2661 		payload_len += req->assoclen;
2662 
2663 	flow_log("%s() payload len: %u\n", __func__, payload_len);
2664 
2665 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2666 		return 0;
2667 	else
2668 		return payload_len > ctx->max_payload;
2669 }
2670 
2671 static void aead_complete(struct crypto_async_request *areq, int err)
2672 {
2673 	struct aead_request *req =
2674 	    container_of(areq, struct aead_request, base);
2675 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2676 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2677 
2678 	flow_log("%s() err:%d\n", __func__, err);
2679 
2680 	areq->tfm = crypto_aead_tfm(aead);
2681 
2682 	areq->complete = rctx->old_complete;
2683 	areq->data = rctx->old_data;
2684 
2685 	areq->complete(areq, err);
2686 }
2687 
2688 static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2689 {
2690 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2691 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2692 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2693 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2694 	int err;
2695 	u32 req_flags;
2696 
2697 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
2698 
2699 	if (ctx->fallback_cipher) {
2700 		/* Store the cipher tfm and then use the fallback tfm */
2701 		rctx->old_tfm = tfm;
2702 		aead_request_set_tfm(req, ctx->fallback_cipher);
2703 		/*
2704 		 * Save the callback and chain ourselves in, so we can restore
2705 		 * the tfm
2706 		 */
2707 		rctx->old_complete = req->base.complete;
2708 		rctx->old_data = req->base.data;
2709 		req_flags = aead_request_flags(req);
2710 		aead_request_set_callback(req, req_flags, aead_complete, req);
2711 		err = is_encrypt ? crypto_aead_encrypt(req) :
2712 		    crypto_aead_decrypt(req);
2713 
2714 		if (err == 0) {
2715 			/*
2716 			 * fallback was synchronous (did not return
2717 			 * -EINPROGRESS). So restore request state here.
2718 			 */
2719 			aead_request_set_callback(req, req_flags,
2720 						  rctx->old_complete, req);
2721 			req->base.data = rctx->old_data;
2722 			aead_request_set_tfm(req, aead);
2723 			flow_log("%s() fallback completed successfully\n\n",
2724 				 __func__);
2725 		}
2726 	} else {
2727 		err = -EINVAL;
2728 	}
2729 
2730 	return err;
2731 }
2732 
2733 static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2734 {
2735 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2736 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2737 	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2738 	int err;
2739 
2740 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
2741 
2742 	if (req->assoclen > MAX_ASSOC_SIZE) {
2743 		pr_err
2744 		    ("%s() Error: associated data too long. (%u > %u bytes)\n",
2745 		     __func__, req->assoclen, MAX_ASSOC_SIZE);
2746 		return -EINVAL;
2747 	}
2748 
2749 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2750 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2751 	rctx->parent = &req->base;
2752 	rctx->is_encrypt = is_encrypt;
2753 	rctx->bd_suppress = false;
2754 	rctx->total_todo = req->cryptlen;
2755 	rctx->src_sent = 0;
2756 	rctx->total_sent = 0;
2757 	rctx->total_received = 0;
2758 	rctx->is_sw_hmac = false;
2759 	rctx->ctx = ctx;
2760 	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2761 
2762 	/* assoc data is at start of src sg */
2763 	rctx->assoc = req->src;
2764 
2765 	/*
2766 	 * Init current position in src scatterlist to be after assoc data.
2767 	 * src_skip set to buffer offset where data begins. (Assoc data could
2768 	 * end in the middle of a buffer.)
2769 	 */
2770 	if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2771 			     &rctx->src_skip) < 0) {
2772 		pr_err("%s() Error: Unable to find start of src data\n",
2773 		       __func__);
2774 		return -EINVAL;
2775 	}
2776 
2777 	rctx->src_nents = 0;
2778 	rctx->dst_nents = 0;
2779 	if (req->dst == req->src) {
2780 		rctx->dst_sg = rctx->src_sg;
2781 		rctx->dst_skip = rctx->src_skip;
2782 	} else {
2783 		/*
2784 		 * Expect req->dst to have room for assoc data followed by
2785 		 * output data and ICV, if encrypt. So initialize dst_sg
2786 		 * to point beyond assoc len offset.
2787 		 */
2788 		if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2789 				     &rctx->dst_skip) < 0) {
2790 			pr_err("%s() Error: Unable to find start of dst data\n",
2791 			       __func__);
2792 			return -EINVAL;
2793 		}
2794 	}
2795 
2796 	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2797 	    ctx->cipher.mode == CIPHER_MODE_CTR ||
2798 	    ctx->cipher.mode == CIPHER_MODE_OFB ||
2799 	    ctx->cipher.mode == CIPHER_MODE_XTS ||
2800 	    ctx->cipher.mode == CIPHER_MODE_GCM) {
2801 		rctx->iv_ctr_len =
2802 			ctx->salt_len +
2803 			crypto_aead_ivsize(crypto_aead_reqtfm(req));
2804 	} else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2805 		rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2806 	} else {
2807 		rctx->iv_ctr_len = 0;
2808 	}
2809 
2810 	rctx->hash_carry_len = 0;
2811 
2812 	flow_log("  src sg: %p\n", req->src);
2813 	flow_log("  rctx->src_sg: %p, src_skip %u\n",
2814 		 rctx->src_sg, rctx->src_skip);
2815 	flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
2816 	flow_log("  dst sg: %p\n", req->dst);
2817 	flow_log("  rctx->dst_sg: %p, dst_skip %u\n",
2818 		 rctx->dst_sg, rctx->dst_skip);
2819 	flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
2820 	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
2821 	flow_log("  authkeylen:%u\n", ctx->authkeylen);
2822 	flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2823 
2824 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2825 		flow_log("  max_payload infinite\n");
2826 	else
2827 		flow_log("  max_payload: %u\n", ctx->max_payload);
2828 
2829 	if (unlikely(aead_need_fallback(req)))
2830 		return aead_do_fallback(req, is_encrypt);
2831 
2832 	/*
2833 	 * Do memory allocations for request after fallback check, because if we
2834 	 * do fallback, we won't call finish_req() to dealloc.
2835 	 */
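	/*
	 * msg_buf.iv_ctr layout: any ESP salt goes at salt_offset, followed
	 * by the IV supplied with the request.
	 */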
2836 	if (rctx->iv_ctr_len) {
2837 		if (ctx->salt_len)
2838 			memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2839 			       ctx->salt, ctx->salt_len);
2840 		memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2841 		       req->iv,
2842 		       rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2843 	}
2844 
2845 	rctx->chan_idx = select_channel();
2846 	err = handle_aead_req(rctx);
2847 	if (err != -EINPROGRESS)
2848 		/* synchronous result */
2849 		spu_chunk_cleanup(rctx);
2850 
2851 	return err;
2852 }
2853 
2854 static int aead_authenc_setkey(struct crypto_aead *cipher,
2855 			       const u8 *key, unsigned int keylen)
2856 {
2857 	struct spu_hw *spu = &iproc_priv.spu;
2858 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2859 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2860 	struct rtattr *rta = (void *)key;
2861 	struct crypto_authenc_key_param *param;
2862 	const u8 *origkey = key;
2863 	const unsigned int origkeylen = keylen;
2864 
2865 	int ret = 0;
2866 
2867 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2868 		 keylen);
2869 	flow_dump("  key: ", key, keylen);
2870 
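	/*
	 * The authenc() key blob starts with an rtattr of type
	 * CRYPTO_AUTHENC_KEYA_PARAM carrying enckeylen, followed by the
	 * authentication key and then the encryption key.
	 */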
2871 	if (!RTA_OK(rta, keylen))
2872 		goto badkey;
2873 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
2874 		goto badkey;
2875 	if (RTA_PAYLOAD(rta) < sizeof(*param))
2876 		goto badkey;
2877 
2878 	param = RTA_DATA(rta);
2879 	ctx->enckeylen = be32_to_cpu(param->enckeylen);
2880 
2881 	key += RTA_ALIGN(rta->rta_len);
2882 	keylen -= RTA_ALIGN(rta->rta_len);
2883 
2884 	if (keylen < ctx->enckeylen)
2885 		goto badkey;
2886 	if (ctx->enckeylen > MAX_KEY_SIZE)
2887 		goto badkey;
2888 
2889 	ctx->authkeylen = keylen - ctx->enckeylen;
2890 
2891 	if (ctx->authkeylen > MAX_KEY_SIZE)
2892 		goto badkey;
2893 
2894 	memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
2895 	/* May end up padding auth key. So make sure it's zeroed. */
2896 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
2897 	memcpy(ctx->authkey, key, ctx->authkeylen);
2898 
2899 	switch (ctx->alg->cipher_info.alg) {
2900 	case CIPHER_ALG_DES:
2901 		if (ctx->enckeylen == DES_KEY_SIZE) {
2902 			u32 tmp[DES_EXPKEY_WORDS];
2903 			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2904 
2905 			if (des_ekey(tmp, key) == 0) {
2906 				if (crypto_aead_get_flags(cipher) &
2907 				    CRYPTO_TFM_REQ_WEAK_KEY) {
2908 					crypto_aead_set_flags(cipher, flags);
2909 					return -EINVAL;
2910 				}
2911 			}
2912 
2913 			ctx->cipher_type = CIPHER_TYPE_DES;
2914 		} else {
2915 			goto badkey;
2916 		}
2917 		break;
2918 	case CIPHER_ALG_3DES:
2919 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
2920 			const u32 *K = (const u32 *)key;
2921 			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
2922 
2923 			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
2924 			    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
2925 				crypto_aead_set_flags(cipher, flags);
2926 				return -EINVAL;
2927 			}
2928 
2929 			ctx->cipher_type = CIPHER_TYPE_3DES;
2930 		} else {
2931 			crypto_aead_set_flags(cipher,
2932 					      CRYPTO_TFM_RES_BAD_KEY_LEN);
2933 			return -EINVAL;
2934 		}
2935 		break;
2936 	case CIPHER_ALG_AES:
2937 		switch (ctx->enckeylen) {
2938 		case AES_KEYSIZE_128:
2939 			ctx->cipher_type = CIPHER_TYPE_AES128;
2940 			break;
2941 		case AES_KEYSIZE_192:
2942 			ctx->cipher_type = CIPHER_TYPE_AES192;
2943 			break;
2944 		case AES_KEYSIZE_256:
2945 			ctx->cipher_type = CIPHER_TYPE_AES256;
2946 			break;
2947 		default:
2948 			goto badkey;
2949 		}
2950 		break;
2951 	case CIPHER_ALG_RC4:
2952 		ctx->cipher_type = CIPHER_TYPE_INIT;
2953 		break;
2954 	default:
2955 		pr_err("%s() Error: Unknown cipher alg\n", __func__);
2956 		return -EINVAL;
2957 	}
2958 
2959 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2960 		 ctx->authkeylen);
2961 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
2962 	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
2963 
2964 	/* setkey the fallback just in case we need to use it */
2965 	if (ctx->fallback_cipher) {
2966 		flow_log("  running fallback setkey()\n");
2967 
2968 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2969 		ctx->fallback_cipher->base.crt_flags |=
2970 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2971 		ret =
2972 		    crypto_aead_setkey(ctx->fallback_cipher, origkey,
2973 				       origkeylen);
2974 		if (ret) {
2975 			flow_log("  fallback setkey() returned:%d\n", ret);
2976 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
2977 			tfm->crt_flags |=
2978 			    (ctx->fallback_cipher->base.crt_flags &
2979 			     CRYPTO_TFM_RES_MASK);
2980 		}
2981 	}
2982 
2983 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2984 							  ctx->enckeylen,
2985 							  false);
2986 
2987 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2988 
2989 	return ret;
2990 
2991 badkey:
2992 	ctx->enckeylen = 0;
2993 	ctx->authkeylen = 0;
2994 	ctx->digestsize = 0;
2995 
2996 	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2997 	return -EINVAL;
2998 }
2999 
3000 static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
3001 			       const u8 *key, unsigned int keylen)
3002 {
3003 	struct spu_hw *spu = &iproc_priv.spu;
3004 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3005 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
3006 
3007 	int ret = 0;
3008 
3009 	flow_log("%s() keylen:%u\n", __func__, keylen);
3010 	flow_dump("  key: ", key, keylen);
3011 
3012 	if (!ctx->is_esp)
3013 		ctx->digestsize = keylen;
3014 
3015 	ctx->enckeylen = keylen;
3016 	ctx->authkeylen = 0;
3017 	memcpy(ctx->enckey, key, ctx->enckeylen);
3018 
3019 	switch (ctx->enckeylen) {
3020 	case AES_KEYSIZE_128:
3021 		ctx->cipher_type = CIPHER_TYPE_AES128;
3022 		break;
3023 	case AES_KEYSIZE_192:
3024 		ctx->cipher_type = CIPHER_TYPE_AES192;
3025 		break;
3026 	case AES_KEYSIZE_256:
3027 		ctx->cipher_type = CIPHER_TYPE_AES256;
3028 		break;
3029 	default:
3030 		goto badkey;
3031 	}
3032 
3033 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3034 		 ctx->authkeylen);
3035 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
3036 	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
3037 
3038 	/* setkey the fallback just in case we need to use it */
3039 	if (ctx->fallback_cipher) {
3040 		flow_log("  running fallback setkey()\n");
3041 
3042 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
3043 		ctx->fallback_cipher->base.crt_flags |=
3044 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
3045 		ret = crypto_aead_setkey(ctx->fallback_cipher, key,
3046 					 keylen + ctx->salt_len);
3047 		if (ret) {
3048 			flow_log("  fallback setkey() returned:%d\n", ret);
3049 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
3050 			tfm->crt_flags |=
3051 			    (ctx->fallback_cipher->base.crt_flags &
3052 			     CRYPTO_TFM_RES_MASK);
3053 		}
3054 	}
3055 
3056 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
3057 							  ctx->enckeylen,
3058 							  false);
3059 
3060 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
3061 
3062 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3063 		 ctx->authkeylen);
3064 
3065 	return ret;
3066 
3067 badkey:
3068 	ctx->enckeylen = 0;
3069 	ctx->authkeylen = 0;
3070 	ctx->digestsize = 0;
3071 
3072 	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
3073 	return -EINVAL;
3074 }
3075 
3076 /**
3077  * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
3078  * @cipher: AEAD structure
3079  * @key:    Key followed by 4 bytes of salt
3080  * @keylen: Length of key plus salt, in bytes
3081  *
3082  * Extracts salt from key and stores it to be prepended to IV on each request.
3083  * Digest is always 16 bytes
3084  *
3085  * Return: Value from generic gcm setkey.
3086  */
3087 static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
3088 			       const u8 *key, unsigned int keylen)
3089 {
3090 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3091 
3092 	flow_log("%s\n", __func__);
3093 	ctx->salt_len = GCM_ESP_SALT_SIZE;
3094 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3095 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3096 	keylen -= GCM_ESP_SALT_SIZE;
3097 	ctx->digestsize = GCM_ESP_DIGESTSIZE;
3098 	ctx->is_esp = true;
3099 	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3100 
3101 	return aead_gcm_ccm_setkey(cipher, key, keylen);
3102 }
3103 
3104 /**
3105  * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
3106  * @cipher: AEAD structure
3107  * @key:    Key followed by 4 bytes of salt
3108  * @keylen: Length of key plus salt, in bytes
3109  *
3110  * Extracts salt from key and stores it to be prepended to IV on each request.
3111  * Digest is always 16 bytes
3112  *
3113  * Return: Value from generic gcm setkey.
3114  */
3115 static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
3116 				  const u8 *key, unsigned int keylen)
3117 {
3118 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3119 
3120 	flow_log("%s\n", __func__);
3121 	ctx->salt_len = GCM_ESP_SALT_SIZE;
3122 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3123 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3124 	keylen -= GCM_ESP_SALT_SIZE;
3125 	ctx->digestsize = GCM_ESP_DIGESTSIZE;
3126 	ctx->is_esp = true;
3127 	ctx->is_rfc4543 = true;
3128 	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3129 
3130 	return aead_gcm_ccm_setkey(cipher, key, keylen);
3131 }
3132 
3133 /**
3134  * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
3135  * @cipher: AEAD structure
3136  * @key:    Key followed by 4 bytes of salt
3137  * @keylen: Length of key plus salt, in bytes
3138  *
3139  * Extracts salt from key and stores it to be prepended to IV on each request.
3140  * Digest is always 16 bytes
3141  *
3142  * Return: Value from generic ccm setkey.
3143  */
3144 static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3145 			       const u8 *key, unsigned int keylen)
3146 {
3147 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3148 
3149 	flow_log("%s\n", __func__);
3150 	ctx->salt_len = CCM_ESP_SALT_SIZE;
3151 	ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3152 	memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3153 	keylen -= CCM_ESP_SALT_SIZE;
3154 	ctx->is_esp = true;
3155 	flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3156 
3157 	return aead_gcm_ccm_setkey(cipher, key, keylen);
3158 }
3159 
3160 static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3161 {
3162 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3163 	int ret = 0;
3164 
3165 	flow_log("%s() authkeylen:%u authsize:%u\n",
3166 		 __func__, ctx->authkeylen, authsize);
3167 
3168 	ctx->digestsize = authsize;
3169 
3170 	/* setkey the fallback just in case we need to use it */
3171 	if (ctx->fallback_cipher) {
3172 		flow_log("  running fallback setauth()\n");
3173 
3174 		ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3175 		if (ret)
3176 			flow_log("  fallback setauth() returned:%d\n", ret);
3177 	}
3178 
3179 	return ret;
3180 }
3181 
3182 static int aead_encrypt(struct aead_request *req)
3183 {
3184 	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
3185 		 req->cryptlen);
3186 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3187 	flow_log("  assoc_len:%u\n", req->assoclen);
3188 
3189 	return aead_enqueue(req, true);
3190 }
3191 
3192 static int aead_decrypt(struct aead_request *req)
3193 {
3194 	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3195 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3196 	flow_log("  assoc_len:%u\n", req->assoclen);
3197 
3198 	return aead_enqueue(req, false);
3199 }
3200 
3201 /* ==================== Supported Cipher Algorithms ==================== */
3202 
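/*
 * Table of algorithms advertised to the crypto API. Each entry pairs the
 * algorithm definition registered with the kernel with the SPU cipher and
 * auth parameters used to build request headers for that algorithm.
 */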
3203 static struct iproc_alg_s driver_algs[] = {
3204 	{
3205 	 .type = CRYPTO_ALG_TYPE_AEAD,
3206 	 .alg.aead = {
3207 		 .base = {
3208 			.cra_name = "gcm(aes)",
3209 			.cra_driver_name = "gcm-aes-iproc",
3210 			.cra_blocksize = AES_BLOCK_SIZE,
3211 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3212 		 },
3213 		 .setkey = aead_gcm_ccm_setkey,
3214 		 .ivsize = GCM_AES_IV_SIZE,
3215 		 .maxauthsize = AES_BLOCK_SIZE,
3216 	 },
3217 	 .cipher_info = {
3218 			 .alg = CIPHER_ALG_AES,
3219 			 .mode = CIPHER_MODE_GCM,
3220 			 },
3221 	 .auth_info = {
3222 		       .alg = HASH_ALG_AES,
3223 		       .mode = HASH_MODE_GCM,
3224 		       },
3225 	 .auth_first = 0,
3226 	 },
3227 	{
3228 	 .type = CRYPTO_ALG_TYPE_AEAD,
3229 	 .alg.aead = {
3230 		 .base = {
3231 			.cra_name = "ccm(aes)",
3232 			.cra_driver_name = "ccm-aes-iproc",
3233 			.cra_blocksize = AES_BLOCK_SIZE,
3234 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3235 		 },
3236 		 .setkey = aead_gcm_ccm_setkey,
3237 		 .ivsize = CCM_AES_IV_SIZE,
3238 		 .maxauthsize = AES_BLOCK_SIZE,
3239 	 },
3240 	 .cipher_info = {
3241 			 .alg = CIPHER_ALG_AES,
3242 			 .mode = CIPHER_MODE_CCM,
3243 			 },
3244 	 .auth_info = {
3245 		       .alg = HASH_ALG_AES,
3246 		       .mode = HASH_MODE_CCM,
3247 		       },
3248 	 .auth_first = 0,
3249 	 },
3250 	{
3251 	 .type = CRYPTO_ALG_TYPE_AEAD,
3252 	 .alg.aead = {
3253 		 .base = {
3254 			.cra_name = "rfc4106(gcm(aes))",
3255 			.cra_driver_name = "gcm-aes-esp-iproc",
3256 			.cra_blocksize = AES_BLOCK_SIZE,
3257 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3258 		 },
3259 		 .setkey = aead_gcm_esp_setkey,
3260 		 .ivsize = GCM_ESP_IV_SIZE,
3261 		 .maxauthsize = AES_BLOCK_SIZE,
3262 	 },
3263 	 .cipher_info = {
3264 			 .alg = CIPHER_ALG_AES,
3265 			 .mode = CIPHER_MODE_GCM,
3266 			 },
3267 	 .auth_info = {
3268 		       .alg = HASH_ALG_AES,
3269 		       .mode = HASH_MODE_GCM,
3270 		       },
3271 	 .auth_first = 0,
3272 	 },
3273 	{
3274 	 .type = CRYPTO_ALG_TYPE_AEAD,
3275 	 .alg.aead = {
3276 		 .base = {
3277 			.cra_name = "rfc4309(ccm(aes))",
3278 			.cra_driver_name = "ccm-aes-esp-iproc",
3279 			.cra_blocksize = AES_BLOCK_SIZE,
3280 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3281 		 },
3282 		 .setkey = aead_ccm_esp_setkey,
3283 		 .ivsize = CCM_AES_IV_SIZE,
3284 		 .maxauthsize = AES_BLOCK_SIZE,
3285 	 },
3286 	 .cipher_info = {
3287 			 .alg = CIPHER_ALG_AES,
3288 			 .mode = CIPHER_MODE_CCM,
3289 			 },
3290 	 .auth_info = {
3291 		       .alg = HASH_ALG_AES,
3292 		       .mode = HASH_MODE_CCM,
3293 		       },
3294 	 .auth_first = 0,
3295 	 },
3296 	{
3297 	 .type = CRYPTO_ALG_TYPE_AEAD,
3298 	 .alg.aead = {
3299 		 .base = {
3300 			.cra_name = "rfc4543(gcm(aes))",
3301 			.cra_driver_name = "gmac-aes-esp-iproc",
3302 			.cra_blocksize = AES_BLOCK_SIZE,
3303 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3304 		 },
3305 		 .setkey = rfc4543_gcm_esp_setkey,
3306 		 .ivsize = GCM_ESP_IV_SIZE,
3307 		 .maxauthsize = AES_BLOCK_SIZE,
3308 	 },
3309 	 .cipher_info = {
3310 			 .alg = CIPHER_ALG_AES,
3311 			 .mode = CIPHER_MODE_GCM,
3312 			 },
3313 	 .auth_info = {
3314 		       .alg = HASH_ALG_AES,
3315 		       .mode = HASH_MODE_GCM,
3316 		       },
3317 	 .auth_first = 0,
3318 	 },
3319 	{
3320 	 .type = CRYPTO_ALG_TYPE_AEAD,
3321 	 .alg.aead = {
3322 		 .base = {
3323 			.cra_name = "authenc(hmac(md5),cbc(aes))",
3324 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3325 			.cra_blocksize = AES_BLOCK_SIZE,
3326 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3327 		 },
3328 		 .setkey = aead_authenc_setkey,
3329 		 .ivsize = AES_BLOCK_SIZE,
3330 		 .maxauthsize = MD5_DIGEST_SIZE,
3331 	 },
3332 	 .cipher_info = {
3333 			 .alg = CIPHER_ALG_AES,
3334 			 .mode = CIPHER_MODE_CBC,
3335 			 },
3336 	 .auth_info = {
3337 		       .alg = HASH_ALG_MD5,
3338 		       .mode = HASH_MODE_HMAC,
3339 		       },
3340 	 .auth_first = 0,
3341 	 },
3342 	{
3343 	 .type = CRYPTO_ALG_TYPE_AEAD,
3344 	 .alg.aead = {
3345 		 .base = {
3346 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
3347 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3348 			.cra_blocksize = AES_BLOCK_SIZE,
3349 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3350 		 },
3351 		 .setkey = aead_authenc_setkey,
3352 		 .ivsize = AES_BLOCK_SIZE,
3353 		 .maxauthsize = SHA1_DIGEST_SIZE,
3354 	 },
3355 	 .cipher_info = {
3356 			 .alg = CIPHER_ALG_AES,
3357 			 .mode = CIPHER_MODE_CBC,
3358 			 },
3359 	 .auth_info = {
3360 		       .alg = HASH_ALG_SHA1,
3361 		       .mode = HASH_MODE_HMAC,
3362 		       },
3363 	 .auth_first = 0,
3364 	 },
3365 	{
3366 	 .type = CRYPTO_ALG_TYPE_AEAD,
3367 	 .alg.aead = {
3368 		 .base = {
3369 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
3370 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3371 			.cra_blocksize = AES_BLOCK_SIZE,
3372 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3373 		 },
3374 		 .setkey = aead_authenc_setkey,
3375 		 .ivsize = AES_BLOCK_SIZE,
3376 		 .maxauthsize = SHA256_DIGEST_SIZE,
3377 	 },
3378 	 .cipher_info = {
3379 			 .alg = CIPHER_ALG_AES,
3380 			 .mode = CIPHER_MODE_CBC,
3381 			 },
3382 	 .auth_info = {
3383 		       .alg = HASH_ALG_SHA256,
3384 		       .mode = HASH_MODE_HMAC,
3385 		       },
3386 	 .auth_first = 0,
3387 	 },
3388 	{
3389 	 .type = CRYPTO_ALG_TYPE_AEAD,
3390 	 .alg.aead = {
3391 		 .base = {
3392 			.cra_name = "authenc(hmac(md5),cbc(des))",
3393 			.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3394 			.cra_blocksize = DES_BLOCK_SIZE,
3395 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3396 		 },
3397 		 .setkey = aead_authenc_setkey,
3398 		 .ivsize = DES_BLOCK_SIZE,
3399 		 .maxauthsize = MD5_DIGEST_SIZE,
3400 	 },
3401 	 .cipher_info = {
3402 			 .alg = CIPHER_ALG_DES,
3403 			 .mode = CIPHER_MODE_CBC,
3404 			 },
3405 	 .auth_info = {
3406 		       .alg = HASH_ALG_MD5,
3407 		       .mode = HASH_MODE_HMAC,
3408 		       },
3409 	 .auth_first = 0,
3410 	 },
3411 	{
3412 	 .type = CRYPTO_ALG_TYPE_AEAD,
3413 	 .alg.aead = {
3414 		 .base = {
3415 			.cra_name = "authenc(hmac(sha1),cbc(des))",
3416 			.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3417 			.cra_blocksize = DES_BLOCK_SIZE,
3418 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3419 		 },
3420 		 .setkey = aead_authenc_setkey,
3421 		 .ivsize = DES_BLOCK_SIZE,
3422 		 .maxauthsize = SHA1_DIGEST_SIZE,
3423 	 },
3424 	 .cipher_info = {
3425 			 .alg = CIPHER_ALG_DES,
3426 			 .mode = CIPHER_MODE_CBC,
3427 			 },
3428 	 .auth_info = {
3429 		       .alg = HASH_ALG_SHA1,
3430 		       .mode = HASH_MODE_HMAC,
3431 		       },
3432 	 .auth_first = 0,
3433 	 },
3434 	{
3435 	 .type = CRYPTO_ALG_TYPE_AEAD,
3436 	 .alg.aead = {
3437 		 .base = {
3438 			.cra_name = "authenc(hmac(sha224),cbc(des))",
3439 			.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3440 			.cra_blocksize = DES_BLOCK_SIZE,
3441 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3442 		 },
3443 		 .setkey = aead_authenc_setkey,
3444 		 .ivsize = DES_BLOCK_SIZE,
3445 		 .maxauthsize = SHA224_DIGEST_SIZE,
3446 	 },
3447 	 .cipher_info = {
3448 			 .alg = CIPHER_ALG_DES,
3449 			 .mode = CIPHER_MODE_CBC,
3450 			 },
3451 	 .auth_info = {
3452 		       .alg = HASH_ALG_SHA224,
3453 		       .mode = HASH_MODE_HMAC,
3454 		       },
3455 	 .auth_first = 0,
3456 	 },
3457 	{
3458 	 .type = CRYPTO_ALG_TYPE_AEAD,
3459 	 .alg.aead = {
3460 		 .base = {
3461 			.cra_name = "authenc(hmac(sha256),cbc(des))",
3462 			.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3463 			.cra_blocksize = DES_BLOCK_SIZE,
3464 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3465 		 },
3466 		 .setkey = aead_authenc_setkey,
3467 		 .ivsize = DES_BLOCK_SIZE,
3468 		 .maxauthsize = SHA256_DIGEST_SIZE,
3469 	 },
3470 	 .cipher_info = {
3471 			 .alg = CIPHER_ALG_DES,
3472 			 .mode = CIPHER_MODE_CBC,
3473 			 },
3474 	 .auth_info = {
3475 		       .alg = HASH_ALG_SHA256,
3476 		       .mode = HASH_MODE_HMAC,
3477 		       },
3478 	 .auth_first = 0,
3479 	 },
3480 	{
3481 	 .type = CRYPTO_ALG_TYPE_AEAD,
3482 	 .alg.aead = {
3483 		 .base = {
3484 			.cra_name = "authenc(hmac(sha384),cbc(des))",
3485 			.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3486 			.cra_blocksize = DES_BLOCK_SIZE,
3487 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3488 		 },
3489 		 .setkey = aead_authenc_setkey,
3490 		 .ivsize = DES_BLOCK_SIZE,
3491 		 .maxauthsize = SHA384_DIGEST_SIZE,
3492 	 },
3493 	 .cipher_info = {
3494 			 .alg = CIPHER_ALG_DES,
3495 			 .mode = CIPHER_MODE_CBC,
3496 			 },
3497 	 .auth_info = {
3498 		       .alg = HASH_ALG_SHA384,
3499 		       .mode = HASH_MODE_HMAC,
3500 		       },
3501 	 .auth_first = 0,
3502 	 },
3503 	{
3504 	 .type = CRYPTO_ALG_TYPE_AEAD,
3505 	 .alg.aead = {
3506 		 .base = {
3507 			.cra_name = "authenc(hmac(sha512),cbc(des))",
3508 			.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3509 			.cra_blocksize = DES_BLOCK_SIZE,
3510 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3511 		 },
3512 		 .setkey = aead_authenc_setkey,
3513 		 .ivsize = DES_BLOCK_SIZE,
3514 		 .maxauthsize = SHA512_DIGEST_SIZE,
3515 	 },
3516 	 .cipher_info = {
3517 			 .alg = CIPHER_ALG_DES,
3518 			 .mode = CIPHER_MODE_CBC,
3519 			 },
3520 	 .auth_info = {
3521 		       .alg = HASH_ALG_SHA512,
3522 		       .mode = HASH_MODE_HMAC,
3523 		       },
3524 	 .auth_first = 0,
3525 	 },
3526 	{
3527 	 .type = CRYPTO_ALG_TYPE_AEAD,
3528 	 .alg.aead = {
3529 		 .base = {
3530 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3531 			.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3532 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3533 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3534 		 },
3535 		 .setkey = aead_authenc_setkey,
3536 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3537 		 .maxauthsize = MD5_DIGEST_SIZE,
3538 	 },
3539 	 .cipher_info = {
3540 			 .alg = CIPHER_ALG_3DES,
3541 			 .mode = CIPHER_MODE_CBC,
3542 			 },
3543 	 .auth_info = {
3544 		       .alg = HASH_ALG_MD5,
3545 		       .mode = HASH_MODE_HMAC,
3546 		       },
3547 	 .auth_first = 0,
3548 	 },
3549 	{
3550 	 .type = CRYPTO_ALG_TYPE_AEAD,
3551 	 .alg.aead = {
3552 		 .base = {
3553 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3554 			.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3555 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3556 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3557 		 },
3558 		 .setkey = aead_authenc_setkey,
3559 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3560 		 .maxauthsize = SHA1_DIGEST_SIZE,
3561 	 },
3562 	 .cipher_info = {
3563 			 .alg = CIPHER_ALG_3DES,
3564 			 .mode = CIPHER_MODE_CBC,
3565 			 },
3566 	 .auth_info = {
3567 		       .alg = HASH_ALG_SHA1,
3568 		       .mode = HASH_MODE_HMAC,
3569 		       },
3570 	 .auth_first = 0,
3571 	 },
3572 	{
3573 	 .type = CRYPTO_ALG_TYPE_AEAD,
3574 	 .alg.aead = {
3575 		 .base = {
3576 			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3577 			.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3578 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3579 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3580 		 },
3581 		 .setkey = aead_authenc_setkey,
3582 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3583 		 .maxauthsize = SHA224_DIGEST_SIZE,
3584 	 },
3585 	 .cipher_info = {
3586 			 .alg = CIPHER_ALG_3DES,
3587 			 .mode = CIPHER_MODE_CBC,
3588 			 },
3589 	 .auth_info = {
3590 		       .alg = HASH_ALG_SHA224,
3591 		       .mode = HASH_MODE_HMAC,
3592 		       },
3593 	 .auth_first = 0,
3594 	 },
3595 	{
3596 	 .type = CRYPTO_ALG_TYPE_AEAD,
3597 	 .alg.aead = {
3598 		 .base = {
3599 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3600 			.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3601 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3602 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3603 		 },
3604 		 .setkey = aead_authenc_setkey,
3605 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3606 		 .maxauthsize = SHA256_DIGEST_SIZE,
3607 	 },
3608 	 .cipher_info = {
3609 			 .alg = CIPHER_ALG_3DES,
3610 			 .mode = CIPHER_MODE_CBC,
3611 			 },
3612 	 .auth_info = {
3613 		       .alg = HASH_ALG_SHA256,
3614 		       .mode = HASH_MODE_HMAC,
3615 		       },
3616 	 .auth_first = 0,
3617 	 },
3618 	{
3619 	 .type = CRYPTO_ALG_TYPE_AEAD,
3620 	 .alg.aead = {
3621 		 .base = {
3622 			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3623 			.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3624 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3625 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3626 		 },
3627 		 .setkey = aead_authenc_setkey,
3628 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3629 		 .maxauthsize = SHA384_DIGEST_SIZE,
3630 	 },
3631 	 .cipher_info = {
3632 			 .alg = CIPHER_ALG_3DES,
3633 			 .mode = CIPHER_MODE_CBC,
3634 			 },
3635 	 .auth_info = {
3636 		       .alg = HASH_ALG_SHA384,
3637 		       .mode = HASH_MODE_HMAC,
3638 		       },
3639 	 .auth_first = 0,
3640 	 },
3641 	{
3642 	 .type = CRYPTO_ALG_TYPE_AEAD,
3643 	 .alg.aead = {
3644 		 .base = {
3645 			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3646 			.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3647 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3648 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3649 		 },
3650 		 .setkey = aead_authenc_setkey,
3651 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3652 		 .maxauthsize = SHA512_DIGEST_SIZE,
3653 	 },
3654 	 .cipher_info = {
3655 			 .alg = CIPHER_ALG_3DES,
3656 			 .mode = CIPHER_MODE_CBC,
3657 			 },
3658 	 .auth_info = {
3659 		       .alg = HASH_ALG_SHA512,
3660 		       .mode = HASH_MODE_HMAC,
3661 		       },
3662 	 .auth_first = 0,
3663 	 },
3664 
3665 /* ABLKCIPHER algorithms. */
3666 	{
3667 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3668 	 .alg.crypto = {
3669 			.cra_name = "ecb(arc4)",
3670 			.cra_driver_name = "ecb-arc4-iproc",
3671 			.cra_blocksize = ARC4_BLOCK_SIZE,
3672 			.cra_ablkcipher = {
3673 					   .min_keysize = ARC4_MIN_KEY_SIZE,
3674 					   .max_keysize = ARC4_MAX_KEY_SIZE,
3675 					   .ivsize = 0,
3676 					}
3677 			},
3678 	 .cipher_info = {
3679 			 .alg = CIPHER_ALG_RC4,
3680 			 .mode = CIPHER_MODE_NONE,
3681 			 },
3682 	 .auth_info = {
3683 		       .alg = HASH_ALG_NONE,
3684 		       .mode = HASH_MODE_NONE,
3685 		       },
3686 	 },
3687 	{
3688 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3689 	 .alg.crypto = {
3690 			.cra_name = "ofb(des)",
3691 			.cra_driver_name = "ofb-des-iproc",
3692 			.cra_blocksize = DES_BLOCK_SIZE,
3693 			.cra_ablkcipher = {
3694 					   .min_keysize = DES_KEY_SIZE,
3695 					   .max_keysize = DES_KEY_SIZE,
3696 					   .ivsize = DES_BLOCK_SIZE,
3697 					}
3698 			},
3699 	 .cipher_info = {
3700 			 .alg = CIPHER_ALG_DES,
3701 			 .mode = CIPHER_MODE_OFB,
3702 			 },
3703 	 .auth_info = {
3704 		       .alg = HASH_ALG_NONE,
3705 		       .mode = HASH_MODE_NONE,
3706 		       },
3707 	 },
3708 	{
3709 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3710 	 .alg.crypto = {
3711 			.cra_name = "cbc(des)",
3712 			.cra_driver_name = "cbc-des-iproc",
3713 			.cra_blocksize = DES_BLOCK_SIZE,
3714 			.cra_ablkcipher = {
3715 					   .min_keysize = DES_KEY_SIZE,
3716 					   .max_keysize = DES_KEY_SIZE,
3717 					   .ivsize = DES_BLOCK_SIZE,
3718 					}
3719 			},
3720 	 .cipher_info = {
3721 			 .alg = CIPHER_ALG_DES,
3722 			 .mode = CIPHER_MODE_CBC,
3723 			 },
3724 	 .auth_info = {
3725 		       .alg = HASH_ALG_NONE,
3726 		       .mode = HASH_MODE_NONE,
3727 		       },
3728 	 },
3729 	{
3730 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3731 	 .alg.crypto = {
3732 			.cra_name = "ecb(des)",
3733 			.cra_driver_name = "ecb-des-iproc",
3734 			.cra_blocksize = DES_BLOCK_SIZE,
3735 			.cra_ablkcipher = {
3736 					   .min_keysize = DES_KEY_SIZE,
3737 					   .max_keysize = DES_KEY_SIZE,
3738 					   .ivsize = 0,
3739 					}
3740 			},
3741 	 .cipher_info = {
3742 			 .alg = CIPHER_ALG_DES,
3743 			 .mode = CIPHER_MODE_ECB,
3744 			 },
3745 	 .auth_info = {
3746 		       .alg = HASH_ALG_NONE,
3747 		       .mode = HASH_MODE_NONE,
3748 		       },
3749 	 },
3750 	{
3751 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3752 	 .alg.crypto = {
3753 			.cra_name = "ofb(des3_ede)",
3754 			.cra_driver_name = "ofb-des3-iproc",
3755 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3756 			.cra_ablkcipher = {
3757 					   .min_keysize = DES3_EDE_KEY_SIZE,
3758 					   .max_keysize = DES3_EDE_KEY_SIZE,
3759 					   .ivsize = DES3_EDE_BLOCK_SIZE,
3760 					}
3761 			},
3762 	 .cipher_info = {
3763 			 .alg = CIPHER_ALG_3DES,
3764 			 .mode = CIPHER_MODE_OFB,
3765 			 },
3766 	 .auth_info = {
3767 		       .alg = HASH_ALG_NONE,
3768 		       .mode = HASH_MODE_NONE,
3769 		       },
3770 	 },
3771 	{
3772 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3773 	 .alg.crypto = {
3774 			.cra_name = "cbc(des3_ede)",
3775 			.cra_driver_name = "cbc-des3-iproc",
3776 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3777 			.cra_ablkcipher = {
3778 					   .min_keysize = DES3_EDE_KEY_SIZE,
3779 					   .max_keysize = DES3_EDE_KEY_SIZE,
3780 					   .ivsize = DES3_EDE_BLOCK_SIZE,
3781 					}
3782 			},
3783 	 .cipher_info = {
3784 			 .alg = CIPHER_ALG_3DES,
3785 			 .mode = CIPHER_MODE_CBC,
3786 			 },
3787 	 .auth_info = {
3788 		       .alg = HASH_ALG_NONE,
3789 		       .mode = HASH_MODE_NONE,
3790 		       },
3791 	 },
3792 	{
3793 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3794 	 .alg.crypto = {
3795 			.cra_name = "ecb(des3_ede)",
3796 			.cra_driver_name = "ecb-des3-iproc",
3797 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3798 			.cra_ablkcipher = {
3799 					   .min_keysize = DES3_EDE_KEY_SIZE,
3800 					   .max_keysize = DES3_EDE_KEY_SIZE,
3801 					   .ivsize = 0,
3802 					}
3803 			},
3804 	 .cipher_info = {
3805 			 .alg = CIPHER_ALG_3DES,
3806 			 .mode = CIPHER_MODE_ECB,
3807 			 },
3808 	 .auth_info = {
3809 		       .alg = HASH_ALG_NONE,
3810 		       .mode = HASH_MODE_NONE,
3811 		       },
3812 	 },
3813 	{
3814 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3815 	 .alg.crypto = {
3816 			.cra_name = "ofb(aes)",
3817 			.cra_driver_name = "ofb-aes-iproc",
3818 			.cra_blocksize = AES_BLOCK_SIZE,
3819 			.cra_ablkcipher = {
3820 					   .min_keysize = AES_MIN_KEY_SIZE,
3821 					   .max_keysize = AES_MAX_KEY_SIZE,
3822 					   .ivsize = AES_BLOCK_SIZE,
3823 					}
3824 			},
3825 	 .cipher_info = {
3826 			 .alg = CIPHER_ALG_AES,
3827 			 .mode = CIPHER_MODE_OFB,
3828 			 },
3829 	 .auth_info = {
3830 		       .alg = HASH_ALG_NONE,
3831 		       .mode = HASH_MODE_NONE,
3832 		       },
3833 	 },
3834 	{
3835 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3836 	 .alg.crypto = {
3837 			.cra_name = "cbc(aes)",
3838 			.cra_driver_name = "cbc-aes-iproc",
3839 			.cra_blocksize = AES_BLOCK_SIZE,
3840 			.cra_ablkcipher = {
3841 					   .min_keysize = AES_MIN_KEY_SIZE,
3842 					   .max_keysize = AES_MAX_KEY_SIZE,
3843 					   .ivsize = AES_BLOCK_SIZE,
3844 					}
3845 			},
3846 	 .cipher_info = {
3847 			 .alg = CIPHER_ALG_AES,
3848 			 .mode = CIPHER_MODE_CBC,
3849 			 },
3850 	 .auth_info = {
3851 		       .alg = HASH_ALG_NONE,
3852 		       .mode = HASH_MODE_NONE,
3853 		       },
3854 	 },
3855 	{
3856 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3857 	 .alg.crypto = {
3858 			.cra_name = "ecb(aes)",
3859 			.cra_driver_name = "ecb-aes-iproc",
3860 			.cra_blocksize = AES_BLOCK_SIZE,
3861 			.cra_ablkcipher = {
3862 					   .min_keysize = AES_MIN_KEY_SIZE,
3863 					   .max_keysize = AES_MAX_KEY_SIZE,
3864 					   .ivsize = 0,
3865 					}
3866 			},
3867 	 .cipher_info = {
3868 			 .alg = CIPHER_ALG_AES,
3869 			 .mode = CIPHER_MODE_ECB,
3870 			 },
3871 	 .auth_info = {
3872 		       .alg = HASH_ALG_NONE,
3873 		       .mode = HASH_MODE_NONE,
3874 		       },
3875 	 },
3876 	{
3877 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3878 	 .alg.crypto = {
3879 			.cra_name = "ctr(aes)",
3880 			.cra_driver_name = "ctr-aes-iproc",
3881 			.cra_blocksize = AES_BLOCK_SIZE,
3882 			.cra_ablkcipher = {
3883 					   /* .geniv = "chainiv", */
3884 					   .min_keysize = AES_MIN_KEY_SIZE,
3885 					   .max_keysize = AES_MAX_KEY_SIZE,
3886 					   .ivsize = AES_BLOCK_SIZE,
3887 					}
3888 			},
3889 	 .cipher_info = {
3890 			 .alg = CIPHER_ALG_AES,
3891 			 .mode = CIPHER_MODE_CTR,
3892 			 },
3893 	 .auth_info = {
3894 		       .alg = HASH_ALG_NONE,
3895 		       .mode = HASH_MODE_NONE,
3896 		       },
3897 	 },
3898 	{
3899 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3900 	 .alg.crypto = {
3901 			.cra_name = "xts(aes)",
3902 			.cra_driver_name = "xts-aes-iproc",
3903 			.cra_blocksize = AES_BLOCK_SIZE,
3904 			.cra_ablkcipher = {
3905 				.min_keysize = 2 * AES_MIN_KEY_SIZE,
3906 				.max_keysize = 2 * AES_MAX_KEY_SIZE,
3907 				.ivsize = AES_BLOCK_SIZE,
3908 				}
3909 			},
3910 	 .cipher_info = {
3911 			 .alg = CIPHER_ALG_AES,
3912 			 .mode = CIPHER_MODE_XTS,
3913 			 },
3914 	 .auth_info = {
3915 		       .alg = HASH_ALG_NONE,
3916 		       .mode = HASH_MODE_NONE,
3917 		       },
3918 	 },
3919 
3920 /* AHASH algorithms. */
3921 	{
3922 	 .type = CRYPTO_ALG_TYPE_AHASH,
3923 	 .alg.hash = {
3924 		      .halg.digestsize = MD5_DIGEST_SIZE,
3925 		      .halg.base = {
3926 				    .cra_name = "md5",
3927 				    .cra_driver_name = "md5-iproc",
3928 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
3929 				    .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3930 					     CRYPTO_ALG_ASYNC,
3931 				}
3932 		      },
3933 	 .cipher_info = {
3934 			 .alg = CIPHER_ALG_NONE,
3935 			 .mode = CIPHER_MODE_NONE,
3936 			 },
3937 	 .auth_info = {
3938 		       .alg = HASH_ALG_MD5,
3939 		       .mode = HASH_MODE_HASH,
3940 		       },
3941 	 },
3942 	{
3943 	 .type = CRYPTO_ALG_TYPE_AHASH,
3944 	 .alg.hash = {
3945 		      .halg.digestsize = MD5_DIGEST_SIZE,
3946 		      .halg.base = {
3947 				    .cra_name = "hmac(md5)",
3948 				    .cra_driver_name = "hmac-md5-iproc",
3949 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
3950 				}
3951 		      },
3952 	 .cipher_info = {
3953 			 .alg = CIPHER_ALG_NONE,
3954 			 .mode = CIPHER_MODE_NONE,
3955 			 },
3956 	 .auth_info = {
3957 		       .alg = HASH_ALG_MD5,
3958 		       .mode = HASH_MODE_HMAC,
3959 		       },
3960 	 },
3961 	{.type = CRYPTO_ALG_TYPE_AHASH,
3962 	 .alg.hash = {
3963 		      .halg.digestsize = SHA1_DIGEST_SIZE,
3964 		      .halg.base = {
3965 				    .cra_name = "sha1",
3966 				    .cra_driver_name = "sha1-iproc",
3967 				    .cra_blocksize = SHA1_BLOCK_SIZE,
3968 				}
3969 		      },
3970 	 .cipher_info = {
3971 			 .alg = CIPHER_ALG_NONE,
3972 			 .mode = CIPHER_MODE_NONE,
3973 			 },
3974 	 .auth_info = {
3975 		       .alg = HASH_ALG_SHA1,
3976 		       .mode = HASH_MODE_HASH,
3977 		       },
3978 	 },
3979 	{.type = CRYPTO_ALG_TYPE_AHASH,
3980 	 .alg.hash = {
3981 		      .halg.digestsize = SHA1_DIGEST_SIZE,
3982 		      .halg.base = {
3983 				    .cra_name = "hmac(sha1)",
3984 				    .cra_driver_name = "hmac-sha1-iproc",
3985 				    .cra_blocksize = SHA1_BLOCK_SIZE,
3986 				}
3987 		      },
3988 	 .cipher_info = {
3989 			 .alg = CIPHER_ALG_NONE,
3990 			 .mode = CIPHER_MODE_NONE,
3991 			 },
3992 	 .auth_info = {
3993 		       .alg = HASH_ALG_SHA1,
3994 		       .mode = HASH_MODE_HMAC,
3995 		       },
3996 	 },
3997 	{.type = CRYPTO_ALG_TYPE_AHASH,
3998 	 .alg.hash = {
3999 			.halg.digestsize = SHA224_DIGEST_SIZE,
4000 			.halg.base = {
4001 				    .cra_name = "sha224",
4002 				    .cra_driver_name = "sha224-iproc",
4003 				    .cra_blocksize = SHA224_BLOCK_SIZE,
4004 			}
4005 		      },
4006 	 .cipher_info = {
4007 			 .alg = CIPHER_ALG_NONE,
4008 			 .mode = CIPHER_MODE_NONE,
4009 			 },
4010 	 .auth_info = {
4011 		       .alg = HASH_ALG_SHA224,
4012 		       .mode = HASH_MODE_HASH,
4013 		       },
4014 	 },
4015 	{.type = CRYPTO_ALG_TYPE_AHASH,
4016 	 .alg.hash = {
4017 		      .halg.digestsize = SHA224_DIGEST_SIZE,
4018 		      .halg.base = {
4019 				    .cra_name = "hmac(sha224)",
4020 				    .cra_driver_name = "hmac-sha224-iproc",
4021 				    .cra_blocksize = SHA224_BLOCK_SIZE,
4022 				}
4023 		      },
4024 	 .cipher_info = {
4025 			 .alg = CIPHER_ALG_NONE,
4026 			 .mode = CIPHER_MODE_NONE,
4027 			 },
4028 	 .auth_info = {
4029 		       .alg = HASH_ALG_SHA224,
4030 		       .mode = HASH_MODE_HMAC,
4031 		       },
4032 	 },
4033 	{.type = CRYPTO_ALG_TYPE_AHASH,
4034 	 .alg.hash = {
4035 		      .halg.digestsize = SHA256_DIGEST_SIZE,
4036 		      .halg.base = {
4037 				    .cra_name = "sha256",
4038 				    .cra_driver_name = "sha256-iproc",
4039 				    .cra_blocksize = SHA256_BLOCK_SIZE,
4040 				}
4041 		      },
4042 	 .cipher_info = {
4043 			 .alg = CIPHER_ALG_NONE,
4044 			 .mode = CIPHER_MODE_NONE,
4045 			 },
4046 	 .auth_info = {
4047 		       .alg = HASH_ALG_SHA256,
4048 		       .mode = HASH_MODE_HASH,
4049 		       },
4050 	 },
4051 	{.type = CRYPTO_ALG_TYPE_AHASH,
4052 	 .alg.hash = {
4053 		      .halg.digestsize = SHA256_DIGEST_SIZE,
4054 		      .halg.base = {
4055 				    .cra_name = "hmac(sha256)",
4056 				    .cra_driver_name = "hmac-sha256-iproc",
4057 				    .cra_blocksize = SHA256_BLOCK_SIZE,
4058 				}
4059 		      },
4060 	 .cipher_info = {
4061 			 .alg = CIPHER_ALG_NONE,
4062 			 .mode = CIPHER_MODE_NONE,
4063 			 },
4064 	 .auth_info = {
4065 		       .alg = HASH_ALG_SHA256,
4066 		       .mode = HASH_MODE_HMAC,
4067 		       },
4068 	 },
4069 	{
4070 	 .type = CRYPTO_ALG_TYPE_AHASH,
4071 	 .alg.hash = {
4072 		      .halg.digestsize = SHA384_DIGEST_SIZE,
4073 		      .halg.base = {
4074 				    .cra_name = "sha384",
4075 				    .cra_driver_name = "sha384-iproc",
4076 				    .cra_blocksize = SHA384_BLOCK_SIZE,
4077 				}
4078 		      },
4079 	 .cipher_info = {
4080 			 .alg = CIPHER_ALG_NONE,
4081 			 .mode = CIPHER_MODE_NONE,
4082 			 },
4083 	 .auth_info = {
4084 		       .alg = HASH_ALG_SHA384,
4085 		       .mode = HASH_MODE_HASH,
4086 		       },
4087 	 },
4088 	{
4089 	 .type = CRYPTO_ALG_TYPE_AHASH,
4090 	 .alg.hash = {
4091 		      .halg.digestsize = SHA384_DIGEST_SIZE,
4092 		      .halg.base = {
4093 				    .cra_name = "hmac(sha384)",
4094 				    .cra_driver_name = "hmac-sha384-iproc",
4095 				    .cra_blocksize = SHA384_BLOCK_SIZE,
4096 				}
4097 		      },
4098 	 .cipher_info = {
4099 			 .alg = CIPHER_ALG_NONE,
4100 			 .mode = CIPHER_MODE_NONE,
4101 			 },
4102 	 .auth_info = {
4103 		       .alg = HASH_ALG_SHA384,
4104 		       .mode = HASH_MODE_HMAC,
4105 		       },
4106 	 },
4107 	{
4108 	 .type = CRYPTO_ALG_TYPE_AHASH,
4109 	 .alg.hash = {
4110 		      .halg.digestsize = SHA512_DIGEST_SIZE,
4111 		      .halg.base = {
4112 				    .cra_name = "sha512",
4113 				    .cra_driver_name = "sha512-iproc",
4114 				    .cra_blocksize = SHA512_BLOCK_SIZE,
4115 				}
4116 		      },
4117 	 .cipher_info = {
4118 			 .alg = CIPHER_ALG_NONE,
4119 			 .mode = CIPHER_MODE_NONE,
4120 			 },
4121 	 .auth_info = {
4122 		       .alg = HASH_ALG_SHA512,
4123 		       .mode = HASH_MODE_HASH,
4124 		       },
4125 	 },
4126 	{
4127 	 .type = CRYPTO_ALG_TYPE_AHASH,
4128 	 .alg.hash = {
4129 		      .halg.digestsize = SHA512_DIGEST_SIZE,
4130 		      .halg.base = {
4131 				    .cra_name = "hmac(sha512)",
4132 				    .cra_driver_name = "hmac-sha512-iproc",
4133 				    .cra_blocksize = SHA512_BLOCK_SIZE,
4134 				}
4135 		      },
4136 	 .cipher_info = {
4137 			 .alg = CIPHER_ALG_NONE,
4138 			 .mode = CIPHER_MODE_NONE,
4139 			 },
4140 	 .auth_info = {
4141 		       .alg = HASH_ALG_SHA512,
4142 		       .mode = HASH_MODE_HMAC,
4143 		       },
4144 	 },
4145 	{
4146 	 .type = CRYPTO_ALG_TYPE_AHASH,
4147 	 .alg.hash = {
4148 		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
4149 		      .halg.base = {
4150 				    .cra_name = "sha3-224",
4151 				    .cra_driver_name = "sha3-224-iproc",
4152 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
4153 				}
4154 		      },
4155 	 .cipher_info = {
4156 			 .alg = CIPHER_ALG_NONE,
4157 			 .mode = CIPHER_MODE_NONE,
4158 			 },
4159 	 .auth_info = {
4160 		       .alg = HASH_ALG_SHA3_224,
4161 		       .mode = HASH_MODE_HASH,
4162 		       },
4163 	 },
4164 	{
4165 	 .type = CRYPTO_ALG_TYPE_AHASH,
4166 	 .alg.hash = {
4167 		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
4168 		      .halg.base = {
4169 				    .cra_name = "hmac(sha3-224)",
4170 				    .cra_driver_name = "hmac-sha3-224-iproc",
4171 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
4172 				}
4173 		      },
4174 	 .cipher_info = {
4175 			 .alg = CIPHER_ALG_NONE,
4176 			 .mode = CIPHER_MODE_NONE,
4177 			 },
4178 	 .auth_info = {
4179 		       .alg = HASH_ALG_SHA3_224,
4180 		       .mode = HASH_MODE_HMAC,
4181 		       },
4182 	 },
4183 	{
4184 	 .type = CRYPTO_ALG_TYPE_AHASH,
4185 	 .alg.hash = {
4186 		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
4187 		      .halg.base = {
4188 				    .cra_name = "sha3-256",
4189 				    .cra_driver_name = "sha3-256-iproc",
4190 				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
4191 				}
4192 		      },
4193 	 .cipher_info = {
4194 			 .alg = CIPHER_ALG_NONE,
4195 			 .mode = CIPHER_MODE_NONE,
4196 			 },
4197 	 .auth_info = {
4198 		       .alg = HASH_ALG_SHA3_256,
4199 		       .mode = HASH_MODE_HASH,
4200 		       },
4201 	 },
4202 	{
4203 	 .type = CRYPTO_ALG_TYPE_AHASH,
4204 	 .alg.hash = {
4205 		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
4206 		      .halg.base = {
4207 				    .cra_name = "hmac(sha3-256)",
4208 				    .cra_driver_name = "hmac-sha3-256-iproc",
4209 				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
4210 				}
4211 		      },
4212 	 .cipher_info = {
4213 			 .alg = CIPHER_ALG_NONE,
4214 			 .mode = CIPHER_MODE_NONE,
4215 			 },
4216 	 .auth_info = {
4217 		       .alg = HASH_ALG_SHA3_256,
4218 		       .mode = HASH_MODE_HMAC,
4219 		       },
4220 	 },
4221 	{
4222 	 .type = CRYPTO_ALG_TYPE_AHASH,
4223 	 .alg.hash = {
4224 		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
4225 		      .halg.base = {
4226 				    .cra_name = "sha3-384",
4227 				    .cra_driver_name = "sha3-384-iproc",
4228 				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
4229 				}
4230 		      },
4231 	 .cipher_info = {
4232 			 .alg = CIPHER_ALG_NONE,
4233 			 .mode = CIPHER_MODE_NONE,
4234 			 },
4235 	 .auth_info = {
4236 		       .alg = HASH_ALG_SHA3_384,
4237 		       .mode = HASH_MODE_HASH,
4238 		       },
4239 	 },
4240 	{
4241 	 .type = CRYPTO_ALG_TYPE_AHASH,
4242 	 .alg.hash = {
4243 		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
4244 		      .halg.base = {
4245 				    .cra_name = "hmac(sha3-384)",
4246 				    .cra_driver_name = "hmac-sha3-384-iproc",
4247 				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
4248 				}
4249 		      },
4250 	 .cipher_info = {
4251 			 .alg = CIPHER_ALG_NONE,
4252 			 .mode = CIPHER_MODE_NONE,
4253 			 },
4254 	 .auth_info = {
4255 		       .alg = HASH_ALG_SHA3_384,
4256 		       .mode = HASH_MODE_HMAC,
4257 		       },
4258 	 },
4259 	{
4260 	 .type = CRYPTO_ALG_TYPE_AHASH,
4261 	 .alg.hash = {
4262 		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
4263 		      .halg.base = {
4264 				    .cra_name = "sha3-512",
4265 				    .cra_driver_name = "sha3-512-iproc",
4266 				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
4267 				}
4268 		      },
4269 	 .cipher_info = {
4270 			 .alg = CIPHER_ALG_NONE,
4271 			 .mode = CIPHER_MODE_NONE,
4272 			 },
4273 	 .auth_info = {
4274 		       .alg = HASH_ALG_SHA3_512,
4275 		       .mode = HASH_MODE_HASH,
4276 		       },
4277 	 },
4278 	{
4279 	 .type = CRYPTO_ALG_TYPE_AHASH,
4280 	 .alg.hash = {
4281 		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
4282 		      .halg.base = {
4283 				    .cra_name = "hmac(sha3-512)",
4284 				    .cra_driver_name = "hmac-sha3-512-iproc",
4285 				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
4286 				}
4287 		      },
4288 	 .cipher_info = {
4289 			 .alg = CIPHER_ALG_NONE,
4290 			 .mode = CIPHER_MODE_NONE,
4291 			 },
4292 	 .auth_info = {
4293 		       .alg = HASH_ALG_SHA3_512,
4294 		       .mode = HASH_MODE_HMAC,
4295 		       },
4296 	 },
4297 	{
4298 	 .type = CRYPTO_ALG_TYPE_AHASH,
4299 	 .alg.hash = {
4300 		      .halg.digestsize = AES_BLOCK_SIZE,
4301 		      .halg.base = {
4302 				    .cra_name = "xcbc(aes)",
4303 				    .cra_driver_name = "xcbc-aes-iproc",
4304 				    .cra_blocksize = AES_BLOCK_SIZE,
4305 				}
4306 		      },
4307 	 .cipher_info = {
4308 			 .alg = CIPHER_ALG_NONE,
4309 			 .mode = CIPHER_MODE_NONE,
4310 			 },
4311 	 .auth_info = {
4312 		       .alg = HASH_ALG_AES,
4313 		       .mode = HASH_MODE_XCBC,
4314 		       },
4315 	 },
4316 	{
4317 	 .type = CRYPTO_ALG_TYPE_AHASH,
4318 	 .alg.hash = {
4319 		      .halg.digestsize = AES_BLOCK_SIZE,
4320 		      .halg.base = {
4321 				    .cra_name = "cmac(aes)",
4322 				    .cra_driver_name = "cmac-aes-iproc",
4323 				    .cra_blocksize = AES_BLOCK_SIZE,
4324 				}
4325 		      },
4326 	 .cipher_info = {
4327 			 .alg = CIPHER_ALG_NONE,
4328 			 .mode = CIPHER_MODE_NONE,
4329 			 },
4330 	 .auth_info = {
4331 		       .alg = HASH_ALG_AES,
4332 		       .mode = HASH_MODE_CMAC,
4333 		       },
4334 	 },
4335 };
4336 
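/**
 * generic_cra_init() - Transform initialization common to all algorithm
 * types. Records the cipher and auth settings from the driver algorithm
 * entry in the transform context, queries the SPU for the maximum payload
 * size for this cipher, and bumps the stream and session counters.
 * @tfm:        crypto transform being initialized
 * @cipher_alg: driver algorithm entry backing this transform
 *
 * Return: 0 always
 */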
4337 static int generic_cra_init(struct crypto_tfm *tfm,
4338 			    struct iproc_alg_s *cipher_alg)
4339 {
4340 	struct spu_hw *spu = &iproc_priv.spu;
4341 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4342 	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4343 
4344 	flow_log("%s()\n", __func__);
4345 
4346 	ctx->alg = cipher_alg;
4347 	ctx->cipher = cipher_alg->cipher_info;
4348 	ctx->auth = cipher_alg->auth_info;
4349 	ctx->auth_first = cipher_alg->auth_first;
4350 	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4351 						    ctx->cipher.mode,
4352 						    blocksize);
4353 	ctx->fallback_cipher = NULL;
4354 
4355 	ctx->enckeylen = 0;
4356 	ctx->authkeylen = 0;
4357 
4358 	atomic_inc(&iproc_priv.stream_count);
4359 	atomic_inc(&iproc_priv.session_count);
4360 
4361 	return 0;
4362 }
4363 
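/**
 * ablkcipher_cra_init() - Initialize an ablkcipher transform. Sets the
 * request context size, then performs the common initialization.
 * @tfm: crypto transform being initialized
 *
 * Return: 0 if successful
 */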
4364 static int ablkcipher_cra_init(struct crypto_tfm *tfm)
4365 {
4366 	struct crypto_alg *alg = tfm->__crt_alg;
4367 	struct iproc_alg_s *cipher_alg;
4368 
4369 	flow_log("%s()\n", __func__);
4370 
4371 	tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
4372 
4373 	cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
4374 	return generic_cra_init(tfm, cipher_alg);
4375 }
4376 
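/**
 * ahash_cra_init() - Initialize an ahash transform. Performs the common
 * initialization and sets the ahash request context size.
 * @tfm: crypto transform being initialized
 *
 * Return: 0 if successful
 */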
4377 static int ahash_cra_init(struct crypto_tfm *tfm)
4378 {
4379 	int err;
4380 	struct crypto_alg *alg = tfm->__crt_alg;
4381 	struct iproc_alg_s *cipher_alg;
4382 
4383 	cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4384 				  alg.hash);
4385 
4386 	err = generic_cra_init(tfm, cipher_alg);
4387 	flow_log("%s()\n", __func__);
4388 
4389 	/*
4390 	 * export state size has to be < 512 bytes. So don't include msg bufs
4391 	 * in state size.
4392 	 */
4393 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4394 				 sizeof(struct iproc_reqctx_s));
4395 
4396 	return err;
4397 }
4398 
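/**
 * aead_cra_init() - Initialize an AEAD transform. Performs the common
 * initialization, sets the request context size, generates a random first
 * IV, and, when the algorithm is flagged CRYPTO_ALG_NEED_FALLBACK,
 * allocates another implementation of the same algorithm to fall back on
 * for requests the SPU cannot handle.
 * @aead: AEAD transform being initialized
 *
 * Return: 0 if successful
 *	   < 0 otherwise
 */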
4399 static int aead_cra_init(struct crypto_aead *aead)
4400 {
4401 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4402 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4403 	struct crypto_alg *alg = tfm->__crt_alg;
4404 	struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4405 	struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4406 						      alg.aead);
4407 
4408 	int err = generic_cra_init(tfm, cipher_alg);
4409 
4410 	flow_log("%s()\n", __func__);
4411 
4412 	crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4413 	ctx->is_esp = false;
4414 	ctx->salt_len = 0;
4415 	ctx->salt_offset = 0;
4416 
4417 	/* random first IV */
4418 	get_random_bytes(ctx->iv, MAX_IV_SIZE);
4419 	flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);
4420 
4421 	if (!err) {
4422 		if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4423 			flow_log("%s() creating fallback cipher\n", __func__);
4424 
4425 			ctx->fallback_cipher =
4426 			    crypto_alloc_aead(alg->cra_name, 0,
4427 					      CRYPTO_ALG_ASYNC |
4428 					      CRYPTO_ALG_NEED_FALLBACK);
4429 			if (IS_ERR(ctx->fallback_cipher)) {
4430 				pr_err("%s() Error: failed to allocate fallback for %s\n",
4431 				       __func__, alg->cra_name);
4432 				return PTR_ERR(ctx->fallback_cipher);
4433 			}
4434 		}
4435 	}
4436 
4437 	return err;
4438 }
4439 
4440 static void generic_cra_exit(struct crypto_tfm *tfm)
4441 {
4442 	atomic_dec(&iproc_priv.session_count);
4443 }
4444 
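/**
 * aead_cra_exit() - AEAD transform teardown. Performs the common exit
 * processing and frees the fallback cipher, if one was allocated.
 * @aead: AEAD transform being destroyed
 */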
4445 static void aead_cra_exit(struct crypto_aead *aead)
4446 {
4447 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4448 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4449 
4450 	generic_cra_exit(tfm);
4451 
4452 	if (ctx->fallback_cipher) {
4453 		crypto_free_aead(ctx->fallback_cipher);
4454 		ctx->fallback_cipher = NULL;
4455 	}
4456 }
4457 
4458 /**
4459  * spu_functions_register() - Specify hardware-specific SPU functions based on
4460  * SPU type read from device tree.
4461  * @dev:	device structure
4462  * @spu_type:	SPU hardware generation
4463  * @spu_subtype: SPU hardware version
4464  */
4465 static void spu_functions_register(struct device *dev,
4466 				   enum spu_spu_type spu_type,
4467 				   enum spu_spu_subtype spu_subtype)
4468 {
4469 	struct spu_hw *spu = &iproc_priv.spu;
4470 
4471 	if (spu_type == SPU_TYPE_SPUM) {
4472 		dev_dbg(dev, "Registering SPUM functions");
4473 		spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4474 		spu->spu_payload_length = spum_payload_length;
4475 		spu->spu_response_hdr_len = spum_response_hdr_len;
4476 		spu->spu_hash_pad_len = spum_hash_pad_len;
4477 		spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4478 		spu->spu_assoc_resp_len = spum_assoc_resp_len;
4479 		spu->spu_aead_ivlen = spum_aead_ivlen;
4480 		spu->spu_hash_type = spum_hash_type;
4481 		spu->spu_digest_size = spum_digest_size;
4482 		spu->spu_create_request = spum_create_request;
4483 		spu->spu_cipher_req_init = spum_cipher_req_init;
4484 		spu->spu_cipher_req_finish = spum_cipher_req_finish;
4485 		spu->spu_request_pad = spum_request_pad;
4486 		spu->spu_tx_status_len = spum_tx_status_len;
4487 		spu->spu_rx_status_len = spum_rx_status_len;
4488 		spu->spu_status_process = spum_status_process;
4489 		spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4490 		spu->spu_ccm_update_iv = spum_ccm_update_iv;
4491 		spu->spu_wordalign_padlen = spum_wordalign_padlen;
4492 		if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4493 			spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4494 		else
4495 			spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4496 	} else {
4497 		dev_dbg(dev, "Registering SPU2 functions");
4498 		spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4499 		spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4500 		spu->spu_payload_length = spu2_payload_length;
4501 		spu->spu_response_hdr_len = spu2_response_hdr_len;
4502 		spu->spu_hash_pad_len = spu2_hash_pad_len;
4503 		spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4504 		spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4505 		spu->spu_aead_ivlen = spu2_aead_ivlen;
4506 		spu->spu_hash_type = spu2_hash_type;
4507 		spu->spu_digest_size = spu2_digest_size;
4508 		spu->spu_create_request = spu2_create_request;
4509 		spu->spu_cipher_req_init = spu2_cipher_req_init;
4510 		spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4511 		spu->spu_request_pad = spu2_request_pad;
4512 		spu->spu_tx_status_len = spu2_tx_status_len;
4513 		spu->spu_rx_status_len = spu2_rx_status_len;
4514 		spu->spu_status_process = spu2_status_process;
4515 		spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4516 		spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4517 		spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4518 	}
4519 }
4520 
4521 /**
4522  * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox
4523  * channel for the SPU being probed.
4524  * @dev:  SPU driver device structure
4525  *
4526  * Return: 0 if successful
4527  *	   < 0 otherwise
4528  */
4529 static int spu_mb_init(struct device *dev)
4530 {
4531 	struct mbox_client *mcl = &iproc_priv.mcl[iproc_priv.spu.num_spu];
4532 	int err;
4533 
4534 	mcl->dev = dev;
4535 	mcl->tx_block = false;
4536 	mcl->tx_tout = 0;
4537 	mcl->knows_txdone = false;
4538 	mcl->rx_callback = spu_rx_callback;
4539 	mcl->tx_done = NULL;
4540 
4541 	iproc_priv.mbox[iproc_priv.spu.num_spu] =
4542 			mbox_request_channel(mcl, 0);
4543 	if (IS_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu])) {
4544 		err = (int)PTR_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu]);
4545 		dev_err(dev,
4546 			"Mbox channel %d request failed with err %d",
4547 			iproc_priv.spu.num_spu, err);
4548 		iproc_priv.mbox[iproc_priv.spu.num_spu] = NULL;
4549 		return err;
4550 	}
4551 
4552 	return 0;
4553 }
4554 
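/**
 * spu_mb_release() - Free the mailbox channels requested for all probed
 * SPUs.
 * @pdev: platform device
 */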
4555 static void spu_mb_release(struct platform_device *pdev)
4556 {
4557 	int i;
4558 
4559 	for (i = 0; i < iproc_priv.spu.num_spu; i++)
4560 		mbox_free_channel(iproc_priv.mbox[i]);
4561 }
4562 
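/**
 * spu_counters_init() - Initialize the next-channel index and zero the
 * driver statistics counters: session and stream counts, byte counts,
 * per-algorithm operation counts, and error counts.
 */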
4563 static void spu_counters_init(void)
4564 {
4565 	int i;
4566 	int j;
4567 
4568 	atomic_set(&iproc_priv.session_count, 0);
4569 	atomic_set(&iproc_priv.stream_count, 0);
4570 	atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_spu);
4571 	atomic64_set(&iproc_priv.bytes_in, 0);
4572 	atomic64_set(&iproc_priv.bytes_out, 0);
4573 	for (i = 0; i < SPU_OP_NUM; i++) {
4574 		atomic_set(&iproc_priv.op_counts[i], 0);
4575 		atomic_set(&iproc_priv.setkey_cnt[i], 0);
4576 	}
4577 	for (i = 0; i < CIPHER_ALG_LAST; i++)
4578 		for (j = 0; j < CIPHER_MODE_LAST; j++)
4579 			atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4580 
4581 	for (i = 0; i < HASH_ALG_LAST; i++) {
4582 		atomic_set(&iproc_priv.hash_cnt[i], 0);
4583 		atomic_set(&iproc_priv.hmac_cnt[i], 0);
4584 	}
4585 	for (i = 0; i < AEAD_TYPE_LAST; i++)
4586 		atomic_set(&iproc_priv.aead_cnt[i], 0);
4587 
4588 	atomic_set(&iproc_priv.mb_no_spc, 0);
4589 	atomic_set(&iproc_priv.mb_send_fail, 0);
4590 	atomic_set(&iproc_priv.bad_icv, 0);
4591 }
4592 
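/**
 * spu_register_ablkcipher() - Fill in the common crypto_alg fields for one
 * ablkcipher entry in driver_algs[] and register it with the kernel crypto
 * API. RC4 entries are silently skipped on SPU2, which does not support RC4.
 * @driver_alg: driver algorithm entry to register
 *
 * Return: 0 if successful or skipped
 *	   < 0 otherwise
 */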
4593 static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
4594 {
4595 	struct spu_hw *spu = &iproc_priv.spu;
4596 	struct crypto_alg *crypto = &driver_alg->alg.crypto;
4597 	int err;
4598 
4599 	/* SPU2 does not support RC4 */
4600 	if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
4601 	    (spu->spu_type == SPU_TYPE_SPU2))
4602 		return 0;
4603 
4604 	crypto->cra_module = THIS_MODULE;
4605 	crypto->cra_priority = cipher_pri;
4606 	crypto->cra_alignmask = 0;
4607 	crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
4608 	INIT_LIST_HEAD(&crypto->cra_list);
4609 
4610 	crypto->cra_init = ablkcipher_cra_init;
4611 	crypto->cra_exit = generic_cra_exit;
4612 	crypto->cra_type = &crypto_ablkcipher_type;
4613 	crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4614 				CRYPTO_ALG_KERN_DRIVER_ONLY;
4615 
4616 	crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
4617 	crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
4618 	crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
4619 
4620 	err = crypto_register_alg(crypto);
4621 	/* Mark alg as having been registered, if successful */
4622 	if (err == 0)
4623 		driver_alg->registered = true;
4624 	pr_debug("  registered ablkcipher %s\n", crypto->cra_driver_name);
4625 	return err;
4626 }
4627 
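/**
 * spu_register_ahash() - Fill in the common ahash fields for one hash entry
 * in driver_algs[], select plain hash or HMAC operations, and register it
 * with the kernel crypto API. Entries the probed hardware cannot support
 * (AES hashes other than XCBC on SPU-M, SHA3 on anything but SPU2 version 2)
 * are silently skipped.
 * @driver_alg: driver algorithm entry to register
 *
 * Return: 0 if successful or skipped
 *	   < 0 otherwise
 */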
4628 static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4629 {
4630 	struct spu_hw *spu = &iproc_priv.spu;
4631 	struct ahash_alg *hash = &driver_alg->alg.hash;
4632 	int err;
4633 
4634 	/* AES-XCBC is the only AES hash type currently supported on SPU-M */
4635 	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4636 	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4637 	    (spu->spu_type == SPU_TYPE_SPUM))
4638 		return 0;
4639 
4640 	/* SHA3 algorithm variants are only registered on SPU2 version 2 */
4641 	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4642 	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4643 		return 0;
4644 
4645 	hash->halg.base.cra_module = THIS_MODULE;
4646 	hash->halg.base.cra_priority = hash_pri;
4647 	hash->halg.base.cra_alignmask = 0;
4648 	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4649 	hash->halg.base.cra_init = ahash_cra_init;
4650 	hash->halg.base.cra_exit = generic_cra_exit;
4651 	hash->halg.base.cra_type = &crypto_ahash_type;
4652 	hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
4653 	hash->halg.statesize = sizeof(struct spu_hash_export_s);
4654 
4655 	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4656 		hash->setkey = ahash_setkey;
4657 		hash->init = ahash_init;
4658 		hash->update = ahash_update;
4659 		hash->final = ahash_final;
4660 		hash->finup = ahash_finup;
4661 		hash->digest = ahash_digest;
4662 	} else {
4663 		hash->setkey = ahash_hmac_setkey;
4664 		hash->init = ahash_hmac_init;
4665 		hash->update = ahash_hmac_update;
4666 		hash->final = ahash_hmac_final;
4667 		hash->finup = ahash_hmac_finup;
4668 		hash->digest = ahash_hmac_digest;
4669 	}
4670 	hash->export = ahash_export;
4671 	hash->import = ahash_import;
4672 
4673 	err = crypto_register_ahash(hash);
4674 	/* Mark alg as having been registered, if successful */
4675 	if (err == 0)
4676 		driver_alg->registered = true;
4677 	pr_debug("  registered ahash %s\n",
4678 		 hash->halg.base.cra_driver_name);
4679 	return err;
4680 }
4681 
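/**
 * spu_register_aead() - Fill in the common AEAD fields for one entry in
 * driver_algs[] and register it with the kernel crypto API.
 * @driver_alg: driver algorithm entry to register
 *
 * Return: 0 if successful
 *	   < 0 otherwise
 */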
4682 static int spu_register_aead(struct iproc_alg_s *driver_alg)
4683 {
4684 	struct aead_alg *aead = &driver_alg->alg.aead;
4685 	int err;
4686 
4687 	aead->base.cra_module = THIS_MODULE;
4688 	aead->base.cra_priority = aead_pri;
4689 	aead->base.cra_alignmask = 0;
4690 	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4691 	INIT_LIST_HEAD(&aead->base.cra_list);
4692 
4693 	aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
4694 	/* setkey set in alg initialization */
4695 	aead->setauthsize = aead_setauthsize;
4696 	aead->encrypt = aead_encrypt;
4697 	aead->decrypt = aead_decrypt;
4698 	aead->init = aead_cra_init;
4699 	aead->exit = aead_cra_exit;
4700 
4701 	err = crypto_register_aead(aead);
4702 	/* Mark alg as having been registered, if successful */
4703 	if (err == 0)
4704 		driver_alg->registered = true;
4705 	pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
4706 	return err;
4707 }
4708 
4709 /* register crypto algorithms the device supports */
4710 static int spu_algs_register(struct device *dev)
4711 {
4712 	int i, j;
4713 	int err;
4714 
4715 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4716 		switch (driver_algs[i].type) {
4717 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4718 			err = spu_register_ablkcipher(&driver_algs[i]);
4719 			break;
4720 		case CRYPTO_ALG_TYPE_AHASH:
4721 			err = spu_register_ahash(&driver_algs[i]);
4722 			break;
4723 		case CRYPTO_ALG_TYPE_AEAD:
4724 			err = spu_register_aead(&driver_algs[i]);
4725 			break;
4726 		default:
4727 			dev_err(dev,
4728 				"iproc-crypto: unknown alg type: %d",
4729 				driver_algs[i].type);
4730 			err = -EINVAL;
4731 		}
4732 
4733 		if (err) {
4734 			dev_err(dev, "alg registration failed with error %d\n",
4735 				err);
4736 			goto err_algs;
4737 		}
4738 	}
4739 
4740 	return 0;
4741 
4742 err_algs:
4743 	for (j = 0; j < i; j++) {
4744 		/* Skip any algorithm not registered */
4745 		if (!driver_algs[j].registered)
4746 			continue;
4747 		switch (driver_algs[j].type) {
4748 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4749 			crypto_unregister_alg(&driver_algs[j].alg.crypto);
4750 			driver_algs[j].registered = false;
4751 			break;
4752 		case CRYPTO_ALG_TYPE_AHASH:
4753 			crypto_unregister_ahash(&driver_algs[j].alg.hash);
4754 			driver_algs[j].registered = false;
4755 			break;
4756 		case CRYPTO_ALG_TYPE_AEAD:
4757 			crypto_unregister_aead(&driver_algs[j].alg.aead);
4758 			driver_algs[j].registered = false;
4759 			break;
4760 		}
4761 	}
4762 	return err;
4763 }
4764 
4765 /* ==================== Kernel Platform API ==================== */
4766 
4767 static struct spu_type_subtype spum_ns2_types = {
4768 	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
4769 };
4770 
4771 static struct spu_type_subtype spum_nsp_types = {
4772 	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
4773 };
4774 
4775 static struct spu_type_subtype spu2_types = {
4776 	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
4777 };
4778 
4779 static struct spu_type_subtype spu2_v2_types = {
4780 	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
4781 };
4782 
4783 static const struct of_device_id bcm_spu_dt_ids[] = {
4784 	{
4785 		.compatible = "brcm,spum-crypto",
4786 		.data = &spum_ns2_types,
4787 	},
4788 	{
4789 		.compatible = "brcm,spum-nsp-crypto",
4790 		.data = &spum_nsp_types,
4791 	},
4792 	{
4793 		.compatible = "brcm,spu2-crypto",
4794 		.data = &spu2_types,
4795 	},
4796 	{
4797 		.compatible = "brcm,spu2-v2-crypto",
4798 		.data = &spu2_v2_types,
4799 	},
4800 	{ /* sentinel */ }
4801 };
4802 
4803 MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
4804 
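/**
 * spu_dt_read() - Determine the SPU type and subtype from the device tree
 * match data and map the control registers for the SPU being probed. All
 * SPUs in a system must be of the same type.
 * @pdev: platform device for this SPU
 *
 * Return: 0 if successful
 *	   < 0 otherwise
 */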
4805 static int spu_dt_read(struct platform_device *pdev)
4806 {
4807 	struct device *dev = &pdev->dev;
4808 	struct spu_hw *spu = &iproc_priv.spu;
4809 	struct resource *spu_ctrl_regs;
4810 	const struct of_device_id *match;
4811 	const struct spu_type_subtype *matched_spu_type;
4812 	void __iomem *spu_reg_vbase[MAX_SPUS];
4813 	int err;
4814 
4815 	match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
4816 	matched_spu_type = match->data;
4817 
4818 	if (iproc_priv.spu.num_spu > 0) {
4819 		/* If this is 2nd or later SPU, make sure it's same type */
4820 		if ((spu->spu_type != matched_spu_type->type) ||
4821 		    (spu->spu_subtype != matched_spu_type->subtype)) {
4822 			err = -EINVAL;
4823 			dev_err(&pdev->dev, "Multiple SPU types not allowed");
4824 			return err;
4825 		}
4826 	} else {
4827 		/* Record type of first SPU */
4828 		spu->spu_type = matched_spu_type->type;
4829 		spu->spu_subtype = matched_spu_type->subtype;
4830 	}
4831 
4832 	/* Get and map SPU registers */
4833 	spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4834 	if (!spu_ctrl_regs) {
4835 		err = -EINVAL;
4836 		dev_err(&pdev->dev, "Invalid/missing registers for SPU\n");
4837 		return err;
4838 	}
4839 
4840 	spu_reg_vbase[iproc_priv.spu.num_spu] =
4841 				devm_ioremap_resource(dev, spu_ctrl_regs);
4842 	if (IS_ERR(spu_reg_vbase[iproc_priv.spu.num_spu])) {
4843 		err = PTR_ERR(spu_reg_vbase[iproc_priv.spu.num_spu]);
4844 		dev_err(&pdev->dev, "Failed to map registers: %d\n",
4845 			err);
4846 		spu_reg_vbase[iproc_priv.spu.num_spu] = NULL;
4847 		return err;
4848 	}
4849 
4850 	dev_dbg(dev, "SPU %d detected.", iproc_priv.spu.num_spu);
4851 
4852 	spu->reg_vbase[iproc_priv.spu.num_spu] = spu_reg_vbase[iproc_priv.spu.num_spu];
4853 
4854 	return 0;
4855 }
4856 
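/**
 * bcm_spu_probe() - Probe one SPU device. Reads the device tree node,
 * requests a mailbox channel, and, when the first SPU is probed, registers
 * the hardware-specific SPU functions, initializes counters and debugfs,
 * and registers the supported crypto algorithms.
 * @pdev: platform device for this SPU
 *
 * Return: 0 if successful
 *	   < 0 otherwise
 */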
4857 int bcm_spu_probe(struct platform_device *pdev)
4858 {
4859 	struct device *dev = &pdev->dev;
4860 	struct spu_hw *spu = &iproc_priv.spu;
4861 	int err = 0;
4862 
4863 	iproc_priv.pdev[iproc_priv.spu.num_spu] = pdev;
4864 	platform_set_drvdata(iproc_priv.pdev[iproc_priv.spu.num_spu],
4865 			     &iproc_priv);
4866 
4867 	err = spu_dt_read(pdev);
4868 	if (err < 0)
4869 		goto failure;
4870 
4871 	err = spu_mb_init(&pdev->dev);
4872 	if (err < 0)
4873 		goto failure;
4874 
4875 	iproc_priv.spu.num_spu++;
4876 
4877 	/* If already initialized, we've just added another SPU and are done */
4878 	if (iproc_priv.inited)
4879 		return 0;
4880 
4881 	if (spu->spu_type == SPU_TYPE_SPUM)
4882 		iproc_priv.bcm_hdr_len = 8;
4883 	else if (spu->spu_type == SPU_TYPE_SPU2)
4884 		iproc_priv.bcm_hdr_len = 0;
4885 
4886 	spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
4887 
4888 	spu_counters_init();
4889 
4890 	spu_setup_debugfs();
4891 
4892 	err = spu_algs_register(dev);
4893 	if (err < 0)
4894 		goto fail_reg;
4895 
4896 	iproc_priv.inited = true;
4897 
4898 	return 0;
4899 
4900 fail_reg:
4901 	spu_free_debugfs();
4902 failure:
4903 	spu_mb_release(pdev);
4904 	dev_err(dev, "%s failed with error %d.\n", __func__, err);
4905 
4906 	return err;
4907 }
4908 
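/**
 * bcm_spu_remove() - Unregister every algorithm that was previously
 * registered, then release debugfs entries and mailbox channels.
 * @pdev: platform device being removed
 *
 * Return: 0 always
 */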
4909 int bcm_spu_remove(struct platform_device *pdev)
4910 {
4911 	int i;
4912 	struct device *dev = &pdev->dev;
4913 	char *cdn;
4914 
4915 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4916 		/*
4917 		 * Not all algorithms were registered, depending on whether
4918 		 * hardware is SPU or SPU2.  So here we make sure to skip
4919 		 * those algorithms that were not previously registered.
4920 		 */
4921 		if (!driver_algs[i].registered)
4922 			continue;
4923 
4924 		switch (driver_algs[i].type) {
4925 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4926 			crypto_unregister_alg(&driver_algs[i].alg.crypto);
4927 			dev_dbg(dev, "  unregistered cipher %s\n",
4928 				driver_algs[i].alg.crypto.cra_driver_name);
4929 			driver_algs[i].registered = false;
4930 			break;
4931 		case CRYPTO_ALG_TYPE_AHASH:
4932 			crypto_unregister_ahash(&driver_algs[i].alg.hash);
4933 			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
4934 			dev_dbg(dev, "  unregistered hash %s\n", cdn);
4935 			driver_algs[i].registered = false;
4936 			break;
4937 		case CRYPTO_ALG_TYPE_AEAD:
4938 			crypto_unregister_aead(&driver_algs[i].alg.aead);
4939 			dev_dbg(dev, "  unregistered aead %s\n",
4940 				driver_algs[i].alg.aead.base.cra_driver_name);
4941 			driver_algs[i].registered = false;
4942 			break;
4943 		}
4944 	}
4945 	spu_free_debugfs();
4946 	spu_mb_release(pdev);
4947 	return 0;
4948 }
4949 
4950 /* ===== Kernel Module API ===== */
4951 
4952 static struct platform_driver bcm_spu_pdriver = {
4953 	.driver = {
4954 		   .name = "brcm-spu-crypto",
4955 		   .of_match_table = of_match_ptr(bcm_spu_dt_ids),
4956 		   },
4957 	.probe = bcm_spu_probe,
4958 	.remove = bcm_spu_remove,
4959 };
4960 module_platform_driver(bcm_spu_pdriver);
4961 
4962 MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
4963 MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
4964 MODULE_LICENSE("GPL v2");
4965