xref: /openbmc/linux/drivers/crypto/bcm/cipher.c (revision e5f586c763a079349398e2b0c7c271386193ac34)
1 /*
2  * Copyright 2016 Broadcom
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License, version 2, as
6  * published by the Free Software Foundation (the "GPL").
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License version 2 (GPLv2) for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * version 2 (GPLv2) along with this source code.
15  */
16 
17 #include <linux/err.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/errno.h>
21 #include <linux/kernel.h>
22 #include <linux/interrupt.h>
23 #include <linux/platform_device.h>
24 #include <linux/scatterlist.h>
25 #include <linux/crypto.h>
26 #include <linux/kthread.h>
27 #include <linux/rtnetlink.h>
28 #include <linux/sched.h>
29 #include <linux/of_address.h>
30 #include <linux/of_device.h>
31 #include <linux/io.h>
32 #include <linux/bitops.h>
33 
34 #include <crypto/algapi.h>
35 #include <crypto/aead.h>
36 #include <crypto/internal/aead.h>
37 #include <crypto/aes.h>
38 #include <crypto/des.h>
39 #include <crypto/sha.h>
40 #include <crypto/md5.h>
41 #include <crypto/authenc.h>
42 #include <crypto/skcipher.h>
43 #include <crypto/hash.h>
45 #include <crypto/sha3.h>
46 
47 #include "util.h"
48 #include "cipher.h"
49 #include "spu.h"
50 #include "spum.h"
51 #include "spu2.h"
52 
53 /* ================= Device Structure ================== */
54 
55 struct device_private iproc_priv;
56 
57 /* ==================== Parameters ===================== */
58 
59 int flow_debug_logging;
60 module_param(flow_debug_logging, int, 0644);
61 MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");
62 
63 int packet_debug_logging;
64 module_param(packet_debug_logging, int, 0644);
65 MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");
66 
67 int debug_logging_sleep;
68 module_param(debug_logging_sleep, int, 0644);
69 MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
70 
71 /*
72  * The value of these module parameters is used to set the priority for each
73  * algo type when this driver registers algos with the kernel crypto API.
74  * To use a priority other than the default, pass the priority as a module
75  * parameter to insmod or modprobe. Changing a priority after init time has no effect.
76  *
77  * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
78  * algos, but more preferred than generic software algos.
79  */
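/*
 * Illustrative example (not part of the driver): assuming the module is built
 * as bcm_crypto_spu, non-default priorities could be set at load time with:
 *
 *   modprobe bcm_crypto_spu cipher_pri=300 hash_pri=250 aead_pri=300
 */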
80 static int cipher_pri = 150;
81 module_param(cipher_pri, int, 0644);
82 MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");
83 
84 static int hash_pri = 100;
85 module_param(hash_pri, int, 0644);
86 MODULE_PARM_DESC(hash_pri, "Priority for hash algos");
87 
88 static int aead_pri = 150;
89 module_param(aead_pri, int, 0644);
90 MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
91 
92 #define MAX_SPUS 16
93 
94 /* A type 3 BCM header, expected to precede the SPU header for SPU-M.
95  * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
96  * 0x60 - ring 0
97  * 0x68 - ring 1
98  * 0x70 - ring 2
99  * 0x78 - ring 3
100  */
101 char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
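/*
 * Illustrative sketch (not part of the driver): given the bit layout above,
 * the first header byte for ring N (0..3) would be 0x60 | (N << 3), e.g.
 * 0x68 selects ring 1. BCMHEADER as defined here always selects ring 0.
 */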
102 /*
103  * Some SPU hardware does not use a BCM header on SPU messages, so BCM_HDR_LEN
104  * is set dynamically after reading the SPU type from the device tree.
105  */
106 #define BCM_HDR_LEN  iproc_priv.bcm_hdr_len
107 
108 /* min and max time to sleep before retrying when mbox queue is full. usec */
109 #define MBOX_SLEEP_MIN  800
110 #define MBOX_SLEEP_MAX 1000
111 
112 /**
113  * select_channel() - Select a SPU channel to handle a crypto request.
114  * Channels are selected in round-robin order.
115  *
116  * Return:  channel index
117  */
118 static u8 select_channel(void)
119 {
120 	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
121 
122 	return chan_idx % iproc_priv.spu.num_spu;
123 }
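/*
 * Note (illustrative): callers store the returned index in rctx->chan_idx and
 * use it to pick the mailbox channel when submitting, e.g.:
 *
 *   rctx->chan_idx = select_channel();
 *   ...
 *   err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
 */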
124 
125 /**
126  * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
127  * receive a SPU response message for an ablkcipher request. Includes buffers to
128  * catch SPU message headers and the response data.
129  * @mssg:	mailbox message containing the receive sg
130  * @rctx:	crypto request context
131  * @rx_frag_num: number of scatterlist elements required to hold the
132  *		SPU response message
133  * @chunksize:	Number of bytes of response data expected
134  * @stat_pad_len: Number of bytes required to pad the STAT field to
135  *		a 4-byte boundary
136  *
137  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
138  * when the request completes, whether the request is handled successfully or
139  * there is an error.
140  *
141  * Return:
142  *   0 if successful
143  *   < 0 if an error
144  */
145 static int
146 spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
147 			    struct iproc_reqctx_s *rctx,
148 			    u8 rx_frag_num,
149 			    unsigned int chunksize, u32 stat_pad_len)
150 {
151 	struct spu_hw *spu = &iproc_priv.spu;
152 	struct scatterlist *sg;	/* used to build sgs in mbox message */
153 	struct iproc_ctx_s *ctx = rctx->ctx;
154 	u32 datalen;		/* Number of bytes of response data expected */
155 
156 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
157 				rctx->gfp);
158 	if (!mssg->spu.dst)
159 		return -ENOMEM;
160 
161 	sg = mssg->spu.dst;
162 	sg_init_table(sg, rx_frag_num);
163 	/* Space for SPU message header */
164 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
165 
166 	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
167 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
168 	    spu->spu_xts_tweak_in_payload())
169 		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
170 			   SPU_XTS_TWEAK_SIZE);
171 
172 	/* Copy in each dst sg entry from request, up to chunksize */
173 	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
174 				 rctx->dst_nents, chunksize);
175 	if (datalen < chunksize) {
176 		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
177 		       __func__, chunksize, datalen);
178 		return -EFAULT;
179 	}
180 
181 	if (ctx->cipher.alg == CIPHER_ALG_RC4)
182 		/* Add buffer to catch 260-byte SUPDT field for RC4 */
183 		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
184 
185 	if (stat_pad_len)
186 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
187 
188 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
189 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
190 
191 	return 0;
192 }
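/*
 * For reference, the receive scatterlist built above has this layout (some
 * entries only when applicable):
 *
 *   [ SPU response header ]
 *   [ encrypted XTS tweak ]        (XTS mode only)
 *   [ response data -> dst sg ]    (chunksize bytes)
 *   [ SUPDT field ]                (RC4 only)
 *   [ STAT padding ]               (if stat_pad_len != 0)
 *   [ RX STATUS word ]
 */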
193 
194 /**
195  * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
196  * send a SPU request message for an ablkcipher request. Includes SPU message
197  * headers and the request data.
198  * @mssg:	mailbox message containing the transmit sg
199  * @rctx:	crypto request context
200  * @tx_frag_num: number of scatterlist elements required to construct the
201  *		SPU request message
202  * @chunksize:	Number of bytes of request data
203  * @pad_len:	Number of pad bytes
204  *
205  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
206  * when the request completes, whether the request is handled successfully or
207  * there is an error.
208  *
209  * Return:
210  *   0 if successful
211  *   < 0 if an error
212  */
213 static int
214 spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
215 			    struct iproc_reqctx_s *rctx,
216 			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
217 {
218 	struct spu_hw *spu = &iproc_priv.spu;
219 	struct scatterlist *sg;	/* used to build sgs in mbox message */
220 	struct iproc_ctx_s *ctx = rctx->ctx;
221 	u32 datalen;		/* Number of bytes of request data added to mbox msg */
222 	u32 stat_len;
223 
224 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
225 				rctx->gfp);
226 	if (unlikely(!mssg->spu.src))
227 		return -ENOMEM;
228 
229 	sg = mssg->spu.src;
230 	sg_init_table(sg, tx_frag_num);
231 
232 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
233 		   BCM_HDR_LEN + ctx->spu_req_hdr_len);
234 
235 	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
236 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
237 	    spu->spu_xts_tweak_in_payload())
238 		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
239 
240 	/* Copy in each src sg entry from request, up to chunksize */
241 	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
242 				 rctx->src_nents, chunksize);
243 	if (unlikely(datalen < chunksize)) {
244 		pr_err("%s(): failed to copy src sg to mbox msg",
245 		       __func__);
246 		return -EFAULT;
247 	}
248 
249 	if (pad_len)
250 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
251 
252 	stat_len = spu->spu_tx_status_len();
253 	if (stat_len) {
254 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
255 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
256 	}
257 	return 0;
258 }
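/*
 * For reference, the transmit scatterlist built above has this layout (some
 * entries only when applicable):
 *
 *   [ BCM header + SPU request header ]
 *   [ XTS tweak from IV ]          (XTS mode only)
 *   [ request data <- src sg ]     (chunksize bytes)
 *   [ request padding ]            (if pad_len != 0)
 *   [ TX STATUS word ]             (if the SPU uses a tx status field)
 */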
259 
260 /**
261  * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
262  * a single SPU request message, starting at the current position in the request
263  * data.
264  * @rctx:	Crypto request context
265  *
266  * This may be called on the crypto API thread, or, when a request is so large
267  * it must be broken into multiple SPU messages, on the thread used to invoke
268  * the response callback. When requests are broken into multiple SPU
269  * messages, we assume subsequent messages depend on previous results, and
270  * thus always wait for previous results before submitting the next message.
271  * Because requests are submitted in lock step like this, there is no need
272  * to synchronize access to request data structures.
273  *
274  * Return: -EINPROGRESS: request has been accepted and result will be returned
275  *			 asynchronously
276  *         Any other value indicates an error
277  */
278 static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
279 {
280 	struct spu_hw *spu = &iproc_priv.spu;
281 	struct crypto_async_request *areq = rctx->parent;
282 	struct ablkcipher_request *req =
283 	    container_of(areq, struct ablkcipher_request, base);
284 	struct iproc_ctx_s *ctx = rctx->ctx;
285 	struct spu_cipher_parms cipher_parms;
286 	int err = 0;
287 	unsigned int chunksize = 0;	/* Num bytes of request to submit */
288 	int remaining = 0;	/* Bytes of request still to process */
289 	int chunk_start;	/* Beginning of data for current SPU msg */
290 
291 	/* IV or ctr value to use in this SPU msg */
292 	u8 local_iv_ctr[MAX_IV_SIZE];
293 	u32 stat_pad_len;	/* num bytes to align status field */
294 	u32 pad_len;		/* total length of all padding */
295 	bool update_key = false;
296 	struct brcm_message *mssg;	/* mailbox message */
297 	int retry_cnt = 0;
298 
299 	/* number of entries in src and dst sg in mailbox message. */
300 	u8 rx_frag_num = 2;	/* response header and STATUS */
301 	u8 tx_frag_num = 1;	/* request header */
302 
303 	flow_log("%s\n", __func__);
304 
305 	cipher_parms.alg = ctx->cipher.alg;
306 	cipher_parms.mode = ctx->cipher.mode;
307 	cipher_parms.type = ctx->cipher_type;
308 	cipher_parms.key_len = ctx->enckeylen;
309 	cipher_parms.key_buf = ctx->enckey;
310 	cipher_parms.iv_buf = local_iv_ctr;
311 	cipher_parms.iv_len = rctx->iv_ctr_len;
312 
313 	mssg = &rctx->mb_mssg;
314 	chunk_start = rctx->src_sent;
315 	remaining = rctx->total_todo - chunk_start;
316 
317 	/* determine the chunk we are breaking off and update the indexes */
318 	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
319 	    (remaining > ctx->max_payload))
320 		chunksize = ctx->max_payload;
321 	else
322 		chunksize = remaining;
323 
324 	rctx->src_sent += chunksize;
325 	rctx->total_sent = rctx->src_sent;
326 
327 	/* Count number of sg entries to be included in this request */
328 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
329 	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
330 
331 	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
332 	    rctx->is_encrypt && chunk_start)
333 		/*
334 		 * Encrypting a chunk that is not the first. Copy the last block
335 		 * of the previous result to the IV for this chunk.
336 		 */
337 		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
338 				    rctx->iv_ctr_len,
339 				    chunk_start - rctx->iv_ctr_len);
340 
341 	if (rctx->iv_ctr_len) {
342 		/* get our local copy of the iv */
343 		memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
344 		       rctx->iv_ctr_len);
345 
346 		/* generate the next IV if possible */
347 		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
348 		    !rctx->is_encrypt) {
349 			/*
350 			 * CBC Decrypt: next IV is the last ciphertext block in
351 			 * this chunk
352 			 */
353 			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
354 					    rctx->iv_ctr_len,
355 					    rctx->src_sent - rctx->iv_ctr_len);
356 		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
357 			/*
358 			 * The SPU hardware increments the counter once for
359 			 * each AES block of 16 bytes. So update the counter
360 			 * for the next chunk, if there is one. Note that for
361 			 * this chunk, the counter has already been copied to
362 			 * local_iv_ctr. We can assume a block size of 16,
363 			 * because we only support CTR mode for AES, not for
364 			 * any other cipher alg.
365 			 */
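			/*
			 * For example, a 4096-byte chunk covers 256 AES
			 * blocks, so the counter is advanced by
			 * 4096 >> 4 == 256 before the next chunk.
			 */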
366 			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
367 		}
368 	}
369 
370 	if (ctx->cipher.alg == CIPHER_ALG_RC4) {
371 		rx_frag_num++;
372 		if (chunk_start) {
373 			/*
374 			 * for non-first RC4 chunks, use SUPDT from previous
375 			 * response as key for this chunk.
376 			 */
377 			cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
378 			update_key = true;
379 			cipher_parms.type = CIPHER_TYPE_UPDT;
380 		} else if (!rctx->is_encrypt) {
381 			/*
382 			 * First RC4 chunk. For decrypt, key in pre-built msg
383 			 * header may have been changed if encrypt required
384 			 * multiple chunks. So revert the key to the
385 			 * ctx->enckey value.
386 			 */
387 			update_key = true;
388 			cipher_parms.type = CIPHER_TYPE_INIT;
389 		}
390 	}
391 
392 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
393 		flow_log("max_payload infinite\n");
394 	else
395 		flow_log("max_payload %u\n", ctx->max_payload);
396 
397 	flow_log("sent:%u start:%u remains:%u size:%u\n",
398 		 rctx->src_sent, chunk_start, remaining, chunksize);
399 
400 	/* Copy SPU header template created at setkey time */
401 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
402 	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));
403 
404 	/*
405 	 * Pass SUPDT field as key. Key field in finish() call is only used
406 	 * when update_key has been set above for RC4. Will be ignored in
407 	 * all other cases.
408 	 */
409 	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
410 				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
411 				   &cipher_parms, update_key, chunksize);
412 
413 	atomic64_add(chunksize, &iproc_priv.bytes_out);
414 
415 	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
416 	if (stat_pad_len)
417 		rx_frag_num++;
418 	pad_len = stat_pad_len;
419 	if (pad_len) {
420 		tx_frag_num++;
421 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
422 				     0, ctx->auth.alg, ctx->auth.mode,
423 				     rctx->total_sent, stat_pad_len);
424 	}
425 
426 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
427 			      ctx->spu_req_hdr_len);
428 	packet_log("payload:\n");
429 	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
430 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
431 
432 	/*
433 	 * Build mailbox message containing SPU request msg and rx buffers
434 	 * to catch response message
435 	 */
436 	memset(mssg, 0, sizeof(*mssg));
437 	mssg->type = BRCM_MESSAGE_SPU;
438 	mssg->ctx = rctx;	/* Will be returned in response */
439 
440 	/* Create rx scatterlist to catch result */
441 	rx_frag_num += rctx->dst_nents;
442 
443 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
444 	    spu->spu_xts_tweak_in_payload())
445 		rx_frag_num++;	/* extra sg to insert tweak */
446 
447 	err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
448 					  stat_pad_len);
449 	if (err)
450 		return err;
451 
452 	/* Create tx scatterlist containing SPU request message */
453 	tx_frag_num += rctx->src_nents;
454 	if (spu->spu_tx_status_len())
455 		tx_frag_num++;
456 
457 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
458 	    spu->spu_xts_tweak_in_payload())
459 		tx_frag_num++;	/* extra sg to insert tweak */
460 
461 	err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
462 					  pad_len);
463 	if (err)
464 		return err;
465 
466 	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
467 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
468 		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
469 			/*
470 			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
471 			 * not in atomic context and we can wait and try again.
472 			 */
473 			retry_cnt++;
474 			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
475 			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
476 						mssg);
477 			atomic_inc(&iproc_priv.mb_no_spc);
478 		}
479 	}
480 	if (unlikely(err < 0)) {
481 		atomic_inc(&iproc_priv.mb_send_fail);
482 		return err;
483 	}
484 
485 	return -EINPROGRESS;
486 }
487 
488 /**
489  * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
490  * total received count for the request and updates global stats.
491  * @rctx:	Crypto request context
492  */
493 static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
494 {
495 	struct spu_hw *spu = &iproc_priv.spu;
496 #ifdef DEBUG
497 	struct crypto_async_request *areq = rctx->parent;
498 	struct ablkcipher_request *req = ablkcipher_request_cast(areq);
499 #endif
500 	struct iproc_ctx_s *ctx = rctx->ctx;
501 	u32 payload_len;
502 
503 	/* See how much data was returned */
504 	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
505 
506 	/*
507 	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
508 	 * encrypted tweak ("i") value; we don't count those.
509 	 */
510 	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
511 	    spu->spu_xts_tweak_in_payload() &&
512 	    (payload_len >= SPU_XTS_TWEAK_SIZE))
513 		payload_len -= SPU_XTS_TWEAK_SIZE;
514 
515 	atomic64_add(payload_len, &iproc_priv.bytes_in);
516 
517 	flow_log("%s() offset: %u, bd_len: %u BD:\n",
518 		 __func__, rctx->total_received, payload_len);
519 
520 	dump_sg(req->dst, rctx->total_received, payload_len);
521 	if (ctx->cipher.alg == CIPHER_ALG_RC4)
522 		packet_dump("  supdt ", rctx->msg_buf.c.supdt_tweak,
523 			    SPU_SUPDT_LEN);
524 
525 	rctx->total_received += payload_len;
526 	if (rctx->total_received == rctx->total_todo) {
527 		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
528 		atomic_inc(
529 		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
530 	}
531 }
532 
533 /**
534  * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
535  * receive a SPU response message for an ahash request.
536  * @mssg:	mailbox message containing the receive sg
537  * @rctx:	crypto request context
538  * @rx_frag_num: number of scatterlist elements required to hold the
539  *		SPU response message
540  * @digestsize: length of hash digest, in bytes
541  * @stat_pad_len: Number of bytes required to pad the STAT field to
542  *		a 4-byte boundary
543  *
544  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
545  * when the request completes, whether the request is handled successfully or
546  * there is an error.
547  *
548  * Return:
549  *   0 if successful
550  *   < 0 if an error
551  */
552 static int
553 spu_ahash_rx_sg_create(struct brcm_message *mssg,
554 		       struct iproc_reqctx_s *rctx,
555 		       u8 rx_frag_num, unsigned int digestsize,
556 		       u32 stat_pad_len)
557 {
558 	struct spu_hw *spu = &iproc_priv.spu;
559 	struct scatterlist *sg;	/* used to build sgs in mbox message */
560 	struct iproc_ctx_s *ctx = rctx->ctx;
561 
562 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
563 				rctx->gfp);
564 	if (!mssg->spu.dst)
565 		return -ENOMEM;
566 
567 	sg = mssg->spu.dst;
568 	sg_init_table(sg, rx_frag_num);
569 	/* Space for SPU message header */
570 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
571 
572 	/* Space for digest */
573 	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
574 
575 	if (stat_pad_len)
576 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
577 
578 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
579 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
580 	return 0;
581 }
582 
583 /**
584  * spu_ahash_tx_sg_create() -  Build up the scatterlist of buffers used to send
585  * a SPU request message for an ahash request. Includes SPU message headers and
586  * the request data.
587  * @mssg:	mailbox message containing the transmit sg
588  * @rctx:	crypto request context
589  * @tx_frag_num: number of scatterlist elements required to construct the
590  *		SPU request message
591  * @spu_hdr_len: length in bytes of SPU message header
592  * @hash_carry_len: Number of bytes of data carried over from previous req
593  * @new_data_len: Number of bytes of new request data
594  * @pad_len:	Number of pad bytes
595  *
596  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
597  * when the request completes, whether the request is handled successfully or
598  * there is an error.
599  *
600  * Return:
601  *   0 if successful
602  *   < 0 if an error
603  */
604 static int
605 spu_ahash_tx_sg_create(struct brcm_message *mssg,
606 		       struct iproc_reqctx_s *rctx,
607 		       u8 tx_frag_num,
608 		       u32 spu_hdr_len,
609 		       unsigned int hash_carry_len,
610 		       unsigned int new_data_len, u32 pad_len)
611 {
612 	struct spu_hw *spu = &iproc_priv.spu;
613 	struct scatterlist *sg;	/* used to build sgs in mbox message */
614 	u32 datalen;		/* Number of bytes of request data added to mbox msg */
615 	u32 stat_len;
616 
617 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
618 				rctx->gfp);
619 	if (!mssg->spu.src)
620 		return -ENOMEM;
621 
622 	sg = mssg->spu.src;
623 	sg_init_table(sg, tx_frag_num);
624 
625 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
626 		   BCM_HDR_LEN + spu_hdr_len);
627 
628 	if (hash_carry_len)
629 		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
630 
631 	if (new_data_len) {
632 		/* Copy in each src sg entry from request, up to chunksize */
633 		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
634 					 rctx->src_nents, new_data_len);
635 		if (datalen < new_data_len) {
636 			pr_err("%s(): failed to copy src sg to mbox msg",
637 			       __func__);
638 			return -EFAULT;
639 		}
640 	}
641 
642 	if (pad_len)
643 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
644 
645 	stat_len = spu->spu_tx_status_len();
646 	if (stat_len) {
647 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
648 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
649 	}
650 
651 	return 0;
652 }
653 
654 /**
655  * handle_ahash_req() - Process an asynchronous hash request from the crypto
656  * API.
657  * @rctx:  Crypto request context
658  *
659  * Builds a SPU request message embedded in a mailbox message and submits the
660  * mailbox message on a selected mailbox channel. The SPU request message is
661  * constructed as a scatterlist, including entries from the crypto API's
662  * src scatterlist to avoid copying the data to be hashed. This function is
663  * called either on the thread from the crypto API, or, in the case that the
664  * crypto API request is too large to fit in a single SPU request message,
665  * on the thread that invokes the receive callback with a response message.
666  * Because some operations require the response from one chunk before the next
667  * chunk can be submitted, we always wait for the response for the previous
668  * chunk before submitting the next chunk. Because requests are submitted in
669  * lock step like this, there is no need to synchronize access to request data
670  * structures.
671  *
672  * Return:
673  *   -EINPROGRESS: request has been submitted to SPU and response will be
674  *		   returned asynchronously
675  *   -EAGAIN:      non-final request included a small amount of data, which for
676  *		   efficiency we did not submit to the SPU, but instead stored
677  *		   to be submitted to the SPU with the next part of the request
678  *   other:        an error code
679  */
680 static int handle_ahash_req(struct iproc_reqctx_s *rctx)
681 {
682 	struct spu_hw *spu = &iproc_priv.spu;
683 	struct crypto_async_request *areq = rctx->parent;
684 	struct ahash_request *req = ahash_request_cast(areq);
685 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
686 	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
687 	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
688 	struct iproc_ctx_s *ctx = rctx->ctx;
689 
690 	/* number of bytes still to be hashed in this req */
691 	unsigned int nbytes_to_hash = 0;
692 	int err = 0;
693 	unsigned int chunksize = 0;	/* length of hash carry + new data */
694 	/*
695 	 * length of new data, not from hash carry, to be submitted in
696 	 * this hw request
697 	 */
698 	unsigned int new_data_len;
699 
700 	unsigned int chunk_start = 0;
701 	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
702 	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
703 	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
704 	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
705 	struct brcm_message *mssg;	/* mailbox message */
706 	struct spu_request_opts req_opts;
707 	struct spu_cipher_parms cipher_parms;
708 	struct spu_hash_parms hash_parms;
709 	struct spu_aead_parms aead_parms;
710 	unsigned int local_nbuf;
711 	u32 spu_hdr_len;
712 	unsigned int digestsize;
713 	u16 rem = 0;
714 	int retry_cnt = 0;
715 
716 	/*
717 	 * number of entries in src and dst sg. Always includes SPU msg header.
718 	 * rx always includes a buffer to catch digest and STATUS.
719 	 */
720 	u8 rx_frag_num = 3;
721 	u8 tx_frag_num = 1;
722 
723 	flow_log("total_todo %u, total_sent %u\n",
724 		 rctx->total_todo, rctx->total_sent);
725 
726 	memset(&req_opts, 0, sizeof(req_opts));
727 	memset(&cipher_parms, 0, sizeof(cipher_parms));
728 	memset(&hash_parms, 0, sizeof(hash_parms));
729 	memset(&aead_parms, 0, sizeof(aead_parms));
730 
731 	req_opts.bd_suppress = true;
732 	hash_parms.alg = ctx->auth.alg;
733 	hash_parms.mode = ctx->auth.mode;
734 	hash_parms.type = HASH_TYPE_NONE;
735 	hash_parms.key_buf = (u8 *)ctx->authkey;
736 	hash_parms.key_len = ctx->authkeylen;
737 
738 	/*
739 	 * For the AES hash algorithms, the assignment below looks a bit odd,
740 	 * but it is needed so that AES-XCBC and AES-CMAC can differentiate
741 	 * between 128-, 192-, and 256-bit key values. The hash algorithm is
742 	 * selected based on the key size; for example, a 128-bit key selects
743 	 * the AES-128 hash algorithm.
744 	 */
745 	cipher_parms.type = ctx->cipher_type;
746 
747 	mssg = &rctx->mb_mssg;
748 	chunk_start = rctx->src_sent;
749 
750 	/*
751 	 * Compute the amount remaining to hash. This may include data
752 	 * carried over from previous requests.
753 	 */
754 	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
755 	chunksize = nbytes_to_hash;
756 	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
757 	    (chunksize > ctx->max_payload))
758 		chunksize = ctx->max_payload;
759 
760 	/*
761 	 * If this is not a final request and the request data is not a multiple
762 	 * of a full block, then simply park the extra data and prefix it to the
763 	 * data for the next request.
764 	 */
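	/*
	 * Worked example (illustrative, assuming no prior carry): with a
	 * 64-byte block size (e.g. SHA-256), a non-final update totalling
	 * 40 bytes is copied into hash_carry and -EAGAIN is returned; a
	 * non-final update totalling 100 bytes is rounded down so that
	 * 64 bytes go to the SPU and the remainder is handled with the
	 * next chunk.
	 */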
765 	if (!rctx->is_final) {
766 		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
767 		u16 new_len;  /* len of data to add to hash carry */
768 
769 		rem = chunksize % blocksize;   /* remainder */
770 		if (rem) {
771 			/* chunksize not a multiple of blocksize */
772 			chunksize -= rem;
773 			if (chunksize == 0) {
774 				/* Don't have a full block to submit to hw */
775 				new_len = rem - rctx->hash_carry_len;
776 				sg_copy_part_to_buf(req->src, dest, new_len,
777 						    rctx->src_sent);
778 				rctx->hash_carry_len = rem;
779 				flow_log("Exiting with hash carry len: %u\n",
780 					 rctx->hash_carry_len);
781 				packet_dump("  buf: ",
782 					    rctx->hash_carry,
783 					    rctx->hash_carry_len);
784 				return -EAGAIN;
785 			}
786 		}
787 	}
788 
789 	/* if we have hash carry, then prefix it to the data in this request */
790 	local_nbuf = rctx->hash_carry_len;
791 	rctx->hash_carry_len = 0;
792 	if (local_nbuf)
793 		tx_frag_num++;
794 	new_data_len = chunksize - local_nbuf;
795 
796 	/* Count number of sg entries to be used in this request */
797 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
798 				       new_data_len);
799 
800 	/* AES hashing keeps key size in type field, so need to copy it here */
801 	if (hash_parms.alg == HASH_ALG_AES)
802 		hash_parms.type = cipher_parms.type;
803 	else
804 		hash_parms.type = spu->spu_hash_type(rctx->total_sent);
805 
806 	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
807 					  hash_parms.type);
808 	hash_parms.digestsize =	digestsize;
809 
810 	/* update the indexes */
811 	rctx->total_sent += chunksize;
812 	/* data sent from the hash carry (prebuf) did not come from req->src */
813 	rctx->src_sent += new_data_len;
814 
815 	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
816 		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
817 							   hash_parms.mode,
818 							   chunksize,
819 							   blocksize);
820 
821 	/*
822 	 * If a non-first chunk, then include the digest returned from the
823 	 * previous chunk so that hw can add to it (except for AES types).
824 	 */
825 	if ((hash_parms.type == HASH_TYPE_UPDT) &&
826 	    (hash_parms.alg != HASH_ALG_AES)) {
827 		hash_parms.key_buf = rctx->incr_hash;
828 		hash_parms.key_len = digestsize;
829 	}
830 
831 	atomic64_add(chunksize, &iproc_priv.bytes_out);
832 
833 	flow_log("%s() final: %u nbuf: %u ",
834 		 __func__, rctx->is_final, local_nbuf);
835 
836 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
837 		flow_log("max_payload infinite\n");
838 	else
839 		flow_log("max_payload %u\n", ctx->max_payload);
840 
841 	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
842 
843 	/* Prepend SPU header with type 3 BCM header */
844 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
845 
846 	hash_parms.prebuf_len = local_nbuf;
847 	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
848 					      BCM_HDR_LEN,
849 					      &req_opts, &cipher_parms,
850 					      &hash_parms, &aead_parms,
851 					      new_data_len);
852 
853 	if (spu_hdr_len == 0) {
854 		pr_err("Failed to create SPU request header\n");
855 		return -EFAULT;
856 	}
857 
858 	/*
859 	 * Determine total length of padding required. Put all padding in one
860 	 * buffer.
861 	 */
862 	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
863 	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
864 				   0, 0, hash_parms.pad_len);
865 	if (spu->spu_tx_status_len())
866 		stat_pad_len = spu->spu_wordalign_padlen(db_size);
867 	if (stat_pad_len)
868 		rx_frag_num++;
869 	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
870 	if (pad_len) {
871 		tx_frag_num++;
872 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
873 				     hash_parms.pad_len, ctx->auth.alg,
874 				     ctx->auth.mode, rctx->total_sent,
875 				     stat_pad_len);
876 	}
877 
878 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
879 			      spu_hdr_len);
880 	packet_dump("    prebuf: ", rctx->hash_carry, local_nbuf);
881 	flow_log("Data:\n");
882 	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
883 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
884 
885 	/*
886 	 * Build mailbox message containing SPU request msg and rx buffers
887 	 * to catch response message
888 	 */
889 	memset(mssg, 0, sizeof(*mssg));
890 	mssg->type = BRCM_MESSAGE_SPU;
891 	mssg->ctx = rctx;	/* Will be returned in response */
892 
893 	/* Create rx scatterlist to catch result */
894 	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
895 				     stat_pad_len);
896 	if (err)
897 		return err;
898 
899 	/* Create tx scatterlist containing SPU request message */
900 	tx_frag_num += rctx->src_nents;
901 	if (spu->spu_tx_status_len())
902 		tx_frag_num++;
903 	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
904 				     local_nbuf, new_data_len, pad_len);
905 	if (err)
906 		return err;
907 
908 	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
909 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
910 		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
911 			/*
912 			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
913 			 * not in atomic context and we can wait and try again.
914 			 */
915 			retry_cnt++;
916 			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
917 			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
918 						mssg);
919 			atomic_inc(&iproc_priv.mb_no_spc);
920 		}
921 	}
922 	if (err < 0) {
923 		atomic_inc(&iproc_priv.mb_send_fail);
924 		return err;
925 	}
926 	return -EINPROGRESS;
927 }
928 
929 /**
930  * spu_hmac_outer_hash() - Request a synchronous software computation of the
931  * outer hash for an HMAC request.
932  * @req:  The HMAC request from the crypto API
933  * @ctx:  The session context
934  *
935  * Return: 0 if synchronous hash operation successful
936  *         -EINVAL if the hash algo is unrecognized
937  *         any other value indicates an error
938  */
939 static int spu_hmac_outer_hash(struct ahash_request *req,
940 			       struct iproc_ctx_s *ctx)
941 {
942 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
943 	unsigned int blocksize =
944 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
945 	int rc;
946 
947 	switch (ctx->auth.alg) {
948 	case HASH_ALG_MD5:
949 		rc = do_shash("md5", req->result, ctx->opad, blocksize,
950 			      req->result, ctx->digestsize, NULL, 0);
951 		break;
952 	case HASH_ALG_SHA1:
953 		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
954 			      req->result, ctx->digestsize, NULL, 0);
955 		break;
956 	case HASH_ALG_SHA224:
957 		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
958 			      req->result, ctx->digestsize, NULL, 0);
959 		break;
960 	case HASH_ALG_SHA256:
961 		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
962 			      req->result, ctx->digestsize, NULL, 0);
963 		break;
964 	case HASH_ALG_SHA384:
965 		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
966 			      req->result, ctx->digestsize, NULL, 0);
967 		break;
968 	case HASH_ALG_SHA512:
969 		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
970 			      req->result, ctx->digestsize, NULL, 0);
971 		break;
972 	default:
973 		pr_err("%s() Error : unknown hmac type\n", __func__);
974 		rc = -EINVAL;
975 	}
976 	return rc;
977 }
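/*
 * For reference: HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). The SPU has
 * already produced the inner hash in req->result; do_shash() above hashes
 * ctx->opad (the key block already XORed with the HMAC outer pad, prepared
 * when the key was set) followed by that inner digest to produce the final
 * HMAC value.
 */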
978 
979 /**
980  * ahash_req_done() - Process a hash result from the SPU hardware.
981  * @rctx: Crypto request context
982  *
983  * Return: 0 if successful
984  *         < 0 if an error
985  */
986 static int ahash_req_done(struct iproc_reqctx_s *rctx)
987 {
988 	struct spu_hw *spu = &iproc_priv.spu;
989 	struct crypto_async_request *areq = rctx->parent;
990 	struct ahash_request *req = ahash_request_cast(areq);
991 	struct iproc_ctx_s *ctx = rctx->ctx;
992 	int err;
993 
994 	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
995 
996 	if (spu->spu_type == SPU_TYPE_SPUM) {
997 		/* byte swap the output from the UPDT function to network byte
998 		 * order
999 		 */
1000 		if (ctx->auth.alg == HASH_ALG_MD5) {
1001 			__swab32s((u32 *)req->result);
1002 			__swab32s(((u32 *)req->result) + 1);
1003 			__swab32s(((u32 *)req->result) + 2);
1004 			__swab32s(((u32 *)req->result) + 3);
1005 			__swab32s(((u32 *)req->result) + 4);
1006 		}
1007 	}
1008 
1009 	flow_dump("  digest ", req->result, ctx->digestsize);
1010 
1011 	/* if this is an HMAC, then do the outer hash */
1012 	if (rctx->is_sw_hmac) {
1013 		err = spu_hmac_outer_hash(req, ctx);
1014 		if (err < 0)
1015 			return err;
1016 		flow_dump("  hmac: ", req->result, ctx->digestsize);
1017 	}
1018 
1019 	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
1020 		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
1021 		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
1022 	} else {
1023 		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
1024 		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
1025 	}
1026 
1027 	return 0;
1028 }
1029 
1030 /**
1031  * handle_ahash_resp() - Process a SPU response message for a hash request.
1032  * Checks if the entire crypto API request has been processed, and if so,
1033  * invokes post processing on the result.
1034  * @rctx: Crypto request context
1035  */
1036 static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
1037 {
1038 	struct iproc_ctx_s *ctx = rctx->ctx;
1039 #ifdef DEBUG
1040 	struct crypto_async_request *areq = rctx->parent;
1041 	struct ahash_request *req = ahash_request_cast(areq);
1042 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1043 	unsigned int blocksize =
1044 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
1045 #endif
1046 	/*
1047 	 * Save hash to use as input to next op if incremental. Might be copying
1048 	 * too much, but that's easier than figuring out actual digest size here
1049 	 */
1050 	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1051 
1052 	flow_log("%s() blocksize:%u digestsize:%u\n",
1053 		 __func__, blocksize, ctx->digestsize);
1054 
1055 	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
1056 
1057 	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1058 		ahash_req_done(rctx);
1059 }
1060 
1061 /**
1062  * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
1063  * a SPU response message for an AEAD request. Includes buffers to catch SPU
1064  * message headers and the response data.
1065  * @mssg:	mailbox message containing the receive sg
 * @req:	Crypto API request
1066  * @rctx:	crypto request context
1067  * @rx_frag_num: number of scatterlist elements required to hold the
1068  *		SPU response message
1069  * @assoc_len:	Length of associated data included in the crypto request
1070  * @ret_iv_len: Length of IV returned in response
1071  * @resp_len:	Number of bytes of response data expected to be written to
1072  *              dst buffer from crypto API
1073  * @digestsize: Length of hash digest, in bytes
1074  * @stat_pad_len: Number of bytes required to pad the STAT field to
1075  *		a 4-byte boundary
1076  *
1077  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1078  * when the request completes, whether the request is handled successfully or
1079  * there is an error.
1080  *
1081  * Return:
1082  *   0 if successful
1083  *   < 0 if an error
1084  */
1085 static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1086 				 struct aead_request *req,
1087 				 struct iproc_reqctx_s *rctx,
1088 				 u8 rx_frag_num,
1089 				 unsigned int assoc_len,
1090 				 u32 ret_iv_len, unsigned int resp_len,
1091 				 unsigned int digestsize, u32 stat_pad_len)
1092 {
1093 	struct spu_hw *spu = &iproc_priv.spu;
1094 	struct scatterlist *sg;	/* used to build sgs in mbox message */
1095 	struct iproc_ctx_s *ctx = rctx->ctx;
1096 	u32 datalen;		/* Number of bytes of response data expected */
1097 	u32 assoc_buf_len;
1098 	u8 data_padlen = 0;
1099 
1100 	if (ctx->is_rfc4543) {
1101 		/* RFC4543: only pad after data, not after AAD */
1102 		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1103 							  assoc_len + resp_len);
1104 		assoc_buf_len = assoc_len;
1105 	} else {
1106 		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1107 							  resp_len);
1108 		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1109 						assoc_len, ret_iv_len,
1110 						rctx->is_encrypt);
1111 	}
1112 
1113 	if (ctx->cipher.mode == CIPHER_MODE_CCM)
1114 		/* ICV (after data) must be in the next 32-bit word for CCM */
1115 		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1116 							 resp_len +
1117 							 data_padlen);
1118 
1119 	if (data_padlen)
1120 		/* have to catch gcm pad in separate buffer */
1121 		rx_frag_num++;
1122 
1123 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1124 				rctx->gfp);
1125 	if (!mssg->spu.dst)
1126 		return -ENOMEM;
1127 
1128 	sg = mssg->spu.dst;
1129 	sg_init_table(sg, rx_frag_num);
1130 
1131 	/* Space for SPU message header */
1132 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1133 
1134 	if (assoc_buf_len) {
1135 		/*
1136 		 * Don't write directly to req->dst, because SPU may pad the
1137 		 * assoc data in the response
1138 		 */
1139 		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1140 		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1141 	}
1142 
1143 	if (resp_len) {
1144 		/*
1145 		 * Copy in each dst sg entry from request, up to chunksize.
1146 		 * dst sg catches just the data. digest caught in separate buf.
1147 		 */
1148 		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1149 					 rctx->dst_nents, resp_len);
1150 		if (datalen < resp_len) {
1151 			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
1152 			       __func__, resp_len, datalen);
1153 			return -EFAULT;
1154 		}
1155 	}
1156 
1157 	/* If GCM/CCM data is padded, catch padding in separate buffer */
1158 	if (data_padlen) {
1159 		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1160 		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1161 	}
1162 
1163 	/* Always catch ICV in separate buffer */
1164 	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1165 
1166 	flow_log("stat_pad_len %u\n", stat_pad_len);
1167 	if (stat_pad_len) {
1168 		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1169 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1170 	}
1171 
1172 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1173 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1174 
1175 	return 0;
1176 }
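/*
 * For reference, the receive scatterlist built above has this layout (some
 * entries only when applicable):
 *
 *   [ SPU response header ]
 *   [ returned assoc data ]        (resp_aad, if assoc_buf_len != 0)
 *   [ response data -> dst sg ]    (resp_len bytes)
 *   [ GCM/CCM data padding ]       (if data_padlen != 0)
 *   [ digest / ICV ]
 *   [ STAT padding ]               (if stat_pad_len != 0)
 *   [ RX STATUS word ]
 */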
1177 
1178 /**
1179  * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
1180  * SPU request message for an AEAD request. Includes SPU message headers and the
1181  * request data.
1182  * @mssg:	mailbox message containing the transmit sg
1183  * @rctx:	crypto request context
1184  * @tx_frag_num: number of scatterlist elements required to construct the
1185  *		SPU request message
1186  * @spu_hdr_len: length of SPU message header in bytes
1187  * @assoc:	crypto API associated data scatterlist
1188  * @assoc_len:	length of associated data
1189  * @assoc_nents: number of scatterlist entries containing assoc data
1190  * @aead_iv_len: length of AEAD IV, if included
1191  * @chunksize:	Number of bytes of request data
1192  * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
1193  * @pad_len:	Number of pad bytes
1194  * @incl_icv:	If true, write separate ICV buffer after data and
1195  *              any padding
1196  *
1197  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1198  * when the request completes, whether the request is handled successfully or
1199  * there is an error.
1200  *
1201  * Return:
1202  *   0 if successful
1203  *   < 0 if an error
1204  */
1205 static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1206 				 struct iproc_reqctx_s *rctx,
1207 				 u8 tx_frag_num,
1208 				 u32 spu_hdr_len,
1209 				 struct scatterlist *assoc,
1210 				 unsigned int assoc_len,
1211 				 int assoc_nents,
1212 				 unsigned int aead_iv_len,
1213 				 unsigned int chunksize,
1214 				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
1215 {
1216 	struct spu_hw *spu = &iproc_priv.spu;
1217 	struct scatterlist *sg;	/* used to build sgs in mbox message */
1218 	struct scatterlist *assoc_sg = assoc;
1219 	struct iproc_ctx_s *ctx = rctx->ctx;
1220 	u32 datalen;		/* Number of bytes of data to write */
1221 	u32 written;		/* Number of bytes of data written */
1222 	u32 assoc_offset = 0;
1223 	u32 stat_len;
1224 
1225 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1226 				rctx->gfp);
1227 	if (!mssg->spu.src)
1228 		return -ENOMEM;
1229 
1230 	sg = mssg->spu.src;
1231 	sg_init_table(sg, tx_frag_num);
1232 
1233 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1234 		   BCM_HDR_LEN + spu_hdr_len);
1235 
1236 	if (assoc_len) {
1237 		/* Copy in each associated data sg entry from request */
1238 		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1239 					 assoc_nents, assoc_len);
1240 		if (written < assoc_len) {
1241 			pr_err("%s(): failed to copy assoc sg to mbox msg",
1242 			       __func__);
1243 			return -EFAULT;
1244 		}
1245 	}
1246 
1247 	if (aead_iv_len)
1248 		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1249 
1250 	if (aad_pad_len) {
1251 		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1252 		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1253 	}
1254 
1255 	datalen = chunksize;
1256 	if ((chunksize > ctx->digestsize) && incl_icv)
1257 		datalen -= ctx->digestsize;
1258 	if (datalen) {
1259 		/* For aead, a single msg should consume the entire src sg */
1260 		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1261 					 rctx->src_nents, datalen);
1262 		if (written < datalen) {
1263 			pr_err("%s(): failed to copy src sg to mbox msg",
1264 			       __func__);
1265 			return -EFAULT;
1266 		}
1267 	}
1268 
1269 	if (pad_len) {
1270 		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1271 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1272 	}
1273 
1274 	if (incl_icv)
1275 		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1276 
1277 	stat_len = spu->spu_tx_status_len();
1278 	if (stat_len) {
1279 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
1280 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1281 	}
1282 	return 0;
1283 }
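/*
 * For reference, the transmit scatterlist built above has this layout (some
 * entries only when applicable):
 *
 *   [ BCM header + SPU request header ]
 *   [ assoc data <- assoc sg ]     (assoc_len bytes)
 *   [ AEAD IV ]                    (if aead_iv_len != 0)
 *   [ AAD padding ]                (if aad_pad_len != 0)
 *   [ request data <- src sg ]     (chunksize, minus ICV when incl_icv)
 *   [ request padding ]            (if pad_len != 0)
 *   [ ICV ]                        (if incl_icv)
 *   [ TX STATUS word ]             (if the SPU uses a tx status field)
 */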
1284 
1285 /**
1286  * handle_aead_req() - Submit a SPU request message for the next chunk of the
1287  * current AEAD request.
1288  * @rctx:  Crypto request context
1289  *
1290  * Unlike other operation types, we assume the length of the request fits in
1291  * a single SPU request message. aead_enqueue() makes sure this is true.
1292  * Comments for other op types regarding threads applies here as well.
1293  *
1294  * Unlike incremental hash ops, where the SPU returns the entire hash for
1295  * truncated algs like SHA-224, the SPU returns just the truncated hash in
1296  * response to AEAD requests. So digestsize is always ctx->digestsize here.
1297  *
1298  * Return: -EINPROGRESS: crypto request has been accepted and result will be
1299  *			 returned asynchronously
1300  *         Any other value indicates an error
1301  */
1302 static int handle_aead_req(struct iproc_reqctx_s *rctx)
1303 {
1304 	struct spu_hw *spu = &iproc_priv.spu;
1305 	struct crypto_async_request *areq = rctx->parent;
1306 	struct aead_request *req = container_of(areq,
1307 						struct aead_request, base);
1308 	struct iproc_ctx_s *ctx = rctx->ctx;
1309 	int err;
1310 	unsigned int chunksize;
1311 	unsigned int resp_len;
1312 	u32 spu_hdr_len;
1313 	u32 db_size;
1314 	u32 stat_pad_len;
1315 	u32 pad_len;
1316 	struct brcm_message *mssg;	/* mailbox message */
1317 	struct spu_request_opts req_opts;
1318 	struct spu_cipher_parms cipher_parms;
1319 	struct spu_hash_parms hash_parms;
1320 	struct spu_aead_parms aead_parms;
1321 	int assoc_nents = 0;
1322 	bool incl_icv = false;
1323 	unsigned int digestsize = ctx->digestsize;
1324 	int retry_cnt = 0;
1325 
1326 	/* number of entries in src and dst sg. Always includes SPU msg header.
1327 	 */
1328 	u8 rx_frag_num = 2;	/* and STATUS */
1329 	u8 tx_frag_num = 1;
1330 
1331 	/* doing the whole thing at once */
1332 	chunksize = rctx->total_todo;
1333 
1334 	flow_log("%s: chunksize %u\n", __func__, chunksize);
1335 
1336 	memset(&req_opts, 0, sizeof(req_opts));
1337 	memset(&hash_parms, 0, sizeof(hash_parms));
1338 	memset(&aead_parms, 0, sizeof(aead_parms));
1339 
1340 	req_opts.is_inbound = !(rctx->is_encrypt);
1341 	req_opts.auth_first = ctx->auth_first;
1342 	req_opts.is_aead = true;
1343 	req_opts.is_esp = ctx->is_esp;
1344 
1345 	cipher_parms.alg = ctx->cipher.alg;
1346 	cipher_parms.mode = ctx->cipher.mode;
1347 	cipher_parms.type = ctx->cipher_type;
1348 	cipher_parms.key_buf = ctx->enckey;
1349 	cipher_parms.key_len = ctx->enckeylen;
1350 	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1351 	cipher_parms.iv_len = rctx->iv_ctr_len;
1352 
1353 	hash_parms.alg = ctx->auth.alg;
1354 	hash_parms.mode = ctx->auth.mode;
1355 	hash_parms.type = HASH_TYPE_NONE;
1356 	hash_parms.key_buf = (u8 *)ctx->authkey;
1357 	hash_parms.key_len = ctx->authkeylen;
1358 	hash_parms.digestsize = digestsize;
1359 
1360 	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1361 	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
1362 		hash_parms.key_len = SHA224_DIGEST_SIZE;
1363 
1364 	aead_parms.assoc_size = req->assoclen;
1365 	if (ctx->is_esp && !ctx->is_rfc4543) {
1366 		/*
1367 		 * The 8-byte IV is included in the assoc data of the request.
1368 		 * SPU2 expects the AAD to include just the SPI and seqno, so
1369 		 * subtract off the IV length.
1370 		 */
1371 		aead_parms.assoc_size -= GCM_ESP_IV_SIZE;
1372 
1373 		if (rctx->is_encrypt) {
1374 			aead_parms.return_iv = true;
1375 			aead_parms.ret_iv_len = GCM_ESP_IV_SIZE;
1376 			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1377 		}
1378 	} else {
1379 		aead_parms.ret_iv_len = 0;
1380 	}
1381 
1382 	/*
1383 	 * Count number of sg entries from the crypto API request that are to
1384 	 * be included in this mailbox message. For dst sg, don't count space
1385 	 * for digest. Digest gets caught in a separate buffer and copied back
1386 	 * to dst sg when processing response.
1387 	 */
1388 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1389 	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1390 	if (aead_parms.assoc_size)
1391 		assoc_nents = spu_sg_count(rctx->assoc, 0,
1392 					   aead_parms.assoc_size);
1393 
1394 	mssg = &rctx->mb_mssg;
1395 
1396 	rctx->total_sent = chunksize;
1397 	rctx->src_sent = chunksize;
1398 	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1399 				    aead_parms.assoc_size,
1400 				    aead_parms.ret_iv_len,
1401 				    rctx->is_encrypt))
1402 		rx_frag_num++;
1403 
1404 	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1405 						rctx->iv_ctr_len);
1406 
1407 	if (ctx->auth.alg == HASH_ALG_AES)
1408 		hash_parms.type = ctx->cipher_type;
1409 
1410 	/* General case AAD padding (CCM and RFC4543 special cases below) */
1411 	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1412 						 aead_parms.assoc_size);
1413 
1414 	/* General case data padding (CCM decrypt special case below) */
1415 	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1416 							   chunksize);
1417 
1418 	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1419 		/*
1420 		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
1421 		 * 128-bit aligned
1422 		 */
1423 		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1424 					 ctx->cipher.mode,
1425 					 aead_parms.assoc_size + 2);
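		/*
		 * For example (illustrative), with a 20-byte AAD, 22 bytes
		 * must be rounded up to the next 16-byte boundary (32), so
		 * aad_pad_len would be 10.
		 */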
1426 
1427 		/*
1428 		 * When decrypting CCM, pad without including the size of the
1429 		 * ICV, which is tacked on to the end of the chunk.
1430 		 */
1431 		if (!rctx->is_encrypt)
1432 			aead_parms.data_pad_len =
1433 				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1434 							chunksize - digestsize);
1435 
1436 		/* CCM also requires software to rewrite portions of IV: */
1437 		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1438 				       chunksize, rctx->is_encrypt,
1439 				       ctx->is_esp);
1440 	}
1441 
1442 	if (ctx->is_rfc4543) {
1443 		/*
1444 		 * RFC4543: data is included in AAD, so don't pad after AAD
1445 		 * and pad data based on both AAD + data size
1446 		 */
1447 		aead_parms.aad_pad_len = 0;
1448 		if (!rctx->is_encrypt)
1449 			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1450 					ctx->cipher.mode,
1451 					aead_parms.assoc_size + chunksize -
1452 					digestsize);
1453 		else
1454 			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1455 					ctx->cipher.mode,
1456 					aead_parms.assoc_size + chunksize);
1457 
1458 		req_opts.is_rfc4543 = true;
1459 	}
1460 
1461 	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1462 		incl_icv = true;
1463 		tx_frag_num++;
1464 		/* Copy ICV from end of src scatterlist to digest buf */
1465 		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1466 				    req->assoclen + rctx->total_sent -
1467 				    digestsize);
1468 	}
1469 
1470 	atomic64_add(chunksize, &iproc_priv.bytes_out);
1471 
1472 	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1473 
1474 	/* Prepend SPU header with type 3 BCM header */
1475 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1476 
1477 	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1478 					      BCM_HDR_LEN, &req_opts,
1479 					      &cipher_parms, &hash_parms,
1480 					      &aead_parms, chunksize);
1481 
1482 	/* Determine total length of padding. Put all padding in one buffer. */
1483 	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1484 				   chunksize, aead_parms.aad_pad_len,
1485 				   aead_parms.data_pad_len, 0);
1486 
1487 	stat_pad_len = spu->spu_wordalign_padlen(db_size);
1488 
1489 	if (stat_pad_len)
1490 		rx_frag_num++;
1491 	pad_len = aead_parms.data_pad_len + stat_pad_len;
1492 	if (pad_len) {
1493 		tx_frag_num++;
1494 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1495 				     aead_parms.data_pad_len, 0,
1496 				     ctx->auth.alg, ctx->auth.mode,
1497 				     rctx->total_sent, stat_pad_len);
1498 	}
1499 
1500 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1501 			      spu_hdr_len);
1502 	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1503 	packet_dump("    aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1504 	packet_log("BD:\n");
1505 	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1506 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1507 
1508 	/*
1509 	 * Build mailbox message containing SPU request msg and rx buffers
1510 	 * to catch response message
1511 	 */
1512 	memset(mssg, 0, sizeof(*mssg));
1513 	mssg->type = BRCM_MESSAGE_SPU;
1514 	mssg->ctx = rctx;	/* Will be returned in response */
1515 
1516 	/* Create rx scatterlist to catch result */
1517 	rx_frag_num += rctx->dst_nents;
1518 	resp_len = chunksize;
1519 
1520 	/*
1521 	 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
1522 	 * padding. Have to for SHA-224 and other truncated SHAs because SPU
1523 	 * sends entire digest back.
1524 	 */
1525 	rx_frag_num++;
1526 
1527 	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1528 	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1529 		/*
1530 		 * Input is ciphertext plus ICV, but the ICV is not
1531 		 * included in the output.
1532 		 */
1533 		resp_len -= ctx->digestsize;
1534 		if (resp_len == 0)
1535 			/* no rx frags to catch output data */
1536 			rx_frag_num -= rctx->dst_nents;
1537 	}
1538 
1539 	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1540 				    aead_parms.assoc_size,
1541 				    aead_parms.ret_iv_len, resp_len, digestsize,
1542 				    stat_pad_len);
1543 	if (err)
1544 		return err;
1545 
1546 	/* Create tx scatterlist containing SPU request message */
1547 	tx_frag_num += rctx->src_nents;
1548 	tx_frag_num += assoc_nents;
1549 	if (aead_parms.aad_pad_len)
1550 		tx_frag_num++;
1551 	if (aead_parms.iv_len)
1552 		tx_frag_num++;
1553 	if (spu->spu_tx_status_len())
1554 		tx_frag_num++;
1555 	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1556 				    rctx->assoc, aead_parms.assoc_size,
1557 				    assoc_nents, aead_parms.iv_len, chunksize,
1558 				    aead_parms.aad_pad_len, pad_len, incl_icv);
1559 	if (err)
1560 		return err;
1561 
1562 	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
1563 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
1564 		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
1565 			/*
1566 			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
1567 			 * not in atomic context and we can wait and try again.
1568 			 */
1569 			retry_cnt++;
1570 			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
1571 			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
1572 						mssg);
1573 			atomic_inc(&iproc_priv.mb_no_spc);
1574 		}
1575 	}
1576 	if (err < 0) {
1577 		atomic_inc(&iproc_priv.mb_send_fail);
1578 		return err;
1579 	}
1580 
1581 	return -EINPROGRESS;
1582 }
1583 
1584 /**
1585  * handle_aead_resp() - Process a SPU response message for an AEAD request.
1586  * @rctx:  Crypto request context
1587  */
1588 static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1589 {
1590 	struct spu_hw *spu = &iproc_priv.spu;
1591 	struct crypto_async_request *areq = rctx->parent;
1592 	struct aead_request *req = container_of(areq,
1593 						struct aead_request, base);
1594 	struct iproc_ctx_s *ctx = rctx->ctx;
1595 	u32 payload_len;
1596 	unsigned int icv_offset;
1597 	u32 result_len;
1598 
1599 	/* See how much data was returned */
1600 	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1601 	flow_log("payload_len %u\n", payload_len);
1602 
1603 	/* only count payload */
1604 	atomic64_add(payload_len, &iproc_priv.bytes_in);
1605 
1606 	if (req->assoclen)
1607 		packet_dump("  assoc_data ", rctx->msg_buf.a.resp_aad,
1608 			    req->assoclen);
1609 
1610 	/*
1611 	 * If encrypting, copy the ICV back to the destination buffer. In the
1612 	 * decrypt case, the SPU gives us back the digest, but the crypto
1613 	 * API doesn't expect the ICV in the dst buffer.
1614 	 */
1615 	result_len = req->cryptlen;
1616 	if (rctx->is_encrypt) {
1617 		icv_offset = req->assoclen + rctx->total_sent;
1618 		packet_dump("  ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1619 		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1620 		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1621 				      ctx->digestsize, icv_offset);
1622 		result_len += ctx->digestsize;
1623 	}
1624 
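	/*
	 * result_len (the payload, plus the appended ICV when encrypting) is
	 * only used for the debug dump of the response data below.
	 */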
1625 	packet_log("response data:  ");
1626 	dump_sg(req->dst, req->assoclen, result_len);
1627 
1628 	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1629 	if (ctx->cipher.alg == CIPHER_ALG_AES) {
1630 		if (ctx->cipher.mode == CIPHER_MODE_CCM)
1631 			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1632 		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1633 			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1634 		else
1635 			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1636 	} else {
1637 		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1638 	}
1639 }
1640 
1641 /**
1642  * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
1643  * @rctx:  request context
1644  *
1645  * Mailbox scatterlists are allocated for each chunk. So free them after
1646  * processing each chunk.
1647  */
1648 static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1649 {
1650 	/* mailbox message used to tx request */
1651 	struct brcm_message *mssg = &rctx->mb_mssg;
1652 
1653 	kfree(mssg->spu.src);
1654 	kfree(mssg->spu.dst);
1655 	memset(mssg, 0, sizeof(struct brcm_message));
1656 }
1657 
1658 /**
1659  * finish_req() - Used to invoke the complete callback from the requester when
1660  * a request has been handled asynchronously.
1661  * @rctx:  Request context
1662  * @err:   Indicates whether the request was successful or not
1663  *
1664  * Ensures that cleanup has been done for the request
1665  */
1666 static void finish_req(struct iproc_reqctx_s *rctx, int err)
1667 {
1668 	struct crypto_async_request *areq = rctx->parent;
1669 
1670 	flow_log("%s() err:%d\n\n", __func__, err);
1671 
1672 	/* No harm done if already called */
1673 	spu_chunk_cleanup(rctx);
1674 
1675 	if (areq)
1676 		areq->complete(areq, err);
1677 }
1678 
1679 /**
1680  * spu_rx_callback() - Callback from mailbox framework with a SPU response.
1681  * @cl:		mailbox client structure for SPU driver
1682  * @msg:	mailbox message containing SPU response
1683  */
1684 static void spu_rx_callback(struct mbox_client *cl, void *msg)
1685 {
1686 	struct spu_hw *spu = &iproc_priv.spu;
1687 	struct brcm_message *mssg = msg;
1688 	struct iproc_reqctx_s *rctx;
1689 	struct iproc_ctx_s *ctx;
1690 	struct crypto_async_request *areq;
1691 	int err = 0;
1692 
1693 	rctx = mssg->ctx;
1694 	if (unlikely(!rctx)) {
1695 		/* This is fatal */
1696 		pr_err("%s(): no request context", __func__);
1697 		err = -EFAULT;
1698 		goto cb_finish;
1699 	}
1700 	areq = rctx->parent;
1701 	ctx = rctx->ctx;
1702 
1703 	/* process the SPU status */
1704 	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1705 	if (err != 0) {
1706 		if (err == SPU_INVALID_ICV)
1707 			atomic_inc(&iproc_priv.bad_icv);
1708 		err = -EBADMSG;
1709 		goto cb_finish;
1710 	}
1711 
1712 	/* Process the SPU response message */
1713 	switch (rctx->ctx->alg->type) {
1714 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
1715 		handle_ablkcipher_resp(rctx);
1716 		break;
1717 	case CRYPTO_ALG_TYPE_AHASH:
1718 		handle_ahash_resp(rctx);
1719 		break;
1720 	case CRYPTO_ALG_TYPE_AEAD:
1721 		handle_aead_resp(rctx);
1722 		break;
1723 	default:
1724 		err = -EINVAL;
1725 		goto cb_finish;
1726 	}
1727 
1728 	/*
1729 	 * If this response does not complete the request, then send the next
1730 	 * request chunk.
1731 	 */
1732 	if (rctx->total_sent < rctx->total_todo) {
1733 		/* Deallocate anything specific to previous chunk */
1734 		spu_chunk_cleanup(rctx);
1735 
1736 		switch (rctx->ctx->alg->type) {
1737 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
1738 			err = handle_ablkcipher_req(rctx);
1739 			break;
1740 		case CRYPTO_ALG_TYPE_AHASH:
1741 			err = handle_ahash_req(rctx);
1742 			if (err == -EAGAIN)
1743 				/*
1744 				 * We saved data in the hash carry, but tell the
1745 				 * crypto API we successfully completed the request.
1746 				 */
1747 				err = 0;
1748 			break;
1749 		case CRYPTO_ALG_TYPE_AEAD:
1750 			err = handle_aead_req(rctx);
1751 			break;
1752 		default:
1753 			err = -EINVAL;
1754 		}
1755 
1756 		if (err == -EINPROGRESS)
1757 			/* Successfully submitted request for next chunk */
1758 			return;
1759 	}
1760 
1761 cb_finish:
1762 	finish_req(rctx, err);
1763 }
1764 
1765 /* ==================== Kernel Cryptographic API ==================== */
1766 
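/*
 * The handlers below are not called directly; they are reached through the
 * kernel crypto API once the algorithms in driver_algs[] are registered.
 * Roughly (a sketch with error handling omitted, and assuming this driver's
 * implementation wins the priority-based algorithm selection), a cipher user
 * would do something like:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *areq =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, keylen);    -> ablkcipher_setkey()
 *	ablkcipher_request_set_crypt(areq, src, dst, nbytes, iv);
 *	crypto_ablkcipher_encrypt(areq);               -> ablkcipher_encrypt()
 */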
1767 /**
1768  * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request.
1769  * @req:	Crypto API request
1770  * @encrypt:	true if encrypting; false if decrypting
1771  *
1772  * Return: -EINPROGRESS if request accepted and result will be returned
1773  *			asynchronously
1774  *	   < 0 if an error
1775  */
1776 static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
1777 {
1778 	struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
1779 	struct iproc_ctx_s *ctx =
1780 	    crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
1781 	int err;
1782 
1783 	flow_log("%s() enc:%u\n", __func__, encrypt);
1784 
1785 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1786 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1787 	rctx->parent = &req->base;
1788 	rctx->is_encrypt = encrypt;
1789 	rctx->bd_suppress = false;
1790 	rctx->total_todo = req->nbytes;
1791 	rctx->src_sent = 0;
1792 	rctx->total_sent = 0;
1793 	rctx->total_received = 0;
1794 	rctx->ctx = ctx;
1795 
1796 	/* Initialize current position in src and dst scatterlists */
1797 	rctx->src_sg = req->src;
1798 	rctx->src_nents = 0;
1799 	rctx->src_skip = 0;
1800 	rctx->dst_sg = req->dst;
1801 	rctx->dst_nents = 0;
1802 	rctx->dst_skip = 0;
1803 
1804 	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
1805 	    ctx->cipher.mode == CIPHER_MODE_CTR ||
1806 	    ctx->cipher.mode == CIPHER_MODE_OFB ||
1807 	    ctx->cipher.mode == CIPHER_MODE_XTS ||
1808 	    ctx->cipher.mode == CIPHER_MODE_GCM ||
1809 	    ctx->cipher.mode == CIPHER_MODE_CCM) {
1810 		rctx->iv_ctr_len =
1811 		    crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
1812 		memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
1813 	} else {
1814 		rctx->iv_ctr_len = 0;
1815 	}
1816 
1817 	/* Choose a SPU to process this request */
1818 	rctx->chan_idx = select_channel();
1819 	err = handle_ablkcipher_req(rctx);
1820 	if (err != -EINPROGRESS)
1821 		/* synchronous result */
1822 		spu_chunk_cleanup(rctx);
1823 
1824 	return err;
1825 }
1826 
1827 static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1828 		      unsigned int keylen)
1829 {
1830 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1831 	u32 tmp[DES_EXPKEY_WORDS];
1832 
1833 	if (keylen == DES_KEY_SIZE) {
1834 		if (des_ekey(tmp, key) == 0) {
1835 			if (crypto_ablkcipher_get_flags(cipher) &
1836 			    CRYPTO_TFM_REQ_WEAK_KEY) {
1837 				u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
1838 
1839 				crypto_ablkcipher_set_flags(cipher, flags);
1840 				return -EINVAL;
1841 			}
1842 		}
1843 
1844 		ctx->cipher_type = CIPHER_TYPE_DES;
1845 	} else {
1846 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1847 		return -EINVAL;
1848 	}
1849 	return 0;
1850 }
1851 
1852 static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1853 			   unsigned int keylen)
1854 {
1855 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1856 
1857 	if (keylen == (DES_KEY_SIZE * 3)) {
1858 		const u32 *K = (const u32 *)key;
1859 		u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
1860 
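		/*
		 * Reject degenerate 3DES keys: if K1 == K2 or K2 == K3, the
		 * triple-DES operation collapses to the strength of single
		 * DES.
		 */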
1861 		if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
1862 		    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
1863 			crypto_ablkcipher_set_flags(cipher, flags);
1864 			return -EINVAL;
1865 		}
1866 
1867 		ctx->cipher_type = CIPHER_TYPE_3DES;
1868 	} else {
1869 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1870 		return -EINVAL;
1871 	}
1872 	return 0;
1873 }
1874 
1875 static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1876 		      unsigned int keylen)
1877 {
1878 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1879 
1880 	if (ctx->cipher.mode == CIPHER_MODE_XTS)
1881 		/* XTS includes two keys of equal length */
1882 		keylen = keylen / 2;
1883 
1884 	switch (keylen) {
1885 	case AES_KEYSIZE_128:
1886 		ctx->cipher_type = CIPHER_TYPE_AES128;
1887 		break;
1888 	case AES_KEYSIZE_192:
1889 		ctx->cipher_type = CIPHER_TYPE_AES192;
1890 		break;
1891 	case AES_KEYSIZE_256:
1892 		ctx->cipher_type = CIPHER_TYPE_AES256;
1893 		break;
1894 	default:
1895 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1896 		return -EINVAL;
1897 	}
1898 	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1899 		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1900 	return 0;
1901 }
1902 
1903 static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1904 		      unsigned int keylen)
1905 {
1906 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1907 	int i;
1908 
1909 	ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
1910 
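	/*
	 * Build the key blob consumed by the hardware: an initial RC4 state
	 * header (ARC4_STATE_SIZE bytes holding the i and j indices, zeroed
	 * here), followed by the key bytes repeated to fill
	 * ARC4_MAX_KEY_SIZE.
	 */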
1911 	ctx->enckey[0] = 0x00;	/* 0x00 */
1912 	ctx->enckey[1] = 0x00;	/* i    */
1913 	ctx->enckey[2] = 0x00;	/* 0x00 */
1914 	ctx->enckey[3] = 0x00;	/* j    */
1915 	for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
1916 		ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
1917 
1918 	ctx->cipher_type = CIPHER_TYPE_INIT;
1919 
1920 	return 0;
1921 }
1922 
1923 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1924 			     unsigned int keylen)
1925 {
1926 	struct spu_hw *spu = &iproc_priv.spu;
1927 	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1928 	struct spu_cipher_parms cipher_parms;
1929 	u32 alloc_len = 0;
1930 	int err;
1931 
1932 	flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
1933 	flow_dump("  key: ", key, keylen);
1934 
1935 	switch (ctx->cipher.alg) {
1936 	case CIPHER_ALG_DES:
1937 		err = des_setkey(cipher, key, keylen);
1938 		break;
1939 	case CIPHER_ALG_3DES:
1940 		err = threedes_setkey(cipher, key, keylen);
1941 		break;
1942 	case CIPHER_ALG_AES:
1943 		err = aes_setkey(cipher, key, keylen);
1944 		break;
1945 	case CIPHER_ALG_RC4:
1946 		err = rc4_setkey(cipher, key, keylen);
1947 		break;
1948 	default:
1949 		pr_err("%s() Error: unknown cipher alg\n", __func__);
1950 		err = -EINVAL;
1951 	}
1952 	if (err)
1953 		return err;
1954 
1955 	/* RC4 already populated ctx->enckey */
1956 	if (ctx->cipher.alg != CIPHER_ALG_RC4) {
1957 		memcpy(ctx->enckey, key, keylen);
1958 		ctx->enckeylen = keylen;
1959 	}
1960 	/* SPU needs XTS keys in the reverse order the crypto API presents */
1961 	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
1962 	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
1963 		unsigned int xts_keylen = keylen / 2;
1964 
1965 		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
1966 		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
1967 	}
1968 
1969 	if (spu->spu_type == SPU_TYPE_SPUM)
1970 		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
1971 	else if (spu->spu_type == SPU_TYPE_SPU2)
1972 		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
1973 	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
1974 	cipher_parms.iv_buf = NULL;
1975 	cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
1976 	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1977 
1978 	cipher_parms.alg = ctx->cipher.alg;
1979 	cipher_parms.mode = ctx->cipher.mode;
1980 	cipher_parms.type = ctx->cipher_type;
1981 	cipher_parms.key_buf = ctx->enckey;
1982 	cipher_parms.key_len = ctx->enckeylen;
1983 
1984 	/* Prepend SPU request message with BCM header */
1985 	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1986 	ctx->spu_req_hdr_len =
1987 	    spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
1988 				     &cipher_parms);
1989 
1990 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
1991 							  ctx->enckeylen,
1992 							  false);
1993 
1994 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1995 
1996 	return 0;
1997 }
1998 
1999 static int ablkcipher_encrypt(struct ablkcipher_request *req)
2000 {
2001 	flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
2002 
2003 	return ablkcipher_enqueue(req, true);
2004 }
2005 
2006 static int ablkcipher_decrypt(struct ablkcipher_request *req)
2007 {
2008 	flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
2009 	return ablkcipher_enqueue(req, false);
2010 }
2011 
2012 static int ahash_enqueue(struct ahash_request *req)
2013 {
2014 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2015 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2016 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2017 	int err = 0;
2018 	const char *alg_name;
2019 
2020 	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
2021 
2022 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2023 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2024 	rctx->parent = &req->base;
2025 	rctx->ctx = ctx;
2026 	rctx->bd_suppress = true;
2027 	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2028 
2029 	/* Initialize position in src scatterlist */
2030 	rctx->src_sg = req->src;
2031 	rctx->src_skip = 0;
2032 	rctx->src_nents = 0;
2033 	rctx->dst_sg = NULL;
2034 	rctx->dst_skip = 0;
2035 	rctx->dst_nents = 0;
2036 
2037 	/* SPU2 hardware does not compute hash of zero length data */
2038 	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
2039 	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
2040 		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2041 		flow_log("Doing %sfinal %s zero-len hash request in software\n",
2042 			 rctx->is_final ? "" : "non-", alg_name);
2043 		err = do_shash((unsigned char *)alg_name, req->result,
2044 			       NULL, 0, NULL, 0, ctx->authkey,
2045 			       ctx->authkeylen);
2046 		if (err < 0)
2047 			flow_log("Hash request failed with error %d\n", err);
2048 		return err;
2049 	}
2050 	/* Choose a SPU to process this request */
2051 	rctx->chan_idx = select_channel();
2052 
2053 	err = handle_ahash_req(rctx);
2054 	if (err != -EINPROGRESS)
2055 		/* synchronous result */
2056 		spu_chunk_cleanup(rctx);
2057 
2058 	if (err == -EAGAIN)
2059 		/*
2060 		 * We saved data in the hash carry, but tell the crypto API
2061 		 * we successfully completed the request.
2062 		 */
2063 		err = 0;
2064 
2065 	return err;
2066 }
2067 
2068 static int __ahash_init(struct ahash_request *req)
2069 {
2070 	struct spu_hw *spu = &iproc_priv.spu;
2071 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2072 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2073 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2074 
2075 	flow_log("%s()\n", __func__);
2076 
2077 	/* Initialize the context */
2078 	rctx->hash_carry_len = 0;
2079 	rctx->is_final = 0;
2080 
2081 	rctx->total_todo = 0;
2082 	rctx->src_sent = 0;
2083 	rctx->total_sent = 0;
2084 	rctx->total_received = 0;
2085 
2086 	ctx->digestsize = crypto_ahash_digestsize(tfm);
2087 	/* If we add a hash whose digest is larger, catch it here. */
2088 	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
2089 
2090 	rctx->is_sw_hmac = false;
2091 
2092 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
2093 							  true);
2094 
2095 	return 0;
2096 }
2097 
2098 /**
2099  * spu_no_incr_hash() - Determine whether incremental hashing is supported.
2100  * @ctx:  Crypto session context
2101  *
2102  * SPU-2 does not support incremental hashing (we'll have to revisit this and
2103  * condition it on chip revision or a device tree entry if future versions do
2104  * support incremental hashing)
2105  *
2106  * SPU-M also doesn't support incremental hashing of AES-XCBC
2107  *
2108  * Return: true if incremental hashing is not supported
2109  *         false otherwise
2110  */
2111 bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2112 {
2113 	struct spu_hw *spu = &iproc_priv.spu;
2114 
2115 	if (spu->spu_type == SPU_TYPE_SPU2)
2116 		return true;
2117 
2118 	if ((ctx->auth.alg == HASH_ALG_AES) &&
2119 	    (ctx->auth.mode == HASH_MODE_XCBC))
2120 		return true;
2121 
2122 	/* Otherwise, incremental hashing is supported */
2123 	return false;
2124 }
2125 
2126 static int ahash_init(struct ahash_request *req)
2127 {
2128 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2129 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2130 	const char *alg_name;
2131 	struct crypto_shash *hash;
2132 	int ret;
2133 	gfp_t gfp;
2134 
2135 	if (spu_no_incr_hash(ctx)) {
2136 		/*
2137 		 * If we get an incremental hashing request and it's not
2138 		 * supported by the hardware, we need to handle it in software
2139 		 * by calling synchronous hash functions.
2140 		 */
2141 		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2142 		hash = crypto_alloc_shash(alg_name, 0, 0);
2143 		if (IS_ERR(hash)) {
2144 			ret = PTR_ERR(hash);
2145 			goto err;
2146 		}
2147 
2148 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2149 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2150 		ctx->shash = kmalloc(sizeof(*ctx->shash) +
2151 				     crypto_shash_descsize(hash), gfp);
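		/*
		 * The shash descriptor is followed in memory by the
		 * algorithm's private state, hence the descsize() add-on.
		 */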
2152 		if (!ctx->shash) {
2153 			ret = -ENOMEM;
2154 			goto err_hash;
2155 		}
2156 		ctx->shash->tfm = hash;
2157 		ctx->shash->flags = 0;
2158 
2159 		/* Set the key using data we already have from setkey */
2160 		if (ctx->authkeylen > 0) {
2161 			ret = crypto_shash_setkey(hash, ctx->authkey,
2162 						  ctx->authkeylen);
2163 			if (ret)
2164 				goto err_shash;
2165 		}
2166 
2167 		/* Initialize hash w/ this key and other params */
2168 		ret = crypto_shash_init(ctx->shash);
2169 		if (ret)
2170 			goto err_shash;
2171 	} else {
2172 		/* Otherwise call the internal function which uses SPU hw */
2173 		ret = __ahash_init(req);
2174 	}
2175 
2176 	return ret;
2177 
2178 err_shash:
2179 	kfree(ctx->shash);
2180 err_hash:
2181 	crypto_free_shash(hash);
2182 err:
2183 	return ret;
2184 }
2185 
2186 static int __ahash_update(struct ahash_request *req)
2187 {
2188 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2189 
2190 	flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2191 
2192 	if (!req->nbytes)
2193 		return 0;
2194 	rctx->total_todo += req->nbytes;
2195 	rctx->src_sent = 0;
2196 
2197 	return ahash_enqueue(req);
2198 }
2199 
2200 static int ahash_update(struct ahash_request *req)
2201 {
2202 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2203 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2204 	u8 *tmpbuf;
2205 	int ret;
2206 	int nents;
2207 	gfp_t gfp;
2208 
2209 	if (spu_no_incr_hash(ctx)) {
2210 		/*
2211 		 * If we get an incremental hashing request and it's not
2212 		 * supported by the hardware, we need to handle it in software
2213 		 * by calling synchronous hash functions.
2214 		 */
2215 		if (req->src)
2216 			nents = sg_nents(req->src);
2217 		else
2218 			return -EINVAL;
2219 
2220 		/* Copy data from req scatterlist to tmp buffer */
2221 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2222 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2223 		tmpbuf = kmalloc(req->nbytes, gfp);
2224 		if (!tmpbuf)
2225 			return -ENOMEM;
2226 
2227 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2228 				req->nbytes) {
2229 			kfree(tmpbuf);
2230 			return -EINVAL;
2231 		}
2232 
2233 		/* Call synchronous update */
2234 		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2235 		kfree(tmpbuf);
2236 	} else {
2237 		/* Otherwise call the internal function which uses SPU hw */
2238 		ret = __ahash_update(req);
2239 	}
2240 
2241 	return ret;
2242 }
2243 
2244 static int __ahash_final(struct ahash_request *req)
2245 {
2246 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2247 
2248 	flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2249 
2250 	rctx->is_final = 1;
2251 
2252 	return ahash_enqueue(req);
2253 }
2254 
2255 static int ahash_final(struct ahash_request *req)
2256 {
2257 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2258 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2259 	int ret;
2260 
2261 	if (spu_no_incr_hash(ctx)) {
2262 		/*
2263 		 * If we get an incremental hashing request and it's not
2264 		 * supported by the hardware, we need to handle it in software
2265 		 * by calling synchronous hash functions.
2266 		 */
2267 		ret = crypto_shash_final(ctx->shash, req->result);
2268 
2269 		/* Done with hash, can deallocate it now */
2270 		crypto_free_shash(ctx->shash->tfm);
2271 		kfree(ctx->shash);
2272 
2273 	} else {
2274 		/* Otherwise call the internal function which uses SPU hw */
2275 		ret = __ahash_final(req);
2276 	}
2277 
2278 	return ret;
2279 }
2280 
2281 static int __ahash_finup(struct ahash_request *req)
2282 {
2283 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2284 
2285 	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2286 
2287 	rctx->total_todo += req->nbytes;
2288 	rctx->src_sent = 0;
2289 	rctx->is_final = 1;
2290 
2291 	return ahash_enqueue(req);
2292 }
2293 
2294 static int ahash_finup(struct ahash_request *req)
2295 {
2296 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2297 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2298 	u8 *tmpbuf;
2299 	int ret;
2300 	int nents;
2301 	gfp_t gfp;
2302 
2303 	if (spu_no_incr_hash(ctx)) {
2304 		/*
2305 		 * If we get an incremental hashing request and it's not
2306 		 * supported by the hardware, we need to handle it in software
2307 		 * by calling synchronous hash functions.
2308 		 */
2309 		if (req->src) {
2310 			nents = sg_nents(req->src);
2311 		} else {
2312 			ret = -EINVAL;
2313 			goto ahash_finup_exit;
2314 		}
2315 
2316 		/* Copy data from req scatterlist to tmp buffer */
2317 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2318 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2319 		tmpbuf = kmalloc(req->nbytes, gfp);
2320 		if (!tmpbuf) {
2321 			ret = -ENOMEM;
2322 			goto ahash_finup_exit;
2323 		}
2324 
2325 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2326 				req->nbytes) {
2327 			ret = -EINVAL;
2328 			goto ahash_finup_free;
2329 		}
2330 
2331 		/* Call synchronous update */
2332 		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2333 					 req->result);
2334 	} else {
2335 		/* Otherwise call the internal function which uses SPU hw */
2336 		return __ahash_finup(req);
2337 	}
2338 ahash_finup_free:
2339 	kfree(tmpbuf);
2340 
2341 ahash_finup_exit:
2342 	/* Done with hash, can deallocate it now */
2343 	crypto_free_shash(ctx->shash->tfm);
2344 	kfree(ctx->shash);
2345 	return ret;
2346 }
2347 
2348 static int ahash_digest(struct ahash_request *req)
2349 {
2350 	int err = 0;
2351 
2352 	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2353 
2354 	/* whole thing at once */
2355 	err = __ahash_init(req);
2356 	if (!err)
2357 		err = __ahash_finup(req);
2358 
2359 	return err;
2360 }
2361 
2362 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2363 			unsigned int keylen)
2364 {
2365 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2366 
2367 	flow_log("%s() ahash:%p key:%p keylen:%u\n",
2368 		 __func__, ahash, key, keylen);
2369 	flow_dump("  key: ", key, keylen);
2370 
2371 	if (ctx->auth.alg == HASH_ALG_AES) {
2372 		switch (keylen) {
2373 		case AES_KEYSIZE_128:
2374 			ctx->cipher_type = CIPHER_TYPE_AES128;
2375 			break;
2376 		case AES_KEYSIZE_192:
2377 			ctx->cipher_type = CIPHER_TYPE_AES192;
2378 			break;
2379 		case AES_KEYSIZE_256:
2380 			ctx->cipher_type = CIPHER_TYPE_AES256;
2381 			break;
2382 		default:
2383 			pr_err("%s() Error: Invalid key length\n", __func__);
2384 			return -EINVAL;
2385 		}
2386 	} else {
2387 		pr_err("%s() Error: unknown hash alg\n", __func__);
2388 		return -EINVAL;
2389 	}
2390 	memcpy(ctx->authkey, key, keylen);
2391 	ctx->authkeylen = keylen;
2392 
2393 	return 0;
2394 }
2395 
2396 static int ahash_export(struct ahash_request *req, void *out)
2397 {
2398 	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2399 	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2400 
2401 	spu_exp->total_todo = rctx->total_todo;
2402 	spu_exp->total_sent = rctx->total_sent;
2403 	spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2404 	memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2405 	spu_exp->hash_carry_len = rctx->hash_carry_len;
2406 	memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2407 
2408 	return 0;
2409 }
2410 
2411 static int ahash_import(struct ahash_request *req, const void *in)
2412 {
2413 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2414 	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2415 
2416 	rctx->total_todo = spu_exp->total_todo;
2417 	rctx->total_sent = spu_exp->total_sent;
2418 	rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2419 	memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2420 	rctx->hash_carry_len = spu_exp->hash_carry_len;
2421 	memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2422 
2423 	return 0;
2424 }
2425 
2426 static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2427 			     unsigned int keylen)
2428 {
2429 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2430 	unsigned int blocksize =
2431 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2432 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
2433 	unsigned int index;
2434 	int rc;
2435 
2436 	flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2437 		 __func__, ahash, key, keylen, blocksize, digestsize);
2438 	flow_dump("  key: ", key, keylen);
2439 
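	/*
	 * Per the HMAC definition (RFC 2104), a key longer than the hash
	 * block size is first hashed; the resulting digest is then used as
	 * the key.
	 */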
2440 	if (keylen > blocksize) {
2441 		switch (ctx->auth.alg) {
2442 		case HASH_ALG_MD5:
2443 			rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2444 				      0, NULL, 0);
2445 			break;
2446 		case HASH_ALG_SHA1:
2447 			rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2448 				      0, NULL, 0);
2449 			break;
2450 		case HASH_ALG_SHA224:
2451 			rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2452 				      0, NULL, 0);
2453 			break;
2454 		case HASH_ALG_SHA256:
2455 			rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2456 				      0, NULL, 0);
2457 			break;
2458 		case HASH_ALG_SHA384:
2459 			rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2460 				      0, NULL, 0);
2461 			break;
2462 		case HASH_ALG_SHA512:
2463 			rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2464 				      0, NULL, 0);
2465 			break;
2466 		case HASH_ALG_SHA3_224:
2467 			rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2468 				      NULL, 0, NULL, 0);
2469 			break;
2470 		case HASH_ALG_SHA3_256:
2471 			rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2472 				      NULL, 0, NULL, 0);
2473 			break;
2474 		case HASH_ALG_SHA3_384:
2475 			rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2476 				      NULL, 0, NULL, 0);
2477 			break;
2478 		case HASH_ALG_SHA3_512:
2479 			rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2480 				      NULL, 0, NULL, 0);
2481 			break;
2482 		default:
2483 			pr_err("%s() Error: unknown hash alg\n", __func__);
2484 			return -EINVAL;
2485 		}
2486 		if (rc < 0) {
2487 			pr_err("%s() Error %d computing shash for %s\n",
2488 			       __func__, rc, hash_alg_name[ctx->auth.alg]);
2489 			return rc;
2490 		}
2491 		ctx->authkeylen = digestsize;
2492 
2493 		flow_log("  keylen > blocksize... hashed\n");
2494 		flow_dump("  newkey: ", ctx->authkey, ctx->authkeylen);
2495 	} else {
2496 		memcpy(ctx->authkey, key, keylen);
2497 		ctx->authkeylen = keylen;
2498 	}
2499 
2500 	/*
2501 	 * Full HMAC operation in SPU-M is not verified, so keep the
2502 	 * generation of IPAD, OPAD and the outer hashing
2503 	 * in software.
2504 	 */
2505 	if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2506 		memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2507 		memset(ctx->ipad + ctx->authkeylen, 0,
2508 		       blocksize - ctx->authkeylen);
2509 		ctx->authkeylen = 0;
2510 		memcpy(ctx->opad, ctx->ipad, blocksize);
2511 
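		/*
		 * Generate the standard HMAC inner and outer pads: the
		 * zero-padded key XORed with 0x36 (ipad) and 0x5c (opad).
		 */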
2512 		for (index = 0; index < blocksize; index++) {
2513 			ctx->ipad[index] ^= 0x36;
2514 			ctx->opad[index] ^= 0x5c;
2515 		}
2516 
2517 		flow_dump("  ipad: ", ctx->ipad, blocksize);
2518 		flow_dump("  opad: ", ctx->opad, blocksize);
2519 	}
2520 	ctx->digestsize = digestsize;
2521 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2522 
2523 	return 0;
2524 }
2525 
2526 static int ahash_hmac_init(struct ahash_request *req)
2527 {
2528 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2529 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2530 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2531 	unsigned int blocksize =
2532 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2533 
2534 	flow_log("ahash_hmac_init()\n");
2535 
2536 	/* init the context as a hash */
2537 	ahash_init(req);
2538 
2539 	if (!spu_no_incr_hash(ctx)) {
2540 		/* SPU-M can do incr hashing but needs sw for outer HMAC */
2541 		rctx->is_sw_hmac = true;
2542 		ctx->auth.mode = HASH_MODE_HASH;
2543 		/* start with a prepended ipad */
2544 		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2545 		rctx->hash_carry_len = blocksize;
2546 		rctx->total_todo += blocksize;
2547 	}
2548 
2549 	return 0;
2550 }
2551 
2552 static int ahash_hmac_update(struct ahash_request *req)
2553 {
2554 	flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2555 
2556 	if (!req->nbytes)
2557 		return 0;
2558 
2559 	return ahash_update(req);
2560 }
2561 
2562 static int ahash_hmac_final(struct ahash_request *req)
2563 {
2564 	flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2565 
2566 	return ahash_final(req);
2567 }
2568 
2569 static int ahash_hmac_finup(struct ahash_request *req)
2570 {
2571 	flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2572 
2573 	return ahash_finup(req);
2574 }
2575 
2576 static int ahash_hmac_digest(struct ahash_request *req)
2577 {
2578 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2579 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2580 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2581 	unsigned int blocksize =
2582 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2583 
2584 	flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2585 
2586 	/* Perform initialization and then call finup */
2587 	__ahash_init(req);
2588 
2589 	if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2590 		/*
2591 		 * SPU2 supports a full HMAC implementation in
2592 		 * hardware, so there is no need to generate IPAD, OPAD
2593 		 * and the outer hash in software.
2594 		 * Only when the hash key length exceeds the hash block
2595 		 * size does SPU2 expect us to hash the key, shorten it
2596 		 * to the digest size, and feed it in as the hash key.
2597 		 */
2598 		rctx->is_sw_hmac = false;
2599 		ctx->auth.mode = HASH_MODE_HMAC;
2600 	} else {
2601 		rctx->is_sw_hmac = true;
2602 		ctx->auth.mode = HASH_MODE_HASH;
2603 		/* start with a prepended ipad */
2604 		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2605 		rctx->hash_carry_len = blocksize;
2606 		rctx->total_todo += blocksize;
2607 	}
2608 
2609 	return __ahash_finup(req);
2610 }
2611 
2612 /* aead helpers */
2613 
2614 static int aead_need_fallback(struct aead_request *req)
2615 {
2616 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2617 	struct spu_hw *spu = &iproc_priv.spu;
2618 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2619 	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2620 	u32 payload_len;
2621 
2622 	/*
2623 	 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
2624 	 * and AAD are both 0 bytes long. So use fallback in this case.
2625 	 */
2626 	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2627 	     (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2628 	    (req->assoclen == 0)) {
2629 		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2630 		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2631 			flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2632 			return 1;
2633 		}
2634 	}
2635 
2636 	/* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
2637 	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2638 	    (spu->spu_type == SPU_TYPE_SPUM) &&
2639 	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2640 	    (ctx->digestsize != 16)) {
2641 		flow_log("%s() AES CCM needs fallback for digest size %d\n",
2642 			 __func__, ctx->digestsize);
2643 		return 1;
2644 	}
2645 
2646 	/*
2647 	 * SPU-M on NSP has an issue where AES-CCM hash is not correct
2648 	 * when AAD size is 0
2649 	 */
2650 	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2651 	    (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2652 	    (req->assoclen == 0)) {
2653 		flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2654 			 __func__);
2655 		return 1;
2656 	}
2657 
2658 	payload_len = req->cryptlen;
2659 	if (spu->spu_type == SPU_TYPE_SPUM)
2660 		payload_len += req->assoclen;
2661 
2662 	flow_log("%s() payload len: %u\n", __func__, payload_len);
2663 
2664 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2665 		return 0;
2666 	else
2667 		return payload_len > ctx->max_payload;
2668 }
2669 
2670 static void aead_complete(struct crypto_async_request *areq, int err)
2671 {
2672 	struct aead_request *req =
2673 	    container_of(areq, struct aead_request, base);
2674 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2675 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2676 
2677 	flow_log("%s() err:%d\n", __func__, err);
2678 
2679 	areq->tfm = crypto_aead_tfm(aead);
2680 
2681 	areq->complete = rctx->old_complete;
2682 	areq->data = rctx->old_data;
2683 
2684 	areq->complete(areq, err);
2685 }
2686 
2687 static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2688 {
2689 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2690 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2691 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2692 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2693 	int err;
2694 	u32 req_flags;
2695 
2696 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
2697 
2698 	if (ctx->fallback_cipher) {
2699 		/* Store the cipher tfm and then use the fallback tfm */
2700 		rctx->old_tfm = tfm;
2701 		aead_request_set_tfm(req, ctx->fallback_cipher);
2702 		/*
2703 		 * Save the callback and chain ourselves in, so we can restore
2704 		 * the tfm
2705 		 */
2706 		rctx->old_complete = req->base.complete;
2707 		rctx->old_data = req->base.data;
2708 		req_flags = aead_request_flags(req);
2709 		aead_request_set_callback(req, req_flags, aead_complete, req);
2710 		err = is_encrypt ? crypto_aead_encrypt(req) :
2711 		    crypto_aead_decrypt(req);
2712 
2713 		if (err == 0) {
2714 			/*
2715 			 * fallback was synchronous (did not return
2716 			 * -EINPROGRESS). So restore request state here.
2717 			 */
2718 			aead_request_set_callback(req, req_flags,
2719 						  rctx->old_complete, req);
2720 			req->base.data = rctx->old_data;
2721 			aead_request_set_tfm(req, aead);
2722 			flow_log("%s() fallback completed successfully\n\n",
2723 				 __func__);
2724 		}
2725 	} else {
2726 		err = -EINVAL;
2727 	}
2728 
2729 	return err;
2730 }
2731 
2732 static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2733 {
2734 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2735 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2736 	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2737 	int err;
2738 
2739 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
2740 
2741 	if (req->assoclen > MAX_ASSOC_SIZE) {
2742 		pr_err
2743 		    ("%s() Error: associated data too long. (%u > %u bytes)\n",
2744 		     __func__, req->assoclen, MAX_ASSOC_SIZE);
2745 		return -EINVAL;
2746 	}
2747 
2748 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2749 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2750 	rctx->parent = &req->base;
2751 	rctx->is_encrypt = is_encrypt;
2752 	rctx->bd_suppress = false;
2753 	rctx->total_todo = req->cryptlen;
2754 	rctx->src_sent = 0;
2755 	rctx->total_sent = 0;
2756 	rctx->total_received = 0;
2757 	rctx->is_sw_hmac = false;
2758 	rctx->ctx = ctx;
2759 	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2760 
2761 	/* assoc data is at start of src sg */
2762 	rctx->assoc = req->src;
2763 
2764 	/*
2765 	 * Init current position in src scatterlist to be after assoc data.
2766 	 * src_skip set to buffer offset where data begins. (Assoc data could
2767 	 * end in the middle of a buffer.)
2768 	 */
2769 	if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2770 			     &rctx->src_skip) < 0) {
2771 		pr_err("%s() Error: Unable to find start of src data\n",
2772 		       __func__);
2773 		return -EINVAL;
2774 	}
2775 
2776 	rctx->src_nents = 0;
2777 	rctx->dst_nents = 0;
2778 	if (req->dst == req->src) {
2779 		rctx->dst_sg = rctx->src_sg;
2780 		rctx->dst_skip = rctx->src_skip;
2781 	} else {
2782 		/*
2783 		 * Expect req->dst to have room for assoc data followed by
2784 		 * output data and ICV, if encrypt. So initialize dst_sg
2785 		 * to point beyond assoc len offset.
2786 		 */
2787 		if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2788 				     &rctx->dst_skip) < 0) {
2789 			pr_err("%s() Error: Unable to find start of dst data\n",
2790 			       __func__);
2791 			return -EINVAL;
2792 		}
2793 	}
2794 
2795 	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2796 	    ctx->cipher.mode == CIPHER_MODE_CTR ||
2797 	    ctx->cipher.mode == CIPHER_MODE_OFB ||
2798 	    ctx->cipher.mode == CIPHER_MODE_XTS ||
2799 	    ctx->cipher.mode == CIPHER_MODE_GCM) {
2800 		rctx->iv_ctr_len =
2801 			ctx->salt_len +
2802 			crypto_aead_ivsize(crypto_aead_reqtfm(req));
2803 	} else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2804 		rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2805 	} else {
2806 		rctx->iv_ctr_len = 0;
2807 	}
2808 
2809 	rctx->hash_carry_len = 0;
2810 
2811 	flow_log("  src sg: %p\n", req->src);
2812 	flow_log("  rctx->src_sg: %p, src_skip %u\n",
2813 		 rctx->src_sg, rctx->src_skip);
2814 	flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
2815 	flow_log("  dst sg: %p\n", req->dst);
2816 	flow_log("  rctx->dst_sg: %p, dst_skip %u\n",
2817 		 rctx->dst_sg, rctx->dst_skip);
2818 	flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
2819 	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
2820 	flow_log("  authkeylen:%u\n", ctx->authkeylen);
2821 	flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2822 
2823 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2824 		flow_log("  max_payload infinite");
2825 	else
2826 		flow_log("  max_payload: %u\n", ctx->max_payload);
2827 
2828 	if (unlikely(aead_need_fallback(req)))
2829 		return aead_do_fallback(req, is_encrypt);
2830 
2831 	/*
2832 	 * Do memory allocations for the request after the fallback check,
2833 	 * because if we fall back, we won't call finish_req() to deallocate.
2834 	 */
2835 	if (rctx->iv_ctr_len) {
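		/*
		 * Assemble the IV/counter buffer: copy the salt (if any) at
		 * salt_offset, with the IV supplied in the request placed
		 * immediately after the salt.
		 */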
2836 		if (ctx->salt_len)
2837 			memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2838 			       ctx->salt, ctx->salt_len);
2839 		memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2840 		       req->iv,
2841 		       rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2842 	}
2843 
2844 	rctx->chan_idx = select_channel();
2845 	err = handle_aead_req(rctx);
2846 	if (err != -EINPROGRESS)
2847 		/* synchronous result */
2848 		spu_chunk_cleanup(rctx);
2849 
2850 	return err;
2851 }
2852 
2853 static int aead_authenc_setkey(struct crypto_aead *cipher,
2854 			       const u8 *key, unsigned int keylen)
2855 {
2856 	struct spu_hw *spu = &iproc_priv.spu;
2857 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2858 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2859 	struct rtattr *rta = (void *)key;
2860 	struct crypto_authenc_key_param *param;
2861 	const u8 *origkey = key;
2862 	const unsigned int origkeylen = keylen;
2863 
2864 	int ret = 0;
2865 
2866 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2867 		 keylen);
2868 	flow_dump("  key: ", key, keylen);
2869 
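	/*
	 * authenc() keys arrive in the legacy format: an rtattr holding
	 * crypto_authenc_key_param (the cipher key length), followed by the
	 * authentication key and then the cipher key.
	 */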
2870 	if (!RTA_OK(rta, keylen))
2871 		goto badkey;
2872 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
2873 		goto badkey;
2874 	if (RTA_PAYLOAD(rta) < sizeof(*param))
2875 		goto badkey;
2876 
2877 	param = RTA_DATA(rta);
2878 	ctx->enckeylen = be32_to_cpu(param->enckeylen);
2879 
2880 	key += RTA_ALIGN(rta->rta_len);
2881 	keylen -= RTA_ALIGN(rta->rta_len);
2882 
2883 	if (keylen < ctx->enckeylen)
2884 		goto badkey;
2885 	if (ctx->enckeylen > MAX_KEY_SIZE)
2886 		goto badkey;
2887 
2888 	ctx->authkeylen = keylen - ctx->enckeylen;
2889 
2890 	if (ctx->authkeylen > MAX_KEY_SIZE)
2891 		goto badkey;
2892 
2893 	memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
2894 	/* May end up padding auth key. So make sure it's zeroed. */
2895 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
2896 	memcpy(ctx->authkey, key, ctx->authkeylen);
2897 
2898 	switch (ctx->alg->cipher_info.alg) {
2899 	case CIPHER_ALG_DES:
2900 		if (ctx->enckeylen == DES_KEY_SIZE) {
2901 			u32 tmp[DES_EXPKEY_WORDS];
2902 			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2903 
2904 			if (des_ekey(tmp, key) == 0) {
2905 				if (crypto_aead_get_flags(cipher) &
2906 				    CRYPTO_TFM_REQ_WEAK_KEY) {
2907 					crypto_aead_set_flags(cipher, flags);
2908 					return -EINVAL;
2909 				}
2910 			}
2911 
2912 			ctx->cipher_type = CIPHER_TYPE_DES;
2913 		} else {
2914 			goto badkey;
2915 		}
2916 		break;
2917 	case CIPHER_ALG_3DES:
2918 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
2919 			const u32 *K = (const u32 *)key;
2920 			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
2921 
2922 			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
2923 			    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
2924 				crypto_aead_set_flags(cipher, flags);
2925 				return -EINVAL;
2926 			}
2927 
2928 			ctx->cipher_type = CIPHER_TYPE_3DES;
2929 		} else {
2930 			crypto_aead_set_flags(cipher,
2931 					      CRYPTO_TFM_RES_BAD_KEY_LEN);
2932 			return -EINVAL;
2933 		}
2934 		break;
2935 	case CIPHER_ALG_AES:
2936 		switch (ctx->enckeylen) {
2937 		case AES_KEYSIZE_128:
2938 			ctx->cipher_type = CIPHER_TYPE_AES128;
2939 			break;
2940 		case AES_KEYSIZE_192:
2941 			ctx->cipher_type = CIPHER_TYPE_AES192;
2942 			break;
2943 		case AES_KEYSIZE_256:
2944 			ctx->cipher_type = CIPHER_TYPE_AES256;
2945 			break;
2946 		default:
2947 			goto badkey;
2948 		}
2949 		break;
2950 	case CIPHER_ALG_RC4:
2951 		ctx->cipher_type = CIPHER_TYPE_INIT;
2952 		break;
2953 	default:
2954 		pr_err("%s() Error: Unknown cipher alg\n", __func__);
2955 		return -EINVAL;
2956 	}
2957 
2958 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2959 		 ctx->authkeylen);
2960 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
2961 	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
2962 
2963 	/* setkey the fallback just in case we need to use it */
2964 	if (ctx->fallback_cipher) {
2965 		flow_log("  running fallback setkey()\n");
2966 
2967 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2968 		ctx->fallback_cipher->base.crt_flags |=
2969 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2970 		ret =
2971 		    crypto_aead_setkey(ctx->fallback_cipher, origkey,
2972 				       origkeylen);
2973 		if (ret) {
2974 			flow_log("  fallback setkey() returned:%d\n", ret);
2975 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
2976 			tfm->crt_flags |=
2977 			    (ctx->fallback_cipher->base.crt_flags &
2978 			     CRYPTO_TFM_RES_MASK);
2979 		}
2980 	}
2981 
2982 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2983 							  ctx->enckeylen,
2984 							  false);
2985 
2986 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2987 
2988 	return ret;
2989 
2990 badkey:
2991 	ctx->enckeylen = 0;
2992 	ctx->authkeylen = 0;
2993 	ctx->digestsize = 0;
2994 
2995 	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2996 	return -EINVAL;
2997 }
2998 
2999 static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
3000 			       const u8 *key, unsigned int keylen)
3001 {
3002 	struct spu_hw *spu = &iproc_priv.spu;
3003 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3004 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
3005 
3006 	int ret = 0;
3007 
3008 	flow_log("%s() keylen:%u\n", __func__, keylen);
3009 	flow_dump("  key: ", key, keylen);
3010 
3011 	if (!ctx->is_esp)
3012 		ctx->digestsize = keylen;
3013 
3014 	ctx->enckeylen = keylen;
3015 	ctx->authkeylen = 0;
3016 	memcpy(ctx->enckey, key, ctx->enckeylen);
3017 
3018 	switch (ctx->enckeylen) {
3019 	case AES_KEYSIZE_128:
3020 		ctx->cipher_type = CIPHER_TYPE_AES128;
3021 		break;
3022 	case AES_KEYSIZE_192:
3023 		ctx->cipher_type = CIPHER_TYPE_AES192;
3024 		break;
3025 	case AES_KEYSIZE_256:
3026 		ctx->cipher_type = CIPHER_TYPE_AES256;
3027 		break;
3028 	default:
3029 		goto badkey;
3030 	}
3031 
3032 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3033 		 ctx->authkeylen);
3034 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
3035 	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
3036 
3037 	/* setkey the fallback just in case we need to use it */
3038 	if (ctx->fallback_cipher) {
3039 		flow_log("  running fallback setkey()\n");
3040 
3041 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
3042 		ctx->fallback_cipher->base.crt_flags |=
3043 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
3044 		ret = crypto_aead_setkey(ctx->fallback_cipher, key,
3045 					 keylen + ctx->salt_len);
3046 		if (ret) {
3047 			flow_log("  fallback setkey() returned:%d\n", ret);
3048 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
3049 			tfm->crt_flags |=
3050 			    (ctx->fallback_cipher->base.crt_flags &
3051 			     CRYPTO_TFM_RES_MASK);
3052 		}
3053 	}
3054 
3055 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
3056 							  ctx->enckeylen,
3057 							  false);
3058 
3059 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
3060 
3061 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3062 		 ctx->authkeylen);
3063 
3064 	return ret;
3065 
3066 badkey:
3067 	ctx->enckeylen = 0;
3068 	ctx->authkeylen = 0;
3069 	ctx->digestsize = 0;
3070 
3071 	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
3072 	return -EINVAL;
3073 }
3074 
3075 /**
3076  * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
3077  * @cipher: AEAD structure
3078  * @key:    Key followed by 4 bytes of salt
3079  * @keylen: Length of key plus salt, in bytes
3080  *
3081  * Extracts salt from key and stores it to be prepended to IV on each request.
3082  * Digest is always 16 bytes
3083  *
3084  * Return: Value from generic gcm setkey.
3085  */
3086 static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
3087 			       const u8 *key, unsigned int keylen)
3088 {
3089 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3090 
3091 	flow_log("%s\n", __func__);
3092 	ctx->salt_len = GCM_ESP_SALT_SIZE;
3093 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3094 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3095 	keylen -= GCM_ESP_SALT_SIZE;
3096 	ctx->digestsize = GCM_ESP_DIGESTSIZE;
3097 	ctx->is_esp = true;
3098 	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3099 
3100 	return aead_gcm_ccm_setkey(cipher, key, keylen);
3101 }
3102 
3103 /**
3104  * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
3105  * @cipher: AEAD structure
3106  * @key:    Key followed by 4 bytes of salt
3107  * @keylen: Length of key plus salt, in bytes
3108  *
3109  * Extracts salt from key and stores it to be prepended to IV on each request.
3110  * Digest is always 16 bytes
3111  *
3112  * Return: Value from generic gcm setkey.
3113  */
3114 static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
3115 				  const u8 *key, unsigned int keylen)
3116 {
3117 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3118 
3119 	flow_log("%s\n", __func__);
3120 	ctx->salt_len = GCM_ESP_SALT_SIZE;
3121 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3122 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3123 	keylen -= GCM_ESP_SALT_SIZE;
3124 	ctx->digestsize = GCM_ESP_DIGESTSIZE;
3125 	ctx->is_esp = true;
3126 	ctx->is_rfc4543 = true;
3127 	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3128 
3129 	return aead_gcm_ccm_setkey(cipher, key, keylen);
3130 }
3131 
3132 /**
3133  * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
3134  * @cipher: AEAD structure
3135  * @key:    Key followed by 4 bytes of salt
3136  * @keylen: Length of key plus salt, in bytes
3137  *
3138  * Extracts salt from key and stores it to be prepended to IV on each request.
3139  * Digest is always 16 bytes
3140  *
3141  * Return: Value from generic ccm setkey.
3142  */
3143 static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3144 			       const u8 *key, unsigned int keylen)
3145 {
3146 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3147 
3148 	flow_log("%s\n", __func__);
3149 	ctx->salt_len = CCM_ESP_SALT_SIZE;
3150 	ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3151 	memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3152 	keylen -= CCM_ESP_SALT_SIZE;
3153 	ctx->is_esp = true;
3154 	flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3155 
3156 	return aead_gcm_ccm_setkey(cipher, key, keylen);
3157 }
3158 
3159 static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3160 {
3161 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3162 	int ret = 0;
3163 
3164 	flow_log("%s() authkeylen:%u authsize:%u\n",
3165 		 __func__, ctx->authkeylen, authsize);
3166 
3167 	ctx->digestsize = authsize;
3168 
3169 	/* setkey the fallback just in case we need to use it */
3170 	if (ctx->fallback_cipher) {
3171 		flow_log("  running fallback setauth()\n");
3172 
3173 		ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3174 		if (ret)
3175 			flow_log("  fallback setauth() returned:%d\n", ret);
3176 	}
3177 
3178 	return ret;
3179 }
3180 
3181 static int aead_encrypt(struct aead_request *req)
3182 {
3183 	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
3184 		 req->cryptlen);
3185 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3186 	flow_log("  assoc_len:%u\n", req->assoclen);
3187 
3188 	return aead_enqueue(req, true);
3189 }
3190 
3191 static int aead_decrypt(struct aead_request *req)
3192 {
3193 	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3194 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3195 	flow_log("  assoc_len:%u\n", req->assoclen);
3196 
3197 	return aead_enqueue(req, false);
3198 }
3199 
3200 /* ==================== Supported Cipher Algorithms ==================== */
3201 
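/*
 * Each entry below pairs a kernel crypto API algorithm (cra_name) with the
 * SPU cipher and auth parameters this driver uses when building SPU request
 * headers for that algorithm.
 */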
3202 static struct iproc_alg_s driver_algs[] = {
3203 	{
3204 	 .type = CRYPTO_ALG_TYPE_AEAD,
3205 	 .alg.aead = {
3206 		 .base = {
3207 			.cra_name = "gcm(aes)",
3208 			.cra_driver_name = "gcm-aes-iproc",
3209 			.cra_blocksize = AES_BLOCK_SIZE,
3210 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3211 		 },
3212 		 .setkey = aead_gcm_ccm_setkey,
3213 		 .ivsize = GCM_AES_IV_SIZE,
3214 		 .maxauthsize = AES_BLOCK_SIZE,
3215 	 },
3216 	 .cipher_info = {
3217 			 .alg = CIPHER_ALG_AES,
3218 			 .mode = CIPHER_MODE_GCM,
3219 			 },
3220 	 .auth_info = {
3221 		       .alg = HASH_ALG_AES,
3222 		       .mode = HASH_MODE_GCM,
3223 		       },
3224 	 .auth_first = 0,
3225 	 },
3226 	{
3227 	 .type = CRYPTO_ALG_TYPE_AEAD,
3228 	 .alg.aead = {
3229 		 .base = {
3230 			.cra_name = "ccm(aes)",
3231 			.cra_driver_name = "ccm-aes-iproc",
3232 			.cra_blocksize = AES_BLOCK_SIZE,
3233 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3234 		 },
3235 		 .setkey = aead_gcm_ccm_setkey,
3236 		 .ivsize = CCM_AES_IV_SIZE,
3237 		 .maxauthsize = AES_BLOCK_SIZE,
3238 	 },
3239 	 .cipher_info = {
3240 			 .alg = CIPHER_ALG_AES,
3241 			 .mode = CIPHER_MODE_CCM,
3242 			 },
3243 	 .auth_info = {
3244 		       .alg = HASH_ALG_AES,
3245 		       .mode = HASH_MODE_CCM,
3246 		       },
3247 	 .auth_first = 0,
3248 	 },
3249 	{
3250 	 .type = CRYPTO_ALG_TYPE_AEAD,
3251 	 .alg.aead = {
3252 		 .base = {
3253 			.cra_name = "rfc4106(gcm(aes))",
3254 			.cra_driver_name = "gcm-aes-esp-iproc",
3255 			.cra_blocksize = AES_BLOCK_SIZE,
3256 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3257 		 },
3258 		 .setkey = aead_gcm_esp_setkey,
3259 		 .ivsize = GCM_ESP_IV_SIZE,
3260 		 .maxauthsize = AES_BLOCK_SIZE,
3261 	 },
3262 	 .cipher_info = {
3263 			 .alg = CIPHER_ALG_AES,
3264 			 .mode = CIPHER_MODE_GCM,
3265 			 },
3266 	 .auth_info = {
3267 		       .alg = HASH_ALG_AES,
3268 		       .mode = HASH_MODE_GCM,
3269 		       },
3270 	 .auth_first = 0,
3271 	 },
3272 	{
3273 	 .type = CRYPTO_ALG_TYPE_AEAD,
3274 	 .alg.aead = {
3275 		 .base = {
3276 			.cra_name = "rfc4309(ccm(aes))",
3277 			.cra_driver_name = "ccm-aes-esp-iproc",
3278 			.cra_blocksize = AES_BLOCK_SIZE,
3279 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3280 		 },
3281 		 .setkey = aead_ccm_esp_setkey,
3282 		 .ivsize = CCM_AES_IV_SIZE,
3283 		 .maxauthsize = AES_BLOCK_SIZE,
3284 	 },
3285 	 .cipher_info = {
3286 			 .alg = CIPHER_ALG_AES,
3287 			 .mode = CIPHER_MODE_CCM,
3288 			 },
3289 	 .auth_info = {
3290 		       .alg = HASH_ALG_AES,
3291 		       .mode = HASH_MODE_CCM,
3292 		       },
3293 	 .auth_first = 0,
3294 	 },
3295 	{
3296 	 .type = CRYPTO_ALG_TYPE_AEAD,
3297 	 .alg.aead = {
3298 		 .base = {
3299 			.cra_name = "rfc4543(gcm(aes))",
3300 			.cra_driver_name = "gmac-aes-esp-iproc",
3301 			.cra_blocksize = AES_BLOCK_SIZE,
3302 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3303 		 },
3304 		 .setkey = rfc4543_gcm_esp_setkey,
3305 		 .ivsize = GCM_ESP_IV_SIZE,
3306 		 .maxauthsize = AES_BLOCK_SIZE,
3307 	 },
3308 	 .cipher_info = {
3309 			 .alg = CIPHER_ALG_AES,
3310 			 .mode = CIPHER_MODE_GCM,
3311 			 },
3312 	 .auth_info = {
3313 		       .alg = HASH_ALG_AES,
3314 		       .mode = HASH_MODE_GCM,
3315 		       },
3316 	 .auth_first = 0,
3317 	 },
3318 	{
3319 	 .type = CRYPTO_ALG_TYPE_AEAD,
3320 	 .alg.aead = {
3321 		 .base = {
3322 			.cra_name = "authenc(hmac(md5),cbc(aes))",
3323 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3324 			.cra_blocksize = AES_BLOCK_SIZE,
3325 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3326 		 },
3327 		 .setkey = aead_authenc_setkey,
3328 		 .ivsize = AES_BLOCK_SIZE,
3329 		 .maxauthsize = MD5_DIGEST_SIZE,
3330 	 },
3331 	 .cipher_info = {
3332 			 .alg = CIPHER_ALG_AES,
3333 			 .mode = CIPHER_MODE_CBC,
3334 			 },
3335 	 .auth_info = {
3336 		       .alg = HASH_ALG_MD5,
3337 		       .mode = HASH_MODE_HMAC,
3338 		       },
3339 	 .auth_first = 0,
3340 	 },
3341 	{
3342 	 .type = CRYPTO_ALG_TYPE_AEAD,
3343 	 .alg.aead = {
3344 		 .base = {
3345 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
3346 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3347 			.cra_blocksize = AES_BLOCK_SIZE,
3348 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3349 		 },
3350 		 .setkey = aead_authenc_setkey,
3351 		 .ivsize = AES_BLOCK_SIZE,
3352 		 .maxauthsize = SHA1_DIGEST_SIZE,
3353 	 },
3354 	 .cipher_info = {
3355 			 .alg = CIPHER_ALG_AES,
3356 			 .mode = CIPHER_MODE_CBC,
3357 			 },
3358 	 .auth_info = {
3359 		       .alg = HASH_ALG_SHA1,
3360 		       .mode = HASH_MODE_HMAC,
3361 		       },
3362 	 .auth_first = 0,
3363 	 },
3364 	{
3365 	 .type = CRYPTO_ALG_TYPE_AEAD,
3366 	 .alg.aead = {
3367 		 .base = {
3368 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
3369 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3370 			.cra_blocksize = AES_BLOCK_SIZE,
3371 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3372 		 },
3373 		 .setkey = aead_authenc_setkey,
3374 		 .ivsize = AES_BLOCK_SIZE,
3375 		 .maxauthsize = SHA256_DIGEST_SIZE,
3376 	 },
3377 	 .cipher_info = {
3378 			 .alg = CIPHER_ALG_AES,
3379 			 .mode = CIPHER_MODE_CBC,
3380 			 },
3381 	 .auth_info = {
3382 		       .alg = HASH_ALG_SHA256,
3383 		       .mode = HASH_MODE_HMAC,
3384 		       },
3385 	 .auth_first = 0,
3386 	 },
3387 	{
3388 	 .type = CRYPTO_ALG_TYPE_AEAD,
3389 	 .alg.aead = {
3390 		 .base = {
3391 			.cra_name = "authenc(hmac(md5),cbc(des))",
3392 			.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3393 			.cra_blocksize = DES_BLOCK_SIZE,
3394 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3395 		 },
3396 		 .setkey = aead_authenc_setkey,
3397 		 .ivsize = DES_BLOCK_SIZE,
3398 		 .maxauthsize = MD5_DIGEST_SIZE,
3399 	 },
3400 	 .cipher_info = {
3401 			 .alg = CIPHER_ALG_DES,
3402 			 .mode = CIPHER_MODE_CBC,
3403 			 },
3404 	 .auth_info = {
3405 		       .alg = HASH_ALG_MD5,
3406 		       .mode = HASH_MODE_HMAC,
3407 		       },
3408 	 .auth_first = 0,
3409 	 },
3410 	{
3411 	 .type = CRYPTO_ALG_TYPE_AEAD,
3412 	 .alg.aead = {
3413 		 .base = {
3414 			.cra_name = "authenc(hmac(sha1),cbc(des))",
3415 			.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3416 			.cra_blocksize = DES_BLOCK_SIZE,
3417 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3418 		 },
3419 		 .setkey = aead_authenc_setkey,
3420 		 .ivsize = DES_BLOCK_SIZE,
3421 		 .maxauthsize = SHA1_DIGEST_SIZE,
3422 	 },
3423 	 .cipher_info = {
3424 			 .alg = CIPHER_ALG_DES,
3425 			 .mode = CIPHER_MODE_CBC,
3426 			 },
3427 	 .auth_info = {
3428 		       .alg = HASH_ALG_SHA1,
3429 		       .mode = HASH_MODE_HMAC,
3430 		       },
3431 	 .auth_first = 0,
3432 	 },
3433 	{
3434 	 .type = CRYPTO_ALG_TYPE_AEAD,
3435 	 .alg.aead = {
3436 		 .base = {
3437 			.cra_name = "authenc(hmac(sha224),cbc(des))",
3438 			.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3439 			.cra_blocksize = DES_BLOCK_SIZE,
3440 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3441 		 },
3442 		 .setkey = aead_authenc_setkey,
3443 		 .ivsize = DES_BLOCK_SIZE,
3444 		 .maxauthsize = SHA224_DIGEST_SIZE,
3445 	 },
3446 	 .cipher_info = {
3447 			 .alg = CIPHER_ALG_DES,
3448 			 .mode = CIPHER_MODE_CBC,
3449 			 },
3450 	 .auth_info = {
3451 		       .alg = HASH_ALG_SHA224,
3452 		       .mode = HASH_MODE_HMAC,
3453 		       },
3454 	 .auth_first = 0,
3455 	 },
3456 	{
3457 	 .type = CRYPTO_ALG_TYPE_AEAD,
3458 	 .alg.aead = {
3459 		 .base = {
3460 			.cra_name = "authenc(hmac(sha256),cbc(des))",
3461 			.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3462 			.cra_blocksize = DES_BLOCK_SIZE,
3463 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3464 		 },
3465 		 .setkey = aead_authenc_setkey,
3466 		 .ivsize = DES_BLOCK_SIZE,
3467 		 .maxauthsize = SHA256_DIGEST_SIZE,
3468 	 },
3469 	 .cipher_info = {
3470 			 .alg = CIPHER_ALG_DES,
3471 			 .mode = CIPHER_MODE_CBC,
3472 			 },
3473 	 .auth_info = {
3474 		       .alg = HASH_ALG_SHA256,
3475 		       .mode = HASH_MODE_HMAC,
3476 		       },
3477 	 .auth_first = 0,
3478 	 },
3479 	{
3480 	 .type = CRYPTO_ALG_TYPE_AEAD,
3481 	 .alg.aead = {
3482 		 .base = {
3483 			.cra_name = "authenc(hmac(sha384),cbc(des))",
3484 			.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3485 			.cra_blocksize = DES_BLOCK_SIZE,
3486 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3487 		 },
3488 		 .setkey = aead_authenc_setkey,
3489 		 .ivsize = DES_BLOCK_SIZE,
3490 		 .maxauthsize = SHA384_DIGEST_SIZE,
3491 	 },
3492 	 .cipher_info = {
3493 			 .alg = CIPHER_ALG_DES,
3494 			 .mode = CIPHER_MODE_CBC,
3495 			 },
3496 	 .auth_info = {
3497 		       .alg = HASH_ALG_SHA384,
3498 		       .mode = HASH_MODE_HMAC,
3499 		       },
3500 	 .auth_first = 0,
3501 	 },
3502 	{
3503 	 .type = CRYPTO_ALG_TYPE_AEAD,
3504 	 .alg.aead = {
3505 		 .base = {
3506 			.cra_name = "authenc(hmac(sha512),cbc(des))",
3507 			.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3508 			.cra_blocksize = DES_BLOCK_SIZE,
3509 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3510 		 },
3511 		 .setkey = aead_authenc_setkey,
3512 		 .ivsize = DES_BLOCK_SIZE,
3513 		 .maxauthsize = SHA512_DIGEST_SIZE,
3514 	 },
3515 	 .cipher_info = {
3516 			 .alg = CIPHER_ALG_DES,
3517 			 .mode = CIPHER_MODE_CBC,
3518 			 },
3519 	 .auth_info = {
3520 		       .alg = HASH_ALG_SHA512,
3521 		       .mode = HASH_MODE_HMAC,
3522 		       },
3523 	 .auth_first = 0,
3524 	 },
3525 	{
3526 	 .type = CRYPTO_ALG_TYPE_AEAD,
3527 	 .alg.aead = {
3528 		 .base = {
3529 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3530 			.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3531 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3532 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3533 		 },
3534 		 .setkey = aead_authenc_setkey,
3535 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3536 		 .maxauthsize = MD5_DIGEST_SIZE,
3537 	 },
3538 	 .cipher_info = {
3539 			 .alg = CIPHER_ALG_3DES,
3540 			 .mode = CIPHER_MODE_CBC,
3541 			 },
3542 	 .auth_info = {
3543 		       .alg = HASH_ALG_MD5,
3544 		       .mode = HASH_MODE_HMAC,
3545 		       },
3546 	 .auth_first = 0,
3547 	 },
3548 	{
3549 	 .type = CRYPTO_ALG_TYPE_AEAD,
3550 	 .alg.aead = {
3551 		 .base = {
3552 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3553 			.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3554 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3555 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3556 		 },
3557 		 .setkey = aead_authenc_setkey,
3558 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3559 		 .maxauthsize = SHA1_DIGEST_SIZE,
3560 	 },
3561 	 .cipher_info = {
3562 			 .alg = CIPHER_ALG_3DES,
3563 			 .mode = CIPHER_MODE_CBC,
3564 			 },
3565 	 .auth_info = {
3566 		       .alg = HASH_ALG_SHA1,
3567 		       .mode = HASH_MODE_HMAC,
3568 		       },
3569 	 .auth_first = 0,
3570 	 },
3571 	{
3572 	 .type = CRYPTO_ALG_TYPE_AEAD,
3573 	 .alg.aead = {
3574 		 .base = {
3575 			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3576 			.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3577 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3578 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3579 		 },
3580 		 .setkey = aead_authenc_setkey,
3581 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3582 		 .maxauthsize = SHA224_DIGEST_SIZE,
3583 	 },
3584 	 .cipher_info = {
3585 			 .alg = CIPHER_ALG_3DES,
3586 			 .mode = CIPHER_MODE_CBC,
3587 			 },
3588 	 .auth_info = {
3589 		       .alg = HASH_ALG_SHA224,
3590 		       .mode = HASH_MODE_HMAC,
3591 		       },
3592 	 .auth_first = 0,
3593 	 },
3594 	{
3595 	 .type = CRYPTO_ALG_TYPE_AEAD,
3596 	 .alg.aead = {
3597 		 .base = {
3598 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3599 			.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3600 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3601 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3602 		 },
3603 		 .setkey = aead_authenc_setkey,
3604 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3605 		 .maxauthsize = SHA256_DIGEST_SIZE,
3606 	 },
3607 	 .cipher_info = {
3608 			 .alg = CIPHER_ALG_3DES,
3609 			 .mode = CIPHER_MODE_CBC,
3610 			 },
3611 	 .auth_info = {
3612 		       .alg = HASH_ALG_SHA256,
3613 		       .mode = HASH_MODE_HMAC,
3614 		       },
3615 	 .auth_first = 0,
3616 	 },
3617 	{
3618 	 .type = CRYPTO_ALG_TYPE_AEAD,
3619 	 .alg.aead = {
3620 		 .base = {
3621 			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3622 			.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3623 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3624 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3625 		 },
3626 		 .setkey = aead_authenc_setkey,
3627 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3628 		 .maxauthsize = SHA384_DIGEST_SIZE,
3629 	 },
3630 	 .cipher_info = {
3631 			 .alg = CIPHER_ALG_3DES,
3632 			 .mode = CIPHER_MODE_CBC,
3633 			 },
3634 	 .auth_info = {
3635 		       .alg = HASH_ALG_SHA384,
3636 		       .mode = HASH_MODE_HMAC,
3637 		       },
3638 	 .auth_first = 0,
3639 	 },
3640 	{
3641 	 .type = CRYPTO_ALG_TYPE_AEAD,
3642 	 .alg.aead = {
3643 		 .base = {
3644 			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3645 			.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3646 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3647 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3648 		 },
3649 		 .setkey = aead_authenc_setkey,
3650 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3651 		 .maxauthsize = SHA512_DIGEST_SIZE,
3652 	 },
3653 	 .cipher_info = {
3654 			 .alg = CIPHER_ALG_3DES,
3655 			 .mode = CIPHER_MODE_CBC,
3656 			 },
3657 	 .auth_info = {
3658 		       .alg = HASH_ALG_SHA512,
3659 		       .mode = HASH_MODE_HMAC,
3660 		       },
3661 	 .auth_first = 0,
3662 	 },
3663 
3664 /* ABLKCIPHER algorithms. */
3665 	{
3666 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3667 	 .alg.crypto = {
3668 			.cra_name = "ecb(arc4)",
3669 			.cra_driver_name = "ecb-arc4-iproc",
3670 			.cra_blocksize = ARC4_BLOCK_SIZE,
3671 			.cra_ablkcipher = {
3672 					   .min_keysize = ARC4_MIN_KEY_SIZE,
3673 					   .max_keysize = ARC4_MAX_KEY_SIZE,
3674 					   .ivsize = 0,
3675 					}
3676 			},
3677 	 .cipher_info = {
3678 			 .alg = CIPHER_ALG_RC4,
3679 			 .mode = CIPHER_MODE_NONE,
3680 			 },
3681 	 .auth_info = {
3682 		       .alg = HASH_ALG_NONE,
3683 		       .mode = HASH_MODE_NONE,
3684 		       },
3685 	 },
3686 	{
3687 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3688 	 .alg.crypto = {
3689 			.cra_name = "ofb(des)",
3690 			.cra_driver_name = "ofb-des-iproc",
3691 			.cra_blocksize = DES_BLOCK_SIZE,
3692 			.cra_ablkcipher = {
3693 					   .min_keysize = DES_KEY_SIZE,
3694 					   .max_keysize = DES_KEY_SIZE,
3695 					   .ivsize = DES_BLOCK_SIZE,
3696 					}
3697 			},
3698 	 .cipher_info = {
3699 			 .alg = CIPHER_ALG_DES,
3700 			 .mode = CIPHER_MODE_OFB,
3701 			 },
3702 	 .auth_info = {
3703 		       .alg = HASH_ALG_NONE,
3704 		       .mode = HASH_MODE_NONE,
3705 		       },
3706 	 },
3707 	{
3708 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3709 	 .alg.crypto = {
3710 			.cra_name = "cbc(des)",
3711 			.cra_driver_name = "cbc-des-iproc",
3712 			.cra_blocksize = DES_BLOCK_SIZE,
3713 			.cra_ablkcipher = {
3714 					   .min_keysize = DES_KEY_SIZE,
3715 					   .max_keysize = DES_KEY_SIZE,
3716 					   .ivsize = DES_BLOCK_SIZE,
3717 					}
3718 			},
3719 	 .cipher_info = {
3720 			 .alg = CIPHER_ALG_DES,
3721 			 .mode = CIPHER_MODE_CBC,
3722 			 },
3723 	 .auth_info = {
3724 		       .alg = HASH_ALG_NONE,
3725 		       .mode = HASH_MODE_NONE,
3726 		       },
3727 	 },
3728 	{
3729 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3730 	 .alg.crypto = {
3731 			.cra_name = "ecb(des)",
3732 			.cra_driver_name = "ecb-des-iproc",
3733 			.cra_blocksize = DES_BLOCK_SIZE,
3734 			.cra_ablkcipher = {
3735 					   .min_keysize = DES_KEY_SIZE,
3736 					   .max_keysize = DES_KEY_SIZE,
3737 					   .ivsize = 0,
3738 					}
3739 			},
3740 	 .cipher_info = {
3741 			 .alg = CIPHER_ALG_DES,
3742 			 .mode = CIPHER_MODE_ECB,
3743 			 },
3744 	 .auth_info = {
3745 		       .alg = HASH_ALG_NONE,
3746 		       .mode = HASH_MODE_NONE,
3747 		       },
3748 	 },
3749 	{
3750 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3751 	 .alg.crypto = {
3752 			.cra_name = "ofb(des3_ede)",
3753 			.cra_driver_name = "ofb-des3-iproc",
3754 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3755 			.cra_ablkcipher = {
3756 					   .min_keysize = DES3_EDE_KEY_SIZE,
3757 					   .max_keysize = DES3_EDE_KEY_SIZE,
3758 					   .ivsize = DES3_EDE_BLOCK_SIZE,
3759 					}
3760 			},
3761 	 .cipher_info = {
3762 			 .alg = CIPHER_ALG_3DES,
3763 			 .mode = CIPHER_MODE_OFB,
3764 			 },
3765 	 .auth_info = {
3766 		       .alg = HASH_ALG_NONE,
3767 		       .mode = HASH_MODE_NONE,
3768 		       },
3769 	 },
3770 	{
3771 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3772 	 .alg.crypto = {
3773 			.cra_name = "cbc(des3_ede)",
3774 			.cra_driver_name = "cbc-des3-iproc",
3775 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3776 			.cra_ablkcipher = {
3777 					   .min_keysize = DES3_EDE_KEY_SIZE,
3778 					   .max_keysize = DES3_EDE_KEY_SIZE,
3779 					   .ivsize = DES3_EDE_BLOCK_SIZE,
3780 					}
3781 			},
3782 	 .cipher_info = {
3783 			 .alg = CIPHER_ALG_3DES,
3784 			 .mode = CIPHER_MODE_CBC,
3785 			 },
3786 	 .auth_info = {
3787 		       .alg = HASH_ALG_NONE,
3788 		       .mode = HASH_MODE_NONE,
3789 		       },
3790 	 },
3791 	{
3792 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3793 	 .alg.crypto = {
3794 			.cra_name = "ecb(des3_ede)",
3795 			.cra_driver_name = "ecb-des3-iproc",
3796 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3797 			.cra_ablkcipher = {
3798 					   .min_keysize = DES3_EDE_KEY_SIZE,
3799 					   .max_keysize = DES3_EDE_KEY_SIZE,
3800 					   .ivsize = 0,
3801 					}
3802 			},
3803 	 .cipher_info = {
3804 			 .alg = CIPHER_ALG_3DES,
3805 			 .mode = CIPHER_MODE_ECB,
3806 			 },
3807 	 .auth_info = {
3808 		       .alg = HASH_ALG_NONE,
3809 		       .mode = HASH_MODE_NONE,
3810 		       },
3811 	 },
3812 	{
3813 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3814 	 .alg.crypto = {
3815 			.cra_name = "ofb(aes)",
3816 			.cra_driver_name = "ofb-aes-iproc",
3817 			.cra_blocksize = AES_BLOCK_SIZE,
3818 			.cra_ablkcipher = {
3819 					   .min_keysize = AES_MIN_KEY_SIZE,
3820 					   .max_keysize = AES_MAX_KEY_SIZE,
3821 					   .ivsize = AES_BLOCK_SIZE,
3822 					}
3823 			},
3824 	 .cipher_info = {
3825 			 .alg = CIPHER_ALG_AES,
3826 			 .mode = CIPHER_MODE_OFB,
3827 			 },
3828 	 .auth_info = {
3829 		       .alg = HASH_ALG_NONE,
3830 		       .mode = HASH_MODE_NONE,
3831 		       },
3832 	 },
3833 	{
3834 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3835 	 .alg.crypto = {
3836 			.cra_name = "cbc(aes)",
3837 			.cra_driver_name = "cbc-aes-iproc",
3838 			.cra_blocksize = AES_BLOCK_SIZE,
3839 			.cra_ablkcipher = {
3840 					   .min_keysize = AES_MIN_KEY_SIZE,
3841 					   .max_keysize = AES_MAX_KEY_SIZE,
3842 					   .ivsize = AES_BLOCK_SIZE,
3843 					}
3844 			},
3845 	 .cipher_info = {
3846 			 .alg = CIPHER_ALG_AES,
3847 			 .mode = CIPHER_MODE_CBC,
3848 			 },
3849 	 .auth_info = {
3850 		       .alg = HASH_ALG_NONE,
3851 		       .mode = HASH_MODE_NONE,
3852 		       },
3853 	 },
3854 	{
3855 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3856 	 .alg.crypto = {
3857 			.cra_name = "ecb(aes)",
3858 			.cra_driver_name = "ecb-aes-iproc",
3859 			.cra_blocksize = AES_BLOCK_SIZE,
3860 			.cra_ablkcipher = {
3861 					   .min_keysize = AES_MIN_KEY_SIZE,
3862 					   .max_keysize = AES_MAX_KEY_SIZE,
3863 					   .ivsize = 0,
3864 					}
3865 			},
3866 	 .cipher_info = {
3867 			 .alg = CIPHER_ALG_AES,
3868 			 .mode = CIPHER_MODE_ECB,
3869 			 },
3870 	 .auth_info = {
3871 		       .alg = HASH_ALG_NONE,
3872 		       .mode = HASH_MODE_NONE,
3873 		       },
3874 	 },
3875 	{
3876 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3877 	 .alg.crypto = {
3878 			.cra_name = "ctr(aes)",
3879 			.cra_driver_name = "ctr-aes-iproc",
3880 			.cra_blocksize = AES_BLOCK_SIZE,
3881 			.cra_ablkcipher = {
3882 					   /* .geniv = "chainiv", */
3883 					   .min_keysize = AES_MIN_KEY_SIZE,
3884 					   .max_keysize = AES_MAX_KEY_SIZE,
3885 					   .ivsize = AES_BLOCK_SIZE,
3886 					}
3887 			},
3888 	 .cipher_info = {
3889 			 .alg = CIPHER_ALG_AES,
3890 			 .mode = CIPHER_MODE_CTR,
3891 			 },
3892 	 .auth_info = {
3893 		       .alg = HASH_ALG_NONE,
3894 		       .mode = HASH_MODE_NONE,
3895 		       },
3896 	 },
3897 	{
3898 	 .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3899 	 .alg.crypto = {
3900 			.cra_name = "xts(aes)",
3901 			.cra_driver_name = "xts-aes-iproc",
3902 			.cra_blocksize = AES_BLOCK_SIZE,
3903 			.cra_ablkcipher = {
3904 				.min_keysize = 2 * AES_MIN_KEY_SIZE,
3905 				.max_keysize = 2 * AES_MAX_KEY_SIZE,
3906 				.ivsize = AES_BLOCK_SIZE,
3907 				}
3908 			},
3909 	 .cipher_info = {
3910 			 .alg = CIPHER_ALG_AES,
3911 			 .mode = CIPHER_MODE_XTS,
3912 			 },
3913 	 .auth_info = {
3914 		       .alg = HASH_ALG_NONE,
3915 		       .mode = HASH_MODE_NONE,
3916 		       },
3917 	 },
3918 
3919 /* AHASH algorithms. */
3920 	{
3921 	 .type = CRYPTO_ALG_TYPE_AHASH,
3922 	 .alg.hash = {
3923 		      .halg.digestsize = MD5_DIGEST_SIZE,
3924 		      .halg.base = {
3925 				    .cra_name = "md5",
3926 				    .cra_driver_name = "md5-iproc",
3927 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
3928 				    .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3929 					     CRYPTO_ALG_ASYNC,
3930 				}
3931 		      },
3932 	 .cipher_info = {
3933 			 .alg = CIPHER_ALG_NONE,
3934 			 .mode = CIPHER_MODE_NONE,
3935 			 },
3936 	 .auth_info = {
3937 		       .alg = HASH_ALG_MD5,
3938 		       .mode = HASH_MODE_HASH,
3939 		       },
3940 	 },
3941 	{
3942 	 .type = CRYPTO_ALG_TYPE_AHASH,
3943 	 .alg.hash = {
3944 		      .halg.digestsize = MD5_DIGEST_SIZE,
3945 		      .halg.base = {
3946 				    .cra_name = "hmac(md5)",
3947 				    .cra_driver_name = "hmac-md5-iproc",
3948 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
3949 				}
3950 		      },
3951 	 .cipher_info = {
3952 			 .alg = CIPHER_ALG_NONE,
3953 			 .mode = CIPHER_MODE_NONE,
3954 			 },
3955 	 .auth_info = {
3956 		       .alg = HASH_ALG_MD5,
3957 		       .mode = HASH_MODE_HMAC,
3958 		       },
3959 	 },
3960 	{.type = CRYPTO_ALG_TYPE_AHASH,
3961 	 .alg.hash = {
3962 		      .halg.digestsize = SHA1_DIGEST_SIZE,
3963 		      .halg.base = {
3964 				    .cra_name = "sha1",
3965 				    .cra_driver_name = "sha1-iproc",
3966 				    .cra_blocksize = SHA1_BLOCK_SIZE,
3967 				}
3968 		      },
3969 	 .cipher_info = {
3970 			 .alg = CIPHER_ALG_NONE,
3971 			 .mode = CIPHER_MODE_NONE,
3972 			 },
3973 	 .auth_info = {
3974 		       .alg = HASH_ALG_SHA1,
3975 		       .mode = HASH_MODE_HASH,
3976 		       },
3977 	 },
3978 	{.type = CRYPTO_ALG_TYPE_AHASH,
3979 	 .alg.hash = {
3980 		      .halg.digestsize = SHA1_DIGEST_SIZE,
3981 		      .halg.base = {
3982 				    .cra_name = "hmac(sha1)",
3983 				    .cra_driver_name = "hmac-sha1-iproc",
3984 				    .cra_blocksize = SHA1_BLOCK_SIZE,
3985 				}
3986 		      },
3987 	 .cipher_info = {
3988 			 .alg = CIPHER_ALG_NONE,
3989 			 .mode = CIPHER_MODE_NONE,
3990 			 },
3991 	 .auth_info = {
3992 		       .alg = HASH_ALG_SHA1,
3993 		       .mode = HASH_MODE_HMAC,
3994 		       },
3995 	 },
3996 	{.type = CRYPTO_ALG_TYPE_AHASH,
3997 	 .alg.hash = {
3998 			.halg.digestsize = SHA224_DIGEST_SIZE,
3999 			.halg.base = {
4000 				    .cra_name = "sha224",
4001 				    .cra_driver_name = "sha224-iproc",
4002 				    .cra_blocksize = SHA224_BLOCK_SIZE,
4003 			}
4004 		      },
4005 	 .cipher_info = {
4006 			 .alg = CIPHER_ALG_NONE,
4007 			 .mode = CIPHER_MODE_NONE,
4008 			 },
4009 	 .auth_info = {
4010 		       .alg = HASH_ALG_SHA224,
4011 		       .mode = HASH_MODE_HASH,
4012 		       },
4013 	 },
4014 	{.type = CRYPTO_ALG_TYPE_AHASH,
4015 	 .alg.hash = {
4016 		      .halg.digestsize = SHA224_DIGEST_SIZE,
4017 		      .halg.base = {
4018 				    .cra_name = "hmac(sha224)",
4019 				    .cra_driver_name = "hmac-sha224-iproc",
4020 				    .cra_blocksize = SHA224_BLOCK_SIZE,
4021 				}
4022 		      },
4023 	 .cipher_info = {
4024 			 .alg = CIPHER_ALG_NONE,
4025 			 .mode = CIPHER_MODE_NONE,
4026 			 },
4027 	 .auth_info = {
4028 		       .alg = HASH_ALG_SHA224,
4029 		       .mode = HASH_MODE_HMAC,
4030 		       },
4031 	 },
4032 	{.type = CRYPTO_ALG_TYPE_AHASH,
4033 	 .alg.hash = {
4034 		      .halg.digestsize = SHA256_DIGEST_SIZE,
4035 		      .halg.base = {
4036 				    .cra_name = "sha256",
4037 				    .cra_driver_name = "sha256-iproc",
4038 				    .cra_blocksize = SHA256_BLOCK_SIZE,
4039 				}
4040 		      },
4041 	 .cipher_info = {
4042 			 .alg = CIPHER_ALG_NONE,
4043 			 .mode = CIPHER_MODE_NONE,
4044 			 },
4045 	 .auth_info = {
4046 		       .alg = HASH_ALG_SHA256,
4047 		       .mode = HASH_MODE_HASH,
4048 		       },
4049 	 },
4050 	{.type = CRYPTO_ALG_TYPE_AHASH,
4051 	 .alg.hash = {
4052 		      .halg.digestsize = SHA256_DIGEST_SIZE,
4053 		      .halg.base = {
4054 				    .cra_name = "hmac(sha256)",
4055 				    .cra_driver_name = "hmac-sha256-iproc",
4056 				    .cra_blocksize = SHA256_BLOCK_SIZE,
4057 				}
4058 		      },
4059 	 .cipher_info = {
4060 			 .alg = CIPHER_ALG_NONE,
4061 			 .mode = CIPHER_MODE_NONE,
4062 			 },
4063 	 .auth_info = {
4064 		       .alg = HASH_ALG_SHA256,
4065 		       .mode = HASH_MODE_HMAC,
4066 		       },
4067 	 },
4068 	{
4069 	 .type = CRYPTO_ALG_TYPE_AHASH,
4070 	 .alg.hash = {
4071 		      .halg.digestsize = SHA384_DIGEST_SIZE,
4072 		      .halg.base = {
4073 				    .cra_name = "sha384",
4074 				    .cra_driver_name = "sha384-iproc",
4075 				    .cra_blocksize = SHA384_BLOCK_SIZE,
4076 				}
4077 		      },
4078 	 .cipher_info = {
4079 			 .alg = CIPHER_ALG_NONE,
4080 			 .mode = CIPHER_MODE_NONE,
4081 			 },
4082 	 .auth_info = {
4083 		       .alg = HASH_ALG_SHA384,
4084 		       .mode = HASH_MODE_HASH,
4085 		       },
4086 	 },
4087 	{
4088 	 .type = CRYPTO_ALG_TYPE_AHASH,
4089 	 .alg.hash = {
4090 		      .halg.digestsize = SHA384_DIGEST_SIZE,
4091 		      .halg.base = {
4092 				    .cra_name = "hmac(sha384)",
4093 				    .cra_driver_name = "hmac-sha384-iproc",
4094 				    .cra_blocksize = SHA384_BLOCK_SIZE,
4095 				}
4096 		      },
4097 	 .cipher_info = {
4098 			 .alg = CIPHER_ALG_NONE,
4099 			 .mode = CIPHER_MODE_NONE,
4100 			 },
4101 	 .auth_info = {
4102 		       .alg = HASH_ALG_SHA384,
4103 		       .mode = HASH_MODE_HMAC,
4104 		       },
4105 	 },
4106 	{
4107 	 .type = CRYPTO_ALG_TYPE_AHASH,
4108 	 .alg.hash = {
4109 		      .halg.digestsize = SHA512_DIGEST_SIZE,
4110 		      .halg.base = {
4111 				    .cra_name = "sha512",
4112 				    .cra_driver_name = "sha512-iproc",
4113 				    .cra_blocksize = SHA512_BLOCK_SIZE,
4114 				}
4115 		      },
4116 	 .cipher_info = {
4117 			 .alg = CIPHER_ALG_NONE,
4118 			 .mode = CIPHER_MODE_NONE,
4119 			 },
4120 	 .auth_info = {
4121 		       .alg = HASH_ALG_SHA512,
4122 		       .mode = HASH_MODE_HASH,
4123 		       },
4124 	 },
4125 	{
4126 	 .type = CRYPTO_ALG_TYPE_AHASH,
4127 	 .alg.hash = {
4128 		      .halg.digestsize = SHA512_DIGEST_SIZE,
4129 		      .halg.base = {
4130 				    .cra_name = "hmac(sha512)",
4131 				    .cra_driver_name = "hmac-sha512-iproc",
4132 				    .cra_blocksize = SHA512_BLOCK_SIZE,
4133 				}
4134 		      },
4135 	 .cipher_info = {
4136 			 .alg = CIPHER_ALG_NONE,
4137 			 .mode = CIPHER_MODE_NONE,
4138 			 },
4139 	 .auth_info = {
4140 		       .alg = HASH_ALG_SHA512,
4141 		       .mode = HASH_MODE_HMAC,
4142 		       },
4143 	 },
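	/*
	 * The SHA3 entries below are registered only on SPU2 version 2
	 * hardware; see the check in spu_register_ahash().
	 */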
4144 	{
4145 	 .type = CRYPTO_ALG_TYPE_AHASH,
4146 	 .alg.hash = {
4147 		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
4148 		      .halg.base = {
4149 				    .cra_name = "sha3-224",
4150 				    .cra_driver_name = "sha3-224-iproc",
4151 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
4152 				}
4153 		      },
4154 	 .cipher_info = {
4155 			 .alg = CIPHER_ALG_NONE,
4156 			 .mode = CIPHER_MODE_NONE,
4157 			 },
4158 	 .auth_info = {
4159 		       .alg = HASH_ALG_SHA3_224,
4160 		       .mode = HASH_MODE_HASH,
4161 		       },
4162 	 },
4163 	{
4164 	 .type = CRYPTO_ALG_TYPE_AHASH,
4165 	 .alg.hash = {
4166 		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
4167 		      .halg.base = {
4168 				    .cra_name = "hmac(sha3-224)",
4169 				    .cra_driver_name = "hmac-sha3-224-iproc",
4170 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
4171 				}
4172 		      },
4173 	 .cipher_info = {
4174 			 .alg = CIPHER_ALG_NONE,
4175 			 .mode = CIPHER_MODE_NONE,
4176 			 },
4177 	 .auth_info = {
4178 		       .alg = HASH_ALG_SHA3_224,
4179 		       .mode = HASH_MODE_HMAC
4180 		       },
4181 	 },
4182 	{
4183 	 .type = CRYPTO_ALG_TYPE_AHASH,
4184 	 .alg.hash = {
4185 		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
4186 		      .halg.base = {
4187 				    .cra_name = "sha3-256",
4188 				    .cra_driver_name = "sha3-256-iproc",
4189 				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
4190 				}
4191 		      },
4192 	 .cipher_info = {
4193 			 .alg = CIPHER_ALG_NONE,
4194 			 .mode = CIPHER_MODE_NONE,
4195 			 },
4196 	 .auth_info = {
4197 		       .alg = HASH_ALG_SHA3_256,
4198 		       .mode = HASH_MODE_HASH,
4199 		       },
4200 	 },
4201 	{
4202 	 .type = CRYPTO_ALG_TYPE_AHASH,
4203 	 .alg.hash = {
4204 		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
4205 		      .halg.base = {
4206 				    .cra_name = "hmac(sha3-256)",
4207 				    .cra_driver_name = "hmac-sha3-256-iproc",
4208 				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
4209 				}
4210 		      },
4211 	 .cipher_info = {
4212 			 .alg = CIPHER_ALG_NONE,
4213 			 .mode = CIPHER_MODE_NONE,
4214 			 },
4215 	 .auth_info = {
4216 		       .alg = HASH_ALG_SHA3_256,
4217 		       .mode = HASH_MODE_HMAC,
4218 		       },
4219 	 },
4220 	{
4221 	 .type = CRYPTO_ALG_TYPE_AHASH,
4222 	 .alg.hash = {
4223 		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
4224 		      .halg.base = {
4225 				    .cra_name = "sha3-384",
4226 				    .cra_driver_name = "sha3-384-iproc",
4227 				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
4228 				}
4229 		      },
4230 	 .cipher_info = {
4231 			 .alg = CIPHER_ALG_NONE,
4232 			 .mode = CIPHER_MODE_NONE,
4233 			 },
4234 	 .auth_info = {
4235 		       .alg = HASH_ALG_SHA3_384,
4236 		       .mode = HASH_MODE_HASH,
4237 		       },
4238 	 },
4239 	{
4240 	 .type = CRYPTO_ALG_TYPE_AHASH,
4241 	 .alg.hash = {
4242 		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
4243 		      .halg.base = {
4244 				    .cra_name = "hmac(sha3-384)",
4245 				    .cra_driver_name = "hmac-sha3-384-iproc",
4246 				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
4247 				}
4248 		      },
4249 	 .cipher_info = {
4250 			 .alg = CIPHER_ALG_NONE,
4251 			 .mode = CIPHER_MODE_NONE,
4252 			 },
4253 	 .auth_info = {
4254 		       .alg = HASH_ALG_SHA3_384,
4255 		       .mode = HASH_MODE_HMAC,
4256 		       },
4257 	 },
4258 	{
4259 	 .type = CRYPTO_ALG_TYPE_AHASH,
4260 	 .alg.hash = {
4261 		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
4262 		      .halg.base = {
4263 				    .cra_name = "sha3-512",
4264 				    .cra_driver_name = "sha3-512-iproc",
4265 				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
4266 				}
4267 		      },
4268 	 .cipher_info = {
4269 			 .alg = CIPHER_ALG_NONE,
4270 			 .mode = CIPHER_MODE_NONE,
4271 			 },
4272 	 .auth_info = {
4273 		       .alg = HASH_ALG_SHA3_512,
4274 		       .mode = HASH_MODE_HASH,
4275 		       },
4276 	 },
4277 	{
4278 	 .type = CRYPTO_ALG_TYPE_AHASH,
4279 	 .alg.hash = {
4280 		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
4281 		      .halg.base = {
4282 				    .cra_name = "hmac(sha3-512)",
4283 				    .cra_driver_name = "hmac-sha3-512-iproc",
4284 				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
4285 				}
4286 		      },
4287 	 .cipher_info = {
4288 			 .alg = CIPHER_ALG_NONE,
4289 			 .mode = CIPHER_MODE_NONE,
4290 			 },
4291 	 .auth_info = {
4292 		       .alg = HASH_ALG_SHA3_512,
4293 		       .mode = HASH_MODE_HMAC,
4294 		       },
4295 	 },
4296 	{
4297 	 .type = CRYPTO_ALG_TYPE_AHASH,
4298 	 .alg.hash = {
4299 		      .halg.digestsize = AES_BLOCK_SIZE,
4300 		      .halg.base = {
4301 				    .cra_name = "xcbc(aes)",
4302 				    .cra_driver_name = "xcbc-aes-iproc",
4303 				    .cra_blocksize = AES_BLOCK_SIZE,
4304 				}
4305 		      },
4306 	 .cipher_info = {
4307 			 .alg = CIPHER_ALG_NONE,
4308 			 .mode = CIPHER_MODE_NONE,
4309 			 },
4310 	 .auth_info = {
4311 		       .alg = HASH_ALG_AES,
4312 		       .mode = HASH_MODE_XCBC,
4313 		       },
4314 	 },
4315 	{
4316 	 .type = CRYPTO_ALG_TYPE_AHASH,
4317 	 .alg.hash = {
4318 		      .halg.digestsize = AES_BLOCK_SIZE,
4319 		      .halg.base = {
4320 				    .cra_name = "cmac(aes)",
4321 				    .cra_driver_name = "cmac-aes-iproc",
4322 				    .cra_blocksize = AES_BLOCK_SIZE,
4323 				}
4324 		      },
4325 	 .cipher_info = {
4326 			 .alg = CIPHER_ALG_NONE,
4327 			 .mode = CIPHER_MODE_NONE,
4328 			 },
4329 	 .auth_info = {
4330 		       .alg = HASH_ALG_AES,
4331 		       .mode = HASH_MODE_CMAC,
4332 		       },
4333 	 },
4334 };
4335 
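/*
 * generic_cra_init() - transform setup shared by the ablkcipher, ahash and
 * aead init hooks below: cache the driver_algs[] entry's cipher and auth
 * parameters in the transform context and query the SPU-specific callback
 * for the maximum payload a single hardware request can carry.
 */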
4336 static int generic_cra_init(struct crypto_tfm *tfm,
4337 			    struct iproc_alg_s *cipher_alg)
4338 {
4339 	struct spu_hw *spu = &iproc_priv.spu;
4340 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4341 	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4342 
4343 	flow_log("%s()\n", __func__);
4344 
4345 	ctx->alg = cipher_alg;
4346 	ctx->cipher = cipher_alg->cipher_info;
4347 	ctx->auth = cipher_alg->auth_info;
4348 	ctx->auth_first = cipher_alg->auth_first;
4349 	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4350 						    ctx->cipher.mode,
4351 						    blocksize);
4352 	ctx->fallback_cipher = NULL;
4353 
4354 	ctx->enckeylen = 0;
4355 	ctx->authkeylen = 0;
4356 
4357 	atomic_inc(&iproc_priv.stream_count);
4358 	atomic_inc(&iproc_priv.session_count);
4359 
4360 	return 0;
4361 }
4362 
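/*
 * Per-type transform init hooks. In addition to the common setup done in
 * generic_cra_init(), each hook reserves space for a struct iproc_reqctx_s
 * so the driver gets per-request state allocated with every request.
 */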
4363 static int ablkcipher_cra_init(struct crypto_tfm *tfm)
4364 {
4365 	struct crypto_alg *alg = tfm->__crt_alg;
4366 	struct iproc_alg_s *cipher_alg;
4367 
4368 	flow_log("%s()\n", __func__);
4369 
4370 	tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
4371 
4372 	cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
4373 	return generic_cra_init(tfm, cipher_alg);
4374 }
4375 
4376 static int ahash_cra_init(struct crypto_tfm *tfm)
4377 {
4378 	int err;
4379 	struct crypto_alg *alg = tfm->__crt_alg;
4380 	struct iproc_alg_s *cipher_alg;
4381 
4382 	cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4383 				  alg.hash);
4384 
4385 	err = generic_cra_init(tfm, cipher_alg);
4386 	flow_log("%s()\n", __func__);
4387 
4388 	/*
4389 	 * export state size has to be < 512 bytes. So don't include msg bufs
4390 	 * in state size.
4391 	 */
4392 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4393 				 sizeof(struct iproc_reqctx_s));
4394 
4395 	return err;
4396 }
4397 
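/*
 * For AEAD algorithms flagged CRYPTO_ALG_NEED_FALLBACK, init also allocates
 * a software implementation of the same algorithm so that requests the SPU
 * hardware cannot handle can be redirected to it.
 */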
4398 static int aead_cra_init(struct crypto_aead *aead)
4399 {
4400 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4401 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4402 	struct crypto_alg *alg = tfm->__crt_alg;
4403 	struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4404 	struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4405 						      alg.aead);
4406 
4407 	int err = generic_cra_init(tfm, cipher_alg);
4408 
4409 	flow_log("%s()\n", __func__);
4410 
4411 	crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4412 	ctx->is_esp = false;
4413 	ctx->salt_len = 0;
4414 	ctx->salt_offset = 0;
4415 
4416 	/* random first IV */
4417 	get_random_bytes(ctx->iv, MAX_IV_SIZE);
4418 	flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);
4419 
4420 	if (!err) {
4421 		if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4422 			flow_log("%s() creating fallback cipher\n", __func__);
4423 
4424 			ctx->fallback_cipher =
4425 			    crypto_alloc_aead(alg->cra_name, 0,
4426 					      CRYPTO_ALG_ASYNC |
4427 					      CRYPTO_ALG_NEED_FALLBACK);
4428 			if (IS_ERR(ctx->fallback_cipher)) {
4429 				pr_err("%s() Error: failed to allocate fallback for %s\n",
4430 				       __func__, alg->cra_name);
4431 				return PTR_ERR(ctx->fallback_cipher);
4432 			}
4433 		}
4434 	}
4435 
4436 	return err;
4437 }
4438 
4439 static void generic_cra_exit(struct crypto_tfm *tfm)
4440 {
4441 	atomic_dec(&iproc_priv.session_count);
4442 }
4443 
4444 static void aead_cra_exit(struct crypto_aead *aead)
4445 {
4446 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4447 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4448 
4449 	generic_cra_exit(tfm);
4450 
4451 	if (ctx->fallback_cipher) {
4452 		crypto_free_aead(ctx->fallback_cipher);
4453 		ctx->fallback_cipher = NULL;
4454 	}
4455 }
4456 
4457 /**
4458  * spu_functions_register() - Specify hardware-specific SPU functions based on
4459  * SPU type read from device tree.
4460  * @dev:	device structure
4461  * @spu_type:	SPU hardware generation
4462  * @spu_subtype: SPU hardware version
4463  */
4464 static void spu_functions_register(struct device *dev,
4465 				   enum spu_spu_type spu_type,
4466 				   enum spu_spu_subtype spu_subtype)
4467 {
4468 	struct spu_hw *spu = &iproc_priv.spu;
4469 
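	/*
	 * Populate the spu_hw ops table with either the SPU-M or SPU2
	 * implementations so the rest of the driver can build and parse
	 * SPU messages without checking the hardware generation again.
	 */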
4470 	if (spu_type == SPU_TYPE_SPUM) {
4471 		dev_dbg(dev, "Registering SPUM functions");
4472 		spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4473 		spu->spu_payload_length = spum_payload_length;
4474 		spu->spu_response_hdr_len = spum_response_hdr_len;
4475 		spu->spu_hash_pad_len = spum_hash_pad_len;
4476 		spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4477 		spu->spu_assoc_resp_len = spum_assoc_resp_len;
4478 		spu->spu_aead_ivlen = spum_aead_ivlen;
4479 		spu->spu_hash_type = spum_hash_type;
4480 		spu->spu_digest_size = spum_digest_size;
4481 		spu->spu_create_request = spum_create_request;
4482 		spu->spu_cipher_req_init = spum_cipher_req_init;
4483 		spu->spu_cipher_req_finish = spum_cipher_req_finish;
4484 		spu->spu_request_pad = spum_request_pad;
4485 		spu->spu_tx_status_len = spum_tx_status_len;
4486 		spu->spu_rx_status_len = spum_rx_status_len;
4487 		spu->spu_status_process = spum_status_process;
4488 		spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4489 		spu->spu_ccm_update_iv = spum_ccm_update_iv;
4490 		spu->spu_wordalign_padlen = spum_wordalign_padlen;
4491 		if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4492 			spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4493 		else
4494 			spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4495 	} else {
4496 		dev_dbg(dev, "Registering SPU2 functions");
4497 		spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4498 		spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4499 		spu->spu_payload_length = spu2_payload_length;
4500 		spu->spu_response_hdr_len = spu2_response_hdr_len;
4501 		spu->spu_hash_pad_len = spu2_hash_pad_len;
4502 		spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4503 		spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4504 		spu->spu_aead_ivlen = spu2_aead_ivlen;
4505 		spu->spu_hash_type = spu2_hash_type;
4506 		spu->spu_digest_size = spu2_digest_size;
4507 		spu->spu_create_request = spu2_create_request;
4508 		spu->spu_cipher_req_init = spu2_cipher_req_init;
4509 		spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4510 		spu->spu_request_pad = spu2_request_pad;
4511 		spu->spu_tx_status_len = spu2_tx_status_len;
4512 		spu->spu_rx_status_len = spu2_rx_status_len;
4513 		spu->spu_status_process = spu2_status_process;
4514 		spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4515 		spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4516 		spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4517 	}
4518 }
4519 
4520 /**
4521  * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox
4522  * channel for the SPU being probed.
4523  * @dev:  SPU driver device structure
4524  *
4525  * Return: 0 if successful
4526  *	   < 0 otherwise
4527  */
4528 static int spu_mb_init(struct device *dev)
4529 {
4530 	struct mbox_client *mcl = &iproc_priv.mcl[iproc_priv.spu.num_spu];
4531 	int err;
4532 
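	/*
	 * Non-blocking mailbox client: transmit never sleeps and completions
	 * are reported through the rx_callback rather than a tx_done handler.
	 */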
4533 	mcl->dev = dev;
4534 	mcl->tx_block = false;
4535 	mcl->tx_tout = 0;
4536 	mcl->knows_txdone = false;
4537 	mcl->rx_callback = spu_rx_callback;
4538 	mcl->tx_done = NULL;
4539 
4540 	iproc_priv.mbox[iproc_priv.spu.num_spu] =
4541 			mbox_request_channel(mcl, 0);
4542 	if (IS_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu])) {
4543 		err = (int)PTR_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu]);
4544 		dev_err(dev,
4545 			"Mbox channel %d request failed with err %d",
4546 			iproc_priv.spu.num_spu, err);
4547 		iproc_priv.mbox[iproc_priv.spu.num_spu] = NULL;
4548 		return err;
4549 	}
4550 
4551 	return 0;
4552 }
4553 
4554 static void spu_mb_release(struct platform_device *pdev)
4555 {
4556 	int i;
4557 
4558 	for (i = 0; i < iproc_priv.spu.num_spu; i++)
4559 		mbox_free_channel(iproc_priv.mbox[i]);
4560 }
4561 
4562 static void spu_counters_init(void)
4563 {
4564 	int i;
4565 	int j;
4566 
4567 	atomic_set(&iproc_priv.session_count, 0);
4568 	atomic_set(&iproc_priv.stream_count, 0);
4569 	atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_spu);
4570 	atomic64_set(&iproc_priv.bytes_in, 0);
4571 	atomic64_set(&iproc_priv.bytes_out, 0);
4572 	for (i = 0; i < SPU_OP_NUM; i++) {
4573 		atomic_set(&iproc_priv.op_counts[i], 0);
4574 		atomic_set(&iproc_priv.setkey_cnt[i], 0);
4575 	}
4576 	for (i = 0; i < CIPHER_ALG_LAST; i++)
4577 		for (j = 0; j < CIPHER_MODE_LAST; j++)
4578 			atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4579 
4580 	for (i = 0; i < HASH_ALG_LAST; i++) {
4581 		atomic_set(&iproc_priv.hash_cnt[i], 0);
4582 		atomic_set(&iproc_priv.hmac_cnt[i], 0);
4583 	}
4584 	for (i = 0; i < AEAD_TYPE_LAST; i++)
4585 		atomic_set(&iproc_priv.aead_cnt[i], 0);
4586 
4587 	atomic_set(&iproc_priv.mb_no_spc, 0);
4588 	atomic_set(&iproc_priv.mb_send_fail, 0);
4589 	atomic_set(&iproc_priv.bad_icv, 0);
4590 }
4591 
4592 static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
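/*
 * Registration helpers, one per crypto API algorithm type. Each fills in
 * the cra_* boilerplate shared by every driver_algs[] entry of that type
 * (module, priority, context size, init/exit callbacks) and then registers
 * the algorithm with the kernel crypto API. Entries the current hardware
 * cannot support are silently skipped.
 */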
4593 {
4594 	struct spu_hw *spu = &iproc_priv.spu;
4595 	struct crypto_alg *crypto = &driver_alg->alg.crypto;
4596 	int err;
4597 
4598 	/* SPU2 does not support RC4 */
4599 	if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
4600 	    (spu->spu_type == SPU_TYPE_SPU2))
4601 		return 0;
4602 
4603 	crypto->cra_module = THIS_MODULE;
4604 	crypto->cra_priority = cipher_pri;
4605 	crypto->cra_alignmask = 0;
4606 	crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
4607 	INIT_LIST_HEAD(&crypto->cra_list);
4608 
4609 	crypto->cra_init = ablkcipher_cra_init;
4610 	crypto->cra_exit = generic_cra_exit;
4611 	crypto->cra_type = &crypto_ablkcipher_type;
4612 	crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4613 				CRYPTO_ALG_KERN_DRIVER_ONLY;
4614 
4615 	crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
4616 	crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
4617 	crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
4618 
4619 	err = crypto_register_alg(crypto);
4620 	/* Mark alg as having been registered, if successful */
4621 	if (err == 0)
4622 		driver_alg->registered = true;
4623 	pr_debug("  registered ablkcipher %s\n", crypto->cra_driver_name);
4624 	return err;
4625 }
4626 
4627 static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4628 {
4629 	struct spu_hw *spu = &iproc_priv.spu;
4630 	struct ahash_alg *hash = &driver_alg->alg.hash;
4631 	int err;
4632 
4633 	/* AES-XCBC is the only AES hash type currently supported on SPU-M */
4634 	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4635 	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4636 	    (spu->spu_type == SPU_TYPE_SPUM))
4637 		return 0;
4638 
4639 	/* SHA3 algorithms are supported only on SPU2 version 2 hardware */
4640 	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4641 	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4642 		return 0;
4643 
4644 	hash->halg.base.cra_module = THIS_MODULE;
4645 	hash->halg.base.cra_priority = hash_pri;
4646 	hash->halg.base.cra_alignmask = 0;
4647 	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4648 	hash->halg.base.cra_init = ahash_cra_init;
4649 	hash->halg.base.cra_exit = generic_cra_exit;
4650 	hash->halg.base.cra_type = &crypto_ahash_type;
4651 	hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
4652 	hash->halg.statesize = sizeof(struct spu_hash_export_s);
4653 
4654 	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4655 		hash->setkey = ahash_setkey;
4656 		hash->init = ahash_init;
4657 		hash->update = ahash_update;
4658 		hash->final = ahash_final;
4659 		hash->finup = ahash_finup;
4660 		hash->digest = ahash_digest;
4661 	} else {
4662 		hash->setkey = ahash_hmac_setkey;
4663 		hash->init = ahash_hmac_init;
4664 		hash->update = ahash_hmac_update;
4665 		hash->final = ahash_hmac_final;
4666 		hash->finup = ahash_hmac_finup;
4667 		hash->digest = ahash_hmac_digest;
4668 	}
4669 	hash->export = ahash_export;
4670 	hash->import = ahash_import;
4671 
4672 	err = crypto_register_ahash(hash);
4673 	/* Mark alg as having been registered, if successful */
4674 	if (err == 0)
4675 		driver_alg->registered = true;
4676 	pr_debug("  registered ahash %s\n",
4677 		 hash->halg.base.cra_driver_name);
4678 	return err;
4679 }
4680 
4681 static int spu_register_aead(struct iproc_alg_s *driver_alg)
4682 {
4683 	struct aead_alg *aead = &driver_alg->alg.aead;
4684 	int err;
4685 
4686 	aead->base.cra_module = THIS_MODULE;
4687 	aead->base.cra_priority = aead_pri;
4688 	aead->base.cra_alignmask = 0;
4689 	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4690 	INIT_LIST_HEAD(&aead->base.cra_list);
4691 
4692 	aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
4693 	/* setkey set in alg initialization */
4694 	aead->setauthsize = aead_setauthsize;
4695 	aead->encrypt = aead_encrypt;
4696 	aead->decrypt = aead_decrypt;
4697 	aead->init = aead_cra_init;
4698 	aead->exit = aead_cra_exit;
4699 
4700 	err = crypto_register_aead(aead);
4701 	/* Mark alg as having been registered, if successful */
4702 	if (err == 0)
4703 		driver_alg->registered = true;
4704 	pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
4705 	return err;
4706 }
4707 
4708 /* register crypto algorithms the device supports */
4709 static int spu_algs_register(struct device *dev)
4710 {
4711 	int i, j;
4712 	int err;
4713 
4714 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4715 		switch (driver_algs[i].type) {
4716 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4717 			err = spu_register_ablkcipher(&driver_algs[i]);
4718 			break;
4719 		case CRYPTO_ALG_TYPE_AHASH:
4720 			err = spu_register_ahash(&driver_algs[i]);
4721 			break;
4722 		case CRYPTO_ALG_TYPE_AEAD:
4723 			err = spu_register_aead(&driver_algs[i]);
4724 			break;
4725 		default:
4726 			dev_err(dev,
4727 				"iproc-crypto: unknown alg type: %d",
4728 				driver_algs[i].type);
4729 			err = -EINVAL;
4730 		}
4731 
4732 		if (err) {
4733 			dev_err(dev, "alg registration failed with error %d\n",
4734 				err);
4735 			goto err_algs;
4736 		}
4737 	}
4738 
4739 	return 0;
4740 
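	/*
	 * Unwind: unregister everything registered before the failure.
	 * Entries that were intentionally skipped for this hardware were
	 * never marked registered and are left alone.
	 */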
4741 err_algs:
4742 	for (j = 0; j < i; j++) {
4743 		/* Skip any algorithm not registered */
4744 		if (!driver_algs[j].registered)
4745 			continue;
4746 		switch (driver_algs[j].type) {
4747 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4748 			crypto_unregister_alg(&driver_algs[j].alg.crypto);
4749 			driver_algs[j].registered = false;
4750 			break;
4751 		case CRYPTO_ALG_TYPE_AHASH:
4752 			crypto_unregister_ahash(&driver_algs[j].alg.hash);
4753 			driver_algs[j].registered = false;
4754 			break;
4755 		case CRYPTO_ALG_TYPE_AEAD:
4756 			crypto_unregister_aead(&driver_algs[j].alg.aead);
4757 			driver_algs[j].registered = false;
4758 			break;
4759 		}
4760 	}
4761 	return err;
4762 }
4763 
4764 /* ==================== Kernel Platform API ==================== */
4765 
4766 static struct spu_type_subtype spum_ns2_types = {
4767 	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
4768 };
4769 
4770 static struct spu_type_subtype spum_nsp_types = {
4771 	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
4772 };
4773 
4774 static struct spu_type_subtype spu2_types = {
4775 	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
4776 };
4777 
4778 static struct spu_type_subtype spu2_v2_types = {
4779 	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
4780 };
4781 
4782 static const struct of_device_id bcm_spu_dt_ids[] = {
4783 	{
4784 		.compatible = "brcm,spum-crypto",
4785 		.data = &spum_ns2_types,
4786 	},
4787 	{
4788 		.compatible = "brcm,spum-nsp-crypto",
4789 		.data = &spum_nsp_types,
4790 	},
4791 	{
4792 		.compatible = "brcm,spu2-crypto",
4793 		.data = &spu2_types,
4794 	},
4795 	{
4796 		.compatible = "brcm,spu2-v2-crypto",
4797 		.data = &spu2_v2_types,
4798 	},
4799 	{ /* sentinel */ }
4800 };
4801 
4802 MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
4803 
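/*
 * spu_dt_read() - Determine the SPU hardware type/subtype from the matched
 * device tree entry and map this SPU's control registers. All SPU instances
 * in a system must be of the same type.
 */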
4804 static int spu_dt_read(struct platform_device *pdev)
4805 {
4806 	struct device *dev = &pdev->dev;
4807 	struct spu_hw *spu = &iproc_priv.spu;
4808 	struct resource *spu_ctrl_regs;
4809 	const struct of_device_id *match;
4810 	const struct spu_type_subtype *matched_spu_type;
4811 	void __iomem *spu_reg_vbase[MAX_SPUS];
4812 	int err;
4813 
4814 	match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
4815 	matched_spu_type = match->data;
4816 
4817 	if (iproc_priv.spu.num_spu > 0) {
4818 		/* If this is 2nd or later SPU, make sure it's same type */
4819 		if ((spu->spu_type != matched_spu_type->type) ||
4820 		    (spu->spu_subtype != matched_spu_type->subtype)) {
4821 			err = -EINVAL;
4822 			dev_err(&pdev->dev, "Multiple SPU types not allowed");
4823 			return err;
4824 		}
4825 	} else {
4826 		/* Record type of first SPU */
4827 		spu->spu_type = matched_spu_type->type;
4828 		spu->spu_subtype = matched_spu_type->subtype;
4829 	}
4830 
4831 	/* Get and map SPU registers */
4832 	spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4833 	if (!spu_ctrl_regs) {
4834 		err = -EINVAL;
4835 		dev_err(&pdev->dev, "Invalid/missing registers for SPU\n");
4836 		return err;
4837 	}
4838 
4839 	spu_reg_vbase[iproc_priv.spu.num_spu] =
4840 				devm_ioremap_resource(dev, spu_ctrl_regs);
4841 	if (IS_ERR(spu_reg_vbase[iproc_priv.spu.num_spu])) {
4842 		err = PTR_ERR(spu_reg_vbase[iproc_priv.spu.num_spu]);
4843 		dev_err(&pdev->dev, "Failed to map registers: %d\n",
4844 			err);
4845 		spu_reg_vbase[iproc_priv.spu.num_spu] = NULL;
4846 		return err;
4847 	}
4848 
4849 	dev_dbg(dev, "SPU %d detected.", iproc_priv.spu.num_spu);
4850 
4851 	spu->reg_vbase[iproc_priv.spu.num_spu] = spu_reg_vbase[iproc_priv.spu.num_spu];
4852 
4853 	return 0;
4854 }
4855 
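/*
 * bcm_spu_probe() - Called once per SPU instance. The first successful probe
 * also registers the SPU callbacks, counters, debugfs entries and crypto
 * algorithms; probes of additional SPUs only take another mailbox channel
 * and bump num_spu.
 */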
4856 int bcm_spu_probe(struct platform_device *pdev)
4857 {
4858 	struct device *dev = &pdev->dev;
4859 	struct spu_hw *spu = &iproc_priv.spu;
4860 	int err = 0;
4861 
4862 	iproc_priv.pdev[iproc_priv.spu.num_spu] = pdev;
4863 	platform_set_drvdata(iproc_priv.pdev[iproc_priv.spu.num_spu],
4864 			     &iproc_priv);
4865 
4866 	err = spu_dt_read(pdev);
4867 	if (err < 0)
4868 		goto failure;
4869 
4870 	err = spu_mb_init(&pdev->dev);
4871 	if (err < 0)
4872 		goto failure;
4873 
4874 	iproc_priv.spu.num_spu++;
4875 
4876 	/* If already initialized, we've just added another SPU and are done */
4877 	if (iproc_priv.inited)
4878 		return 0;
4879 
4880 	if (spu->spu_type == SPU_TYPE_SPUM)
4881 		iproc_priv.bcm_hdr_len = 8;
4882 	else if (spu->spu_type == SPU_TYPE_SPU2)
4883 		iproc_priv.bcm_hdr_len = 0;
4884 
4885 	spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
4886 
4887 	spu_counters_init();
4888 
4889 	spu_setup_debugfs();
4890 
4891 	err = spu_algs_register(dev);
4892 	if (err < 0)
4893 		goto fail_reg;
4894 
4895 	iproc_priv.inited = true;
4896 
4897 	return 0;
4898 
4899 fail_reg:
4900 	spu_free_debugfs();
4901 failure:
4902 	spu_mb_release(pdev);
4903 	dev_err(dev, "%s failed with error %d.\n", __func__, err);
4904 
4905 	return err;
4906 }
4907 
4908 int bcm_spu_remove(struct platform_device *pdev)
4909 {
4910 	int i;
4911 	struct device *dev = &pdev->dev;
4912 	char *cdn;
4913 
4914 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4915 		/*
4916 		 * Not all algorithms were registered, depending on the SPU
4917 		 * hardware type and version, so skip any algorithm that was
4918 		 * never registered.
4919 		 */
4920 		if (!driver_algs[i].registered)
4921 			continue;
4922 
4923 		switch (driver_algs[i].type) {
4924 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4925 			crypto_unregister_alg(&driver_algs[i].alg.crypto);
4926 			dev_dbg(dev, "  unregistered cipher %s\n",
4927 				driver_algs[i].alg.crypto.cra_driver_name);
4928 			driver_algs[i].registered = false;
4929 			break;
4930 		case CRYPTO_ALG_TYPE_AHASH:
4931 			crypto_unregister_ahash(&driver_algs[i].alg.hash);
4932 			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
4933 			dev_dbg(dev, "  unregistered hash %s\n", cdn);
4934 			driver_algs[i].registered = false;
4935 			break;
4936 		case CRYPTO_ALG_TYPE_AEAD:
4937 			crypto_unregister_aead(&driver_algs[i].alg.aead);
4938 			dev_dbg(dev, "  unregistered aead %s\n",
4939 				driver_algs[i].alg.aead.base.cra_driver_name);
4940 			driver_algs[i].registered = false;
4941 			break;
4942 		}
4943 	}
4944 	spu_free_debugfs();
4945 	spu_mb_release(pdev);
4946 	return 0;
4947 }
4948 
4949 /* ===== Kernel Module API ===== */
4950 
4951 static struct platform_driver bcm_spu_pdriver = {
4952 	.driver = {
4953 		   .name = "brcm-spu-crypto",
4954 		   .of_match_table = of_match_ptr(bcm_spu_dt_ids),
4955 		   },
4956 	.probe = bcm_spu_probe,
4957 	.remove = bcm_spu_remove,
4958 };
4959 module_platform_driver(bcm_spu_pdriver);
4960 
4961 MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
4962 MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
4963 MODULE_LICENSE("GPL v2");
4964