1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_platform.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/io.h>
38 #include <linux/spinlock.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/slab.h>
41 
42 #include <crypto/algapi.h>
43 #include <crypto/aes.h>
44 #include <crypto/des.h>
45 #include <crypto/sha.h>
46 #include <crypto/md5.h>
47 #include <crypto/aead.h>
48 #include <crypto/authenc.h>
49 #include <crypto/skcipher.h>
50 #include <crypto/hash.h>
51 #include <crypto/internal/hash.h>
52 #include <crypto/scatterwalk.h>
53 
54 #include "talitos.h"
55 
56 #define TALITOS_TIMEOUT 100000
57 #define TALITOS_MAX_DATA_LEN 65535
58 
59 #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
60 #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
61 #define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
62 
63 /* descriptor pointer entry */
64 struct talitos_ptr {
65 	__be16 len;	/* length */
66 	u8 j_extent;	/* jump to sg link table and/or extent */
67 	u8 eptr;	/* extended address */
68 	__be32 ptr;	/* address */
69 };
70 
71 static const struct talitos_ptr zero_entry = {
72 	.len = 0,
73 	.j_extent = 0,
74 	.eptr = 0,
75 	.ptr = 0
76 };
77 
78 /* descriptor */
79 struct talitos_desc {
80 	__be32 hdr;			/* header high bits */
81 	__be32 hdr_lo;			/* header low bits */
82 	struct talitos_ptr ptr[7];	/* ptr/len pair array */
83 };
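
/*
 * Editorial example (illustrative, not from the original source):
 * ipsec_esp() below fills the seven pointer slots as:
 *
 *	ptr[0]	hmac key	ptr[4]	cipher in
 *	ptr[1]	hmac data	ptr[5]	cipher out
 *	ptr[2]	cipher iv	ptr[6]	iv out
 *	ptr[3]	cipher key
 */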
84 
85 /**
86  * talitos_request - descriptor submission request
87  * @desc: descriptor pointer (kernel virtual)
88  * @dma_desc: descriptor's physical bus address
89  * @callback: whom to call when descriptor processing is done
90  * @context: caller context (optional)
91  */
92 struct talitos_request {
93 	struct talitos_desc *desc;
94 	dma_addr_t dma_desc;
95 	void (*callback) (struct device *dev, struct talitos_desc *desc,
96 	                  void *context, int error);
97 	void *context;
98 };
99 
100 /* per-channel fifo management */
101 struct talitos_channel {
102 	void __iomem *reg;
103 
104 	/* request fifo */
105 	struct talitos_request *fifo;
106 
107 	/* number of requests pending in channel h/w fifo */
108 	atomic_t submit_count ____cacheline_aligned;
109 
110 	/* request submission (head) lock */
111 	spinlock_t head_lock ____cacheline_aligned;
112 	/* index to next free descriptor request */
113 	int head;
114 
115 	/* request release (tail) lock */
116 	spinlock_t tail_lock ____cacheline_aligned;
117 	/* index to next in-progress/done descriptor request */
118 	int tail;
119 };
120 
121 struct talitos_private {
122 	struct device *dev;
123 	struct platform_device *ofdev;
124 	void __iomem *reg;
125 	int irq[2];
126 
127 	/* SEC global registers lock  */
128 	spinlock_t reg_lock ____cacheline_aligned;
129 
130 	/* SEC version geometry (from device tree node) */
131 	unsigned int num_channels;
132 	unsigned int chfifo_len;
133 	unsigned int exec_units;
134 	unsigned int desc_types;
135 
136 	/* SEC Compatibility info */
137 	unsigned long features;
138 
139 	/*
140 	 * length of the request fifo
141 	 * fifo_len is chfifo_len rounded up to next power of 2
142 	 * so we can use bitwise ops to wrap
143 	 */
144 	unsigned int fifo_len;
145 
146 	struct talitos_channel *chan;
147 
148 	/* next channel to be assigned the next incoming descriptor */
149 	atomic_t last_chan ____cacheline_aligned;
150 
151 	/* request callback tasklet */
152 	struct tasklet_struct done_task[2];
153 
154 	/* list of registered algorithms */
155 	struct list_head alg_list;
156 
157 	/* hwrng device */
158 	struct hwrng rng;
159 };
160 
161 /* .features flag */
162 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
163 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
164 #define TALITOS_FTR_SHA224_HWINIT 0x00000004
165 #define TALITOS_FTR_HMAC_OK 0x00000008
166 
167 static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
168 {
169 	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
170 	talitos_ptr->eptr = upper_32_bits(dma_addr);
171 }
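
/*
 * Worked example (illustrative): a 36-bit bus address such as
 * 0x9ABCD1234 is split so the low 32 bits land in @ptr and the high
 * bits in @eptr:
 *
 *	struct talitos_ptr p;
 *
 *	to_talitos_ptr(&p, 0x9ABCD1234ULL);
 *	// p.ptr == cpu_to_be32(0xABCD1234), p.eptr == 0x09
 */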
172 
173 /*
174  * map virtual single (contiguous) pointer to h/w descriptor pointer
175  */
176 static void map_single_talitos_ptr(struct device *dev,
177 				   struct talitos_ptr *talitos_ptr,
178 				   unsigned short len, void *data,
179 				   unsigned char extent,
180 				   enum dma_data_direction dir)
181 {
182 	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
183 
184 	talitos_ptr->len = cpu_to_be16(len);
185 	to_talitos_ptr(talitos_ptr, dma_addr);
186 	talitos_ptr->j_extent = extent;
187 }
188 
189 /*
190  * unmap bus single (contiguous) h/w descriptor pointer
191  */
192 static void unmap_single_talitos_ptr(struct device *dev,
193 				     struct talitos_ptr *talitos_ptr,
194 				     enum dma_data_direction dir)
195 {
196 	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
197 			 be16_to_cpu(talitos_ptr->len), dir);
198 }
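
/*
 * Usage sketch (drawn from ipsec_esp() below): single contiguous
 * buffers such as keys and IVs are mapped straight into a descriptor
 * pointer, e.g. the HMAC key into slot 0:
 *
 *	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen,
 *			       &ctx->key, 0, DMA_TO_DEVICE);
 */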
199 
200 static int reset_channel(struct device *dev, int ch)
201 {
202 	struct talitos_private *priv = dev_get_drvdata(dev);
203 	unsigned int timeout = TALITOS_TIMEOUT;
204 
205 	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);
206 
207 	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
208 	       && --timeout)
209 		cpu_relax();
210 
211 	if (timeout == 0) {
212 		dev_err(dev, "failed to reset channel %d\n", ch);
213 		return -EIO;
214 	}
215 
216 	/* enable 36-bit addressing, done writeback, and done IRQs */
217 	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
218 		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
219 
220 	/* and ICCR writeback, if available */
221 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
222 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
223 		          TALITOS_CCCR_LO_IWSE);
224 
225 	return 0;
226 }
227 
228 static int reset_device(struct device *dev)
229 {
230 	struct talitos_private *priv = dev_get_drvdata(dev);
231 	unsigned int timeout = TALITOS_TIMEOUT;
232 	u32 mcr = TALITOS_MCR_SWR;
233 
234 	setbits32(priv->reg + TALITOS_MCR, mcr);
235 
236 	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
237 	       && --timeout)
238 		cpu_relax();
239 
240 	if (priv->irq[1]) {
241 		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
242 		setbits32(priv->reg + TALITOS_MCR, mcr);
243 	}
244 
245 	if (timeout == 0) {
246 		dev_err(dev, "failed to reset device\n");
247 		return -EIO;
248 	}
249 
250 	return 0;
251 }
252 
253 /*
254  * Reset and initialize the device
255  */
256 static int init_device(struct device *dev)
257 {
258 	struct talitos_private *priv = dev_get_drvdata(dev);
259 	int ch, err;
260 
261 	/*
262 	 * Master reset
263 	 * per the errata documentation: certain SEC interrupts
264 	 * are not fully cleared by writing the MCR:SWR bit once,
265 	 * so set the bit twice to completely reset
266 	 */
267 	err = reset_device(dev);
268 	if (err)
269 		return err;
270 
271 	err = reset_device(dev);
272 	if (err)
273 		return err;
274 
275 	/* reset channels */
276 	for (ch = 0; ch < priv->num_channels; ch++) {
277 		err = reset_channel(dev, ch);
278 		if (err)
279 			return err;
280 	}
281 
282 	/* enable channel done and error interrupts */
283 	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
284 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
285 
286 	/* disable integrity check error interrupts (use writeback instead) */
287 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
288 		setbits32(priv->reg + TALITOS_MDEUICR_LO,
289 		          TALITOS_MDEUICR_LO_ICE);
290 
291 	return 0;
292 }
293 
294 /**
295  * talitos_submit - submits a descriptor to the device for processing
296  * @dev:	the SEC device to be used
297  * @ch:		the SEC device channel to be used
298  * @desc:	the descriptor to be processed by the device
299  * @callback:	whom to call when processing is complete
300  * @context:	a handle for use by caller (optional)
301  *
302  * desc must contain valid dma-mapped (bus physical) address pointers.
303  * callback must check both err and the feedback field in the
304  * descriptor header for device processing status.
305  */
306 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
307 			  void (*callback)(struct device *dev,
308 					   struct talitos_desc *desc,
309 					   void *context, int error),
310 			  void *context)
311 {
312 	struct talitos_private *priv = dev_get_drvdata(dev);
313 	struct talitos_request *request;
314 	unsigned long flags;
315 	int head;
316 
317 	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
318 
319 	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
320 		/* h/w fifo is full */
321 		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
322 		return -EAGAIN;
323 	}
324 
325 	head = priv->chan[ch].head;
326 	request = &priv->chan[ch].fifo[head];
327 
328 	/* map descriptor and save caller data */
329 	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
330 					   DMA_BIDIRECTIONAL);
331 	request->callback = callback;
332 	request->context = context;
333 
334 	/* increment fifo head */
335 	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
336 
337 	smp_wmb();
338 	request->desc = desc;
339 
340 	/* GO! */
341 	wmb();
342 	out_be32(priv->chan[ch].reg + TALITOS_FF,
343 		 upper_32_bits(request->dma_desc));
344 	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
345 		 lower_32_bits(request->dma_desc));
346 
347 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
348 
349 	return -EINPROGRESS;
350 }
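
/*
 * Caller sketch (illustrative; my_done and the cleanup comment are
 * hypothetical): submitters treat -EINPROGRESS as success and -EAGAIN
 * as a full channel fifo, e.g.:
 *
 *	ret = talitos_submit(dev, ctx->ch, desc, my_done, areq);
 *	if (ret != -EINPROGRESS) {
 *		// unmap the descriptor and free the request here
 *		return ret;
 *	}
 *
 * Note the head index above wraps with a mask rather than a modulo,
 * which is why fifo_len is rounded up to a power of two.
 */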
351 
352 /*
353  * process completed requests; on a channel error, report it via the callback
354  */
355 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
356 {
357 	struct talitos_private *priv = dev_get_drvdata(dev);
358 	struct talitos_request *request, saved_req;
359 	unsigned long flags;
360 	int tail, status;
361 
362 	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
363 
364 	tail = priv->chan[ch].tail;
365 	while (priv->chan[ch].fifo[tail].desc) {
366 		request = &priv->chan[ch].fifo[tail];
367 
368 		/* descriptors with their done bits set don't get the error */
369 		rmb();
370 		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
371 			status = 0;
372 		else
373 			if (!error)
374 				break;
375 			else
376 				status = error;
377 
378 		dma_unmap_single(dev, request->dma_desc,
379 				 sizeof(struct talitos_desc),
380 				 DMA_BIDIRECTIONAL);
381 
382 		/* copy entries so we can call callback outside lock */
383 		saved_req.desc = request->desc;
384 		saved_req.callback = request->callback;
385 		saved_req.context = request->context;
386 
387 		/* release request entry in fifo */
388 		smp_wmb();
389 		request->desc = NULL;
390 
391 		/* increment fifo tail */
392 		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
393 
394 		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
395 
396 		atomic_dec(&priv->chan[ch].submit_count);
397 
398 		saved_req.callback(dev, saved_req.desc, saved_req.context,
399 				   status);
400 		/* channel may resume processing in single desc error case */
401 		if (error && !reset_ch && status == error)
402 			return;
403 		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
404 		tail = priv->chan[ch].tail;
405 	}
406 
407 	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
408 }
409 
410 /*
411  * process completed requests for channels that have done status
412  */
413 #define DEF_TALITOS_DONE(name, ch_done_mask)				\
414 static void talitos_done_##name(unsigned long data)			\
415 {									\
416 	struct device *dev = (struct device *)data;			\
417 	struct talitos_private *priv = dev_get_drvdata(dev);		\
418 	unsigned long flags;						\
419 									\
420 	if (ch_done_mask & 1)						\
421 		flush_channel(dev, 0, 0, 0);				\
422 	if (priv->num_channels == 1)					\
423 		goto out;						\
424 	if (ch_done_mask & (1 << 2))					\
425 		flush_channel(dev, 1, 0, 0);				\
426 	if (ch_done_mask & (1 << 4))					\
427 		flush_channel(dev, 2, 0, 0);				\
428 	if (ch_done_mask & (1 << 6))					\
429 		flush_channel(dev, 3, 0, 0);				\
430 									\
431 out:									\
432 	/* At this point, all completed channels have been processed */	\
433 	/* Unmask done interrupts for channels completed later on. */	\
434 	spin_lock_irqsave(&priv->reg_lock, flags);			\
435 	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
436 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
437 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
438 }
439 DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
440 DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
441 DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
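
/*
 * Editorial note (illustrative): each DEF_TALITOS_DONE() expansion
 * defines one tasklet body; e.g. DEF_TALITOS_DONE(4ch,
 * TALITOS_ISR_4CHDONE) defines talitos_done_4ch(), which covers all
 * four channels, while the ch0_2/ch1_3 variants appear intended to
 * split the work across the two IRQ lines in priv->irq[].
 */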
442 
443 /*
444  * locate current (offending) descriptor
445  */
446 static u32 current_desc_hdr(struct device *dev, int ch)
447 {
448 	struct talitos_private *priv = dev_get_drvdata(dev);
449 	int tail = priv->chan[ch].tail;
450 	dma_addr_t cur_desc;
451 
452 	cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
453 
454 	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
455 		tail = (tail + 1) & (priv->fifo_len - 1);
456 		if (tail == priv->chan[ch].tail) {
457 			dev_err(dev, "couldn't locate current descriptor\n");
458 			return 0;
459 		}
460 	}
461 
462 	return priv->chan[ch].fifo[tail].desc->hdr;
463 }
464 
465 /*
466  * user diagnostics; report root cause of error based on execution unit status
467  */
468 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
469 {
470 	struct talitos_private *priv = dev_get_drvdata(dev);
471 	int i;
472 
473 	if (!desc_hdr)
474 		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
475 
476 	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
477 	case DESC_HDR_SEL0_AFEU:
478 		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
479 			in_be32(priv->reg + TALITOS_AFEUISR),
480 			in_be32(priv->reg + TALITOS_AFEUISR_LO));
481 		break;
482 	case DESC_HDR_SEL0_DEU:
483 		dev_err(dev, "DEUISR 0x%08x_%08x\n",
484 			in_be32(priv->reg + TALITOS_DEUISR),
485 			in_be32(priv->reg + TALITOS_DEUISR_LO));
486 		break;
487 	case DESC_HDR_SEL0_MDEUA:
488 	case DESC_HDR_SEL0_MDEUB:
489 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
490 			in_be32(priv->reg + TALITOS_MDEUISR),
491 			in_be32(priv->reg + TALITOS_MDEUISR_LO));
492 		break;
493 	case DESC_HDR_SEL0_RNG:
494 		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
495 			in_be32(priv->reg + TALITOS_RNGUISR),
496 			in_be32(priv->reg + TALITOS_RNGUISR_LO));
497 		break;
498 	case DESC_HDR_SEL0_PKEU:
499 		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
500 			in_be32(priv->reg + TALITOS_PKEUISR),
501 			in_be32(priv->reg + TALITOS_PKEUISR_LO));
502 		break;
503 	case DESC_HDR_SEL0_AESU:
504 		dev_err(dev, "AESUISR 0x%08x_%08x\n",
505 			in_be32(priv->reg + TALITOS_AESUISR),
506 			in_be32(priv->reg + TALITOS_AESUISR_LO));
507 		break;
508 	case DESC_HDR_SEL0_CRCU:
509 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
510 			in_be32(priv->reg + TALITOS_CRCUISR),
511 			in_be32(priv->reg + TALITOS_CRCUISR_LO));
512 		break;
513 	case DESC_HDR_SEL0_KEU:
514 		dev_err(dev, "KEUISR 0x%08x_%08x\n",
515 			in_be32(priv->reg + TALITOS_KEUISR),
516 			in_be32(priv->reg + TALITOS_KEUISR_LO));
517 		break;
518 	}
519 
520 	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
521 	case DESC_HDR_SEL1_MDEUA:
522 	case DESC_HDR_SEL1_MDEUB:
523 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
524 			in_be32(priv->reg + TALITOS_MDEUISR),
525 			in_be32(priv->reg + TALITOS_MDEUISR_LO));
526 		break;
527 	case DESC_HDR_SEL1_CRCU:
528 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
529 			in_be32(priv->reg + TALITOS_CRCUISR),
530 			in_be32(priv->reg + TALITOS_CRCUISR_LO));
531 		break;
532 	}
533 
534 	for (i = 0; i < 8; i++)
535 		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
536 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
537 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
538 }
539 
540 /*
541  * recover from error interrupts
542  */
543 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
544 {
545 	struct talitos_private *priv = dev_get_drvdata(dev);
546 	unsigned int timeout = TALITOS_TIMEOUT;
547 	int ch, error, reset_dev = 0, reset_ch = 0;
548 	u32 v, v_lo;
549 
550 	for (ch = 0; ch < priv->num_channels; ch++) {
551 		/* skip channels without errors */
552 		if (!(isr & (1 << (ch * 2 + 1))))
553 			continue;
554 
555 		error = -EINVAL;
556 
557 		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
558 		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
559 
560 		if (v_lo & TALITOS_CCPSR_LO_DOF) {
561 			dev_err(dev, "double fetch fifo overflow error\n");
562 			error = -EAGAIN;
563 			reset_ch = 1;
564 		}
565 		if (v_lo & TALITOS_CCPSR_LO_SOF) {
566 			/* h/w dropped descriptor */
567 			dev_err(dev, "single fetch fifo overflow error\n");
568 			error = -EAGAIN;
569 		}
570 		if (v_lo & TALITOS_CCPSR_LO_MDTE)
571 			dev_err(dev, "master data transfer error\n");
572 		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
573 			dev_err(dev, "s/g data length zero error\n");
574 		if (v_lo & TALITOS_CCPSR_LO_FPZ)
575 			dev_err(dev, "fetch pointer zero error\n");
576 		if (v_lo & TALITOS_CCPSR_LO_IDH)
577 			dev_err(dev, "illegal descriptor header error\n");
578 		if (v_lo & TALITOS_CCPSR_LO_IEU)
579 			dev_err(dev, "invalid execution unit error\n");
580 		if (v_lo & TALITOS_CCPSR_LO_EU)
581 			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
582 		if (v_lo & TALITOS_CCPSR_LO_GB)
583 			dev_err(dev, "gather boundary error\n");
584 		if (v_lo & TALITOS_CCPSR_LO_GRL)
585 			dev_err(dev, "gather return/length error\n");
586 		if (v_lo & TALITOS_CCPSR_LO_SB)
587 			dev_err(dev, "scatter boundary error\n");
588 		if (v_lo & TALITOS_CCPSR_LO_SRL)
589 			dev_err(dev, "scatter return/length error\n");
590 
591 		flush_channel(dev, ch, error, reset_ch);
592 
593 		if (reset_ch) {
594 			reset_channel(dev, ch);
595 		} else {
596 			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
597 				  TALITOS_CCCR_CONT);
598 			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
599 			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
600 			       TALITOS_CCCR_CONT) && --timeout)
601 				cpu_relax();
602 			if (timeout == 0) {
603 				dev_err(dev, "failed to restart channel %d\n",
604 					ch);
605 				reset_dev = 1;
606 			}
607 		}
608 	}
609 	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
610 		dev_err(dev, "done overflow, internal timeout, or rngu error: "
611 		        "ISR 0x%08x_%08x\n", isr, isr_lo);
612 
613 		/* purge request queues */
614 		for (ch = 0; ch < priv->num_channels; ch++)
615 			flush_channel(dev, ch, -EIO, 1);
616 
617 		/* reset and reinitialize the device */
618 		init_device(dev);
619 	}
620 }
621 
622 #define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
623 static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
624 {									       \
625 	struct device *dev = data;					       \
626 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
627 	u32 isr, isr_lo;						       \
628 	unsigned long flags;						       \
629 									       \
630 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
631 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
632 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
633 	/* Acknowledge interrupt */					       \
634 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
635 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
636 									       \
637 	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
638 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
639 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
640 	}								       \
641 	else {								       \
642 		if (likely(isr & ch_done_mask)) {			       \
643 			/* mask further done interrupts. */		       \
644 			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
645 			/* done_task will unmask done interrupts at exit */    \
646 			tasklet_schedule(&priv->done_task[tlet]);	       \
647 		}							       \
648 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
649 	}								       \
650 									       \
651 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
652 								IRQ_NONE;      \
653 }
654 DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
655 DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
656 DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
657 
658 /*
659  * hwrng
660  */
661 static int talitos_rng_data_present(struct hwrng *rng, int wait)
662 {
663 	struct device *dev = (struct device *)rng->priv;
664 	struct talitos_private *priv = dev_get_drvdata(dev);
665 	u32 ofl;
666 	int i;
667 
668 	for (i = 0; i < 20; i++) {
669 		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
670 		      TALITOS_RNGUSR_LO_OFL;
671 		if (ofl || !wait)
672 			break;
673 		udelay(10);
674 	}
675 
676 	return !!ofl;
677 }
678 
679 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
680 {
681 	struct device *dev = (struct device *)rng->priv;
682 	struct talitos_private *priv = dev_get_drvdata(dev);
683 
684 	/* rng fifo requires 64-bit accesses */
685 	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
686 	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
687 
688 	return sizeof(u32);
689 }
690 
691 static int talitos_rng_init(struct hwrng *rng)
692 {
693 	struct device *dev = (struct device *)rng->priv;
694 	struct talitos_private *priv = dev_get_drvdata(dev);
695 	unsigned int timeout = TALITOS_TIMEOUT;
696 
697 	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
698 	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
699 	       && --timeout)
700 		cpu_relax();
701 	if (timeout == 0) {
702 		dev_err(dev, "failed to reset rng hw\n");
703 		return -ENODEV;
704 	}
705 
706 	/* start generating */
707 	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
708 
709 	return 0;
710 }
711 
712 static int talitos_register_rng(struct device *dev)
713 {
714 	struct talitos_private *priv = dev_get_drvdata(dev);
715 
716 	priv->rng.name		= dev_driver_string(dev);
717 	priv->rng.init		= talitos_rng_init;
718 	priv->rng.data_present	= talitos_rng_data_present;
719 	priv->rng.data_read	= talitos_rng_data_read;
720 	priv->rng.priv		= (unsigned long)dev;
721 
722 	return hwrng_register(&priv->rng);
723 }
724 
725 static void talitos_unregister_rng(struct device *dev)
726 {
727 	struct talitos_private *priv = dev_get_drvdata(dev);
728 
729 	hwrng_unregister(&priv->rng);
730 }
731 
732 /*
733  * crypto alg
734  */
735 #define TALITOS_CRA_PRIORITY		3000
736 #define TALITOS_MAX_KEY_SIZE		64
737 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
738 
739 #define MD5_BLOCK_SIZE    64
740 
741 struct talitos_ctx {
742 	struct device *dev;
743 	int ch;
744 	__be32 desc_hdr_template;
745 	u8 key[TALITOS_MAX_KEY_SIZE];
746 	u8 iv[TALITOS_MAX_IV_LENGTH];
747 	unsigned int keylen;
748 	unsigned int enckeylen;
749 	unsigned int authkeylen;
750 	unsigned int authsize;
751 };
752 
753 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
754 #define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
755 
756 struct talitos_ahash_req_ctx {
757 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
758 	unsigned int hw_context_size;
759 	u8 buf[HASH_MAX_BLOCK_SIZE];
760 	u8 bufnext[HASH_MAX_BLOCK_SIZE];
761 	unsigned int swinit;
762 	unsigned int first;
763 	unsigned int last;
764 	unsigned int to_hash_later;
765 	u64 nbuf;
766 	struct scatterlist bufsl[2];
767 	struct scatterlist *psrc;
768 };
769 
770 static int aead_setauthsize(struct crypto_aead *authenc,
771 			    unsigned int authsize)
772 {
773 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
774 
775 	ctx->authsize = authsize;
776 
777 	return 0;
778 }
779 
780 static int aead_setkey(struct crypto_aead *authenc,
781 		       const u8 *key, unsigned int keylen)
782 {
783 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
784 	struct rtattr *rta = (void *)key;
785 	struct crypto_authenc_key_param *param;
786 	unsigned int authkeylen;
787 	unsigned int enckeylen;
788 
789 	if (!RTA_OK(rta, keylen))
790 		goto badkey;
791 
792 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
793 		goto badkey;
794 
795 	if (RTA_PAYLOAD(rta) < sizeof(*param))
796 		goto badkey;
797 
798 	param = RTA_DATA(rta);
799 	enckeylen = be32_to_cpu(param->enckeylen);
800 
801 	key += RTA_ALIGN(rta->rta_len);
802 	keylen -= RTA_ALIGN(rta->rta_len);
803 
804 	if (keylen < enckeylen)
805 		goto badkey;
806 
807 	authkeylen = keylen - enckeylen;
808 
809 	if (keylen > TALITOS_MAX_KEY_SIZE)
810 		goto badkey;
811 
812 	memcpy(&ctx->key, key, keylen);
813 
814 	ctx->keylen = keylen;
815 	ctx->enckeylen = enckeylen;
816 	ctx->authkeylen = authkeylen;
817 
818 	return 0;
819 
820 badkey:
821 	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
822 	return -EINVAL;
823 }
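
/*
 * Illustrative layout of the authenc() key blob parsed above (the
 * standard crypto_authenc key format of this era): an rtattr header
 * carrying enckeylen, followed by the two raw keys back to back:
 *
 *	struct rtattr hdr;	// rta_type == CRYPTO_AUTHENC_KEYA_PARAM
 *	__be32 enckeylen;	// struct crypto_authenc_key_param
 *	u8 authkey[keylen - enckeylen];
 *	u8 enckey[enckeylen];
 */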
824 
825 /*
826  * talitos_edesc - s/w-extended descriptor
827  * @src_nents: number of segments in input scatterlist
828  * @dst_nents: number of segments in output scatterlist
829  * @dma_len: length of dma mapped link_tbl space
830  * @dma_link_tbl: bus physical address of link_tbl
831  * @desc: h/w descriptor
832  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
833  *
834  * if decrypting (with authcheck), or either one of src_nents or dst_nents
835  * is greater than 1, an integrity check value is concatenated to the end
836  * of link_tbl data
837  */
838 struct talitos_edesc {
839 	int src_nents;
840 	int dst_nents;
841 	int src_is_chained;
842 	int dst_is_chained;
843 	int dma_len;
844 	dma_addr_t dma_link_tbl;
845 	struct talitos_desc desc;
846 	struct talitos_ptr link_tbl[0];
847 };
848 
849 static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
850 			  unsigned int nents, enum dma_data_direction dir,
851 			  int chained)
852 {
853 	if (unlikely(chained))
854 		while (sg) {
855 			dma_map_sg(dev, sg, 1, dir);
856 			sg = scatterwalk_sg_next(sg);
857 		}
858 	else
859 		dma_map_sg(dev, sg, nents, dir);
860 	return nents;
861 }
862 
863 static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
864 				   enum dma_data_direction dir)
865 {
866 	while (sg) {
867 		dma_unmap_sg(dev, sg, 1, dir);
868 		sg = scatterwalk_sg_next(sg);
869 	}
870 }
871 
872 static void talitos_sg_unmap(struct device *dev,
873 			     struct talitos_edesc *edesc,
874 			     struct scatterlist *src,
875 			     struct scatterlist *dst)
876 {
877 	unsigned int src_nents = edesc->src_nents ? : 1;
878 	unsigned int dst_nents = edesc->dst_nents ? : 1;
879 
880 	if (src != dst) {
881 		if (edesc->src_is_chained)
882 			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
883 		else
884 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
885 
886 		if (dst) {
887 			if (edesc->dst_is_chained)
888 				talitos_unmap_sg_chain(dev, dst,
889 						       DMA_FROM_DEVICE);
890 			else
891 				dma_unmap_sg(dev, dst, dst_nents,
892 					     DMA_FROM_DEVICE);
893 		}
894 	} else
895 		if (edesc->src_is_chained)
896 			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
897 		else
898 			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
899 }
900 
901 static void ipsec_esp_unmap(struct device *dev,
902 			    struct talitos_edesc *edesc,
903 			    struct aead_request *areq)
904 {
905 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
906 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
907 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
908 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
909 
910 	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
911 
912 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
913 
914 	if (edesc->dma_len)
915 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
916 				 DMA_BIDIRECTIONAL);
917 }
918 
919 /*
920  * ipsec_esp descriptor callbacks
921  */
922 static void ipsec_esp_encrypt_done(struct device *dev,
923 				   struct talitos_desc *desc, void *context,
924 				   int err)
925 {
926 	struct aead_request *areq = context;
927 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
928 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
929 	struct talitos_edesc *edesc;
930 	struct scatterlist *sg;
931 	void *icvdata;
932 
933 	edesc = container_of(desc, struct talitos_edesc, desc);
934 
935 	ipsec_esp_unmap(dev, edesc, areq);
936 
937 	/* copy the generated ICV to dst */
938 	if (edesc->dma_len) {
939 		icvdata = &edesc->link_tbl[edesc->src_nents +
940 					   edesc->dst_nents + 2];
941 		sg = sg_last(areq->dst, edesc->dst_nents);
942 		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
943 		       icvdata, ctx->authsize);
944 	}
945 
946 	kfree(edesc);
947 
948 	aead_request_complete(areq, err);
949 }
950 
951 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
952 					  struct talitos_desc *desc,
953 					  void *context, int err)
954 {
955 	struct aead_request *req = context;
956 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
957 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
958 	struct talitos_edesc *edesc;
959 	struct scatterlist *sg;
960 	void *icvdata;
961 
962 	edesc = container_of(desc, struct talitos_edesc, desc);
963 
964 	ipsec_esp_unmap(dev, edesc, req);
965 
966 	if (!err) {
967 		/* auth check */
968 		if (edesc->dma_len)
969 			icvdata = &edesc->link_tbl[edesc->src_nents +
970 						   edesc->dst_nents + 2];
971 		else
972 			icvdata = &edesc->link_tbl[0];
973 
974 		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
975 		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
976 			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
977 	}
978 
979 	kfree(edesc);
980 
981 	aead_request_complete(req, err);
982 }
983 
984 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
985 					  struct talitos_desc *desc,
986 					  void *context, int err)
987 {
988 	struct aead_request *req = context;
989 	struct talitos_edesc *edesc;
990 
991 	edesc = container_of(desc, struct talitos_edesc, desc);
992 
993 	ipsec_esp_unmap(dev, edesc, req);
994 
995 	/* check ICV auth status */
996 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
997 		     DESC_HDR_LO_ICCR1_PASS))
998 		err = -EBADMSG;
999 
1000 	kfree(edesc);
1001 
1002 	aead_request_complete(req, err);
1003 }
1004 
1005 /*
1006  * convert scatterlist to SEC h/w link table format
1007  * stop at cryptlen bytes
1008  */
1009 static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
1010 			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
1011 {
1012 	int n_sg = sg_count;
1013 
1014 	while (n_sg--) {
1015 		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
1016 		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
1017 		link_tbl_ptr->j_extent = 0;
1018 		link_tbl_ptr++;
1019 		cryptlen -= sg_dma_len(sg);
1020 		sg = scatterwalk_sg_next(sg);
1021 	}
1022 
1023 	/* trim the trailing entry (or entries) so the total len equals cryptlen */
1024 	link_tbl_ptr--;
1025 	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
1026 		/* Empty this entry, and move to previous one */
1027 		cryptlen += be16_to_cpu(link_tbl_ptr->len);
1028 		link_tbl_ptr->len = 0;
1029 		sg_count--;
1030 		link_tbl_ptr--;
1031 	}
1032 	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
1033 					+ cryptlen);
1034 
1035 	/* tag end of link table */
1036 	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1037 
1038 	return sg_count;
1039 }
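
/*
 * Worked example (illustrative): two 64-byte segments and
 * cryptlen = 100. After the copy loop cryptlen == -28, so the trim
 * loop leaves entry 0 at 64 bytes and shortens entry 1 to
 * 64 + (-28) == 36 bytes; 64 + 36 == 100, and entry 1 gets the
 * DESC_PTR_LNKTBL_RETURN tag.
 */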
1040 
1041 /*
1042  * fill in and submit ipsec_esp descriptor
1043  */
1044 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1045 		     u8 *giv, u64 seq,
1046 		     void (*callback) (struct device *dev,
1047 				       struct talitos_desc *desc,
1048 				       void *context, int error))
1049 {
1050 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1051 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1052 	struct device *dev = ctx->dev;
1053 	struct talitos_desc *desc = &edesc->desc;
1054 	unsigned int cryptlen = areq->cryptlen;
1055 	unsigned int authsize = ctx->authsize;
1056 	unsigned int ivsize = crypto_aead_ivsize(aead);
1057 	int sg_count, ret;
1058 	int sg_link_tbl_len;
1059 
1060 	/* hmac key */
1061 	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1062 			       0, DMA_TO_DEVICE);
1063 	/* hmac data */
1064 	map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
1065 			       sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
1066 	/* cipher iv */
1067 	map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
1068 			       DMA_TO_DEVICE);
1069 
1070 	/* cipher key */
1071 	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1072 			       (char *)&ctx->key + ctx->authkeylen, 0,
1073 			       DMA_TO_DEVICE);
1074 
1075 	/*
1076 	 * cipher in
1077 	 * map and adjust cipher len to aead request cryptlen.
1078 	 * extent is the number of HMAC bytes appended to the ciphertext,
1079 	 * typically 12 for ipsec
1080 	 */
1081 	desc->ptr[4].len = cpu_to_be16(cryptlen);
1082 	desc->ptr[4].j_extent = authsize;
1083 
1084 	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1085 				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1086 							   : DMA_TO_DEVICE,
1087 				  edesc->src_is_chained);
1088 
1089 	if (sg_count == 1) {
1090 		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1091 	} else {
1092 		sg_link_tbl_len = cryptlen;
1093 
1094 		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1095 			sg_link_tbl_len = cryptlen + authsize;
1096 
1097 		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
1098 					  &edesc->link_tbl[0]);
1099 		if (sg_count > 1) {
1100 			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1101 			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1102 			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1103 						   edesc->dma_len,
1104 						   DMA_BIDIRECTIONAL);
1105 		} else {
1106 			/* Only one segment now, so no link tbl needed */
1107 			to_talitos_ptr(&desc->ptr[4],
1108 				       sg_dma_address(areq->src));
1109 		}
1110 	}
1111 
1112 	/* cipher out */
1113 	desc->ptr[5].len = cpu_to_be16(cryptlen);
1114 	desc->ptr[5].j_extent = authsize;
1115 
1116 	if (areq->src != areq->dst)
1117 		sg_count = talitos_map_sg(dev, areq->dst,
1118 					  edesc->dst_nents ? : 1,
1119 					  DMA_FROM_DEVICE,
1120 					  edesc->dst_is_chained);
1121 
1122 	if (sg_count == 1) {
1123 		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1124 	} else {
1125 		struct talitos_ptr *link_tbl_ptr =
1126 			&edesc->link_tbl[edesc->src_nents + 1];
1127 
1128 		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1129 			       (edesc->src_nents + 1) *
1130 			       sizeof(struct talitos_ptr));
1131 		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1132 					  link_tbl_ptr);
1133 
1134 		/* Add an entry to the link table for ICV data */
1135 		link_tbl_ptr += sg_count - 1;
1136 		link_tbl_ptr->j_extent = 0;
1137 		sg_count++;
1138 		link_tbl_ptr++;
1139 		link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1140 		link_tbl_ptr->len = cpu_to_be16(authsize);
1141 
1142 		/* icv data follows link tables */
1143 		to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
1144 			       (edesc->src_nents + edesc->dst_nents + 2) *
1145 			       sizeof(struct talitos_ptr));
1146 		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1147 		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1148 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1149 	}
1150 
1151 	/* iv out */
1152 	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1153 			       DMA_FROM_DEVICE);
1154 
1155 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1156 	if (ret != -EINPROGRESS) {
1157 		ipsec_esp_unmap(dev, edesc, areq);
1158 		kfree(edesc);
1159 	}
1160 	return ret;
1161 }
1162 
1163 /*
1164  * derive number of elements in scatterlist
1165  */
1166 static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
1167 {
1168 	struct scatterlist *sg = sg_list;
1169 	int sg_nents = 0;
1170 
1171 	*chained = 0;
1172 	while (nbytes > 0) {
1173 		sg_nents++;
1174 		nbytes -= sg->length;
1175 		if (!sg_is_last(sg) && (sg + 1)->length == 0)
1176 			*chained = 1;
1177 		sg = scatterwalk_sg_next(sg);
1178 	}
1179 
1180 	return sg_nents;
1181 }
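
/*
 * Worked example (illustrative): for a list of three 64-byte entries
 * and nbytes = 100, the walk stops after the second entry
 * (64 + 64 >= 100), so sg_count() returns 2; *chained is set only
 * when a zero-length entry marks an old-style chained list.
 */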
1182 
1183 /**
1184  * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
1185  * @sgl:		 The SG list
1186  * @nents:		 Number of SG entries
1187  * @buf:		 Where to copy to
1188  * @buflen:		 The number of bytes to copy
1189  * @skip:		 The number of bytes to skip before copying.
1190  *                       Note: skip + buflen should equal SG total size.
1191  *
1192  * Returns the number of copied bytes.
1193  *
1194  */
1195 static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
1196 				    void *buf, size_t buflen, unsigned int skip)
1197 {
1198 	unsigned int offset = 0;
1199 	unsigned int boffset = 0;
1200 	struct sg_mapping_iter miter;
1201 	unsigned long flags;
1202 	unsigned int sg_flags = SG_MITER_ATOMIC;
1203 	size_t total_buffer = buflen + skip;
1204 
1205 	sg_flags |= SG_MITER_FROM_SG;
1206 
1207 	sg_miter_start(&miter, sgl, nents, sg_flags);
1208 
1209 	local_irq_save(flags);
1210 
1211 	while (sg_miter_next(&miter) && offset < total_buffer) {
1212 		unsigned int len;
1213 		unsigned int ignore;
1214 
1215 		if ((offset + miter.length) > skip) {
1216 			if (offset < skip) {
1217 				/* Copy part of this segment */
1218 				ignore = skip - offset;
1219 				len = miter.length - ignore;
1220 				if (boffset + len > buflen)
1221 					len = buflen - boffset;
1222 				memcpy(buf + boffset, miter.addr + ignore, len);
1223 			} else {
1224 				/* Copy all of this segment (up to buflen) */
1225 				len = miter.length;
1226 				if (boffset + len > buflen)
1227 					len = buflen - boffset;
1228 				memcpy(buf + boffset, miter.addr, len);
1229 			}
1230 			boffset += len;
1231 		}
1232 		offset += miter.length;
1233 	}
1234 
1235 	sg_miter_stop(&miter);
1236 
1237 	local_irq_restore(flags);
1238 	return boffset;
1239 }
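
/*
 * Worked example (illustrative): for an SG list holding 100 bytes,
 * buflen = 36 and skip = 64 copy exactly the final 36 bytes
 * (skip + buflen == 100, the SG total, as the note above requires).
 * This is how ahash_process_req() stashes a trailing partial block.
 */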
1240 
1241 /*
1242  * allocate and map the extended descriptor
1243  */
1244 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1245 						 struct scatterlist *src,
1246 						 struct scatterlist *dst,
1247 						 int hash_result,
1248 						 unsigned int cryptlen,
1249 						 unsigned int authsize,
1250 						 int icv_stashing,
1251 						 u32 cryptoflags)
1252 {
1253 	struct talitos_edesc *edesc;
1254 	int src_nents, dst_nents, alloc_len, dma_len;
1255 	int src_chained, dst_chained = 0;
1256 	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1257 		      GFP_ATOMIC;
1258 
1259 	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1260 		dev_err(dev, "length exceeds h/w max limit\n");
1261 		return ERR_PTR(-EINVAL);
1262 	}
1263 
1264 	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1265 	src_nents = (src_nents == 1) ? 0 : src_nents;
1266 
1267 	if (hash_result) {
1268 		dst_nents = 0;
1269 	} else {
1270 		if (dst == src) {
1271 			dst_nents = src_nents;
1272 		} else {
1273 			dst_nents = sg_count(dst, cryptlen + authsize,
1274 					     &dst_chained);
1275 			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1276 		}
1277 	}
1278 
1279 	/*
1280 	 * allocate space for base edesc plus the link tables,
1281 	 * allowing for two separate entries for ICV and generated ICV (+ 2),
1282 	 * and the ICV data itself
1283 	 */
1284 	alloc_len = sizeof(struct talitos_edesc);
1285 	if (src_nents || dst_nents) {
1286 		dma_len = (src_nents + dst_nents + 2) *
1287 				 sizeof(struct talitos_ptr) + authsize;
1288 		alloc_len += dma_len;
1289 	} else {
1290 		dma_len = 0;
1291 		alloc_len += icv_stashing ? authsize : 0;
1292 	}
1293 
1294 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1295 	if (!edesc) {
1296 		dev_err(dev, "could not allocate edescriptor\n");
1297 		return ERR_PTR(-ENOMEM);
1298 	}
1299 
1300 	edesc->src_nents = src_nents;
1301 	edesc->dst_nents = dst_nents;
1302 	edesc->src_is_chained = src_chained;
1303 	edesc->dst_is_chained = dst_chained;
1304 	edesc->dma_len = dma_len;
1305 	if (dma_len)
1306 		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1307 						     edesc->dma_len,
1308 						     DMA_BIDIRECTIONAL);
1309 
1310 	return edesc;
1311 }
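
/*
 * Worked example (illustrative): with src_nents = dst_nents = 2 and a
 * 12-byte ICV, dma_len = (2 + 2 + 2) * sizeof(struct talitos_ptr) + 12
 * = 6 * 8 + 12 = 60 bytes of link-table + ICV space appended to the
 * base edesc allocation.
 */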
1312 
1313 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1314 					      int icv_stashing)
1315 {
1316 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1317 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1318 
1319 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1320 				   areq->cryptlen, ctx->authsize, icv_stashing,
1321 				   areq->base.flags);
1322 }
1323 
1324 static int aead_encrypt(struct aead_request *req)
1325 {
1326 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1327 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1328 	struct talitos_edesc *edesc;
1329 
1330 	/* allocate extended descriptor */
1331 	edesc = aead_edesc_alloc(req, 0);
1332 	if (IS_ERR(edesc))
1333 		return PTR_ERR(edesc);
1334 
1335 	/* set encrypt */
1336 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1337 
1338 	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1339 }
1340 
1341 static int aead_decrypt(struct aead_request *req)
1342 {
1343 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1344 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1345 	unsigned int authsize = ctx->authsize;
1346 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1347 	struct talitos_edesc *edesc;
1348 	struct scatterlist *sg;
1349 	void *icvdata;
1350 
1351 	req->cryptlen -= authsize;
1352 
1353 	/* allocate extended descriptor */
1354 	edesc = aead_edesc_alloc(req, 1);
1355 	if (IS_ERR(edesc))
1356 		return PTR_ERR(edesc);
1357 
1358 	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1359 	    ((!edesc->src_nents && !edesc->dst_nents) ||
1360 	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1361 
1362 		/* decrypt and check the ICV */
1363 		edesc->desc.hdr = ctx->desc_hdr_template |
1364 				  DESC_HDR_DIR_INBOUND |
1365 				  DESC_HDR_MODE1_MDEU_CICV;
1366 
1367 		/* reset integrity check result bits */
1368 		edesc->desc.hdr_lo = 0;
1369 
1370 		return ipsec_esp(edesc, req, NULL, 0,
1371 				 ipsec_esp_decrypt_hwauth_done);
1372 
1373 	}
1374 
1375 	/* Have to check the ICV with software */
1376 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1377 
1378 	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1379 	if (edesc->dma_len)
1380 		icvdata = &edesc->link_tbl[edesc->src_nents +
1381 					   edesc->dst_nents + 2];
1382 	else
1383 		icvdata = &edesc->link_tbl[0];
1384 
1385 	sg = sg_last(req->src, edesc->src_nents ? : 1);
1386 
1387 	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1388 	       ctx->authsize);
1389 
1390 	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
1391 }
1392 
1393 static int aead_givencrypt(struct aead_givcrypt_request *req)
1394 {
1395 	struct aead_request *areq = &req->areq;
1396 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1397 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1398 	struct talitos_edesc *edesc;
1399 
1400 	/* allocate extended descriptor */
1401 	edesc = aead_edesc_alloc(areq, 0);
1402 	if (IS_ERR(edesc))
1403 		return PTR_ERR(edesc);
1404 
1405 	/* set encrypt */
1406 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1407 
1408 	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1409 	/* avoid consecutive packets going out with same IV */
1410 	/* avoid consecutive packets going out with the same IV */
1411 
1412 	return ipsec_esp(edesc, areq, req->giv, req->seq,
1413 			 ipsec_esp_encrypt_done);
1414 }
1415 
1416 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1417 			     const u8 *key, unsigned int keylen)
1418 {
1419 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1420 
1421 	memcpy(&ctx->key, key, keylen);
1422 	ctx->keylen = keylen;
1423 
1424 	return 0;
1425 }
1426 
1427 static void common_nonsnoop_unmap(struct device *dev,
1428 				  struct talitos_edesc *edesc,
1429 				  struct ablkcipher_request *areq)
1430 {
1431 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1432 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1433 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1434 
1435 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
1436 
1437 	if (edesc->dma_len)
1438 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1439 				 DMA_BIDIRECTIONAL);
1440 }
1441 
1442 static void ablkcipher_done(struct device *dev,
1443 			    struct talitos_desc *desc, void *context,
1444 			    int err)
1445 {
1446 	struct ablkcipher_request *areq = context;
1447 	struct talitos_edesc *edesc;
1448 
1449 	edesc = container_of(desc, struct talitos_edesc, desc);
1450 
1451 	common_nonsnoop_unmap(dev, edesc, areq);
1452 
1453 	kfree(edesc);
1454 
1455 	areq->base.complete(&areq->base, err);
1456 }
1457 
1458 static int common_nonsnoop(struct talitos_edesc *edesc,
1459 			   struct ablkcipher_request *areq,
1460 			   void (*callback) (struct device *dev,
1461 					     struct talitos_desc *desc,
1462 					     void *context, int error))
1463 {
1464 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1465 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1466 	struct device *dev = ctx->dev;
1467 	struct talitos_desc *desc = &edesc->desc;
1468 	unsigned int cryptlen = areq->nbytes;
1469 	unsigned int ivsize;
1470 	int sg_count, ret;
1471 
1472 	/* first DWORD empty */
1473 	desc->ptr[0].len = 0;
1474 	to_talitos_ptr(&desc->ptr[0], 0);
1475 	desc->ptr[0].j_extent = 0;
1476 
1477 	/* cipher iv */
1478 	ivsize = crypto_ablkcipher_ivsize(cipher);
1479 	map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0,
1480 			       DMA_TO_DEVICE);
1481 
1482 	/* cipher key */
1483 	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1484 			       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1485 
1486 	/*
1487 	 * cipher in
1488 	 */
1489 	desc->ptr[3].len = cpu_to_be16(cryptlen);
1490 	desc->ptr[3].j_extent = 0;
1491 
1492 	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1493 				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1494 							   : DMA_TO_DEVICE,
1495 				  edesc->src_is_chained);
1496 
1497 	if (sg_count == 1) {
1498 		to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1499 	} else {
1500 		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1501 					  &edesc->link_tbl[0]);
1502 		if (sg_count > 1) {
1503 			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1504 			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1505 			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1506 						   edesc->dma_len,
1507 						   DMA_BIDIRECTIONAL);
1508 		} else {
1509 			/* Only one segment now, so no link tbl needed */
1510 			to_talitos_ptr(&desc->ptr[3],
1511 				       sg_dma_address(areq->src));
1512 		}
1513 	}
1514 
1515 	/* cipher out */
1516 	desc->ptr[4].len = cpu_to_be16(cryptlen);
1517 	desc->ptr[4].j_extent = 0;
1518 
1519 	if (areq->src != areq->dst)
1520 		sg_count = talitos_map_sg(dev, areq->dst,
1521 					  edesc->dst_nents ? : 1,
1522 					  DMA_FROM_DEVICE,
1523 					  edesc->dst_is_chained);
1524 
1525 	if (sg_count == 1) {
1526 		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1527 	} else {
1528 		struct talitos_ptr *link_tbl_ptr =
1529 			&edesc->link_tbl[edesc->src_nents + 1];
1530 
1531 		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1532 					      (edesc->src_nents + 1) *
1533 					      sizeof(struct talitos_ptr));
1534 		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1535 		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1536 					  link_tbl_ptr);
1537 		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1538 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1539 	}
1540 
1541 	/* iv out */
1542 	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1543 			       DMA_FROM_DEVICE);
1544 
1545 	/* last DWORD empty */
1546 	desc->ptr[6].len = 0;
1547 	to_talitos_ptr(&desc->ptr[6], 0);
1548 	desc->ptr[6].j_extent = 0;
1549 
1550 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1551 	if (ret != -EINPROGRESS) {
1552 		common_nonsnoop_unmap(dev, edesc, areq);
1553 		kfree(edesc);
1554 	}
1555 	return ret;
1556 }
1557 
1558 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1559 						    areq)
1560 {
1561 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1562 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1563 
1564 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1565 				   areq->nbytes, 0, 0, areq->base.flags);
1566 }
1567 
1568 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1569 {
1570 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1571 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1572 	struct talitos_edesc *edesc;
1573 
1574 	/* allocate extended descriptor */
1575 	edesc = ablkcipher_edesc_alloc(areq);
1576 	if (IS_ERR(edesc))
1577 		return PTR_ERR(edesc);
1578 
1579 	/* set encrypt */
1580 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1581 
1582 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1583 }
1584 
1585 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1586 {
1587 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1588 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1589 	struct talitos_edesc *edesc;
1590 
1591 	/* allocate extended descriptor */
1592 	edesc = ablkcipher_edesc_alloc(areq);
1593 	if (IS_ERR(edesc))
1594 		return PTR_ERR(edesc);
1595 
1596 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1597 
1598 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1599 }
1600 
1601 static void common_nonsnoop_hash_unmap(struct device *dev,
1602 				       struct talitos_edesc *edesc,
1603 				       struct ahash_request *areq)
1604 {
1605 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1606 
1607 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1608 
1609 	/* When using hashctx-in, must unmap it. */
1610 	if (edesc->desc.ptr[1].len)
1611 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1612 					 DMA_TO_DEVICE);
1613 
1614 	if (edesc->desc.ptr[2].len)
1615 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1616 					 DMA_TO_DEVICE);
1617 
1618 	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
1619 
1620 	if (edesc->dma_len)
1621 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1622 				 DMA_BIDIRECTIONAL);
1623 
1624 }
1625 
1626 static void ahash_done(struct device *dev,
1627 		       struct talitos_desc *desc, void *context,
1628 		       int err)
1629 {
1630 	struct ahash_request *areq = context;
1631 	struct talitos_edesc *edesc =
1632 		 container_of(desc, struct talitos_edesc, desc);
1633 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1634 
1635 	if (!req_ctx->last && req_ctx->to_hash_later) {
1636 		/* Position any partial block for next update/final/finup */
1637 		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1638 		req_ctx->nbuf = req_ctx->to_hash_later;
1639 	}
1640 	common_nonsnoop_hash_unmap(dev, edesc, areq);
1641 
1642 	kfree(edesc);
1643 
1644 	areq->base.complete(&areq->base, err);
1645 }
1646 
1647 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1648 				struct ahash_request *areq, unsigned int length,
1649 				void (*callback) (struct device *dev,
1650 						  struct talitos_desc *desc,
1651 						  void *context, int error))
1652 {
1653 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1654 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1655 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1656 	struct device *dev = ctx->dev;
1657 	struct talitos_desc *desc = &edesc->desc;
1658 	int sg_count, ret;
1659 
1660 	/* first DWORD empty */
1661 	desc->ptr[0] = zero_entry;
1662 
1663 	/* hash context in */
1664 	if (!req_ctx->first || req_ctx->swinit) {
1665 		map_single_talitos_ptr(dev, &desc->ptr[1],
1666 				       req_ctx->hw_context_size,
1667 				       (char *)req_ctx->hw_context, 0,
1668 				       DMA_TO_DEVICE);
1669 		req_ctx->swinit = 0;
1670 	} else {
1671 		desc->ptr[1] = zero_entry;
1672 		/* Indicate next op is not the first. */
1673 		req_ctx->first = 0;
1674 	}
1675 
1676 	/* HMAC key */
1677 	if (ctx->keylen)
1678 		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1679 				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1680 	else
1681 		desc->ptr[2] = zero_entry;
1682 
1683 	/*
1684 	 * data in
1685 	 */
1686 	desc->ptr[3].len = cpu_to_be16(length);
1687 	desc->ptr[3].j_extent = 0;
1688 
1689 	sg_count = talitos_map_sg(dev, req_ctx->psrc,
1690 				  edesc->src_nents ? : 1,
1691 				  DMA_TO_DEVICE,
1692 				  edesc->src_is_chained);
1693 
1694 	if (sg_count == 1) {
1695 		to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
1696 	} else {
1697 		sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
1698 					  &edesc->link_tbl[0]);
1699 		if (sg_count > 1) {
1700 			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1701 			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1702 			dma_sync_single_for_device(ctx->dev,
1703 						   edesc->dma_link_tbl,
1704 						   edesc->dma_len,
1705 						   DMA_BIDIRECTIONAL);
1706 		} else {
1707 			/* Only one segment now, so no link tbl needed */
1708 			to_talitos_ptr(&desc->ptr[3],
1709 				       sg_dma_address(req_ctx->psrc));
1710 		}
1711 	}
1712 
1713 	/* fifth DWORD empty */
1714 	desc->ptr[4] = zero_entry;
1715 
1716 	/* hash/HMAC out -or- hash context out */
1717 	if (req_ctx->last)
1718 		map_single_talitos_ptr(dev, &desc->ptr[5],
1719 				       crypto_ahash_digestsize(tfm),
1720 				       areq->result, 0, DMA_FROM_DEVICE);
1721 	else
1722 		map_single_talitos_ptr(dev, &desc->ptr[5],
1723 				       req_ctx->hw_context_size,
1724 				       req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1725 
1726 	/* last DWORD empty */
1727 	desc->ptr[6] = zero_entry;
1728 
1729 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1730 	if (ret != -EINPROGRESS) {
1731 		common_nonsnoop_hash_unmap(dev, edesc, areq);
1732 		kfree(edesc);
1733 	}
1734 	return ret;
1735 }
1736 
1737 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1738 					       unsigned int nbytes)
1739 {
1740 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1741 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1742 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1743 
1744 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
1745 				   nbytes, 0, 0, areq->base.flags);
1746 }
1747 
1748 static int ahash_init(struct ahash_request *areq)
1749 {
1750 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1751 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1752 
1753 	/* Initialize the context */
1754 	req_ctx->nbuf = 0;
1755 	req_ctx->first = 1; /* first indicates h/w must init its context */
1756 	req_ctx->swinit = 0; /* assume h/w init of context */
1757 	req_ctx->hw_context_size =
1758 		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1759 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1760 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1761 
1762 	return 0;
1763 }
1764 
1765 /*
1766  * on h/w without explicit sha224 support, we initialize h/w context
1767  * manually with sha224 constants, and tell it to run sha256.
1768  */
1769 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1770 {
1771 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1772 
1773 	ahash_init(areq);
1774 	req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */
1775 
1776 	req_ctx->hw_context[0] = SHA224_H0;
1777 	req_ctx->hw_context[1] = SHA224_H1;
1778 	req_ctx->hw_context[2] = SHA224_H2;
1779 	req_ctx->hw_context[3] = SHA224_H3;
1780 	req_ctx->hw_context[4] = SHA224_H4;
1781 	req_ctx->hw_context[5] = SHA224_H5;
1782 	req_ctx->hw_context[6] = SHA224_H6;
1783 	req_ctx->hw_context[7] = SHA224_H7;
1784 
1785 	/* init 64-bit count */
1786 	req_ctx->hw_context[8] = 0;
1787 	req_ctx->hw_context[9] = 0;
1788 
1789 	return 0;
1790 }
1791 
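     /*
      * Core of .update/.final/.finup/.digest: buffer sub-block input and push
      * full blocks through the SEC.  Unless this is the final request, between
      * 1 and blocksize bytes are always held back so that a non-empty final
      * block remains for the hardware to pad.
      */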
1792 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1793 {
1794 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1795 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1796 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1797 	struct talitos_edesc *edesc;
1798 	unsigned int blocksize =
1799 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1800 	unsigned int nbytes_to_hash;
1801 	unsigned int to_hash_later;
1802 	unsigned int nsg;
1803 	int chained;
1804 
1805 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1806 		/* Buffer up to one whole block */
1807 		sg_copy_to_buffer(areq->src,
1808 				  sg_count(areq->src, nbytes, &chained),
1809 				  req_ctx->buf + req_ctx->nbuf, nbytes);
1810 		req_ctx->nbuf += nbytes;
1811 		return 0;
1812 	}
1813 
1814 	/* At least (blocksize + 1) bytes to hash, unless this is the last request */
1815 	nbytes_to_hash = nbytes + req_ctx->nbuf;
1816 	to_hash_later = nbytes_to_hash & (blocksize - 1);
1817 
1818 	if (req_ctx->last) {
1819 		to_hash_later = 0;
1820 	} else if (to_hash_later) {
1821 		/* There is a partial block. Hash the full block(s) now */
1822 		nbytes_to_hash -= to_hash_later;
1823 	} else {
1824 		/* Keep one block buffered */
1825 		nbytes_to_hash -= blocksize;
1826 		to_hash_later = blocksize;
1827 	}
1828 
1829 	/* Chain in any previously buffered data */
1830 	if (req_ctx->nbuf) {
1831 		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1832 		sg_init_table(req_ctx->bufsl, nsg);
1833 		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1834 		if (nsg > 1)
1835 			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1836 		req_ctx->psrc = req_ctx->bufsl;
1837 	} else {
1838 		req_ctx->psrc = areq->src;
     	}
1839 
1840 	if (to_hash_later) {
1841 		int nents = sg_count(areq->src, nbytes, &chained);
1842 		sg_copy_end_to_buffer(areq->src, nents,
1843 				      req_ctx->bufnext,
1844 				      to_hash_later,
1845 				      nbytes - to_hash_later);
1846 	}
1847 	req_ctx->to_hash_later = to_hash_later;
1848 
1849 	/* Allocate extended descriptor */
1850 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1851 	if (IS_ERR(edesc))
1852 		return PTR_ERR(edesc);
1853 
1854 	edesc->desc.hdr = ctx->desc_hdr_template;
1855 
1856 	/* On last one, request SEC to pad; otherwise continue */
1857 	if (req_ctx->last)
1858 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1859 	else
1860 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1861 
1862 	/* request SEC to INIT hash. */
1863 	if (req_ctx->first && !req_ctx->swinit)
1864 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1865 
1866 	/* When the tfm context has a keylen, it's an HMAC.
1867 	 * A first or last (i.e. not middle) descriptor must request HMAC.
1868 	 */
1869 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
1870 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1871 
1872 	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1873 				    ahash_done);
1874 }
1875 
1876 static int ahash_update(struct ahash_request *areq)
1877 {
1878 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1879 
1880 	req_ctx->last = 0;
1881 
1882 	return ahash_process_req(areq, areq->nbytes);
1883 }
1884 
1885 static int ahash_final(struct ahash_request *areq)
1886 {
1887 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1888 
1889 	req_ctx->last = 1;
1890 
1891 	return ahash_process_req(areq, 0);
1892 }
1893 
1894 static int ahash_finup(struct ahash_request *areq)
1895 {
1896 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1897 
1898 	req_ctx->last = 1;
1899 
1900 	return ahash_process_req(areq, areq->nbytes);
1901 }
1902 
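     /*
      * One-shot digest: reinitialize the request state, then hash all of the
      * data as a single, final request.
      */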
1903 static int ahash_digest(struct ahash_request *areq)
1904 {
1905 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1906 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1907 
1908 	ahash->init(areq);
1909 	req_ctx->last = 1;
1910 
1911 	return ahash_process_req(areq, areq->nbytes);
1912 }
1913 
1914 struct keyhash_result {
1915 	struct completion completion;
1916 	int err;
1917 };
1918 
1919 static void keyhash_complete(struct crypto_async_request *req, int err)
1920 {
1921 	struct keyhash_result *res = req->data;
1922 
1923 	if (err == -EINPROGRESS)
1924 		return;
1925 
1926 	res->err = err;
1927 	complete(&res->completion);
1928 }
1929 
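     /*
      * Hash an HMAC key that exceeds the block size down to a digest, as
      * RFC 2104 requires, by running a synchronous digest operation through
      * this driver's own ahash.
      */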
1930 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1931 		   u8 *hash)
1932 {
1933 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1935 	struct scatterlist sg[1];
1936 	struct ahash_request *req;
1937 	struct keyhash_result hresult;
1938 	int ret;
1939 
1940 	init_completion(&hresult.completion);
1941 
1942 	req = ahash_request_alloc(tfm, GFP_KERNEL);
1943 	if (!req)
1944 		return -ENOMEM;
1945 
1946 	/* Keep tfm keylen == 0 during hash of the long key */
1947 	ctx->keylen = 0;
1948 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1949 				   keyhash_complete, &hresult);
1950 
1951 	sg_init_one(&sg[0], key, keylen);
1952 
1953 	ahash_request_set_crypt(req, sg, hash, keylen);
1954 	ret = crypto_ahash_digest(req);
1955 	switch (ret) {
1956 	case 0:
1957 		break;
1958 	case -EINPROGRESS:
1959 	case -EBUSY:
1960 		/* hresult lives on this stack frame: wait uninterruptibly */
1961 		wait_for_completion(&hresult.completion);
1962 		ret = hresult.err;
1964 		break;
1965 	default:
1966 		break;
1967 	}
1968 	ahash_request_free(req);
1969 
1970 	return ret;
1971 }
1972 
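     /* HMAC setkey: use keys up to one block verbatim; hash longer keys first */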
1973 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1974 			unsigned int keylen)
1975 {
1976 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1977 	unsigned int blocksize =
1978 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1979 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1980 	unsigned int keysize = keylen;
1981 	u8 hash[SHA512_DIGEST_SIZE];
1982 	int ret;
1983 
1984 	if (keylen <= blocksize) {
1985 		memcpy(ctx->key, key, keysize);
1986 	} else {
1987 		/* Must get the hash of the long key */
1988 		ret = keyhash(tfm, key, keylen, hash);
1989 
1990 		if (ret) {
1991 			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1992 			return -EINVAL;
1993 		}
1994 
1995 		keysize = digestsize;
1996 		memcpy(ctx->key, hash, digestsize);
1997 	}
1998 
1999 	ctx->keylen = keysize;
2000 
2001 	return 0;
2002 }
2003 
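     /**
      * struct talitos_alg_template - per-algorithm registration template
      * @type: CRYPTO_ALG_TYPE_* value selecting the valid union member
      * @alg: crypto API algorithm definition (cipher/aead or ahash)
      * @desc_hdr_template: SEC descriptor header selecting type, EUs and modes
      */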
2005 struct talitos_alg_template {
2006 	u32 type;
2007 	union {
2008 		struct crypto_alg crypto;
2009 		struct ahash_alg hash;
2010 	} alg;
2011 	__be32 desc_hdr_template;
2012 };
2013 
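     /*
      * Master table of algorithms this driver can offer.  At probe time, each
      * entry's desc_hdr_template is checked via hw_supports() against the
      * descriptor-type and execution-unit masks from the device tree before
      * the corresponding algorithm is registered.
      */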
2014 static struct talitos_alg_template driver_algs[] = {
2015 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2016 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2017 		.alg.crypto = {
2018 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
2019 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
2020 			.cra_blocksize = AES_BLOCK_SIZE,
2021 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2022 			.cra_type = &crypto_aead_type,
2023 			.cra_aead = {
2024 				.setkey = aead_setkey,
2025 				.setauthsize = aead_setauthsize,
2026 				.encrypt = aead_encrypt,
2027 				.decrypt = aead_decrypt,
2028 				.givencrypt = aead_givencrypt,
2029 				.geniv = "<built-in>",
2030 				.ivsize = AES_BLOCK_SIZE,
2031 				.maxauthsize = SHA1_DIGEST_SIZE,
2032 			}
2033 		},
2034 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2035 			             DESC_HDR_SEL0_AESU |
2036 		                     DESC_HDR_MODE0_AESU_CBC |
2037 		                     DESC_HDR_SEL1_MDEUA |
2038 		                     DESC_HDR_MODE1_MDEU_INIT |
2039 		                     DESC_HDR_MODE1_MDEU_PAD |
2040 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2041 	},
2042 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2043 		.alg.crypto = {
2044 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
2045 			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
2046 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2047 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2048 			.cra_type = &crypto_aead_type,
2049 			.cra_aead = {
2050 				.setkey = aead_setkey,
2051 				.setauthsize = aead_setauthsize,
2052 				.encrypt = aead_encrypt,
2053 				.decrypt = aead_decrypt,
2054 				.givencrypt = aead_givencrypt,
2055 				.geniv = "<built-in>",
2056 				.ivsize = DES3_EDE_BLOCK_SIZE,
2057 				.maxauthsize = SHA1_DIGEST_SIZE,
2058 			}
2059 		},
2060 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2061 			             DESC_HDR_SEL0_DEU |
2062 		                     DESC_HDR_MODE0_DEU_CBC |
2063 		                     DESC_HDR_MODE0_DEU_3DES |
2064 		                     DESC_HDR_SEL1_MDEUA |
2065 		                     DESC_HDR_MODE1_MDEU_INIT |
2066 		                     DESC_HDR_MODE1_MDEU_PAD |
2067 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2068 	},
2069 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2070 		.alg.crypto = {
2071 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
2072 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
2073 			.cra_blocksize = AES_BLOCK_SIZE,
2074 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2075 			.cra_type = &crypto_aead_type,
2076 			.cra_aead = {
2077 				.setkey = aead_setkey,
2078 				.setauthsize = aead_setauthsize,
2079 				.encrypt = aead_encrypt,
2080 				.decrypt = aead_decrypt,
2081 				.givencrypt = aead_givencrypt,
2082 				.geniv = "<built-in>",
2083 				.ivsize = AES_BLOCK_SIZE,
2084 				.maxauthsize = SHA256_DIGEST_SIZE,
2085 			}
2086 		},
2087 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2088 			             DESC_HDR_SEL0_AESU |
2089 		                     DESC_HDR_MODE0_AESU_CBC |
2090 		                     DESC_HDR_SEL1_MDEUA |
2091 		                     DESC_HDR_MODE1_MDEU_INIT |
2092 		                     DESC_HDR_MODE1_MDEU_PAD |
2093 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2094 	},
2095 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2096 		.alg.crypto = {
2097 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
2098 			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
2099 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2100 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2101 			.cra_type = &crypto_aead_type,
2102 			.cra_aead = {
2103 				.setkey = aead_setkey,
2104 				.setauthsize = aead_setauthsize,
2105 				.encrypt = aead_encrypt,
2106 				.decrypt = aead_decrypt,
2107 				.givencrypt = aead_givencrypt,
2108 				.geniv = "<built-in>",
2109 				.ivsize = DES3_EDE_BLOCK_SIZE,
2110 				.maxauthsize = SHA256_DIGEST_SIZE,
2111 			}
2112 		},
2113 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2114 			             DESC_HDR_SEL0_DEU |
2115 		                     DESC_HDR_MODE0_DEU_CBC |
2116 		                     DESC_HDR_MODE0_DEU_3DES |
2117 		                     DESC_HDR_SEL1_MDEUA |
2118 		                     DESC_HDR_MODE1_MDEU_INIT |
2119 		                     DESC_HDR_MODE1_MDEU_PAD |
2120 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2121 	},
2122 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2123 		.alg.crypto = {
2124 			.cra_name = "authenc(hmac(md5),cbc(aes))",
2125 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2126 			.cra_blocksize = AES_BLOCK_SIZE,
2127 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2128 			.cra_type = &crypto_aead_type,
2129 			.cra_aead = {
2130 				.setkey = aead_setkey,
2131 				.setauthsize = aead_setauthsize,
2132 				.encrypt = aead_encrypt,
2133 				.decrypt = aead_decrypt,
2134 				.givencrypt = aead_givencrypt,
2135 				.geniv = "<built-in>",
2136 				.ivsize = AES_BLOCK_SIZE,
2137 				.maxauthsize = MD5_DIGEST_SIZE,
2138 			}
2139 		},
2140 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2141 			             DESC_HDR_SEL0_AESU |
2142 		                     DESC_HDR_MODE0_AESU_CBC |
2143 		                     DESC_HDR_SEL1_MDEUA |
2144 		                     DESC_HDR_MODE1_MDEU_INIT |
2145 		                     DESC_HDR_MODE1_MDEU_PAD |
2146 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2147 	},
2148 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2149 		.alg.crypto = {
2150 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2151 			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2152 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2153 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2154 			.cra_type = &crypto_aead_type,
2155 			.cra_aead = {
2156 				.setkey = aead_setkey,
2157 				.setauthsize = aead_setauthsize,
2158 				.encrypt = aead_encrypt,
2159 				.decrypt = aead_decrypt,
2160 				.givencrypt = aead_givencrypt,
2161 				.geniv = "<built-in>",
2162 				.ivsize = DES3_EDE_BLOCK_SIZE,
2163 				.maxauthsize = MD5_DIGEST_SIZE,
2164 			}
2165 		},
2166 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2167 			             DESC_HDR_SEL0_DEU |
2168 		                     DESC_HDR_MODE0_DEU_CBC |
2169 		                     DESC_HDR_MODE0_DEU_3DES |
2170 		                     DESC_HDR_SEL1_MDEUA |
2171 		                     DESC_HDR_MODE1_MDEU_INIT |
2172 		                     DESC_HDR_MODE1_MDEU_PAD |
2173 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2174 	},
2175 	/* ABLKCIPHER algorithms. */
2176 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2177 		.alg.crypto = {
2178 			.cra_name = "cbc(aes)",
2179 			.cra_driver_name = "cbc-aes-talitos",
2180 			.cra_blocksize = AES_BLOCK_SIZE,
2181 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2182                                      CRYPTO_ALG_ASYNC,
2183 			.cra_type = &crypto_ablkcipher_type,
2184 			.cra_ablkcipher = {
2185 				.setkey = ablkcipher_setkey,
2186 				.encrypt = ablkcipher_encrypt,
2187 				.decrypt = ablkcipher_decrypt,
2188 				.geniv = "eseqiv",
2189 				.min_keysize = AES_MIN_KEY_SIZE,
2190 				.max_keysize = AES_MAX_KEY_SIZE,
2191 				.ivsize = AES_BLOCK_SIZE,
2192 			}
2193 		},
2194 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2195 				     DESC_HDR_SEL0_AESU |
2196 				     DESC_HDR_MODE0_AESU_CBC,
2197 	},
2198 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2199 		.alg.crypto = {
2200 			.cra_name = "cbc(des3_ede)",
2201 			.cra_driver_name = "cbc-3des-talitos",
2202 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2203 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2204                                      CRYPTO_ALG_ASYNC,
2205 			.cra_type = &crypto_ablkcipher_type,
2206 			.cra_ablkcipher = {
2207 				.setkey = ablkcipher_setkey,
2208 				.encrypt = ablkcipher_encrypt,
2209 				.decrypt = ablkcipher_decrypt,
2210 				.geniv = "eseqiv",
2211 				.min_keysize = DES3_EDE_KEY_SIZE,
2212 				.max_keysize = DES3_EDE_KEY_SIZE,
2213 				.ivsize = DES3_EDE_BLOCK_SIZE,
2214 			}
2215 		},
2216 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2217 			             DESC_HDR_SEL0_DEU |
2218 		                     DESC_HDR_MODE0_DEU_CBC |
2219 		                     DESC_HDR_MODE0_DEU_3DES,
2220 	},
2221 	/* AHASH algorithms. */
2222 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2223 		.alg.hash = {
2224 			.init = ahash_init,
2225 			.update = ahash_update,
2226 			.final = ahash_final,
2227 			.finup = ahash_finup,
2228 			.digest = ahash_digest,
2229 			.halg.digestsize = MD5_DIGEST_SIZE,
2230 			.halg.base = {
2231 				.cra_name = "md5",
2232 				.cra_driver_name = "md5-talitos",
2233 				.cra_blocksize = MD5_BLOCK_SIZE,
2234 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2235 					     CRYPTO_ALG_ASYNC,
2236 				.cra_type = &crypto_ahash_type
2237 			}
2238 		},
2239 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2240 				     DESC_HDR_SEL0_MDEUA |
2241 				     DESC_HDR_MODE0_MDEU_MD5,
2242 	},
2243 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2244 		.alg.hash = {
2245 			.init = ahash_init,
2246 			.update = ahash_update,
2247 			.final = ahash_final,
2248 			.finup = ahash_finup,
2249 			.digest = ahash_digest,
2250 			.halg.digestsize = SHA1_DIGEST_SIZE,
2251 			.halg.base = {
2252 				.cra_name = "sha1",
2253 				.cra_driver_name = "sha1-talitos",
2254 				.cra_blocksize = SHA1_BLOCK_SIZE,
2255 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2256 					     CRYPTO_ALG_ASYNC,
2257 				.cra_type = &crypto_ahash_type
2258 			}
2259 		},
2260 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2261 				     DESC_HDR_SEL0_MDEUA |
2262 				     DESC_HDR_MODE0_MDEU_SHA1,
2263 	},
2264 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2265 		.alg.hash = {
2266 			.init = ahash_init,
2267 			.update = ahash_update,
2268 			.final = ahash_final,
2269 			.finup = ahash_finup,
2270 			.digest = ahash_digest,
2271 			.halg.digestsize = SHA224_DIGEST_SIZE,
2272 			.halg.base = {
2273 				.cra_name = "sha224",
2274 				.cra_driver_name = "sha224-talitos",
2275 				.cra_blocksize = SHA224_BLOCK_SIZE,
2276 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2277 					     CRYPTO_ALG_ASYNC,
2278 				.cra_type = &crypto_ahash_type
2279 			}
2280 		},
2281 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2282 				     DESC_HDR_SEL0_MDEUA |
2283 				     DESC_HDR_MODE0_MDEU_SHA224,
2284 	},
2285 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2286 		.alg.hash = {
2287 			.init = ahash_init,
2288 			.update = ahash_update,
2289 			.final = ahash_final,
2290 			.finup = ahash_finup,
2291 			.digest = ahash_digest,
2292 			.halg.digestsize = SHA256_DIGEST_SIZE,
2293 			.halg.base = {
2294 				.cra_name = "sha256",
2295 				.cra_driver_name = "sha256-talitos",
2296 				.cra_blocksize = SHA256_BLOCK_SIZE,
2297 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2298 					     CRYPTO_ALG_ASYNC,
2299 				.cra_type = &crypto_ahash_type
2300 			}
2301 		},
2302 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2303 				     DESC_HDR_SEL0_MDEUA |
2304 				     DESC_HDR_MODE0_MDEU_SHA256,
2305 	},
2306 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2307 		.alg.hash = {
2308 			.init = ahash_init,
2309 			.update = ahash_update,
2310 			.final = ahash_final,
2311 			.finup = ahash_finup,
2312 			.digest = ahash_digest,
2313 			.halg.digestsize = SHA384_DIGEST_SIZE,
2314 			.halg.base = {
2315 				.cra_name = "sha384",
2316 				.cra_driver_name = "sha384-talitos",
2317 				.cra_blocksize = SHA384_BLOCK_SIZE,
2318 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2319 					     CRYPTO_ALG_ASYNC,
2320 				.cra_type = &crypto_ahash_type
2321 			}
2322 		},
2323 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2324 				     DESC_HDR_SEL0_MDEUB |
2325 				     DESC_HDR_MODE0_MDEUB_SHA384,
2326 	},
2327 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2328 		.alg.hash = {
2329 			.init = ahash_init,
2330 			.update = ahash_update,
2331 			.final = ahash_final,
2332 			.finup = ahash_finup,
2333 			.digest = ahash_digest,
2334 			.halg.digestsize = SHA512_DIGEST_SIZE,
2335 			.halg.base = {
2336 				.cra_name = "sha512",
2337 				.cra_driver_name = "sha512-talitos",
2338 				.cra_blocksize = SHA512_BLOCK_SIZE,
2339 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2340 					     CRYPTO_ALG_ASYNC,
2341 				.cra_type = &crypto_ahash_type
2342 			}
2343 		},
2344 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2345 				     DESC_HDR_SEL0_MDEUB |
2346 				     DESC_HDR_MODE0_MDEUB_SHA512,
2347 	},
2348 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2349 		.alg.hash = {
2350 			.init = ahash_init,
2351 			.update = ahash_update,
2352 			.final = ahash_final,
2353 			.finup = ahash_finup,
2354 			.digest = ahash_digest,
2355 			.setkey = ahash_setkey,
2356 			.halg.digestsize = MD5_DIGEST_SIZE,
2357 			.halg.base = {
2358 				.cra_name = "hmac(md5)",
2359 				.cra_driver_name = "hmac-md5-talitos",
2360 				.cra_blocksize = MD5_BLOCK_SIZE,
2361 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2362 					     CRYPTO_ALG_ASYNC,
2363 				.cra_type = &crypto_ahash_type
2364 			}
2365 		},
2366 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2367 				     DESC_HDR_SEL0_MDEUA |
2368 				     DESC_HDR_MODE0_MDEU_MD5,
2369 	},
2370 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2371 		.alg.hash = {
2372 			.init = ahash_init,
2373 			.update = ahash_update,
2374 			.final = ahash_final,
2375 			.finup = ahash_finup,
2376 			.digest = ahash_digest,
2377 			.setkey = ahash_setkey,
2378 			.halg.digestsize = SHA1_DIGEST_SIZE,
2379 			.halg.base = {
2380 				.cra_name = "hmac(sha1)",
2381 				.cra_driver_name = "hmac-sha1-talitos",
2382 				.cra_blocksize = SHA1_BLOCK_SIZE,
2383 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2384 					     CRYPTO_ALG_ASYNC,
2385 				.cra_type = &crypto_ahash_type
2386 			}
2387 		},
2388 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2389 				     DESC_HDR_SEL0_MDEUA |
2390 				     DESC_HDR_MODE0_MDEU_SHA1,
2391 	},
2392 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2393 		.alg.hash = {
2394 			.init = ahash_init,
2395 			.update = ahash_update,
2396 			.final = ahash_final,
2397 			.finup = ahash_finup,
2398 			.digest = ahash_digest,
2399 			.setkey = ahash_setkey,
2400 			.halg.digestsize = SHA224_DIGEST_SIZE,
2401 			.halg.base = {
2402 				.cra_name = "hmac(sha224)",
2403 				.cra_driver_name = "hmac-sha224-talitos",
2404 				.cra_blocksize = SHA224_BLOCK_SIZE,
2405 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2406 					     CRYPTO_ALG_ASYNC,
2407 				.cra_type = &crypto_ahash_type
2408 			}
2409 		},
2410 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2411 				     DESC_HDR_SEL0_MDEUA |
2412 				     DESC_HDR_MODE0_MDEU_SHA224,
2413 	},
2414 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2415 		.alg.hash = {
2416 			.init = ahash_init,
2417 			.update = ahash_update,
2418 			.final = ahash_final,
2419 			.finup = ahash_finup,
2420 			.digest = ahash_digest,
2421 			.setkey = ahash_setkey,
2422 			.halg.digestsize = SHA256_DIGEST_SIZE,
2423 			.halg.base = {
2424 				.cra_name = "hmac(sha256)",
2425 				.cra_driver_name = "hmac-sha256-talitos",
2426 				.cra_blocksize = SHA256_BLOCK_SIZE,
2427 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2428 					     CRYPTO_ALG_ASYNC,
2429 				.cra_type = &crypto_ahash_type
2430 			}
2431 		},
2432 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2433 				     DESC_HDR_SEL0_MDEUA |
2434 				     DESC_HDR_MODE0_MDEU_SHA256,
2435 	},
2436 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2437 		.alg.hash = {
2438 			.init = ahash_init,
2439 			.update = ahash_update,
2440 			.final = ahash_final,
2441 			.finup = ahash_finup,
2442 			.digest = ahash_digest,
2443 			.setkey = ahash_setkey,
2444 			.halg.digestsize = SHA384_DIGEST_SIZE,
2445 			.halg.base = {
2446 				.cra_name = "hmac(sha384)",
2447 				.cra_driver_name = "hmac-sha384-talitos",
2448 				.cra_blocksize = SHA384_BLOCK_SIZE,
2449 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2450 					     CRYPTO_ALG_ASYNC,
2451 				.cra_type = &crypto_ahash_type
2452 			}
2453 		},
2454 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2455 				     DESC_HDR_SEL0_MDEUB |
2456 				     DESC_HDR_MODE0_MDEUB_SHA384,
2457 	},
2458 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2459 		.alg.hash = {
2460 			.init = ahash_init,
2461 			.update = ahash_update,
2462 			.final = ahash_final,
2463 			.finup = ahash_finup,
2464 			.digest = ahash_digest,
2465 			.setkey = ahash_setkey,
2466 			.halg.digestsize = SHA512_DIGEST_SIZE,
2467 			.halg.base = {
2468 				.cra_name = "hmac(sha512)",
2469 				.cra_driver_name = "hmac-sha512-talitos",
2470 				.cra_blocksize = SHA512_BLOCK_SIZE,
2471 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2472 					     CRYPTO_ALG_ASYNC,
2473 				.cra_type = &crypto_ahash_type
2474 			}
2475 		},
2476 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2477 				     DESC_HDR_SEL0_MDEUB |
2478 				     DESC_HDR_MODE0_MDEUB_SHA512,
2479 	}
2480 };
2481 
2482 struct talitos_crypto_alg {
2483 	struct list_head entry;
2484 	struct device *dev;
2485 	struct talitos_alg_template algt;
2486 };
2487 
2488 static int talitos_cra_init(struct crypto_tfm *tfm)
2489 {
2490 	struct crypto_alg *alg = tfm->__crt_alg;
2491 	struct talitos_crypto_alg *talitos_alg;
2492 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2493 	struct talitos_private *priv;
2494 
2495 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2496 		talitos_alg = container_of(__crypto_ahash_alg(alg),
2497 					   struct talitos_crypto_alg,
2498 					   algt.alg.hash);
2499 	else
2500 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
2501 					   algt.alg.crypto);
2502 
2503 	/* update context with ptr to dev */
2504 	ctx->dev = talitos_alg->dev;
2505 
2506 	/* assign SEC channel to tfm in round-robin fashion */
2507 	priv = dev_get_drvdata(ctx->dev);
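     	/* num_channels is a power of 2 (enforced at probe): mask == modulo */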
2508 	ctx->ch = atomic_inc_return(&priv->last_chan) &
2509 		  (priv->num_channels - 1);
2510 
2511 	/* copy descriptor header template value */
2512 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2513 
2514 	/* select done notification */
2515 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2516 
2517 	return 0;
2518 }
2519 
2520 static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2521 {
2522 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2523 
2524 	talitos_cra_init(tfm);
2525 
2526 	/* random first IV */
2527 	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
2528 
2529 	return 0;
2530 }
2531 
2532 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2533 {
2534 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2535 
2536 	talitos_cra_init(tfm);
2537 
2538 	ctx->keylen = 0;
2539 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2540 				 sizeof(struct talitos_ahash_req_ctx));
2541 
2542 	return 0;
2543 }
2544 
2545 /*
2546  * given the alg's descriptor header template, determine whether the
2547  * descriptor type and required primary/secondary execution units match
2548  * the hw capabilities described in the device tree node.
2549  */
2550 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2551 {
2552 	struct talitos_private *priv = dev_get_drvdata(dev);
2553 	int ret;
2554 
2555 	ret = ((1 << DESC_TYPE(desc_hdr_template)) & priv->desc_types) &&
2556 	      ((1 << PRIMARY_EU(desc_hdr_template)) & priv->exec_units);
2557 
2558 	if (SECONDARY_EU(desc_hdr_template))
2559 		ret = ret && ((1 << SECONDARY_EU(desc_hdr_template))
2560 		              & priv->exec_units);
2561 
2562 	return ret;
2563 }
2564 
2565 static int talitos_remove(struct platform_device *ofdev)
2566 {
2567 	struct device *dev = &ofdev->dev;
2568 	struct talitos_private *priv = dev_get_drvdata(dev);
2569 	struct talitos_crypto_alg *t_alg, *n;
2570 	int i;
2571 
2572 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2573 		switch (t_alg->algt.type) {
2574 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2575 		case CRYPTO_ALG_TYPE_AEAD:
2576 			crypto_unregister_alg(&t_alg->algt.alg.crypto);
2577 			break;
2578 		case CRYPTO_ALG_TYPE_AHASH:
2579 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
2580 			break;
2581 		}
2582 		list_del(&t_alg->entry);
2583 		kfree(t_alg);
2584 	}
2585 
2586 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2587 		talitos_unregister_rng(dev);
2588 
2589 	for (i = 0; i < priv->num_channels; i++)
2590 		kfree(priv->chan[i].fifo);
2591 
2592 	kfree(priv->chan);
2593 
2594 	for (i = 0; i < 2; i++) {
2595 		if (priv->irq[i]) {
2596 			free_irq(priv->irq[i], dev);
2597 			irq_dispose_mapping(priv->irq[i]);
2598 		}
     	}
2599 
2600 	tasklet_kill(&priv->done_task[0]);
2601 	if (priv->irq[1])
2602 		tasklet_kill(&priv->done_task[1]);
2603 
2604 	iounmap(priv->reg);
2605 
2606 	dev_set_drvdata(dev, NULL);
2607 
2608 	kfree(priv);
2609 
2610 	return 0;
2611 }
2612 
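     /*
      * Instantiate one driver_algs[] template for this device: copy it, wire
      * up the type-specific cra_init, and apply feature-dependent fixups such
      * as the sha224 software-init fallback.
      */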
2613 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2614 						    struct talitos_alg_template
2615 						           *template)
2616 {
2617 	struct talitos_private *priv = dev_get_drvdata(dev);
2618 	struct talitos_crypto_alg *t_alg;
2619 	struct crypto_alg *alg;
2620 
2621 	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2622 	if (!t_alg)
2623 		return ERR_PTR(-ENOMEM);
2624 
2625 	t_alg->algt = *template;
2626 
2627 	switch (t_alg->algt.type) {
2628 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2629 		alg = &t_alg->algt.alg.crypto;
2630 		alg->cra_init = talitos_cra_init;
2631 		break;
2632 	case CRYPTO_ALG_TYPE_AEAD:
2633 		alg = &t_alg->algt.alg.crypto;
2634 		alg->cra_init = talitos_cra_init_aead;
2635 		break;
2636 	case CRYPTO_ALG_TYPE_AHASH:
2637 		alg = &t_alg->algt.alg.hash.halg.base;
2638 		alg->cra_init = talitos_cra_init_ahash;
2639 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2640 		    !strncmp(alg->cra_name, "hmac", 4)) {
2641 			kfree(t_alg);
2642 			return ERR_PTR(-ENOTSUPP);
2643 		}
2644 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2645 		    (!strcmp(alg->cra_name, "sha224") ||
2646 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
2647 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2648 			t_alg->algt.desc_hdr_template =
2649 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2650 					DESC_HDR_SEL0_MDEUA |
2651 					DESC_HDR_MODE0_MDEU_SHA256;
2652 		}
2653 		break;
2654 	default:
2655 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
     		kfree(t_alg);	/* don't leak the allocation on this error path */
2656 		return ERR_PTR(-EINVAL);
2657 	}
2658 
2659 	alg->cra_module = THIS_MODULE;
2660 	alg->cra_priority = TALITOS_CRA_PRIORITY;
2661 	alg->cra_alignmask = 0;
2662 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
2663 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2664 
2665 	t_alg->dev = dev;
2666 
2667 	return t_alg;
2668 }
2669 
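     /*
      * SEC nodes supply either one interrupt serving all channels or two
      * interrupts splitting them (channels 0/2 and 1/3); map whatever is
      * present and request the matching handlers.
      */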
2670 static int talitos_probe_irq(struct platform_device *ofdev)
2671 {
2672 	struct device *dev = &ofdev->dev;
2673 	struct device_node *np = ofdev->dev.of_node;
2674 	struct talitos_private *priv = dev_get_drvdata(dev);
2675 	int err;
2676 
2677 	priv->irq[0] = irq_of_parse_and_map(np, 0);
2678 	if (!priv->irq[0]) {
2679 		dev_err(dev, "failed to map irq\n");
2680 		return -EINVAL;
2681 	}
2682 
2683 	priv->irq[1] = irq_of_parse_and_map(np, 1);
2684 
2685 	/* get the primary irq line */
2686 	if (!priv->irq[1]) {
2687 		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
2688 				  dev_driver_string(dev), dev);
2689 		goto primary_out;
2690 	}
2691 
2692 	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
2693 			  dev_driver_string(dev), dev);
2694 	if (err)
2695 		goto primary_out;
2696 
2697 	/* get the secondary irq line */
2698 	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
2699 			  dev_driver_string(dev), dev);
2700 	if (err) {
2701 		dev_err(dev, "failed to request secondary irq\n");
2702 		irq_dispose_mapping(priv->irq[1]);
2703 		priv->irq[1] = 0;
2704 	}
2705 
2706 	return err;
2707 
2708 primary_out:
2709 	if (err) {
2710 		dev_err(dev, "failed to request primary irq\n");
2711 		irq_dispose_mapping(priv->irq[0]);
2712 		priv->irq[0] = 0;
2713 	}
2714 
2715 	return err;
2716 }
2717 
2718 static int talitos_probe(struct platform_device *ofdev)
2719 {
2720 	struct device *dev = &ofdev->dev;
2721 	struct device_node *np = ofdev->dev.of_node;
2722 	struct talitos_private *priv;
2723 	const unsigned int *prop;
2724 	int i, err;
2725 
2726 	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2727 	if (!priv)
2728 		return -ENOMEM;
2729 
2730 	dev_set_drvdata(dev, priv);
2731 
2732 	priv->ofdev = ofdev;
2733 
2734 	spin_lock_init(&priv->reg_lock);
2735 
2736 	err = talitos_probe_irq(ofdev);
2737 	if (err)
2738 		goto err_out;
2739 
2740 	if (!priv->irq[1]) {
2741 		tasklet_init(&priv->done_task[0], talitos_done_4ch,
2742 			     (unsigned long)dev);
2743 	} else {
2744 		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
2745 			     (unsigned long)dev);
2746 		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
2747 			     (unsigned long)dev);
2748 	}
2749 
2750 	INIT_LIST_HEAD(&priv->alg_list);
2751 
2752 	priv->reg = of_iomap(np, 0);
2753 	if (!priv->reg) {
2754 		dev_err(dev, "failed to of_iomap\n");
2755 		err = -ENOMEM;
2756 		goto err_out;
2757 	}
2758 
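     	/*
     	 * The properties read below come from the SEC node.  An illustrative
     	 * (not authoritative) node, with values along the lines of the
     	 * fsl,sec2.0 binding example:
     	 *
     	 *	crypto@30000 {
     	 *		compatible = "fsl,sec2.0";
     	 *		reg = <0x30000 0x10000>;
     	 *		interrupts = <11 2>;
     	 *		fsl,num-channels = <4>;
     	 *		fsl,channel-fifo-len = <24>;
     	 *		fsl,exec-units-mask = <0xfe>;
     	 *		fsl,descriptor-types-mask = <0x12b0ebf>;
     	 *	};
     	 */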
2759 	/* get SEC version capabilities from device tree */
2760 	prop = of_get_property(np, "fsl,num-channels", NULL);
2761 	if (prop)
2762 		priv->num_channels = *prop;
2763 
2764 	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2765 	if (prop)
2766 		priv->chfifo_len = *prop;
2767 
2768 	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2769 	if (prop)
2770 		priv->exec_units = *prop;
2771 
2772 	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2773 	if (prop)
2774 		priv->desc_types = *prop;
2775 
2776 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2777 	    !priv->exec_units || !priv->desc_types) {
2778 		dev_err(dev, "invalid property data in device tree node\n");
2779 		err = -EINVAL;
2780 		goto err_out;
2781 	}
2782 
2783 	if (of_device_is_compatible(np, "fsl,sec3.0"))
2784 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2785 
2786 	if (of_device_is_compatible(np, "fsl,sec2.1"))
2787 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2788 				  TALITOS_FTR_SHA224_HWINIT |
2789 				  TALITOS_FTR_HMAC_OK;
2790 
2791 	priv->chan = kcalloc(priv->num_channels,
2792 			     sizeof(struct talitos_channel), GFP_KERNEL);
2793 	if (!priv->chan) {
2794 		dev_err(dev, "failed to allocate channel management space\n");
2795 		err = -ENOMEM;
2796 		goto err_out;
2797 	}
2798 
2799 	for (i = 0; i < priv->num_channels; i++) {
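     	/*
     	 * Point each channel at its register window: channels sit at
     	 * TALITOS_CH_STRIDE intervals, with TALITOS_CH_BASE_OFFSET added for
     	 * every channel on single-IRQ parts but only for even channels when
     	 * a second IRQ is present.
     	 */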
2800 		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
2801 		if (!priv->irq[1] || !(i & 1))
2802 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
2803 	}
2804 
2805 	for (i = 0; i < priv->num_channels; i++) {
2806 		spin_lock_init(&priv->chan[i].head_lock);
2807 		spin_lock_init(&priv->chan[i].tail_lock);
2808 	}
2809 
2810 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2811 
2812 	for (i = 0; i < priv->num_channels; i++) {
2813 		priv->chan[i].fifo = kcalloc(priv->fifo_len,
2814 					     sizeof(struct talitos_request),
     					     GFP_KERNEL);
2815 		if (!priv->chan[i].fifo) {
2816 			dev_err(dev, "failed to allocate request fifo %d\n", i);
2817 			err = -ENOMEM;
2818 			goto err_out;
2819 		}
2820 	}
2821 
2822 	for (i = 0; i < priv->num_channels; i++)
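     	/*
     	 * Bias each submit counter to -(chfifo_len - 1) so that it turns
     	 * positive once chfifo_len - 1 requests are outstanding; the submit
     	 * path treats a positive count as channel-full.
     	 */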
2823 		atomic_set(&priv->chan[i].submit_count,
2824 			   -(priv->chfifo_len - 1));
2825 
2826 	err = dma_set_mask(dev, DMA_BIT_MASK(36));
     	if (err) {
     		dev_err(dev, "failed to set 36-bit DMA mask\n");
     		goto err_out;
     	}
2827 
2828 	/* reset and initialize the h/w */
2829 	err = init_device(dev);
2830 	if (err) {
2831 		dev_err(dev, "failed to initialize device\n");
2832 		goto err_out;
2833 	}
2834 
2835 	/* register the RNG, if available */
2836 	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
2837 		err = talitos_register_rng(dev);
2838 		if (err) {
2839 			dev_err(dev, "failed to register hwrng: %d\n", err);
2840 			goto err_out;
2841 		} else
2842 		}
2843 		dev_info(dev, "hwrng\n");
2844 
2845 	/* register crypto algorithms the device supports */
2846 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2847 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2848 			struct talitos_crypto_alg *t_alg;
2849 			char *name = NULL;
2850 
2851 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2852 			if (IS_ERR(t_alg)) {
2853 				err = PTR_ERR(t_alg);
2854 				if (err == -ENOTSUPP)
2855 					continue;
2856 				goto err_out;
2857 			}
2858 
2859 			switch (t_alg->algt.type) {
2860 			case CRYPTO_ALG_TYPE_ABLKCIPHER:
2861 			case CRYPTO_ALG_TYPE_AEAD:
2862 				err = crypto_register_alg(
2863 						&t_alg->algt.alg.crypto);
2864 				name = t_alg->algt.alg.crypto.cra_driver_name;
2865 				break;
2866 			case CRYPTO_ALG_TYPE_AHASH:
2867 				err = crypto_register_ahash(
2868 						&t_alg->algt.alg.hash);
2869 				name =
2870 				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
2871 				break;
2872 			}
2873 			if (err) {
2874 				dev_err(dev, "%s alg registration failed\n",
2875 					name);
2876 				kfree(t_alg);
2877 			} else {
2878 				list_add_tail(&t_alg->entry, &priv->alg_list);
     			}
2879 		}
2880 	}
2881 	if (!list_empty(&priv->alg_list))
2882 		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
2883 			 (char *)of_get_property(np, "compatible", NULL));
2884 
2885 	return 0;
2886 
2887 err_out:
2888 	talitos_remove(ofdev);
2889 
2890 	return err;
2891 }
2892 
2893 static const struct of_device_id talitos_match[] = {
2894 	{
2895 		.compatible = "fsl,sec2.0",
2896 	},
2897 	{},
2898 };
2899 MODULE_DEVICE_TABLE(of, talitos_match);
2900 
2901 static struct platform_driver talitos_driver = {
2902 	.driver = {
2903 		.name = "talitos",
2904 		.owner = THIS_MODULE,
2905 		.of_match_table = talitos_match,
2906 	},
2907 	.probe = talitos_probe,
2908 	.remove = talitos_remove,
2909 };
2910 
2911 module_platform_driver(talitos_driver);
2912 
2913 MODULE_LICENSE("GPL");
2914 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
2915 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
2916