xref: /openbmc/linux/drivers/crypto/talitos.c (revision 4da722ca)
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27 
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
55 
56 #include "talitos.h"
57 
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 			   bool is_sec1)
60 {
61 	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62 	if (!is_sec1)
63 		ptr->eptr = upper_32_bits(dma_addr);
64 }
65 
66 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 			     struct talitos_ptr *src_ptr, bool is_sec1)
68 {
69 	dst_ptr->ptr = src_ptr->ptr;
70 	if (!is_sec1)
71 		dst_ptr->eptr = src_ptr->eptr;
72 }
73 
74 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
75 			       bool is_sec1)
76 {
77 	if (is_sec1) {
78 		ptr->res = 0;
79 		ptr->len1 = cpu_to_be16(len);
80 	} else {
81 		ptr->len = cpu_to_be16(len);
82 	}
83 }
84 
85 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
86 					   bool is_sec1)
87 {
88 	if (is_sec1)
89 		return be16_to_cpu(ptr->len1);
90 	else
91 		return be16_to_cpu(ptr->len);
92 }
93 
94 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
95 				   bool is_sec1)
96 {
97 	if (!is_sec1)
98 		ptr->j_extent = val;
99 }
100 
101 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
102 {
103 	if (!is_sec1)
104 		ptr->j_extent |= val;
105 }
106 
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer:
 * DMA-maps @data for @len bytes in direction @dir and fills in the
 * pointer's address, length and (cleared) extent fields.
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	/* no jump table / extent for a plain contiguous buffer */
	to_talitos_ptr_ext_set(ptr, 0, is_sec1);
}
123 
/*
 * unmap bus single (contiguous) h/w descriptor pointer: undoes
 * map_single_talitos_ptr() using the address and length stored in @ptr.
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * NOTE(review): only the low 32 bits (ptr->ptr) are handed back to
	 * dma_unmap_single(); ptr->eptr is ignored.  On a platform where
	 * dma_addr_t exceeds 32 bits this would unmap the wrong address —
	 * confirm DMA addresses here always fit in 32 bits.
	 */
	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
137 
/*
 * Reset one SEC channel and re-arm its control bits.
 *
 * Sets the channel-reset bit (register location differs between SEC1 and
 * SEC2+), busy-waits until the hardware clears it (bounded by
 * TALITOS_TIMEOUT), then enables extended addressing, done-writeback and
 * done-IRQ for the channel.  Returns 0 on success, -EIO on reset timeout.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		/* SEC1: reset bit lives in CCCR_LO */
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		/* SEC2+: reset bit lives in CCCR */
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
		          TALITOS_CCCR_LO_IWSE);

	return 0;
}
176 
/*
 * Software-reset the whole SEC block.
 *
 * Asserts MCR:SWR (bit position differs between SEC1 and SEC2+) and
 * busy-waits for the hardware to clear it.  If a second IRQ line is wired
 * up, also re-routes channels 1 and 3 to it via MCR:RCA1|RCA3.
 * Returns 0 on success, -EIO if the reset bit never clears.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	/* reset clears channel-to-IRQ assignments; redo them for irq[1] */
	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
202 
/*
 * Reset and initialize the device: double master reset (errata), per-channel
 * reset, then interrupt-mask setup.  Note the IMR sense is inverted on SEC1:
 * bits are *cleared* to enable interrupts there, while SEC2+ *sets* them.
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		/* SEC1 IMR is active-low: clearing the bits unmasks */
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
		          TALITOS_MDEUICR_LO_ICE);

	return 0;
}
251 
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 *
 * Returns -EINPROGRESS on successful submission (completion is reported
 * via @callback), or -EAGAIN when the channel's h/w fifo is full.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	/* submit_count doubles as the "fifo full" gate: it is biased so that
	 * a full fifo pins it at zero, making the increment fail */
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		/* SEC1 uses the hdr1 copy; the h/w-visible region starts
		 * there, so map from &desc->hdr1 rather than desc */
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/* publish the request fields before ->desc marks the slot in use;
	 * flush_channel() reads ->desc without the head lock */
	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
320 
/*
 * process what was done, notify callback of error if not
 *
 * Walks the channel fifo from the tail, retiring every request whose
 * descriptor has its DONE bits set (status 0) and, when @error is set,
 * also the offending request (status @error).  The tail lock is dropped
 * around each callback so callbacks may resubmit.  When @error is set but
 * the channel is not being reset, processing stops after the errored
 * descriptor because the channel may resume on its own.
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		/* free a submit slot before the callback can resubmit */
		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		/* re-read the tail: callbacks may have run concurrently */
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
383 
/*
 * process completed requests for channels that have done status
 *
 * Tasklet body for SEC1 done interrupts.  The magic constants are the
 * per-channel done bits in the SEC1 ISR layout (ch0=0x10000000,
 * ch1=0x40000000, ch2=0x00010000, ch3=0x00040000).  On exit the done
 * interrupts are unmasked again by *clearing* IMR bits — SEC1's mask
 * register is active-low (see init_device()).
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);			\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);			\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);			\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);			\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
415 
/*
 * Tasklet body for SEC2+ done interrupts.  Channel done bits sit at even
 * positions (bit 0, 2, 4, 6 for channels 0-3).  Unlike SEC1, done
 * interrupts are unmasked on exit by *setting* IMR bits.  Three variants
 * are instantiated: all four channels on one IRQ, or channels 0/2 and
 * 1/3 split across two IRQ lines.
 */
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
446 
/*
 * locate current (offending) descriptor
 *
 * Reads the channel's current-descriptor pointer register (CDPR, 64-bit
 * split across two 32-bit reads), then scans the s/w fifo starting at the
 * tail for the request whose mapped descriptor matches it.  Returns the
 * descriptor header, or 0 if CDPR is null or no fifo entry matches.
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	/* walk the circular fifo; wrapping back to tail means not found */
	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
477 
/*
 * user diagnostics; report root cause of error based on execution unit status
 *
 * Decodes the primary (SEL0) and secondary (SEL1) execution-unit selector
 * fields of @desc_hdr and dumps the matching EU's interrupt status
 * registers, followed by the channel's descriptor buffer contents.  If no
 * header was recovered, the first descriptor-buffer word is used instead.
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		/*
		 * NOTE(review): the KEU case reads the PKEU status
		 * registers (reg_pkeu); there is no reg_keu in sight.
		 * Confirm against the SEC reference manual whether KEU
		 * status is really reported through the PKEU block.
		 */
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	/* dump the 8 descriptor dwords (hi/lo pairs) from the channel */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
552 
/*
 * recover from error interrupts
 *
 * For each channel flagged in @isr: decode and log the channel pending
 * status (CCPSR_LO), flush the channel with an error status, then either
 * reset it or ask the hardware to continue (SEC2 only — SEC1 cannot
 * continue, hence reset_ch starts at 1 there).  If per-channel recovery
 * fails or a non-channel error is pending, the whole device is
 * re-initialized.
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	/*
	 * NOTE(review): both `timeout` and `reset_ch` persist across loop
	 * iterations — a DOF error on one channel forces resets on all
	 * later errored channels, and a second continuation wait starts
	 * with an already-decremented timeout budget.  Confirm this is
	 * intentional rather than missing per-channel re-initialization.
	 */
	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			/* SEC2+: error bits at odd positions 1,3,5,7 */
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* SEC2-only path (reset_ch == 0 implies !is_sec1):
			 * tell the channel to continue past the bad desc */
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	/* anything beyond per-channel errors needs a full device reset */
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
651 
/*
 * SEC1 hard-IRQ handler: snapshot and acknowledge ISR/ISR_LO, dispatch
 * errors to talitos_error() (outside the reg lock), otherwise mask done
 * interrupts (setbits32 — SEC1 IMR is active-low) and kick the done
 * tasklet, which will unmask them again when it drains the channels.
 */
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
686 
/*
 * SEC2+ hard-IRQ handler: same structure as the SEC1 variant, but done
 * interrupts are masked by *clearing* IMR bits, and any non-zero isr_lo
 * is treated as an error.  Instantiated for single-IRQ (4ch) and the
 * dual-IRQ ch0/2 and ch1/3 split configurations.
 */
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
725 
/*
 * hwrng
 */
/*
 * hwrng .data_present hook: poll the RNGU output-fifo-level bit up to 20
 * times with a 10us delay between polls when @wait is set (a single read
 * when it is not).  Returns 1 if random data is available, 0 otherwise.
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}
746 
/*
 * hwrng .data_read hook: pop one entry from the RNGU output fifo.  The
 * fifo must be read as a full 64-bit entry (both halves), but only one
 * 32-bit word is returned to the hwrng core — the first read's value is
 * deliberately overwritten by the second.
 */
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
758 
/*
 * hwrng .init hook: software-reset the RNGU and wait (bounded by
 * TALITOS_TIMEOUT) for its reset-done bit, then write the data-size
 * register to start continuous generation.  Returns 0 on success,
 * -ENODEV if the unit never comes out of reset.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
780 
781 static int talitos_register_rng(struct device *dev)
782 {
783 	struct talitos_private *priv = dev_get_drvdata(dev);
784 	int err;
785 
786 	priv->rng.name		= dev_driver_string(dev),
787 	priv->rng.init		= talitos_rng_init,
788 	priv->rng.data_present	= talitos_rng_data_present,
789 	priv->rng.data_read	= talitos_rng_data_read,
790 	priv->rng.priv		= (unsigned long)dev;
791 
792 	err = hwrng_register(&priv->rng);
793 	if (!err)
794 		priv->rng_registered = true;
795 
796 	return err;
797 }
798 
799 static void talitos_unregister_rng(struct device *dev)
800 {
801 	struct talitos_private *priv = dev_get_drvdata(dev);
802 
803 	if (!priv->rng_registered)
804 		return;
805 
806 	hwrng_unregister(&priv->rng);
807 	priv->rng_registered = false;
808 }
809 
810 /*
811  * crypto alg
812  */
813 #define TALITOS_CRA_PRIORITY		3000
814 /*
815  * Defines a priority for doing AEAD with descriptors type
816  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
817  */
818 #define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
819 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
820 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
821 
/*
 * per-transform (tfm) context: device/channel binding, the descriptor
 * header template selecting execution units and mode, and the key
 * material laid out as [auth key | enc key] for AEAD.
 */
struct talitos_ctx {
	struct device *dev;		/* SEC device servicing this tfm */
	int ch;				/* channel assigned to this tfm */
	__be32 desc_hdr_template;	/* h/w descriptor header template */
	u8 key[TALITOS_MAX_KEY_SIZE];	/* auth key followed by enc key */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;		/* total: authkeylen + enckeylen */
	unsigned int enckeylen;
	unsigned int authkeylen;
};
832 
833 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
834 #define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
835 
/*
 * per-request ahash state: running hardware hash context plus buffering
 * for data that cannot yet be fed to the engine (partial blocks).
 */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;	/* bytes of hw_context in use */
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* pending partial-block data */
	u8 bufnext[HASH_MAX_BLOCK_SIZE];	/* carry-over for next update */
	unsigned int swinit;		/* context initialized by s/w */
	unsigned int first;		/* first hash pass of the request */
	unsigned int last;		/* final hash pass of the request */
	unsigned int to_hash_later;	/* bytes deferred to a later pass */
	unsigned int nbuf;		/* valid bytes currently in buf */
	struct scatterlist bufsl[2];	/* sg wrapper for buffered data */
	struct scatterlist *psrc;	/* source sg actually hashed */
};
849 
/*
 * serialized ahash state for .export/.import: mirrors the fields of
 * struct talitos_ahash_req_ctx needed to resume a partial hash.
 */
struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* pending partial-block data */
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};
859 
860 static int aead_setkey(struct crypto_aead *authenc,
861 		       const u8 *key, unsigned int keylen)
862 {
863 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
864 	struct crypto_authenc_keys keys;
865 
866 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
867 		goto badkey;
868 
869 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
870 		goto badkey;
871 
872 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
873 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
874 
875 	ctx->keylen = keys.authkeylen + keys.enckeylen;
876 	ctx->enckeylen = keys.enckeylen;
877 	ctx->authkeylen = keys.authkeylen;
878 
879 	return 0;
880 
881 badkey:
882 	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
883 	return -EINVAL;
884 }
885 
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	/* trailing storage: link tables on SEC2+, a bounce buffer on SEC1 */
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
915 
/*
 * Undo the scatterlist DMA mapping for a request.  On SEC1 with a
 * multi-segment destination, the engine wrote into the edesc bounce
 * buffer instead, so sync it and copy the results back into @dst first.
 * Single-segment lists (and everything on SEC2+) were mapped directly
 * and are simply unmapped, with direction depending on whether src and
 * dst are the same list (in-place operation).
 */
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		/* results live in the bounce buffer; copy them out */
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		/* in-place: one list mapped bidirectionally */
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}
943 
/*
 * Unmap all DMA resources of a completed/aborted AEAD request: the
 * single-mapped descriptor pointers (ptr[6] only exists for true
 * IPSEC_ESP descriptors), the src/dst scatterlists, and the link-table
 * region.  For the non-IPSEC_ESP (HSNA) descriptor type, also recover
 * the last cipher block from dst into ctx->iv for IV chaining.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);

	if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		/* save the last cipher block as the IV for the next op */
		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + areq->cryptlen - ivsize);
	}
}
973 
974 /*
975  * ipsec_esp descriptor callbacks
976  */
/*
 * Encrypt completion: when the generated ICV landed out-of-line (in the
 * edesc bounce buffer on SEC1, or past the link tables on SEC2+),
 * append it to the tail of the dst scatterlist before completing.
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		/* SEC1: ICV sits in the bounce buffer right after the
		 * payload; SEC2+: just past the two link tables
		 */
		if (is_sec1)
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
1010 
1011 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1012 					  struct talitos_desc *desc,
1013 					  void *context, int err)
1014 {
1015 	struct aead_request *req = context;
1016 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1017 	unsigned int authsize = crypto_aead_authsize(authenc);
1018 	struct talitos_edesc *edesc;
1019 	struct scatterlist *sg;
1020 	char *oicv, *icv;
1021 	struct talitos_private *priv = dev_get_drvdata(dev);
1022 	bool is_sec1 = has_ftr_sec1(priv);
1023 
1024 	edesc = container_of(desc, struct talitos_edesc, desc);
1025 
1026 	ipsec_esp_unmap(dev, edesc, req);
1027 
1028 	if (!err) {
1029 		/* auth check */
1030 		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
1031 		icv = (char *)sg_virt(sg) + sg->length - authsize;
1032 
1033 		if (edesc->dma_len) {
1034 			if (is_sec1)
1035 				oicv = (char *)&edesc->dma_link_tbl +
1036 					       req->assoclen + req->cryptlen;
1037 			else
1038 				oicv = (char *)
1039 				       &edesc->link_tbl[edesc->src_nents +
1040 							edesc->dst_nents + 2];
1041 			if (edesc->icv_ool)
1042 				icv = oicv + authsize;
1043 		} else
1044 			oicv = (char *)&edesc->link_tbl[0];
1045 
1046 		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1047 	}
1048 
1049 	kfree(edesc);
1050 
1051 	aead_request_complete(req, err);
1052 }
1053 
1054 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1055 					  struct talitos_desc *desc,
1056 					  void *context, int err)
1057 {
1058 	struct aead_request *req = context;
1059 	struct talitos_edesc *edesc;
1060 
1061 	edesc = container_of(desc, struct talitos_edesc, desc);
1062 
1063 	ipsec_esp_unmap(dev, edesc, req);
1064 
1065 	/* check ICV auth status */
1066 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1067 		     DESC_HDR_LO_ICCR1_PASS))
1068 		err = -EBADMSG;
1069 
1070 	kfree(edesc);
1071 
1072 	aead_request_complete(req, err);
1073 }
1074 
1075 /*
1076  * convert scatterlist to SEC h/w link table format
1077  * stop at cryptlen bytes
1078  */
1079 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1080 				 unsigned int offset, int cryptlen,
1081 				 struct talitos_ptr *link_tbl_ptr)
1082 {
1083 	int n_sg = sg_count;
1084 	int count = 0;
1085 
1086 	while (cryptlen && sg && n_sg--) {
1087 		unsigned int len = sg_dma_len(sg);
1088 
1089 		if (offset >= len) {
1090 			offset -= len;
1091 			goto next;
1092 		}
1093 
1094 		len -= offset;
1095 
1096 		if (len > cryptlen)
1097 			len = cryptlen;
1098 
1099 		to_talitos_ptr(link_tbl_ptr + count,
1100 			       sg_dma_address(sg) + offset, 0);
1101 		to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
1102 		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1103 		count++;
1104 		cryptlen -= len;
1105 		offset = 0;
1106 
1107 next:
1108 		sg = sg_next(sg);
1109 	}
1110 
1111 	/* tag end of link table */
1112 	if (count > 0)
1113 		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1114 				       DESC_PTR_LNKTBL_RETURN, 0);
1115 
1116 	return count;
1117 }
1118 
/*
 * Point descriptor pointer @ptr at @len bytes of @src starting at
 * @offset.  A single-entry mapping is referenced directly; otherwise a
 * link table is built at @tbl_off (SEC2+) or the bounce buffer is used
 * (SEC1).  Returns the number of scatterlist/table entries consumed.
 */
int talitos_sg_map(struct device *dev, struct scatterlist *src,
		   unsigned int len, struct talitos_edesc *edesc,
		   struct talitos_ptr *ptr,
		   int sg_count, unsigned int offset, int tbl_off)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr_ext_set(ptr, 0, is_sec1);

	if (sg_count == 1) {
		/* contiguous: point straight at the mapped segment */
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		/* SEC1 has no link tables; data was bounced to the buffer */
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed*/
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}
1151 
1152 /*
1153  * fill in and submit ipsec_esp descriptor
1154  */
/*
 * Build the 7-pointer SEC descriptor for an AEAD request and submit it.
 * Pointer roles depend on the descriptor type: for IPSEC_ESP the IV is
 * in ptr[2], cipher key in ptr[3] and IV-out in ptr[6]; for the other
 * AEAD type IV/key are swapped and ptr[6] carries the ICV.
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	/* SEC1 can't gather: bounce a multi-entry src via edesc->buf */
	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		/* a link table was built; sync it to the device later */
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
		to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
		to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
		to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
	} else {
		to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
		to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
		to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
	}

	/* cipher key (stored in ctx->key after the auth key) */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
				       (char *)&ctx->key + ctx->authkeylen,
				       DMA_TO_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
				       (char *)&ctx->key + ctx->authkeylen,
				       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
	to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);

	sg_link_tbl_len = cryptlen;

	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
		to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);

		/* h/w ICV check on decrypt reads ciphertext + ICV */
		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len += authsize;
	}

	/* NOTE(review): sg_link_tbl_len is computed above but cryptlen is
	 * what actually gets mapped here — looks suspicious; confirm
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[4], sg_count, areq->assoclen,
				  tbl_off);

	if (sg_count > 1) {
		tbl_off += sg_count;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
				  &desc->ptr[5], sg_count, areq->assoclen,
				  tbl_off);

	/* extent covers the ICV appended to the ciphertext */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	if (sg_count > 1) {
		/* dst needed a link table: ICV lands out-of-line */
		edesc->icv_ool = true;
		sync_needed = true;

		if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			tbl_ptr += sg_count - 1;
			to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
			tbl_ptr++;
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);
			to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       is_sec1);
		}
	} else {
		edesc->icv_ool = false;
	}

	/* ICV data */
	if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
		to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
		to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
			       areq->assoclen + cryptlen, is_sec1);
	}

	/* iv out */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	/* flush CPU-built link tables before handing off to the engine */
	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* synchronous submit failure: no callback will run */
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1307 
1308 /*
1309  * allocate and map the extended descriptor
1310  */
/*
 * Allocate the extended descriptor, sized to hold either the SEC2+
 * link tables plus two ICVs, or the SEC1 flat bounce buffer.  Also
 * maps the IV and (when link tables are present) the table area.
 * Returns ERR_PTR on failure; on success the caller owns the edesc.
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
	void *err;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	/* in-place operation: one list covers both input and output */
	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		/* nents == 0 encodes "single entry, no table needed" */
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src*/
		/* decrypt input carries the ICV; encrypt output grows by it */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			/* SEC1: flat bounce buffer for src and dst data */
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		/* still need room for the stashed input ICV on sw-auth */
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* GFP_DMA: the engine DMAs directly into this allocation */
	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		dev_err(dev, "could not allocate edescriptor\n");
		err = ERR_PTR(-ENOMEM);
		goto error_sg;
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
error_sg:
	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	return err;
}
1412 
1413 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1414 					      int icv_stashing, bool encrypt)
1415 {
1416 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1417 	unsigned int authsize = crypto_aead_authsize(authenc);
1418 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1419 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1420 
1421 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1422 				   iv, areq->assoclen, areq->cryptlen,
1423 				   authsize, ivsize, icv_stashing,
1424 				   areq->base.flags, encrypt);
1425 }
1426 
1427 static int aead_encrypt(struct aead_request *req)
1428 {
1429 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1430 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1431 	struct talitos_edesc *edesc;
1432 
1433 	/* allocate extended descriptor */
1434 	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1435 	if (IS_ERR(edesc))
1436 		return PTR_ERR(edesc);
1437 
1438 	/* set encrypt */
1439 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1440 
1441 	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1442 }
1443 
/*
 * AEAD decrypt entry point: prefer the engine's hardware ICV check
 * when available for this request shape, otherwise stash the incoming
 * ICV and compare in software at completion time.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* the trailing authsize bytes of src are ICV, not ciphertext */
	req->cryptlen -= authsize;

	/* allocate extended descriptor (icv_stashing reserves ICV room) */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	/* NOTE(review): on SEC1 the edesc union is a flat byte buffer, so
	 * indexing link_tbl[] here may not match where the completion
	 * handler looks for the stashed ICV — confirm the SEC1 layout
	 */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
1492 
1493 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1494 			     const u8 *key, unsigned int keylen)
1495 {
1496 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1497 
1498 	if (keylen > TALITOS_MAX_KEY_SIZE) {
1499 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1500 		return -EINVAL;
1501 	}
1502 
1503 	memcpy(&ctx->key, key, keylen);
1504 	ctx->keylen = keylen;
1505 
1506 	return 0;
1507 }
1508 
/* Undo all DMA mappings made by common_nonsnoop() for an ablkcipher req. */
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	/* iv out */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	/* data in/out, then cipher key and iv in */
	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
1523 
1524 static void ablkcipher_done(struct device *dev,
1525 			    struct talitos_desc *desc, void *context,
1526 			    int err)
1527 {
1528 	struct ablkcipher_request *areq = context;
1529 	struct talitos_edesc *edesc;
1530 
1531 	edesc = container_of(desc, struct talitos_edesc, desc);
1532 
1533 	common_nonsnoop_unmap(dev, edesc, areq);
1534 
1535 	kfree(edesc);
1536 
1537 	areq->base.complete(&areq->base, err);
1538 }
1539 
/*
 * Build and submit the 7-pointer SEC descriptor for a plain (no auth)
 * ablkcipher request: ptr[1]=IV, ptr[2]=key, ptr[3]=data in,
 * ptr[4]=data out, ptr[5]=IV out; ptr[0] and ptr[6] unused.
 */
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/* SEC1 can't gather: bounce a multi-entry src via edesc->buf */
	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	/* dst link table (if any) starts after the src entries */
	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* flush CPU-built link tables before handing off to the engine */
	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* synchronous submit failure: no callback will run */
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1615 
1616 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1617 						    areq, bool encrypt)
1618 {
1619 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1620 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1621 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1622 
1623 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1624 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1625 				   areq->base.flags, encrypt);
1626 }
1627 
1628 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1629 {
1630 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1631 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1632 	struct talitos_edesc *edesc;
1633 
1634 	/* allocate extended descriptor */
1635 	edesc = ablkcipher_edesc_alloc(areq, true);
1636 	if (IS_ERR(edesc))
1637 		return PTR_ERR(edesc);
1638 
1639 	/* set encrypt */
1640 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1641 
1642 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1643 }
1644 
1645 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1646 {
1647 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1648 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1649 	struct talitos_edesc *edesc;
1650 
1651 	/* allocate extended descriptor */
1652 	edesc = ablkcipher_edesc_alloc(areq, false);
1653 	if (IS_ERR(edesc))
1654 		return PTR_ERR(edesc);
1655 
1656 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1657 
1658 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1659 }
1660 
/* Undo all DMA mappings made by common_nonsnoop_hash() for a hash req. */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* hash/HMAC or context out */
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	/* HMAC key, only mapped when the ptr length is non-zero */
	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}
1687 
1688 static void ahash_done(struct device *dev,
1689 		       struct talitos_desc *desc, void *context,
1690 		       int err)
1691 {
1692 	struct ahash_request *areq = context;
1693 	struct talitos_edesc *edesc =
1694 		 container_of(desc, struct talitos_edesc, desc);
1695 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1696 
1697 	if (!req_ctx->last && req_ctx->to_hash_later) {
1698 		/* Position any partial block for next update/final/finup */
1699 		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1700 		req_ctx->nbuf = req_ctx->to_hash_later;
1701 	}
1702 	common_nonsnoop_hash_unmap(dev, edesc, areq);
1703 
1704 	kfree(edesc);
1705 
1706 	areq->base.complete(&areq->base, err);
1707 }
1708 
1709 /*
1710  * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1711  * ourself and submit a padded block
1712  */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	/* one pre-padded empty message block: 0x80 then zeros (the
	 * 64-bit length field at the end is already all-zero)
	 */
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	/* padding was done here, so tell the engine not to pad again */
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
1729 
/*
 * Build and submit the 7-pointer SEC descriptor for a hash request:
 * ptr[1]=context in (optional), ptr[2]=HMAC key (optional),
 * ptr[3]=data in, ptr[5]=digest or context out.
 */
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/* SEC1 can't gather: bounce a multi-entry src via edesc->buf.
	 * NOTE(review): the copy reads areq->src while the map below uses
	 * req_ctx->psrc; when previously-buffered data was chained in,
	 * psrc is bufsl, not areq->src — confirm this is intended.
	 */
	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
	else
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* SEC1 cannot hash a zero-length message; substitute a pre-padded
	 * empty block and disable h/w padding
	 */
	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		/* synchronous submit failure: no callback will run */
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1814 
1815 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1816 					       unsigned int nbytes)
1817 {
1818 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1819 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1820 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1821 
1822 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1823 				   nbytes, 0, 0, 0, areq->base.flags, false);
1824 }
1825 
1826 static int ahash_init(struct ahash_request *areq)
1827 {
1828 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1829 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1830 
1831 	/* Initialize the context */
1832 	req_ctx->nbuf = 0;
1833 	req_ctx->first = 1; /* first indicates h/w must init its context */
1834 	req_ctx->swinit = 0; /* assume h/w init of context */
1835 	req_ctx->hw_context_size =
1836 		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1837 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1838 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1839 
1840 	return 0;
1841 }
1842 
1843 /*
1844  * on h/w without explicit sha224 support, we initialize h/w context
1845  * manually with sha224 constants, and tell it to run sha256.
1846  */
1847 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1848 {
1849 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1850 
1851 	ahash_init(areq);
1852 	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1853 
1854 	req_ctx->hw_context[0] = SHA224_H0;
1855 	req_ctx->hw_context[1] = SHA224_H1;
1856 	req_ctx->hw_context[2] = SHA224_H2;
1857 	req_ctx->hw_context[3] = SHA224_H3;
1858 	req_ctx->hw_context[4] = SHA224_H4;
1859 	req_ctx->hw_context[5] = SHA224_H5;
1860 	req_ctx->hw_context[6] = SHA224_H6;
1861 	req_ctx->hw_context[7] = SHA224_H7;
1862 
1863 	/* init 64-bit count */
1864 	req_ctx->hw_context[8] = 0;
1865 	req_ctx->hw_context[9] = 0;
1866 
1867 	return 0;
1868 }
1869 
/*
 * Core of update/final/finup: buffer sub-blocksize leftovers, chain in
 * previously buffered data, and submit whole blocks to the engine.
 * Returns 0 when everything was buffered, -EINPROGRESS on submit, or a
 * negative errno.
 */
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	/* save the tail for the next operation (restored in ahash_done) */
	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				      req_ctx->bufnext,
				      to_hash_later,
				      nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
1961 
1962 static int ahash_update(struct ahash_request *areq)
1963 {
1964 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1965 
1966 	req_ctx->last = 0;
1967 
1968 	return ahash_process_req(areq, areq->nbytes);
1969 }
1970 
1971 static int ahash_final(struct ahash_request *areq)
1972 {
1973 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1974 
1975 	req_ctx->last = 1;
1976 
1977 	return ahash_process_req(areq, 0);
1978 }
1979 
1980 static int ahash_finup(struct ahash_request *areq)
1981 {
1982 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1983 
1984 	req_ctx->last = 1;
1985 
1986 	return ahash_process_req(areq, areq->nbytes);
1987 }
1988 
1989 static int ahash_digest(struct ahash_request *areq)
1990 {
1991 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1992 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1993 
1994 	ahash->init(areq);
1995 	req_ctx->last = 1;
1996 
1997 	return ahash_process_req(areq, areq->nbytes);
1998 }
1999 
2000 static int ahash_export(struct ahash_request *areq, void *out)
2001 {
2002 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2003 	struct talitos_export_state *export = out;
2004 
2005 	memcpy(export->hw_context, req_ctx->hw_context,
2006 	       req_ctx->hw_context_size);
2007 	memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
2008 	export->swinit = req_ctx->swinit;
2009 	export->first = req_ctx->first;
2010 	export->last = req_ctx->last;
2011 	export->to_hash_later = req_ctx->to_hash_later;
2012 	export->nbuf = req_ctx->nbuf;
2013 
2014 	return 0;
2015 }
2016 
2017 static int ahash_import(struct ahash_request *areq, const void *in)
2018 {
2019 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2020 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2021 	const struct talitos_export_state *export = in;
2022 
2023 	memset(req_ctx, 0, sizeof(*req_ctx));
2024 	req_ctx->hw_context_size =
2025 		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2026 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2027 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2028 	memcpy(req_ctx->hw_context, export->hw_context,
2029 	       req_ctx->hw_context_size);
2030 	memcpy(req_ctx->buf, export->buf, export->nbuf);
2031 	req_ctx->swinit = export->swinit;
2032 	req_ctx->first = export->first;
2033 	req_ctx->last = export->last;
2034 	req_ctx->to_hash_later = export->to_hash_later;
2035 	req_ctx->nbuf = export->nbuf;
2036 
2037 	return 0;
2038 }
2039 
/* Completion bookkeeping for the synchronous long-key hash in keyhash(). */
struct keyhash_result {
	struct completion completion;	/* signalled by keyhash_complete() */
	int err;			/* final status of the async digest */
};
2044 
2045 static void keyhash_complete(struct crypto_async_request *req, int err)
2046 {
2047 	struct keyhash_result *res = req->data;
2048 
2049 	if (err == -EINPROGRESS)
2050 		return;
2051 
2052 	res->err = err;
2053 	complete(&res->completion);
2054 }
2055 
2056 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2057 		   u8 *hash)
2058 {
2059 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2060 
2061 	struct scatterlist sg[1];
2062 	struct ahash_request *req;
2063 	struct keyhash_result hresult;
2064 	int ret;
2065 
2066 	init_completion(&hresult.completion);
2067 
2068 	req = ahash_request_alloc(tfm, GFP_KERNEL);
2069 	if (!req)
2070 		return -ENOMEM;
2071 
2072 	/* Keep tfm keylen == 0 during hash of the long key */
2073 	ctx->keylen = 0;
2074 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2075 				   keyhash_complete, &hresult);
2076 
2077 	sg_init_one(&sg[0], key, keylen);
2078 
2079 	ahash_request_set_crypt(req, sg, hash, keylen);
2080 	ret = crypto_ahash_digest(req);
2081 	switch (ret) {
2082 	case 0:
2083 		break;
2084 	case -EINPROGRESS:
2085 	case -EBUSY:
2086 		ret = wait_for_completion_interruptible(
2087 			&hresult.completion);
2088 		if (!ret)
2089 			ret = hresult.err;
2090 		break;
2091 	default:
2092 		break;
2093 	}
2094 	ahash_request_free(req);
2095 
2096 	return ret;
2097 }
2098 
2099 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2100 			unsigned int keylen)
2101 {
2102 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2103 	unsigned int blocksize =
2104 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2105 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2106 	unsigned int keysize = keylen;
2107 	u8 hash[SHA512_DIGEST_SIZE];
2108 	int ret;
2109 
2110 	if (keylen <= blocksize)
2111 		memcpy(ctx->key, key, keysize);
2112 	else {
2113 		/* Must get the hash of the long key */
2114 		ret = keyhash(tfm, key, keylen, hash);
2115 
2116 		if (ret) {
2117 			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2118 			return -EINVAL;
2119 		}
2120 
2121 		keysize = digestsize;
2122 		memcpy(ctx->key, hash, digestsize);
2123 	}
2124 
2125 	ctx->keylen = keysize;
2126 
2127 	return 0;
2128 }
2129 
2130 
/* One entry of driver_algs[]: algorithm registration data plus the SEC
 * descriptor header template used for requests of that algorithm. */
struct talitos_alg_template {
	u32 type;		/* CRYPTO_ALG_TYPE_{AEAD,ABLKCIPHER,AHASH} */
	u32 priority;		/* 0 = driver default; else explicit cra_priority */
	union {			/* variant selected by 'type' */
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;	/* EU select + mode bits for the SEC */
};
2141 
2142 static struct talitos_alg_template driver_algs[] = {
2143 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2144 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2145 		.alg.aead = {
2146 			.base = {
2147 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2148 				.cra_driver_name = "authenc-hmac-sha1-"
2149 						   "cbc-aes-talitos",
2150 				.cra_blocksize = AES_BLOCK_SIZE,
2151 				.cra_flags = CRYPTO_ALG_ASYNC,
2152 			},
2153 			.ivsize = AES_BLOCK_SIZE,
2154 			.maxauthsize = SHA1_DIGEST_SIZE,
2155 		},
2156 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2157 			             DESC_HDR_SEL0_AESU |
2158 		                     DESC_HDR_MODE0_AESU_CBC |
2159 		                     DESC_HDR_SEL1_MDEUA |
2160 		                     DESC_HDR_MODE1_MDEU_INIT |
2161 		                     DESC_HDR_MODE1_MDEU_PAD |
2162 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2163 	},
2164 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2165 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2166 		.alg.aead = {
2167 			.base = {
2168 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2169 				.cra_driver_name = "authenc-hmac-sha1-"
2170 						   "cbc-aes-talitos",
2171 				.cra_blocksize = AES_BLOCK_SIZE,
2172 				.cra_flags = CRYPTO_ALG_ASYNC,
2173 			},
2174 			.ivsize = AES_BLOCK_SIZE,
2175 			.maxauthsize = SHA1_DIGEST_SIZE,
2176 		},
2177 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2178 				     DESC_HDR_SEL0_AESU |
2179 				     DESC_HDR_MODE0_AESU_CBC |
2180 				     DESC_HDR_SEL1_MDEUA |
2181 				     DESC_HDR_MODE1_MDEU_INIT |
2182 				     DESC_HDR_MODE1_MDEU_PAD |
2183 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2184 	},
2185 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2186 		.alg.aead = {
2187 			.base = {
2188 				.cra_name = "authenc(hmac(sha1),"
2189 					    "cbc(des3_ede))",
2190 				.cra_driver_name = "authenc-hmac-sha1-"
2191 						   "cbc-3des-talitos",
2192 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2193 				.cra_flags = CRYPTO_ALG_ASYNC,
2194 			},
2195 			.ivsize = DES3_EDE_BLOCK_SIZE,
2196 			.maxauthsize = SHA1_DIGEST_SIZE,
2197 		},
2198 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2199 			             DESC_HDR_SEL0_DEU |
2200 		                     DESC_HDR_MODE0_DEU_CBC |
2201 		                     DESC_HDR_MODE0_DEU_3DES |
2202 		                     DESC_HDR_SEL1_MDEUA |
2203 		                     DESC_HDR_MODE1_MDEU_INIT |
2204 		                     DESC_HDR_MODE1_MDEU_PAD |
2205 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2206 	},
2207 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2208 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2209 		.alg.aead = {
2210 			.base = {
2211 				.cra_name = "authenc(hmac(sha1),"
2212 					    "cbc(des3_ede))",
2213 				.cra_driver_name = "authenc-hmac-sha1-"
2214 						   "cbc-3des-talitos",
2215 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2216 				.cra_flags = CRYPTO_ALG_ASYNC,
2217 			},
2218 			.ivsize = DES3_EDE_BLOCK_SIZE,
2219 			.maxauthsize = SHA1_DIGEST_SIZE,
2220 		},
2221 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2222 				     DESC_HDR_SEL0_DEU |
2223 				     DESC_HDR_MODE0_DEU_CBC |
2224 				     DESC_HDR_MODE0_DEU_3DES |
2225 				     DESC_HDR_SEL1_MDEUA |
2226 				     DESC_HDR_MODE1_MDEU_INIT |
2227 				     DESC_HDR_MODE1_MDEU_PAD |
2228 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2229 	},
2230 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2231 		.alg.aead = {
2232 			.base = {
2233 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2234 				.cra_driver_name = "authenc-hmac-sha224-"
2235 						   "cbc-aes-talitos",
2236 				.cra_blocksize = AES_BLOCK_SIZE,
2237 				.cra_flags = CRYPTO_ALG_ASYNC,
2238 			},
2239 			.ivsize = AES_BLOCK_SIZE,
2240 			.maxauthsize = SHA224_DIGEST_SIZE,
2241 		},
2242 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2243 				     DESC_HDR_SEL0_AESU |
2244 				     DESC_HDR_MODE0_AESU_CBC |
2245 				     DESC_HDR_SEL1_MDEUA |
2246 				     DESC_HDR_MODE1_MDEU_INIT |
2247 				     DESC_HDR_MODE1_MDEU_PAD |
2248 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2249 	},
2250 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2251 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2252 		.alg.aead = {
2253 			.base = {
2254 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2255 				.cra_driver_name = "authenc-hmac-sha224-"
2256 						   "cbc-aes-talitos",
2257 				.cra_blocksize = AES_BLOCK_SIZE,
2258 				.cra_flags = CRYPTO_ALG_ASYNC,
2259 			},
2260 			.ivsize = AES_BLOCK_SIZE,
2261 			.maxauthsize = SHA224_DIGEST_SIZE,
2262 		},
2263 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2264 				     DESC_HDR_SEL0_AESU |
2265 				     DESC_HDR_MODE0_AESU_CBC |
2266 				     DESC_HDR_SEL1_MDEUA |
2267 				     DESC_HDR_MODE1_MDEU_INIT |
2268 				     DESC_HDR_MODE1_MDEU_PAD |
2269 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2270 	},
2271 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2272 		.alg.aead = {
2273 			.base = {
2274 				.cra_name = "authenc(hmac(sha224),"
2275 					    "cbc(des3_ede))",
2276 				.cra_driver_name = "authenc-hmac-sha224-"
2277 						   "cbc-3des-talitos",
2278 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2279 				.cra_flags = CRYPTO_ALG_ASYNC,
2280 			},
2281 			.ivsize = DES3_EDE_BLOCK_SIZE,
2282 			.maxauthsize = SHA224_DIGEST_SIZE,
2283 		},
2284 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2285 			             DESC_HDR_SEL0_DEU |
2286 		                     DESC_HDR_MODE0_DEU_CBC |
2287 		                     DESC_HDR_MODE0_DEU_3DES |
2288 		                     DESC_HDR_SEL1_MDEUA |
2289 		                     DESC_HDR_MODE1_MDEU_INIT |
2290 		                     DESC_HDR_MODE1_MDEU_PAD |
2291 		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2292 	},
2293 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2294 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2295 		.alg.aead = {
2296 			.base = {
2297 				.cra_name = "authenc(hmac(sha224),"
2298 					    "cbc(des3_ede))",
2299 				.cra_driver_name = "authenc-hmac-sha224-"
2300 						   "cbc-3des-talitos",
2301 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2302 				.cra_flags = CRYPTO_ALG_ASYNC,
2303 			},
2304 			.ivsize = DES3_EDE_BLOCK_SIZE,
2305 			.maxauthsize = SHA224_DIGEST_SIZE,
2306 		},
2307 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2308 				     DESC_HDR_SEL0_DEU |
2309 				     DESC_HDR_MODE0_DEU_CBC |
2310 				     DESC_HDR_MODE0_DEU_3DES |
2311 				     DESC_HDR_SEL1_MDEUA |
2312 				     DESC_HDR_MODE1_MDEU_INIT |
2313 				     DESC_HDR_MODE1_MDEU_PAD |
2314 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2315 	},
2316 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2317 		.alg.aead = {
2318 			.base = {
2319 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2320 				.cra_driver_name = "authenc-hmac-sha256-"
2321 						   "cbc-aes-talitos",
2322 				.cra_blocksize = AES_BLOCK_SIZE,
2323 				.cra_flags = CRYPTO_ALG_ASYNC,
2324 			},
2325 			.ivsize = AES_BLOCK_SIZE,
2326 			.maxauthsize = SHA256_DIGEST_SIZE,
2327 		},
2328 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2329 			             DESC_HDR_SEL0_AESU |
2330 		                     DESC_HDR_MODE0_AESU_CBC |
2331 		                     DESC_HDR_SEL1_MDEUA |
2332 		                     DESC_HDR_MODE1_MDEU_INIT |
2333 		                     DESC_HDR_MODE1_MDEU_PAD |
2334 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2335 	},
2336 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2337 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2338 		.alg.aead = {
2339 			.base = {
2340 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2341 				.cra_driver_name = "authenc-hmac-sha256-"
2342 						   "cbc-aes-talitos",
2343 				.cra_blocksize = AES_BLOCK_SIZE,
2344 				.cra_flags = CRYPTO_ALG_ASYNC,
2345 			},
2346 			.ivsize = AES_BLOCK_SIZE,
2347 			.maxauthsize = SHA256_DIGEST_SIZE,
2348 		},
2349 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2350 				     DESC_HDR_SEL0_AESU |
2351 				     DESC_HDR_MODE0_AESU_CBC |
2352 				     DESC_HDR_SEL1_MDEUA |
2353 				     DESC_HDR_MODE1_MDEU_INIT |
2354 				     DESC_HDR_MODE1_MDEU_PAD |
2355 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2356 	},
2357 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2358 		.alg.aead = {
2359 			.base = {
2360 				.cra_name = "authenc(hmac(sha256),"
2361 					    "cbc(des3_ede))",
2362 				.cra_driver_name = "authenc-hmac-sha256-"
2363 						   "cbc-3des-talitos",
2364 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2365 				.cra_flags = CRYPTO_ALG_ASYNC,
2366 			},
2367 			.ivsize = DES3_EDE_BLOCK_SIZE,
2368 			.maxauthsize = SHA256_DIGEST_SIZE,
2369 		},
2370 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2371 			             DESC_HDR_SEL0_DEU |
2372 		                     DESC_HDR_MODE0_DEU_CBC |
2373 		                     DESC_HDR_MODE0_DEU_3DES |
2374 		                     DESC_HDR_SEL1_MDEUA |
2375 		                     DESC_HDR_MODE1_MDEU_INIT |
2376 		                     DESC_HDR_MODE1_MDEU_PAD |
2377 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2378 	},
2379 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2380 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2381 		.alg.aead = {
2382 			.base = {
2383 				.cra_name = "authenc(hmac(sha256),"
2384 					    "cbc(des3_ede))",
2385 				.cra_driver_name = "authenc-hmac-sha256-"
2386 						   "cbc-3des-talitos",
2387 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2388 				.cra_flags = CRYPTO_ALG_ASYNC,
2389 			},
2390 			.ivsize = DES3_EDE_BLOCK_SIZE,
2391 			.maxauthsize = SHA256_DIGEST_SIZE,
2392 		},
2393 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2394 				     DESC_HDR_SEL0_DEU |
2395 				     DESC_HDR_MODE0_DEU_CBC |
2396 				     DESC_HDR_MODE0_DEU_3DES |
2397 				     DESC_HDR_SEL1_MDEUA |
2398 				     DESC_HDR_MODE1_MDEU_INIT |
2399 				     DESC_HDR_MODE1_MDEU_PAD |
2400 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2401 	},
2402 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2403 		.alg.aead = {
2404 			.base = {
2405 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2406 				.cra_driver_name = "authenc-hmac-sha384-"
2407 						   "cbc-aes-talitos",
2408 				.cra_blocksize = AES_BLOCK_SIZE,
2409 				.cra_flags = CRYPTO_ALG_ASYNC,
2410 			},
2411 			.ivsize = AES_BLOCK_SIZE,
2412 			.maxauthsize = SHA384_DIGEST_SIZE,
2413 		},
2414 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2415 			             DESC_HDR_SEL0_AESU |
2416 		                     DESC_HDR_MODE0_AESU_CBC |
2417 		                     DESC_HDR_SEL1_MDEUB |
2418 		                     DESC_HDR_MODE1_MDEU_INIT |
2419 		                     DESC_HDR_MODE1_MDEU_PAD |
2420 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2421 	},
2422 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2423 		.alg.aead = {
2424 			.base = {
2425 				.cra_name = "authenc(hmac(sha384),"
2426 					    "cbc(des3_ede))",
2427 				.cra_driver_name = "authenc-hmac-sha384-"
2428 						   "cbc-3des-talitos",
2429 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2430 				.cra_flags = CRYPTO_ALG_ASYNC,
2431 			},
2432 			.ivsize = DES3_EDE_BLOCK_SIZE,
2433 			.maxauthsize = SHA384_DIGEST_SIZE,
2434 		},
2435 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2436 			             DESC_HDR_SEL0_DEU |
2437 		                     DESC_HDR_MODE0_DEU_CBC |
2438 		                     DESC_HDR_MODE0_DEU_3DES |
2439 		                     DESC_HDR_SEL1_MDEUB |
2440 		                     DESC_HDR_MODE1_MDEU_INIT |
2441 		                     DESC_HDR_MODE1_MDEU_PAD |
2442 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2443 	},
2444 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2445 		.alg.aead = {
2446 			.base = {
2447 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2448 				.cra_driver_name = "authenc-hmac-sha512-"
2449 						   "cbc-aes-talitos",
2450 				.cra_blocksize = AES_BLOCK_SIZE,
2451 				.cra_flags = CRYPTO_ALG_ASYNC,
2452 			},
2453 			.ivsize = AES_BLOCK_SIZE,
2454 			.maxauthsize = SHA512_DIGEST_SIZE,
2455 		},
2456 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2457 			             DESC_HDR_SEL0_AESU |
2458 		                     DESC_HDR_MODE0_AESU_CBC |
2459 		                     DESC_HDR_SEL1_MDEUB |
2460 		                     DESC_HDR_MODE1_MDEU_INIT |
2461 		                     DESC_HDR_MODE1_MDEU_PAD |
2462 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2463 	},
2464 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2465 		.alg.aead = {
2466 			.base = {
2467 				.cra_name = "authenc(hmac(sha512),"
2468 					    "cbc(des3_ede))",
2469 				.cra_driver_name = "authenc-hmac-sha512-"
2470 						   "cbc-3des-talitos",
2471 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2472 				.cra_flags = CRYPTO_ALG_ASYNC,
2473 			},
2474 			.ivsize = DES3_EDE_BLOCK_SIZE,
2475 			.maxauthsize = SHA512_DIGEST_SIZE,
2476 		},
2477 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2478 			             DESC_HDR_SEL0_DEU |
2479 		                     DESC_HDR_MODE0_DEU_CBC |
2480 		                     DESC_HDR_MODE0_DEU_3DES |
2481 		                     DESC_HDR_SEL1_MDEUB |
2482 		                     DESC_HDR_MODE1_MDEU_INIT |
2483 		                     DESC_HDR_MODE1_MDEU_PAD |
2484 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2485 	},
2486 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2487 		.alg.aead = {
2488 			.base = {
2489 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2490 				.cra_driver_name = "authenc-hmac-md5-"
2491 						   "cbc-aes-talitos",
2492 				.cra_blocksize = AES_BLOCK_SIZE,
2493 				.cra_flags = CRYPTO_ALG_ASYNC,
2494 			},
2495 			.ivsize = AES_BLOCK_SIZE,
2496 			.maxauthsize = MD5_DIGEST_SIZE,
2497 		},
2498 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2499 			             DESC_HDR_SEL0_AESU |
2500 		                     DESC_HDR_MODE0_AESU_CBC |
2501 		                     DESC_HDR_SEL1_MDEUA |
2502 		                     DESC_HDR_MODE1_MDEU_INIT |
2503 		                     DESC_HDR_MODE1_MDEU_PAD |
2504 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2505 	},
2506 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2507 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2508 		.alg.aead = {
2509 			.base = {
2510 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2511 				.cra_driver_name = "authenc-hmac-md5-"
2512 						   "cbc-aes-talitos",
2513 				.cra_blocksize = AES_BLOCK_SIZE,
2514 				.cra_flags = CRYPTO_ALG_ASYNC,
2515 			},
2516 			.ivsize = AES_BLOCK_SIZE,
2517 			.maxauthsize = MD5_DIGEST_SIZE,
2518 		},
2519 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2520 				     DESC_HDR_SEL0_AESU |
2521 				     DESC_HDR_MODE0_AESU_CBC |
2522 				     DESC_HDR_SEL1_MDEUA |
2523 				     DESC_HDR_MODE1_MDEU_INIT |
2524 				     DESC_HDR_MODE1_MDEU_PAD |
2525 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2526 	},
2527 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2528 		.alg.aead = {
2529 			.base = {
2530 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2531 				.cra_driver_name = "authenc-hmac-md5-"
2532 						   "cbc-3des-talitos",
2533 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2534 				.cra_flags = CRYPTO_ALG_ASYNC,
2535 			},
2536 			.ivsize = DES3_EDE_BLOCK_SIZE,
2537 			.maxauthsize = MD5_DIGEST_SIZE,
2538 		},
2539 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2540 			             DESC_HDR_SEL0_DEU |
2541 		                     DESC_HDR_MODE0_DEU_CBC |
2542 		                     DESC_HDR_MODE0_DEU_3DES |
2543 		                     DESC_HDR_SEL1_MDEUA |
2544 		                     DESC_HDR_MODE1_MDEU_INIT |
2545 		                     DESC_HDR_MODE1_MDEU_PAD |
2546 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2547 	},
2548 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2549 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2550 		.alg.aead = {
2551 			.base = {
2552 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2553 				.cra_driver_name = "authenc-hmac-md5-"
2554 						   "cbc-3des-talitos",
2555 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2556 				.cra_flags = CRYPTO_ALG_ASYNC,
2557 			},
2558 			.ivsize = DES3_EDE_BLOCK_SIZE,
2559 			.maxauthsize = MD5_DIGEST_SIZE,
2560 		},
2561 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2562 				     DESC_HDR_SEL0_DEU |
2563 				     DESC_HDR_MODE0_DEU_CBC |
2564 				     DESC_HDR_MODE0_DEU_3DES |
2565 				     DESC_HDR_SEL1_MDEUA |
2566 				     DESC_HDR_MODE1_MDEU_INIT |
2567 				     DESC_HDR_MODE1_MDEU_PAD |
2568 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2569 	},
2570 	/* ABLKCIPHER algorithms. */
2571 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2572 		.alg.crypto = {
2573 			.cra_name = "ecb(aes)",
2574 			.cra_driver_name = "ecb-aes-talitos",
2575 			.cra_blocksize = AES_BLOCK_SIZE,
2576 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2577 				     CRYPTO_ALG_ASYNC,
2578 			.cra_ablkcipher = {
2579 				.min_keysize = AES_MIN_KEY_SIZE,
2580 				.max_keysize = AES_MAX_KEY_SIZE,
2581 				.ivsize = AES_BLOCK_SIZE,
2582 			}
2583 		},
2584 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2585 				     DESC_HDR_SEL0_AESU,
2586 	},
2587 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2588 		.alg.crypto = {
2589 			.cra_name = "cbc(aes)",
2590 			.cra_driver_name = "cbc-aes-talitos",
2591 			.cra_blocksize = AES_BLOCK_SIZE,
2592 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2593                                      CRYPTO_ALG_ASYNC,
2594 			.cra_ablkcipher = {
2595 				.min_keysize = AES_MIN_KEY_SIZE,
2596 				.max_keysize = AES_MAX_KEY_SIZE,
2597 				.ivsize = AES_BLOCK_SIZE,
2598 			}
2599 		},
2600 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2601 				     DESC_HDR_SEL0_AESU |
2602 				     DESC_HDR_MODE0_AESU_CBC,
2603 	},
2604 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2605 		.alg.crypto = {
2606 			.cra_name = "ctr(aes)",
2607 			.cra_driver_name = "ctr-aes-talitos",
2608 			.cra_blocksize = AES_BLOCK_SIZE,
2609 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2610 				     CRYPTO_ALG_ASYNC,
2611 			.cra_ablkcipher = {
2612 				.min_keysize = AES_MIN_KEY_SIZE,
2613 				.max_keysize = AES_MAX_KEY_SIZE,
2614 				.ivsize = AES_BLOCK_SIZE,
2615 			}
2616 		},
2617 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2618 				     DESC_HDR_SEL0_AESU |
2619 				     DESC_HDR_MODE0_AESU_CTR,
2620 	},
2621 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2622 		.alg.crypto = {
2623 			.cra_name = "ecb(des)",
2624 			.cra_driver_name = "ecb-des-talitos",
2625 			.cra_blocksize = DES_BLOCK_SIZE,
2626 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2627 				     CRYPTO_ALG_ASYNC,
2628 			.cra_ablkcipher = {
2629 				.min_keysize = DES_KEY_SIZE,
2630 				.max_keysize = DES_KEY_SIZE,
2631 				.ivsize = DES_BLOCK_SIZE,
2632 			}
2633 		},
2634 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2635 				     DESC_HDR_SEL0_DEU,
2636 	},
2637 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2638 		.alg.crypto = {
2639 			.cra_name = "cbc(des)",
2640 			.cra_driver_name = "cbc-des-talitos",
2641 			.cra_blocksize = DES_BLOCK_SIZE,
2642 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2643 				     CRYPTO_ALG_ASYNC,
2644 			.cra_ablkcipher = {
2645 				.min_keysize = DES_KEY_SIZE,
2646 				.max_keysize = DES_KEY_SIZE,
2647 				.ivsize = DES_BLOCK_SIZE,
2648 			}
2649 		},
2650 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2651 				     DESC_HDR_SEL0_DEU |
2652 				     DESC_HDR_MODE0_DEU_CBC,
2653 	},
2654 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2655 		.alg.crypto = {
2656 			.cra_name = "ecb(des3_ede)",
2657 			.cra_driver_name = "ecb-3des-talitos",
2658 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2659 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2660 				     CRYPTO_ALG_ASYNC,
2661 			.cra_ablkcipher = {
2662 				.min_keysize = DES3_EDE_KEY_SIZE,
2663 				.max_keysize = DES3_EDE_KEY_SIZE,
2664 				.ivsize = DES3_EDE_BLOCK_SIZE,
2665 			}
2666 		},
2667 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2668 				     DESC_HDR_SEL0_DEU |
2669 				     DESC_HDR_MODE0_DEU_3DES,
2670 	},
2671 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2672 		.alg.crypto = {
2673 			.cra_name = "cbc(des3_ede)",
2674 			.cra_driver_name = "cbc-3des-talitos",
2675 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2676 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2677                                      CRYPTO_ALG_ASYNC,
2678 			.cra_ablkcipher = {
2679 				.min_keysize = DES3_EDE_KEY_SIZE,
2680 				.max_keysize = DES3_EDE_KEY_SIZE,
2681 				.ivsize = DES3_EDE_BLOCK_SIZE,
2682 			}
2683 		},
2684 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2685 			             DESC_HDR_SEL0_DEU |
2686 		                     DESC_HDR_MODE0_DEU_CBC |
2687 		                     DESC_HDR_MODE0_DEU_3DES,
2688 	},
2689 	/* AHASH algorithms. */
2690 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2691 		.alg.hash = {
2692 			.halg.digestsize = MD5_DIGEST_SIZE,
2693 			.halg.statesize = sizeof(struct talitos_export_state),
2694 			.halg.base = {
2695 				.cra_name = "md5",
2696 				.cra_driver_name = "md5-talitos",
2697 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2698 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2699 					     CRYPTO_ALG_ASYNC,
2700 			}
2701 		},
2702 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2703 				     DESC_HDR_SEL0_MDEUA |
2704 				     DESC_HDR_MODE0_MDEU_MD5,
2705 	},
2706 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2707 		.alg.hash = {
2708 			.halg.digestsize = SHA1_DIGEST_SIZE,
2709 			.halg.statesize = sizeof(struct talitos_export_state),
2710 			.halg.base = {
2711 				.cra_name = "sha1",
2712 				.cra_driver_name = "sha1-talitos",
2713 				.cra_blocksize = SHA1_BLOCK_SIZE,
2714 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2715 					     CRYPTO_ALG_ASYNC,
2716 			}
2717 		},
2718 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2719 				     DESC_HDR_SEL0_MDEUA |
2720 				     DESC_HDR_MODE0_MDEU_SHA1,
2721 	},
2722 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2723 		.alg.hash = {
2724 			.halg.digestsize = SHA224_DIGEST_SIZE,
2725 			.halg.statesize = sizeof(struct talitos_export_state),
2726 			.halg.base = {
2727 				.cra_name = "sha224",
2728 				.cra_driver_name = "sha224-talitos",
2729 				.cra_blocksize = SHA224_BLOCK_SIZE,
2730 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2731 					     CRYPTO_ALG_ASYNC,
2732 			}
2733 		},
2734 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2735 				     DESC_HDR_SEL0_MDEUA |
2736 				     DESC_HDR_MODE0_MDEU_SHA224,
2737 	},
2738 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2739 		.alg.hash = {
2740 			.halg.digestsize = SHA256_DIGEST_SIZE,
2741 			.halg.statesize = sizeof(struct talitos_export_state),
2742 			.halg.base = {
2743 				.cra_name = "sha256",
2744 				.cra_driver_name = "sha256-talitos",
2745 				.cra_blocksize = SHA256_BLOCK_SIZE,
2746 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2747 					     CRYPTO_ALG_ASYNC,
2748 			}
2749 		},
2750 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2751 				     DESC_HDR_SEL0_MDEUA |
2752 				     DESC_HDR_MODE0_MDEU_SHA256,
2753 	},
2754 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2755 		.alg.hash = {
2756 			.halg.digestsize = SHA384_DIGEST_SIZE,
2757 			.halg.statesize = sizeof(struct talitos_export_state),
2758 			.halg.base = {
2759 				.cra_name = "sha384",
2760 				.cra_driver_name = "sha384-talitos",
2761 				.cra_blocksize = SHA384_BLOCK_SIZE,
2762 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2763 					     CRYPTO_ALG_ASYNC,
2764 			}
2765 		},
2766 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2767 				     DESC_HDR_SEL0_MDEUB |
2768 				     DESC_HDR_MODE0_MDEUB_SHA384,
2769 	},
2770 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2771 		.alg.hash = {
2772 			.halg.digestsize = SHA512_DIGEST_SIZE,
2773 			.halg.statesize = sizeof(struct talitos_export_state),
2774 			.halg.base = {
2775 				.cra_name = "sha512",
2776 				.cra_driver_name = "sha512-talitos",
2777 				.cra_blocksize = SHA512_BLOCK_SIZE,
2778 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2779 					     CRYPTO_ALG_ASYNC,
2780 			}
2781 		},
2782 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2783 				     DESC_HDR_SEL0_MDEUB |
2784 				     DESC_HDR_MODE0_MDEUB_SHA512,
2785 	},
2786 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2787 		.alg.hash = {
2788 			.halg.digestsize = MD5_DIGEST_SIZE,
2789 			.halg.statesize = sizeof(struct talitos_export_state),
2790 			.halg.base = {
2791 				.cra_name = "hmac(md5)",
2792 				.cra_driver_name = "hmac-md5-talitos",
2793 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2794 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2795 					     CRYPTO_ALG_ASYNC,
2796 			}
2797 		},
2798 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2799 				     DESC_HDR_SEL0_MDEUA |
2800 				     DESC_HDR_MODE0_MDEU_MD5,
2801 	},
2802 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2803 		.alg.hash = {
2804 			.halg.digestsize = SHA1_DIGEST_SIZE,
2805 			.halg.statesize = sizeof(struct talitos_export_state),
2806 			.halg.base = {
2807 				.cra_name = "hmac(sha1)",
2808 				.cra_driver_name = "hmac-sha1-talitos",
2809 				.cra_blocksize = SHA1_BLOCK_SIZE,
2810 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2811 					     CRYPTO_ALG_ASYNC,
2812 			}
2813 		},
2814 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2815 				     DESC_HDR_SEL0_MDEUA |
2816 				     DESC_HDR_MODE0_MDEU_SHA1,
2817 	},
2818 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2819 		.alg.hash = {
2820 			.halg.digestsize = SHA224_DIGEST_SIZE,
2821 			.halg.statesize = sizeof(struct talitos_export_state),
2822 			.halg.base = {
2823 				.cra_name = "hmac(sha224)",
2824 				.cra_driver_name = "hmac-sha224-talitos",
2825 				.cra_blocksize = SHA224_BLOCK_SIZE,
2826 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2827 					     CRYPTO_ALG_ASYNC,
2828 			}
2829 		},
2830 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2831 				     DESC_HDR_SEL0_MDEUA |
2832 				     DESC_HDR_MODE0_MDEU_SHA224,
2833 	},
2834 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2835 		.alg.hash = {
2836 			.halg.digestsize = SHA256_DIGEST_SIZE,
2837 			.halg.statesize = sizeof(struct talitos_export_state),
2838 			.halg.base = {
2839 				.cra_name = "hmac(sha256)",
2840 				.cra_driver_name = "hmac-sha256-talitos",
2841 				.cra_blocksize = SHA256_BLOCK_SIZE,
2842 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2843 					     CRYPTO_ALG_ASYNC,
2844 			}
2845 		},
2846 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2847 				     DESC_HDR_SEL0_MDEUA |
2848 				     DESC_HDR_MODE0_MDEU_SHA256,
2849 	},
2850 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2851 		.alg.hash = {
2852 			.halg.digestsize = SHA384_DIGEST_SIZE,
2853 			.halg.statesize = sizeof(struct talitos_export_state),
2854 			.halg.base = {
2855 				.cra_name = "hmac(sha384)",
2856 				.cra_driver_name = "hmac-sha384-talitos",
2857 				.cra_blocksize = SHA384_BLOCK_SIZE,
2858 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2859 					     CRYPTO_ALG_ASYNC,
2860 			}
2861 		},
2862 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2863 				     DESC_HDR_SEL0_MDEUB |
2864 				     DESC_HDR_MODE0_MDEUB_SHA384,
2865 	},
2866 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2867 		.alg.hash = {
2868 			.halg.digestsize = SHA512_DIGEST_SIZE,
2869 			.halg.statesize = sizeof(struct talitos_export_state),
2870 			.halg.base = {
2871 				.cra_name = "hmac(sha512)",
2872 				.cra_driver_name = "hmac-sha512-talitos",
2873 				.cra_blocksize = SHA512_BLOCK_SIZE,
2874 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2875 					     CRYPTO_ALG_ASYNC,
2876 			}
2877 		},
2878 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2879 				     DESC_HDR_SEL0_MDEUB |
2880 				     DESC_HDR_MODE0_MDEUB_SHA512,
2881 	}
2882 };
2883 
/*
 * Per-algorithm registration record: ties one copied driver_algs[]
 * template to the SEC device instance that services it.
 */
struct talitos_crypto_alg {
	struct list_head entry;	/* node in talitos_private alg_list (see talitos_remove) */
	struct device *dev;	/* SEC device this algorithm was registered for */
	struct talitos_alg_template algt;	/* private, fixed-up copy of the template */
};
2889 
2890 static int talitos_init_common(struct talitos_ctx *ctx,
2891 			       struct talitos_crypto_alg *talitos_alg)
2892 {
2893 	struct talitos_private *priv;
2894 
2895 	/* update context with ptr to dev */
2896 	ctx->dev = talitos_alg->dev;
2897 
2898 	/* assign SEC channel to tfm in round-robin fashion */
2899 	priv = dev_get_drvdata(ctx->dev);
2900 	ctx->ch = atomic_inc_return(&priv->last_chan) &
2901 		  (priv->num_channels - 1);
2902 
2903 	/* copy descriptor header template value */
2904 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2905 
2906 	/* select done notification */
2907 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2908 
2909 	return 0;
2910 }
2911 
2912 static int talitos_cra_init(struct crypto_tfm *tfm)
2913 {
2914 	struct crypto_alg *alg = tfm->__crt_alg;
2915 	struct talitos_crypto_alg *talitos_alg;
2916 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2917 
2918 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2919 		talitos_alg = container_of(__crypto_ahash_alg(alg),
2920 					   struct talitos_crypto_alg,
2921 					   algt.alg.hash);
2922 	else
2923 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
2924 					   algt.alg.crypto);
2925 
2926 	return talitos_init_common(ctx, talitos_alg);
2927 }
2928 
2929 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2930 {
2931 	struct aead_alg *alg = crypto_aead_alg(tfm);
2932 	struct talitos_crypto_alg *talitos_alg;
2933 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2934 
2935 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
2936 				   algt.alg.aead);
2937 
2938 	return talitos_init_common(ctx, talitos_alg);
2939 }
2940 
2941 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2942 {
2943 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2944 
2945 	talitos_cra_init(tfm);
2946 
2947 	ctx->keylen = 0;
2948 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2949 				 sizeof(struct talitos_ahash_req_ctx));
2950 
2951 	return 0;
2952 }
2953 
2954 /*
2955  * given the alg's descriptor header template, determine whether descriptor
2956  * type and primary/secondary execution units required match the hw
2957  * capabilities description provided in the device tree node.
2958  */
2959 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2960 {
2961 	struct talitos_private *priv = dev_get_drvdata(dev);
2962 	int ret;
2963 
2964 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2965 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2966 
2967 	if (SECONDARY_EU(desc_hdr_template))
2968 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2969 		              & priv->exec_units);
2970 
2971 	return ret;
2972 }
2973 
2974 static int talitos_remove(struct platform_device *ofdev)
2975 {
2976 	struct device *dev = &ofdev->dev;
2977 	struct talitos_private *priv = dev_get_drvdata(dev);
2978 	struct talitos_crypto_alg *t_alg, *n;
2979 	int i;
2980 
2981 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2982 		switch (t_alg->algt.type) {
2983 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2984 			break;
2985 		case CRYPTO_ALG_TYPE_AEAD:
2986 			crypto_unregister_aead(&t_alg->algt.alg.aead);
2987 		case CRYPTO_ALG_TYPE_AHASH:
2988 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
2989 			break;
2990 		}
2991 		list_del(&t_alg->entry);
2992 		kfree(t_alg);
2993 	}
2994 
2995 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2996 		talitos_unregister_rng(dev);
2997 
2998 	for (i = 0; priv->chan && i < priv->num_channels; i++)
2999 		kfree(priv->chan[i].fifo);
3000 
3001 	kfree(priv->chan);
3002 
3003 	for (i = 0; i < 2; i++)
3004 		if (priv->irq[i]) {
3005 			free_irq(priv->irq[i], dev);
3006 			irq_dispose_mapping(priv->irq[i]);
3007 		}
3008 
3009 	tasklet_kill(&priv->done_task[0]);
3010 	if (priv->irq[1])
3011 		tasklet_kill(&priv->done_task[1]);
3012 
3013 	iounmap(priv->reg);
3014 
3015 	kfree(priv);
3016 
3017 	return 0;
3018 }
3019 
/*
 * talitos_alg_alloc - build a registration record for one driver_algs[]
 * template
 *
 * Copies the template into a fresh talitos_crypto_alg, wires up the
 * type-specific entry points, applies feature-dependent fixups, and
 * fills the common crypto_alg fields.
 *
 * Returns the record on success, ERR_PTR(-ENOTSUPP) when the hardware
 * lacks a required feature (caller skips the algorithm), or
 * ERR_PTR(-ENOMEM)/ERR_PTR(-EINVAL) on allocation failure or an
 * unknown template type.
 */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	/* private copy so per-device fixups never touch the shared template */
	t_alg->algt = *template;

	/* alg aliases the base crypto_alg inside the algt.alg union */
	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		/* hmac(*) algorithms need the MDEU HMAC capability */
		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		/*
		 * without SHA224 hardware init, run sha224 on the SHA256
		 * unit with a software-supplied initial digest state
		 */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	/* a template may override the driver-wide default priority */
	if (t_alg->algt.priority)
		alg->cra_priority = t_alg->algt.priority;
	else
		alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
3098 
/*
 * talitos_probe_irq - map and request the device's irq line(s)
 *
 * SEC1 and single-irq SEC2 devices use one combined handler for all
 * channels; dual-irq SEC2 devices split channels 0/2 and 1/3 across
 * the two lines.  On failure, the failed line's mapping is disposed
 * and the priv->irq[] slot zeroed, so talitos_remove() only releases
 * what was actually acquired.  Returns 0 or a negative errno.
 */
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	/* SEC1: single line, single 4-channel handler */
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		/* no second line in the DT: all channels on irq[0] */
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		/* irq[0] stays requested; the probe error path frees it */
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
3152 
/*
 * talitos_probe - initialize one SEC instance
 *
 * Phases, in order: allocate private state, map registers, read
 * capabilities from the device tree, select the per-version register
 * layout, request irqs, set up done-tasklets, allocate per-channel
 * request fifos, reset/init the hardware, then register the hwrng and
 * every algorithm the execution units support.
 *
 * All failures jump to err_out, which calls talitos_remove(); that
 * function tolerates whatever was (not yet) initialized.
 */
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	/* set drvdata early: talitos_probe_irq/talitos_remove rely on it */
	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	/*
	 * num_channels must be a power of two: channel selection in
	 * talitos_init_common() masks with (num_channels - 1)
	 */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	/* feature flags keyed off the more specific compatible strings */
	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	/* per-version execution-unit register offsets and channel stride */
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	/* done-tasklet layout mirrors the irq layout chosen above */
	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* software fifo depth must be a power of two for index masking */
	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		/* NOTE(review): offset applied per irq layout — confirm
		 * against the SEC memory map for odd channels on dual-irq
		 * parts */
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		/* counter reaches 0 when the channel fifo is full */
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	/* SEC addresses descriptors with 36-bit DMA addresses */
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				/* -ENOTSUPP = missing feature, not fatal */
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				/* alg_list is what talitos_remove unwinds */
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	/* talitos_remove() doubles as the unwind path */
	talitos_remove(ofdev);

	return err;
}
3361 
/*
 * Device-tree match table.  Each SEC family is gated by its Kconfig
 * option; finer-grained versions (sec1.2, sec2.1, sec3.0) are
 * distinguished later via of_device_is_compatible() in talitos_probe().
 */
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
3376 
/* platform driver glue: probe/remove plus OF matching */
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

/* boilerplate module init/exit that registers the platform driver */
module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
3391