xref: /openbmc/linux/drivers/crypto/talitos.c (revision 151f4e2b)
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43 
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55 
56 #include "talitos.h"
57 
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 			   unsigned int len, bool is_sec1)
60 {
61 	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62 	if (is_sec1) {
63 		ptr->len1 = cpu_to_be16(len);
64 	} else {
65 		ptr->len = cpu_to_be16(len);
66 		ptr->eptr = upper_32_bits(dma_addr);
67 	}
68 }
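/*
 * Example (illustrative): on SEC2+, a 36-bit bus address 0xf12345678
 * with len 32 is stored as ptr = 0x12345678, eptr = 0xf, len = 32;
 * SEC1 has no eptr and carries the length in the len1 field instead.
 */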
69 
70 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
71 			     struct talitos_ptr *src_ptr, bool is_sec1)
72 {
73 	dst_ptr->ptr = src_ptr->ptr;
74 	if (is_sec1) {
75 		dst_ptr->len1 = src_ptr->len1;
76 	} else {
77 		dst_ptr->len = src_ptr->len;
78 		dst_ptr->eptr = src_ptr->eptr;
79 	}
80 }
81 
82 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
83 					   bool is_sec1)
84 {
85 	if (is_sec1)
86 		return be16_to_cpu(ptr->len1);
87 	else
88 		return be16_to_cpu(ptr->len);
89 }
90 
91 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
92 				   bool is_sec1)
93 {
94 	if (!is_sec1)
95 		ptr->j_extent = val;
96 }
97 
98 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
99 {
100 	if (!is_sec1)
101 		ptr->j_extent |= val;
102 }
103 
104 /*
105  * map virtual single (contiguous) pointer to h/w descriptor pointer
106  */
107 static void __map_single_talitos_ptr(struct device *dev,
108 				     struct talitos_ptr *ptr,
109 				     unsigned int len, void *data,
110 				     enum dma_data_direction dir,
111 				     unsigned long attrs)
112 {
113 	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
114 	struct talitos_private *priv = dev_get_drvdata(dev);
115 	bool is_sec1 = has_ftr_sec1(priv);
116 
117 	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
118 }
119 
120 static void map_single_talitos_ptr(struct device *dev,
121 				   struct talitos_ptr *ptr,
122 				   unsigned int len, void *data,
123 				   enum dma_data_direction dir)
124 {
125 	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
126 }
127 
128 static void map_single_talitos_ptr_nosync(struct device *dev,
129 					  struct talitos_ptr *ptr,
130 					  unsigned int len, void *data,
131 					  enum dma_data_direction dir)
132 {
133 	__map_single_talitos_ptr(dev, ptr, len, data, dir,
134 				 DMA_ATTR_SKIP_CPU_SYNC);
135 }
136 
137 /*
138  * unmap bus single (contiguous) h/w descriptor pointer
139  */
140 static void unmap_single_talitos_ptr(struct device *dev,
141 				     struct talitos_ptr *ptr,
142 				     enum dma_data_direction dir)
143 {
144 	struct talitos_private *priv = dev_get_drvdata(dev);
145 	bool is_sec1 = has_ftr_sec1(priv);
146 
147 	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
148 			 from_talitos_ptr_len(ptr, is_sec1), dir);
149 }
150 
151 static int reset_channel(struct device *dev, int ch)
152 {
153 	struct talitos_private *priv = dev_get_drvdata(dev);
154 	unsigned int timeout = TALITOS_TIMEOUT;
155 	bool is_sec1 = has_ftr_sec1(priv);
156 
157 	if (is_sec1) {
158 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
159 			  TALITOS1_CCCR_LO_RESET);
160 
161 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
162 			TALITOS1_CCCR_LO_RESET) && --timeout)
163 			cpu_relax();
164 	} else {
165 		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
166 			  TALITOS2_CCCR_RESET);
167 
168 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
169 			TALITOS2_CCCR_RESET) && --timeout)
170 			cpu_relax();
171 	}
172 
173 	if (timeout == 0) {
174 		dev_err(dev, "failed to reset channel %d\n", ch);
175 		return -EIO;
176 	}
177 
178 	/* set 36-bit addressing, done writeback enable and done IRQ enable */
179 	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
180 		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
181 	/* enable chaining descriptors */
182 	if (is_sec1)
183 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
184 			  TALITOS_CCCR_LO_NE);
185 
186 	/* and ICCR writeback, if available */
187 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
188 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
189 		          TALITOS_CCCR_LO_IWSE);
190 
191 	return 0;
192 }
193 
194 static int reset_device(struct device *dev)
195 {
196 	struct talitos_private *priv = dev_get_drvdata(dev);
197 	unsigned int timeout = TALITOS_TIMEOUT;
198 	bool is_sec1 = has_ftr_sec1(priv);
199 	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
200 
201 	setbits32(priv->reg + TALITOS_MCR, mcr);
202 
203 	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
204 	       && --timeout)
205 		cpu_relax();
206 
207 	if (priv->irq[1]) {
208 		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
209 		setbits32(priv->reg + TALITOS_MCR, mcr);
210 	}
211 
212 	if (timeout == 0) {
213 		dev_err(dev, "failed to reset device\n");
214 		return -EIO;
215 	}
216 
217 	return 0;
218 }
219 
220 /*
221  * Reset and initialize the device
222  */
223 static int init_device(struct device *dev)
224 {
225 	struct talitos_private *priv = dev_get_drvdata(dev);
226 	int ch, err;
227 	bool is_sec1 = has_ftr_sec1(priv);
228 
229 	/*
230 	 * Master reset
231 	 * per the errata documentation, certain SEC interrupts
232 	 * are not fully cleared by writing the MCR:SWR bit;
233 	 * the bit is therefore set twice to completely reset
234 	 */
235 	err = reset_device(dev);
236 	if (err)
237 		return err;
238 
239 	err = reset_device(dev);
240 	if (err)
241 		return err;
242 
243 	/* reset channels */
244 	for (ch = 0; ch < priv->num_channels; ch++) {
245 		err = reset_channel(dev, ch);
246 		if (err)
247 			return err;
248 	}
249 
250 	/* enable channel done and error interrupts */
251 	if (is_sec1) {
252 		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
253 		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
254 		/* disable parity error check in DEU (erroneous? test vectors) */
255 		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
256 	} else {
257 		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
258 		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
259 	}
260 
261 	/* disable integrity check error interrupts (use writeback instead) */
262 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
263 		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
264 		          TALITOS_MDEUICR_LO_ICE);
265 
266 	return 0;
267 }
268 
269 /**
270  * talitos_submit - submits a descriptor to the device for processing
271  * @dev:	the SEC device to be used
272  * @ch:		the SEC device channel to be used
273  * @desc:	the descriptor to be processed by the device
274  * @callback:	whom to call when processing is complete
275  * @context:	a handle for use by caller (optional)
276  *
277  * desc must contain valid dma-mapped (bus physical) address pointers.
278  * callback must check err and feedback in descriptor header
279  * for device processing status.
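 *
 * A minimal, illustrative submission sketch, mirroring the use in
 * ipsec_esp() below ("done_cb" stands for a caller-supplied callback):
 *
 *	ret = talitos_submit(dev, ctx->ch, &edesc->desc, done_cb, areq);
 *	if (ret != -EINPROGRESS) {
 *		ipsec_esp_unmap(dev, edesc, areq);
 *		kfree(edesc);
 *	}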
280  */
281 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
282 		   void (*callback)(struct device *dev,
283 				    struct talitos_desc *desc,
284 				    void *context, int error),
285 		   void *context)
286 {
287 	struct talitos_private *priv = dev_get_drvdata(dev);
288 	struct talitos_request *request;
289 	unsigned long flags;
290 	int head;
291 	bool is_sec1 = has_ftr_sec1(priv);
292 
293 	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
294 
295 	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
296 		/* h/w fifo is full */
297 		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
298 		return -EAGAIN;
299 	}
300 
301 	head = priv->chan[ch].head;
302 	request = &priv->chan[ch].fifo[head];
303 
304 	/* map descriptor and save caller data */
305 	if (is_sec1) {
306 		desc->hdr1 = desc->hdr;
307 		request->dma_desc = dma_map_single(dev, &desc->hdr1,
308 						   TALITOS_DESC_SIZE,
309 						   DMA_BIDIRECTIONAL);
310 	} else {
311 		request->dma_desc = dma_map_single(dev, desc,
312 						   TALITOS_DESC_SIZE,
313 						   DMA_BIDIRECTIONAL);
314 	}
315 	request->callback = callback;
316 	request->context = context;
317 
318 	/* increment fifo head */
319 	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
320 
321 	smp_wmb();
322 	request->desc = desc;
323 
324 	/* GO! */
325 	wmb();
326 	out_be32(priv->chan[ch].reg + TALITOS_FF,
327 		 upper_32_bits(request->dma_desc));
328 	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
329 		 lower_32_bits(request->dma_desc));
330 
331 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
332 
333 	return -EINPROGRESS;
334 }
335 EXPORT_SYMBOL(talitos_submit);
336 
337 /*
338  * process what was done, notify callback of error if not
339  */
340 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
341 {
342 	struct talitos_private *priv = dev_get_drvdata(dev);
343 	struct talitos_request *request, saved_req;
344 	unsigned long flags;
345 	int tail, status;
346 	bool is_sec1 = has_ftr_sec1(priv);
347 
348 	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
349 
350 	tail = priv->chan[ch].tail;
351 	while (priv->chan[ch].fifo[tail].desc) {
352 		__be32 hdr;
353 
354 		request = &priv->chan[ch].fifo[tail];
355 
356 		/* descriptors with their done bits set don't get the error */
357 		rmb();
358 		if (!is_sec1)
359 			hdr = request->desc->hdr;
360 		else if (request->desc->next_desc)
361 			hdr = (request->desc + 1)->hdr1;
362 		else
363 			hdr = request->desc->hdr1;
364 
365 		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
366 			status = 0;
367 		else
368 			if (!error)
369 				break;
370 			else
371 				status = error;
372 
373 		dma_unmap_single(dev, request->dma_desc,
374 				 TALITOS_DESC_SIZE,
375 				 DMA_BIDIRECTIONAL);
376 
377 		/* copy entries so we can call callback outside lock */
378 		saved_req.desc = request->desc;
379 		saved_req.callback = request->callback;
380 		saved_req.context = request->context;
381 
382 		/* release request entry in fifo */
383 		smp_wmb();
384 		request->desc = NULL;
385 
386 		/* increment fifo tail */
387 		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
388 
389 		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
390 
391 		atomic_dec(&priv->chan[ch].submit_count);
392 
393 		saved_req.callback(dev, saved_req.desc, saved_req.context,
394 				   status);
395 		/* channel may resume processing in single desc error case */
396 		if (error && !reset_ch && status == error)
397 			return;
398 		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
399 		tail = priv->chan[ch].tail;
400 	}
401 
402 	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
403 }
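
/*
 * Note: the per-channel fifo is a power-of-two ring; talitos_submit()
 * advances head under head_lock, flush_channel() advances tail under
 * tail_lock, and a non-NULL request->desc marks a slot as in flight.
 */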
404 
405 /*
406  * process completed requests for channels that have done status
407  */
408 #define DEF_TALITOS1_DONE(name, ch_done_mask)				\
409 static void talitos1_done_##name(unsigned long data)			\
410 {									\
411 	struct device *dev = (struct device *)data;			\
412 	struct talitos_private *priv = dev_get_drvdata(dev);		\
413 	unsigned long flags;						\
414 									\
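	/* ch0..ch3 done bits: 28, 30, 16, 18 (SEC1 ISR layout) */	\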
415 	if (ch_done_mask & 0x10000000)					\
416 		flush_channel(dev, 0, 0, 0);			\
417 	if (ch_done_mask & 0x40000000)					\
418 		flush_channel(dev, 1, 0, 0);			\
419 	if (ch_done_mask & 0x00010000)					\
420 		flush_channel(dev, 2, 0, 0);			\
421 	if (ch_done_mask & 0x00040000)					\
422 		flush_channel(dev, 3, 0, 0);			\
423 									\
424 	/* At this point, all completed channels have been processed */	\
425 	/* Unmask done interrupts for channels completed later on. */	\
426 	spin_lock_irqsave(&priv->reg_lock, flags);			\
427 	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
428 	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
429 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
430 }
431 
432 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
433 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
434 
435 #define DEF_TALITOS2_DONE(name, ch_done_mask)				\
436 static void talitos2_done_##name(unsigned long data)			\
437 {									\
438 	struct device *dev = (struct device *)data;			\
439 	struct talitos_private *priv = dev_get_drvdata(dev);		\
440 	unsigned long flags;						\
441 									\
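	/* ch0..ch3 done bits: 0, 2, 4, 6 (SEC2 ISR layout) */		\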
442 	if (ch_done_mask & 1)						\
443 		flush_channel(dev, 0, 0, 0);				\
444 	if (ch_done_mask & (1 << 2))					\
445 		flush_channel(dev, 1, 0, 0);				\
446 	if (ch_done_mask & (1 << 4))					\
447 		flush_channel(dev, 2, 0, 0);				\
448 	if (ch_done_mask & (1 << 6))					\
449 		flush_channel(dev, 3, 0, 0);				\
450 									\
451 	/* At this point, all completed channels have been processed */	\
452 	/* Unmask done interrupts for channels completed later on. */	\
453 	spin_lock_irqsave(&priv->reg_lock, flags);			\
454 	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
455 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
456 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
457 }
458 
459 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
460 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
461 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
462 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
463 
464 /*
465  * locate current (offending) descriptor
466  */
467 static u32 current_desc_hdr(struct device *dev, int ch)
468 {
469 	struct talitos_private *priv = dev_get_drvdata(dev);
470 	int tail, iter;
471 	dma_addr_t cur_desc;
472 
473 	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
474 	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
475 
476 	if (!cur_desc) {
477 		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
478 		return 0;
479 	}
480 
481 	tail = priv->chan[ch].tail;
482 
483 	iter = tail;
484 	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
485 	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
486 		iter = (iter + 1) & (priv->fifo_len - 1);
487 		if (iter == tail) {
488 			dev_err(dev, "couldn't locate current descriptor\n");
489 			return 0;
490 		}
491 	}
492 
493 	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
494 		return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
495 
496 	return priv->chan[ch].fifo[iter].desc->hdr;
497 }
498 
499 /*
500  * user diagnostics; report root cause of error based on execution unit status
501  */
502 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
503 {
504 	struct talitos_private *priv = dev_get_drvdata(dev);
505 	int i;
506 
507 	if (!desc_hdr)
508 		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
509 
510 	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
511 	case DESC_HDR_SEL0_AFEU:
512 		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
513 			in_be32(priv->reg_afeu + TALITOS_EUISR),
514 			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
515 		break;
516 	case DESC_HDR_SEL0_DEU:
517 		dev_err(dev, "DEUISR 0x%08x_%08x\n",
518 			in_be32(priv->reg_deu + TALITOS_EUISR),
519 			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
520 		break;
521 	case DESC_HDR_SEL0_MDEUA:
522 	case DESC_HDR_SEL0_MDEUB:
523 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
524 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
525 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
526 		break;
527 	case DESC_HDR_SEL0_RNG:
528 		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
529 			in_be32(priv->reg_rngu + TALITOS_ISR),
530 			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
531 		break;
532 	case DESC_HDR_SEL0_PKEU:
533 		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
534 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
535 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
536 		break;
537 	case DESC_HDR_SEL0_AESU:
538 		dev_err(dev, "AESUISR 0x%08x_%08x\n",
539 			in_be32(priv->reg_aesu + TALITOS_EUISR),
540 			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
541 		break;
542 	case DESC_HDR_SEL0_CRCU:
543 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
544 			in_be32(priv->reg_crcu + TALITOS_EUISR),
545 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
546 		break;
547 	case DESC_HDR_SEL0_KEU:
548 		dev_err(dev, "KEUISR 0x%08x_%08x\n",
549 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
550 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
551 		break;
552 	}
553 
554 	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
555 	case DESC_HDR_SEL1_MDEUA:
556 	case DESC_HDR_SEL1_MDEUB:
557 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
558 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
559 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
560 		break;
561 	case DESC_HDR_SEL1_CRCU:
562 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
563 			in_be32(priv->reg_crcu + TALITOS_EUISR),
564 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
565 		break;
566 	}
567 
568 	for (i = 0; i < 8; i++)
569 		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
570 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
571 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
572 }
573 
574 /*
575  * recover from error interrupts
576  */
577 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
578 {
579 	struct talitos_private *priv = dev_get_drvdata(dev);
580 	unsigned int timeout = TALITOS_TIMEOUT;
581 	int ch, error, reset_dev = 0;
582 	u32 v_lo;
583 	bool is_sec1 = has_ftr_sec1(priv);
584 	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
585 
586 	for (ch = 0; ch < priv->num_channels; ch++) {
587 		/* skip channels without errors */
588 		if (is_sec1) {
589 			/* error bits for ch0..ch3: 29, 31, 17, 19 */
590 			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
591 				continue;
592 		} else {
593 			if (!(isr & (1 << (ch * 2 + 1))))
594 				continue;
595 		}
596 
597 		error = -EINVAL;
598 
599 		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
600 
601 		if (v_lo & TALITOS_CCPSR_LO_DOF) {
602 			dev_err(dev, "double fetch fifo overflow error\n");
603 			error = -EAGAIN;
604 			reset_ch = 1;
605 		}
606 		if (v_lo & TALITOS_CCPSR_LO_SOF) {
607 			/* h/w dropped descriptor */
608 			dev_err(dev, "single fetch fifo overflow error\n");
609 			error = -EAGAIN;
610 		}
611 		if (v_lo & TALITOS_CCPSR_LO_MDTE)
612 			dev_err(dev, "master data transfer error\n");
613 		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
614 			dev_err(dev, is_sec1 ? "pointer not complete error\n"
615 					     : "s/g data length zero error\n");
616 		if (v_lo & TALITOS_CCPSR_LO_FPZ)
617 			dev_err(dev, is_sec1 ? "parity error\n"
618 					     : "fetch pointer zero error\n");
619 		if (v_lo & TALITOS_CCPSR_LO_IDH)
620 			dev_err(dev, "illegal descriptor header error\n");
621 		if (v_lo & TALITOS_CCPSR_LO_IEU)
622 			dev_err(dev, is_sec1 ? "static assignment error\n"
623 					     : "invalid exec unit error\n");
624 		if (v_lo & TALITOS_CCPSR_LO_EU)
625 			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
626 		if (!is_sec1) {
627 			if (v_lo & TALITOS_CCPSR_LO_GB)
628 				dev_err(dev, "gather boundary error\n");
629 			if (v_lo & TALITOS_CCPSR_LO_GRL)
630 				dev_err(dev, "gather return/length error\n");
631 			if (v_lo & TALITOS_CCPSR_LO_SB)
632 				dev_err(dev, "scatter boundary error\n");
633 			if (v_lo & TALITOS_CCPSR_LO_SRL)
634 				dev_err(dev, "scatter return/length error\n");
635 		}
636 
637 		flush_channel(dev, ch, error, reset_ch);
638 
639 		if (reset_ch) {
640 			reset_channel(dev, ch);
641 		} else {
642 			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
643 				  TALITOS2_CCCR_CONT);
644 			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
645 			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
646 			       TALITOS2_CCCR_CONT) && --timeout)
647 				cpu_relax();
648 			if (timeout == 0) {
649 				dev_err(dev, "failed to restart channel %d\n",
650 					ch);
651 				reset_dev = 1;
652 			}
653 		}
654 	}
655 	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
656 	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
657 		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
658 			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
659 				isr, isr_lo);
660 		else
661 			dev_err(dev, "done overflow, internal timeout, or rngu error: ISR 0x%08x_%08x\n",
662 				isr, isr_lo);
663 
664 		/* purge request queues */
665 		for (ch = 0; ch < priv->num_channels; ch++)
666 			flush_channel(dev, ch, -EIO, 1);
667 
668 		/* reset and reinitialize the device */
669 		init_device(dev);
670 	}
671 }
672 
673 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
674 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
675 {									       \
676 	struct device *dev = data;					       \
677 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
678 	u32 isr, isr_lo;						       \
679 	unsigned long flags;						       \
680 									       \
681 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
682 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
683 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
684 	/* Acknowledge interrupt */					       \
685 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
686 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
687 									       \
688 	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
689 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
690 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
691 	}								       \
692 	else {								       \
693 		if (likely(isr & ch_done_mask)) {			       \
694 			/* mask further done interrupts. */		       \
695 			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
696 			/* done_task will unmask done interrupts at exit */    \
697 			tasklet_schedule(&priv->done_task[tlet]);	       \
698 		}							       \
699 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
700 	}								       \
701 									       \
702 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
703 								IRQ_NONE;      \
704 }
705 
706 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
707 
708 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
709 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
710 {									       \
711 	struct device *dev = data;					       \
712 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
713 	u32 isr, isr_lo;						       \
714 	unsigned long flags;						       \
715 									       \
716 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
717 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
718 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
719 	/* Acknowledge interrupt */					       \
720 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
721 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
722 									       \
723 	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
724 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
725 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
726 	}								       \
727 	else {								       \
728 		if (likely(isr & ch_done_mask)) {			       \
729 			/* mask further done interrupts. */		       \
730 			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
731 			/* done_task will unmask done interrupts at exit */    \
732 			tasklet_schedule(&priv->done_task[tlet]);	       \
733 		}							       \
734 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
735 	}								       \
736 									       \
737 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
738 								IRQ_NONE;      \
739 }
740 
741 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
742 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
743 		       0)
744 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
745 		       1)
746 
747 /*
748  * hwrng
749  */
750 static int talitos_rng_data_present(struct hwrng *rng, int wait)
751 {
752 	struct device *dev = (struct device *)rng->priv;
753 	struct talitos_private *priv = dev_get_drvdata(dev);
754 	u32 ofl;
755 	int i;
756 
757 	for (i = 0; i < 20; i++) {
758 		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
759 		      TALITOS_RNGUSR_LO_OFL;
760 		if (ofl || !wait)
761 			break;
762 		udelay(10);
763 	}
764 
765 	return !!ofl;
766 }
767 
768 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
769 {
770 	struct device *dev = (struct device *)rng->priv;
771 	struct talitos_private *priv = dev_get_drvdata(dev);
772 
773 	/* rng fifo requires 64-bit accesses */
774 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
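	/* the word above is overwritten below; only the low word is returned */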
775 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
776 
777 	return sizeof(u32);
778 }
779 
780 static int talitos_rng_init(struct hwrng *rng)
781 {
782 	struct device *dev = (struct device *)rng->priv;
783 	struct talitos_private *priv = dev_get_drvdata(dev);
784 	unsigned int timeout = TALITOS_TIMEOUT;
785 
786 	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
787 	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
788 		 & TALITOS_RNGUSR_LO_RD)
789 	       && --timeout)
790 		cpu_relax();
791 	if (timeout == 0) {
792 		dev_err(dev, "failed to reset rng hw\n");
793 		return -ENODEV;
794 	}
795 
796 	/* start generating */
797 	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
798 
799 	return 0;
800 }
801 
802 static int talitos_register_rng(struct device *dev)
803 {
804 	struct talitos_private *priv = dev_get_drvdata(dev);
805 	int err;
806 
807 	priv->rng.name		= dev_driver_string(dev);
808 	priv->rng.init		= talitos_rng_init;
809 	priv->rng.data_present	= talitos_rng_data_present;
810 	priv->rng.data_read	= talitos_rng_data_read;
811 	priv->rng.priv		= (unsigned long)dev;
812 
813 	err = hwrng_register(&priv->rng);
814 	if (!err)
815 		priv->rng_registered = true;
816 
817 	return err;
818 }
819 
820 static void talitos_unregister_rng(struct device *dev)
821 {
822 	struct talitos_private *priv = dev_get_drvdata(dev);
823 
824 	if (!priv->rng_registered)
825 		return;
826 
827 	hwrng_unregister(&priv->rng);
828 	priv->rng_registered = false;
829 }
830 
831 /*
832  * crypto alg
833  */
834 #define TALITOS_CRA_PRIORITY		3000
835 /*
836  * Defines a priority for doing AEAD with descriptors type
837  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
838  */
839 #define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
840 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
841 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
842 
843 struct talitos_ctx {
844 	struct device *dev;
845 	int ch;
846 	__be32 desc_hdr_template;
847 	u8 key[TALITOS_MAX_KEY_SIZE];
848 	u8 iv[TALITOS_MAX_IV_LENGTH];
849 	dma_addr_t dma_key;
850 	unsigned int keylen;
851 	unsigned int enckeylen;
852 	unsigned int authkeylen;
853 };
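
/*
 * For AEAD transforms, ctx->key holds both keys back to back, as laid
 * out by aead_setkey() below:
 *
 *	key[0 .. authkeylen-1]		authentication (HMAC) key
 *	key[authkeylen .. keylen-1]	encryption key
 */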
854 
855 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
856 #define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
857 
858 struct talitos_ahash_req_ctx {
859 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
860 	unsigned int hw_context_size;
861 	u8 buf[2][HASH_MAX_BLOCK_SIZE];
862 	int buf_idx;
863 	unsigned int swinit;
864 	unsigned int first;
865 	unsigned int last;
866 	unsigned int to_hash_later;
867 	unsigned int nbuf;
868 	struct scatterlist bufsl[2];
869 	struct scatterlist *psrc;
870 };
871 
872 struct talitos_export_state {
873 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
874 	u8 buf[HASH_MAX_BLOCK_SIZE];
875 	unsigned int swinit;
876 	unsigned int first;
877 	unsigned int last;
878 	unsigned int to_hash_later;
879 	unsigned int nbuf;
880 };
881 
882 static int aead_setkey(struct crypto_aead *authenc,
883 		       const u8 *key, unsigned int keylen)
884 {
885 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
886 	struct device *dev = ctx->dev;
887 	struct crypto_authenc_keys keys;
888 
889 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
890 		goto badkey;
891 
892 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
893 		goto badkey;
894 
895 	if (ctx->keylen)
896 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
897 
898 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
899 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
900 
901 	ctx->keylen = keys.authkeylen + keys.enckeylen;
902 	ctx->enckeylen = keys.enckeylen;
903 	ctx->authkeylen = keys.authkeylen;
904 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
905 				      DMA_TO_DEVICE);
906 
907 	memzero_explicit(&keys, sizeof(keys));
908 	return 0;
909 
910 badkey:
911 	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
912 	memzero_explicit(&keys, sizeof(keys));
913 	return -EINVAL;
914 }
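
/*
 * Note: the key blob parsed above uses the generic authenc layout
 * handled by crypto_authenc_extractkeys(): an rtattr-encoded enckeylen
 * parameter followed by the authentication key, then the encryption key.
 */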
915 
916 static int aead_des3_setkey(struct crypto_aead *authenc,
917 			    const u8 *key, unsigned int keylen)
918 {
919 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
920 	struct device *dev = ctx->dev;
921 	struct crypto_authenc_keys keys;
922 	u32 flags;
923 	int err;
924 
925 	err = crypto_authenc_extractkeys(&keys, key, keylen);
926 	if (unlikely(err))
927 		goto badkey;
928 
929 	err = -EINVAL;
930 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
931 		goto badkey;
932 
933 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
934 		goto badkey;
935 
936 	flags = crypto_aead_get_flags(authenc);
937 	err = __des3_verify_key(&flags, keys.enckey);
938 	if (unlikely(err)) {
939 		crypto_aead_set_flags(authenc, flags);
940 		goto out;
941 	}
942 
943 	if (ctx->keylen)
944 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
945 
946 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
947 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
948 
949 	ctx->keylen = keys.authkeylen + keys.enckeylen;
950 	ctx->enckeylen = keys.enckeylen;
951 	ctx->authkeylen = keys.authkeylen;
952 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
953 				      DMA_TO_DEVICE);
954 
955 out:
956 	memzero_explicit(&keys, sizeof(keys));
957 	return err;
958 
959 badkey:
960 	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
961 	goto out;
962 }
963 
964 /*
965  * talitos_edesc - s/w-extended descriptor
966  * @src_nents: number of segments in input scatterlist
967  * @dst_nents: number of segments in output scatterlist
968  * @icv_ool: whether ICV is out-of-line
969  * @iv_dma: dma address of iv for checking continuity and link table
970  * @dma_len: length of dma mapped link_tbl space
971  * @dma_link_tbl: bus physical address of link_tbl/buf
972  * @desc: h/w descriptor
973  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
974  * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
975  *
976  * if decrypting (with authcheck), or if either src_nents or dst_nents
977  * is greater than 1, an integrity check value is concatenated to the end
978  * of link_tbl data
979  */
980 struct talitos_edesc {
981 	int src_nents;
982 	int dst_nents;
983 	bool icv_ool;
984 	dma_addr_t iv_dma;
985 	int dma_len;
986 	dma_addr_t dma_link_tbl;
987 	struct talitos_desc desc;
988 	union {
989 		struct talitos_ptr link_tbl[0];
990 		u8 buf[0];
991 	};
992 };
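
/*
 * Sketch of the allocation produced by talitos_edesc_alloc() below
 * (per-request sizes; see that function for the exact computation):
 *
 *	+--------------------------+
 *	| struct talitos_edesc     |  header + h/w descriptor
 *	+--------------------------+
 *	| link_tbl[] or buf[]      |  dma_len bytes, dma-mapped
 *	+--------------------------+
 *	| iv copy                  |  ivsize bytes at the tail
 *	+--------------------------+
 */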
993 
994 static void talitos_sg_unmap(struct device *dev,
995 			     struct talitos_edesc *edesc,
996 			     struct scatterlist *src,
997 			     struct scatterlist *dst,
998 			     unsigned int len, unsigned int offset)
999 {
1000 	struct talitos_private *priv = dev_get_drvdata(dev);
1001 	bool is_sec1 = has_ftr_sec1(priv);
1002 	unsigned int src_nents = edesc->src_nents ? : 1;
1003 	unsigned int dst_nents = edesc->dst_nents ? : 1;
1004 
1005 	if (is_sec1 && dst && dst_nents > 1) {
1006 		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
1007 					   len, DMA_FROM_DEVICE);
1008 		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
1009 				     offset);
1010 	}
1011 	if (src != dst) {
1012 		if (src_nents == 1 || !is_sec1)
1013 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
1014 
1015 		if (dst && (dst_nents == 1 || !is_sec1))
1016 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
1017 	} else if (src_nents == 1 || !is_sec1) {
1018 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
1019 	}
1020 }
1021 
1022 static void ipsec_esp_unmap(struct device *dev,
1023 			    struct talitos_edesc *edesc,
1024 			    struct aead_request *areq)
1025 {
1026 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1027 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1028 	unsigned int ivsize = crypto_aead_ivsize(aead);
1029 	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
1030 	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
1031 
1032 	if (is_ipsec_esp)
1033 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1034 					 DMA_FROM_DEVICE);
1035 	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1036 
1037 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
1038 			 areq->assoclen);
1039 
1040 	if (edesc->dma_len)
1041 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1042 				 DMA_BIDIRECTIONAL);
1043 
1044 	if (!is_ipsec_esp) {
1045 		unsigned int dst_nents = edesc->dst_nents ? : 1;
1046 
1047 		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1048 				   areq->assoclen + areq->cryptlen - ivsize);
1049 	}
1050 }
1051 
1052 /*
1053  * ipsec_esp descriptor callbacks
1054  */
1055 static void ipsec_esp_encrypt_done(struct device *dev,
1056 				   struct talitos_desc *desc, void *context,
1057 				   int err)
1058 {
1059 	struct talitos_private *priv = dev_get_drvdata(dev);
1060 	bool is_sec1 = has_ftr_sec1(priv);
1061 	struct aead_request *areq = context;
1062 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1063 	unsigned int authsize = crypto_aead_authsize(authenc);
1064 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1065 	struct talitos_edesc *edesc;
1066 	struct scatterlist *sg;
1067 	void *icvdata;
1068 
1069 	edesc = container_of(desc, struct talitos_edesc, desc);
1070 
1071 	ipsec_esp_unmap(dev, edesc, areq);
1072 
1073 	/* copy the generated ICV to dst */
1074 	if (edesc->icv_ool) {
1075 		if (is_sec1)
1076 			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
1077 		else
1078 			icvdata = &edesc->link_tbl[edesc->src_nents +
1079 						   edesc->dst_nents + 2];
1080 		sg = sg_last(areq->dst, edesc->dst_nents);
1081 		memcpy((char *)sg_virt(sg) + sg->length - authsize,
1082 		       icvdata, authsize);
1083 	}
1084 
1085 	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1086 
1087 	kfree(edesc);
1088 
1089 	aead_request_complete(areq, err);
1090 }
1091 
1092 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1093 					  struct talitos_desc *desc,
1094 					  void *context, int err)
1095 {
1096 	struct aead_request *req = context;
1097 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1098 	unsigned int authsize = crypto_aead_authsize(authenc);
1099 	struct talitos_edesc *edesc;
1100 	struct scatterlist *sg;
1101 	char *oicv, *icv;
1102 	struct talitos_private *priv = dev_get_drvdata(dev);
1103 	bool is_sec1 = has_ftr_sec1(priv);
1104 
1105 	edesc = container_of(desc, struct talitos_edesc, desc);
1106 
1107 	ipsec_esp_unmap(dev, edesc, req);
1108 
1109 	if (!err) {
1110 		/* auth check */
1111 		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
1112 		icv = (char *)sg_virt(sg) + sg->length - authsize;
1113 
1114 		if (edesc->dma_len) {
1115 			if (is_sec1)
1116 				oicv = (char *)&edesc->dma_link_tbl +
1117 					       req->assoclen + req->cryptlen;
1118 			else
1119 				oicv = (char *)
1120 				       &edesc->link_tbl[edesc->src_nents +
1121 							edesc->dst_nents + 2];
1122 			if (edesc->icv_ool)
1123 				icv = oicv + authsize;
1124 		} else
1125 			oicv = (char *)&edesc->link_tbl[0];
1126 
1127 		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1128 	}
1129 
1130 	kfree(edesc);
1131 
1132 	aead_request_complete(req, err);
1133 }
1134 
1135 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1136 					  struct talitos_desc *desc,
1137 					  void *context, int err)
1138 {
1139 	struct aead_request *req = context;
1140 	struct talitos_edesc *edesc;
1141 
1142 	edesc = container_of(desc, struct talitos_edesc, desc);
1143 
1144 	ipsec_esp_unmap(dev, edesc, req);
1145 
1146 	/* check ICV auth status */
1147 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1148 		     DESC_HDR_LO_ICCR1_PASS))
1149 		err = -EBADMSG;
1150 
1151 	kfree(edesc);
1152 
1153 	aead_request_complete(req, err);
1154 }
1155 
1156 /*
1157  * convert scatterlist to SEC h/w link table format
1158  * stop at cryptlen bytes
1159  */
1160 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1161 				 unsigned int offset, int cryptlen,
1162 				 struct talitos_ptr *link_tbl_ptr)
1163 {
1164 	int n_sg = sg_count;
1165 	int count = 0;
1166 
1167 	while (cryptlen && sg && n_sg--) {
1168 		unsigned int len = sg_dma_len(sg);
1169 
1170 		if (offset >= len) {
1171 			offset -= len;
1172 			goto next;
1173 		}
1174 
1175 		len -= offset;
1176 
1177 		if (len > cryptlen)
1178 			len = cryptlen;
1179 
1180 		to_talitos_ptr(link_tbl_ptr + count,
1181 			       sg_dma_address(sg) + offset, len, 0);
1182 		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1183 		count++;
1184 		cryptlen -= len;
1185 		offset = 0;
1186 
1187 next:
1188 		sg = sg_next(sg);
1189 	}
1190 
1191 	/* tag end of link table */
1192 	if (count > 0)
1193 		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1194 				       DESC_PTR_LNKTBL_RETURN, 0);
1195 
1196 	return count;
1197 }
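
/*
 * Example (illustrative): three scatterlist segments covering cryptlen
 * yield a link table of the form
 *
 *	{ ptr = sg0 + offset, len = l0, ext = 0 }
 *	{ ptr = sg1,          len = l1, ext = 0 }
 *	{ ptr = sg2,          len = l2, ext = DESC_PTR_LNKTBL_RETURN }
 *
 * with only the final entry tagged DESC_PTR_LNKTBL_RETURN.
 */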
1198 
1199 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1200 			      unsigned int len, struct talitos_edesc *edesc,
1201 			      struct talitos_ptr *ptr, int sg_count,
1202 			      unsigned int offset, int tbl_off, int elen)
1203 {
1204 	struct talitos_private *priv = dev_get_drvdata(dev);
1205 	bool is_sec1 = has_ftr_sec1(priv);
1206 
1207 	if (!src) {
1208 		to_talitos_ptr(ptr, 0, 0, is_sec1);
1209 		return 1;
1210 	}
1211 	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1212 	if (sg_count == 1) {
1213 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1214 		return sg_count;
1215 	}
1216 	if (is_sec1) {
1217 		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1218 		return sg_count;
1219 	}
1220 	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
1221 					 &edesc->link_tbl[tbl_off]);
1222 	if (sg_count == 1) {
1223 		/* Only one segment now, so no link table needed */
1224 		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1225 		return sg_count;
1226 	}
1227 	to_talitos_ptr(ptr, edesc->dma_link_tbl +
1228 			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1229 	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1230 
1231 	return sg_count;
1232 }
1233 
1234 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1235 			  unsigned int len, struct talitos_edesc *edesc,
1236 			  struct talitos_ptr *ptr, int sg_count,
1237 			  unsigned int offset, int tbl_off)
1238 {
1239 	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1240 				  tbl_off, 0);
1241 }
1242 
1243 /*
1244  * fill in and submit ipsec_esp descriptor
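 *
 * descriptor pointer usage, as programmed in the body below:
 *	ptr[0]	hmac key
 *	ptr[1]	hmac data (associated data)
 *	ptr[2]	cipher iv  (cipher key if not ipsec_esp)
 *	ptr[3]	cipher key (cipher iv if not ipsec_esp)
 *	ptr[4]	cipher in
 *	ptr[5]	cipher out
 *	ptr[6]	iv out (ipsec_esp) or ICV data (otherwise)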
1245  */
1246 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1247 		     void (*callback)(struct device *dev,
1248 				      struct talitos_desc *desc,
1249 				      void *context, int error))
1250 {
1251 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1252 	unsigned int authsize = crypto_aead_authsize(aead);
1253 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1254 	struct device *dev = ctx->dev;
1255 	struct talitos_desc *desc = &edesc->desc;
1256 	unsigned int cryptlen = areq->cryptlen;
1257 	unsigned int ivsize = crypto_aead_ivsize(aead);
1258 	int tbl_off = 0;
1259 	int sg_count, ret;
1260 	int elen = 0;
1261 	bool sync_needed = false;
1262 	struct talitos_private *priv = dev_get_drvdata(dev);
1263 	bool is_sec1 = has_ftr_sec1(priv);
1264 	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1265 	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1266 	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1267 
1268 	/* hmac key */
1269 	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1270 
1271 	sg_count = edesc->src_nents ?: 1;
1272 	if (is_sec1 && sg_count > 1)
1273 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1274 				  areq->assoclen + cryptlen);
1275 	else
1276 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1277 				      (areq->src == areq->dst) ?
1278 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1279 
1280 	/* hmac data */
1281 	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1282 			     &desc->ptr[1], sg_count, 0, tbl_off);
1283 
1284 	if (ret > 1) {
1285 		tbl_off += ret;
1286 		sync_needed = true;
1287 	}
1288 
1289 	/* cipher iv */
1290 	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1291 
1292 	/* cipher key */
1293 	to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1294 		       ctx->enckeylen, is_sec1);
1295 
1296 	/*
1297 	 * cipher in
1298 	 * map and adjust cipher len to the aead request cryptlen.
1299 	 * the extent is the number of HMAC bytes appended to the
1300 	 * ciphertext, typically 12 for ipsec
1301 	 */
1302 	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1303 		elen = authsize;
1304 
1305 	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1306 				 sg_count, areq->assoclen, tbl_off, elen);
1307 
1308 	if (ret > 1) {
1309 		tbl_off += ret;
1310 		sync_needed = true;
1311 	}
1312 
1313 	/* cipher out */
1314 	if (areq->src != areq->dst) {
1315 		sg_count = edesc->dst_nents ? : 1;
1316 		if (!is_sec1 || sg_count == 1)
1317 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1318 	}
1319 
1320 	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1321 			     sg_count, areq->assoclen, tbl_off);
1322 
1323 	if (is_ipsec_esp)
1324 		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1325 
1326 	/* ICV data */
1327 	if (ret > 1) {
1328 		tbl_off += ret;
1329 		edesc->icv_ool = true;
1330 		sync_needed = true;
1331 
1332 		if (is_ipsec_esp) {
1333 			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1334 			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1335 				     sizeof(struct talitos_ptr) + authsize;
1336 
1337 			/* Add an entry to the link table for ICV data */
1338 			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1339 			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1340 					       is_sec1);
1341 
1342 			/* icv data follows link tables */
1343 			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1344 				       authsize, is_sec1);
1345 		} else {
1346 			dma_addr_t addr = edesc->dma_link_tbl;
1347 
1348 			if (is_sec1)
1349 				addr += areq->assoclen + cryptlen;
1350 			else
1351 				addr += sizeof(struct talitos_ptr) * tbl_off;
1352 
1353 			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
1354 		}
1355 	} else if (!is_ipsec_esp) {
1356 		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1357 				     &desc->ptr[6], sg_count, areq->assoclen +
1358 							      cryptlen,
1359 				     tbl_off);
1360 		if (ret > 1) {
1361 			tbl_off += ret;
1362 			edesc->icv_ool = true;
1363 			sync_needed = true;
1364 		} else {
1365 			edesc->icv_ool = false;
1366 		}
1367 	} else {
1368 		edesc->icv_ool = false;
1369 	}
1370 
1371 	/* iv out */
1372 	if (is_ipsec_esp)
1373 		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1374 				       DMA_FROM_DEVICE);
1375 
1376 	if (sync_needed)
1377 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1378 					   edesc->dma_len,
1379 					   DMA_BIDIRECTIONAL);
1380 
1381 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1382 	if (ret != -EINPROGRESS) {
1383 		ipsec_esp_unmap(dev, edesc, areq);
1384 		kfree(edesc);
1385 	}
1386 	return ret;
1387 }
1388 
1389 /*
1390  * allocate and map the extended descriptor
1391  */
1392 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1393 						 struct scatterlist *src,
1394 						 struct scatterlist *dst,
1395 						 u8 *iv,
1396 						 unsigned int assoclen,
1397 						 unsigned int cryptlen,
1398 						 unsigned int authsize,
1399 						 unsigned int ivsize,
1400 						 int icv_stashing,
1401 						 u32 cryptoflags,
1402 						 bool encrypt)
1403 {
1404 	struct talitos_edesc *edesc;
1405 	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1406 	dma_addr_t iv_dma = 0;
1407 	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1408 		      GFP_ATOMIC;
1409 	struct talitos_private *priv = dev_get_drvdata(dev);
1410 	bool is_sec1 = has_ftr_sec1(priv);
1411 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1412 
1413 	if (cryptlen + authsize > max_len) {
1414 		dev_err(dev, "length exceeds h/w max limit\n");
1415 		return ERR_PTR(-EINVAL);
1416 	}
1417 
1418 	if (!dst || dst == src) {
1419 		src_len = assoclen + cryptlen + authsize;
1420 		src_nents = sg_nents_for_len(src, src_len);
1421 		if (src_nents < 0) {
1422 			dev_err(dev, "Invalid number of src SG.\n");
1423 			return ERR_PTR(-EINVAL);
1424 		}
1425 		src_nents = (src_nents == 1) ? 0 : src_nents;
1426 		dst_nents = dst ? src_nents : 0;
1427 		dst_len = 0;
1428 	} else { /* dst && dst != src */
1429 		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1430 		src_nents = sg_nents_for_len(src, src_len);
1431 		if (src_nents < 0) {
1432 			dev_err(dev, "Invalid number of src SG.\n");
1433 			return ERR_PTR(-EINVAL);
1434 		}
1435 		src_nents = (src_nents == 1) ? 0 : src_nents;
1436 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1437 		dst_nents = sg_nents_for_len(dst, dst_len);
1438 		if (dst_nents < 0) {
1439 			dev_err(dev, "Invalid number of dst SG.\n");
1440 			return ERR_PTR(-EINVAL);
1441 		}
1442 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1443 	}
1444 
1445 	/*
1446 	 * allocate space for base edesc plus the link tables,
1447 	 * allowing for two separate entries for AD and generated ICV (+ 2),
1448 	 * and space for two sets of ICVs (stashed and generated)
1449 	 */
1450 	alloc_len = sizeof(struct talitos_edesc);
1451 	if (src_nents || dst_nents) {
1452 		if (is_sec1)
1453 			dma_len = (src_nents ? src_len : 0) +
1454 				  (dst_nents ? dst_len : 0);
1455 		else
1456 			dma_len = (src_nents + dst_nents + 2) *
1457 				  sizeof(struct talitos_ptr) + authsize * 2;
1458 		alloc_len += dma_len;
1459 	} else {
1460 		dma_len = 0;
1461 		alloc_len += icv_stashing ? authsize : 0;
1462 	}
1463 
1464 	/* if it's an ahash, add space for a second desc next to the first one */
1465 	if (is_sec1 && !dst)
1466 		alloc_len += sizeof(struct talitos_desc);
1467 	alloc_len += ivsize;
1468 
1469 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1470 	if (!edesc)
1471 		return ERR_PTR(-ENOMEM);
1472 	if (ivsize) {
1473 		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1474 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1475 	}
1476 	memset(&edesc->desc, 0, sizeof(edesc->desc));
1477 
1478 	edesc->src_nents = src_nents;
1479 	edesc->dst_nents = dst_nents;
1480 	edesc->iv_dma = iv_dma;
1481 	edesc->dma_len = dma_len;
1482 	if (dma_len) {
1483 		void *addr = &edesc->link_tbl[0];
1484 
1485 		if (is_sec1 && !dst)
1486 			addr += sizeof(struct talitos_desc);
1487 		edesc->dma_link_tbl = dma_map_single(dev, addr,
1488 						     edesc->dma_len,
1489 						     DMA_BIDIRECTIONAL);
1490 	}
1491 	return edesc;
1492 }
1493 
1494 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1495 					      int icv_stashing, bool encrypt)
1496 {
1497 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1498 	unsigned int authsize = crypto_aead_authsize(authenc);
1499 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1500 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1501 
1502 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1503 				   iv, areq->assoclen, areq->cryptlen,
1504 				   authsize, ivsize, icv_stashing,
1505 				   areq->base.flags, encrypt);
1506 }
1507 
1508 static int aead_encrypt(struct aead_request *req)
1509 {
1510 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1511 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1512 	struct talitos_edesc *edesc;
1513 
1514 	/* allocate extended descriptor */
1515 	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1516 	if (IS_ERR(edesc))
1517 		return PTR_ERR(edesc);
1518 
1519 	/* set encrypt */
1520 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1521 
1522 	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1523 }
1524 
1525 static int aead_decrypt(struct aead_request *req)
1526 {
1527 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1528 	unsigned int authsize = crypto_aead_authsize(authenc);
1529 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1530 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1531 	struct talitos_edesc *edesc;
1532 	struct scatterlist *sg;
1533 	void *icvdata;
1534 
1535 	req->cryptlen -= authsize;
1536 
1537 	/* allocate extended descriptor */
1538 	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1539 	if (IS_ERR(edesc))
1540 		return PTR_ERR(edesc);
1541 
1542 	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1543 	    ((!edesc->src_nents && !edesc->dst_nents) ||
1544 	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1545 
1546 		/* decrypt and check the ICV */
1547 		edesc->desc.hdr = ctx->desc_hdr_template |
1548 				  DESC_HDR_DIR_INBOUND |
1549 				  DESC_HDR_MODE1_MDEU_CICV;
1550 
1551 		/* reset integrity check result bits */
1552 
1553 		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1554 	}
1555 
1556 	/* Have to check the ICV with software */
1557 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1558 
1559 	/* stash the incoming ICV for later comparison with the h/w-generated ICV */
1560 	if (edesc->dma_len)
1561 		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1562 						   edesc->dst_nents + 2];
1563 	else
1564 		icvdata = &edesc->link_tbl[0];
1565 
1566 	sg = sg_last(req->src, edesc->src_nents ? : 1);
1567 
1568 	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1569 
1570 	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1571 }
1572 
1573 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1574 			     const u8 *key, unsigned int keylen)
1575 {
1576 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1577 	struct device *dev = ctx->dev;
1578 
1579 	if (ctx->keylen)
1580 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1581 
1582 	memcpy(&ctx->key, key, keylen);
1583 	ctx->keylen = keylen;
1584 
1585 	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1586 
1587 	return 0;
1588 }
1589 
1590 static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1591 				 const u8 *key, unsigned int keylen)
1592 {
1593 	u32 tmp[DES_EXPKEY_WORDS];
1594 
1595 	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1596 		     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
1597 	    !des_ekey(tmp, key)) {
1598 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1599 		return -EINVAL;
1600 	}
1601 
1602 	return ablkcipher_setkey(cipher, key, keylen);
1603 }
1604 
1605 static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1606 				  const u8 *key, unsigned int keylen)
1607 {
1608 	u32 flags;
1609 	int err;
1610 
1611 	flags = crypto_ablkcipher_get_flags(cipher);
1612 	err = __des3_verify_key(&flags, key);
1613 	if (unlikely(err)) {
1614 		crypto_ablkcipher_set_flags(cipher, flags);
1615 		return err;
1616 	}
1617 
1618 	return ablkcipher_setkey(cipher, key, keylen);
1619 }
1620 
1621 static void common_nonsnoop_unmap(struct device *dev,
1622 				  struct talitos_edesc *edesc,
1623 				  struct ablkcipher_request *areq)
1624 {
1625 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1626 
1627 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1628 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1629 
1630 	if (edesc->dma_len)
1631 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1632 				 DMA_BIDIRECTIONAL);
1633 }
1634 
1635 static void ablkcipher_done(struct device *dev,
1636 			    struct talitos_desc *desc, void *context,
1637 			    int err)
1638 {
1639 	struct ablkcipher_request *areq = context;
1640 	struct talitos_edesc *edesc;
1641 
1642 	edesc = container_of(desc, struct talitos_edesc, desc);
1643 
1644 	common_nonsnoop_unmap(dev, edesc, areq);
1645 
1646 	kfree(edesc);
1647 
1648 	areq->base.complete(&areq->base, err);
1649 }
1650 
1651 static int common_nonsnoop(struct talitos_edesc *edesc,
1652 			   struct ablkcipher_request *areq,
1653 			   void (*callback) (struct device *dev,
1654 					     struct talitos_desc *desc,
1655 					     void *context, int error))
1656 {
1657 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1658 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1659 	struct device *dev = ctx->dev;
1660 	struct talitos_desc *desc = &edesc->desc;
1661 	unsigned int cryptlen = areq->nbytes;
1662 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1663 	int sg_count, ret;
1664 	bool sync_needed = false;
1665 	struct talitos_private *priv = dev_get_drvdata(dev);
1666 	bool is_sec1 = has_ftr_sec1(priv);
1667 
1668 	/* first DWORD empty */
1669 
1670 	/* cipher iv */
1671 	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1672 
1673 	/* cipher key */
1674 	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1675 
1676 	sg_count = edesc->src_nents ?: 1;
1677 	if (is_sec1 && sg_count > 1)
1678 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1679 				  cryptlen);
1680 	else
1681 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1682 				      (areq->src == areq->dst) ?
1683 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1684 	/*
1685 	 * cipher in
1686 	 */
1687 	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1688 				  &desc->ptr[3], sg_count, 0, 0);
1689 	if (sg_count > 1)
1690 		sync_needed = true;
1691 
1692 	/* cipher out */
1693 	if (areq->src != areq->dst) {
1694 		sg_count = edesc->dst_nents ? : 1;
1695 		if (!is_sec1 || sg_count == 1)
1696 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1697 	}
1698 
1699 	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1700 			     sg_count, 0, (edesc->src_nents + 1));
1701 	if (ret > 1)
1702 		sync_needed = true;
1703 
1704 	/* iv out */
1705 	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1706 			       DMA_FROM_DEVICE);
1707 
1708 	/* last DWORD empty */
1709 
1710 	if (sync_needed)
1711 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1712 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1713 
1714 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1715 	if (ret != -EINPROGRESS) {
1716 		common_nonsnoop_unmap(dev, edesc, areq);
1717 		kfree(edesc);
1718 	}
1719 	return ret;
1720 }
1721 
1722 static struct talitos_edesc *
1723 ablkcipher_edesc_alloc(struct ablkcipher_request *areq, bool encrypt)
1724 {
1725 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1726 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1727 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1728 
1729 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1730 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1731 				   areq->base.flags, encrypt);
1732 }
1733 
1734 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1735 {
1736 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1737 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1738 	struct talitos_edesc *edesc;
1739 
1740 	/* allocate extended descriptor */
1741 	edesc = ablkcipher_edesc_alloc(areq, true);
1742 	if (IS_ERR(edesc))
1743 		return PTR_ERR(edesc);
1744 
1745 	/* set encrypt */
1746 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1747 
1748 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1749 }
1750 
1751 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1752 {
1753 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1754 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1755 	struct talitos_edesc *edesc;
1756 
1757 	/* allocate extended descriptor */
1758 	edesc = ablkcipher_edesc_alloc(areq, false);
1759 	if (IS_ERR(edesc))
1760 		return PTR_ERR(edesc);
1761 
1762 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1763 
1764 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1765 }
1766 
1767 static void common_nonsnoop_hash_unmap(struct device *dev,
1768 				       struct talitos_edesc *edesc,
1769 				       struct ahash_request *areq)
1770 {
1771 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1772 	struct talitos_private *priv = dev_get_drvdata(dev);
1773 	bool is_sec1 = has_ftr_sec1(priv);
1774 	struct talitos_desc *desc = &edesc->desc;
1775 	struct talitos_desc *desc2 = desc + 1;
1776 
1777 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1778 	if (desc->next_desc &&
1779 	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
1780 		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1781 
1782 	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1783 
1784 	/* When using hashctx-in, must unmap it. */
1785 	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1786 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1787 					 DMA_TO_DEVICE);
1788 	else if (desc->next_desc)
1789 		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1790 					 DMA_TO_DEVICE);
1791 
1792 	if (is_sec1 && req_ctx->nbuf)
1793 		unmap_single_talitos_ptr(dev, &desc->ptr[3],
1794 					 DMA_TO_DEVICE);
1795 
1796 	if (edesc->dma_len)
1797 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1798 				 DMA_BIDIRECTIONAL);
1799 
1800 	if (edesc->desc.next_desc)
1801 		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1802 				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1803 }
1804 
1805 static void ahash_done(struct device *dev,
1806 		       struct talitos_desc *desc, void *context,
1807 		       int err)
1808 {
1809 	struct ahash_request *areq = context;
1810 	struct talitos_edesc *edesc =
1811 		 container_of(desc, struct talitos_edesc, desc);
1812 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1813 
1814 	if (!req_ctx->last && req_ctx->to_hash_later) {
1815 		/* Position any partial block for next update/final/finup */
1816 		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1817 		req_ctx->nbuf = req_ctx->to_hash_later;
1818 	}
1819 	common_nonsnoop_hash_unmap(dev, edesc, areq);
1820 
1821 	kfree(edesc);
1822 
1823 	areq->base.complete(&areq->base, err);
1824 }
1825 
1826 /*
1827  * SEC1 cannot hash a zero-length message, so we do the padding
1828  * ourselves and submit one pre-padded block
1829  */
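/*
 * Note: for the MD5/SHA-1/SHA-256 family handled here, the padding of an
 * empty message is a single 0x80 byte followed by zeros (the 64-bit length
 * field is zero as well), which is exactly what padded_hash encodes.
 */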
1830 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1831 			       struct talitos_edesc *edesc,
1832 			       struct talitos_ptr *ptr)
1833 {
1834 	static u8 padded_hash[64] = {
1835 		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1836 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1837 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1838 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1839 	};
1840 
1841 	pr_err_once("Bug in SEC1, padding ourselves\n");
1842 	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1843 	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1844 			       (char *)padded_hash, DMA_TO_DEVICE);
1845 }
1846 
1847 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1848 				struct ahash_request *areq, unsigned int length,
1849 				unsigned int offset,
1850 				void (*callback) (struct device *dev,
1851 						  struct talitos_desc *desc,
1852 						  void *context, int error))
1853 {
1854 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1855 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1856 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1857 	struct device *dev = ctx->dev;
1858 	struct talitos_desc *desc = &edesc->desc;
1859 	int ret;
1860 	bool sync_needed = false;
1861 	struct talitos_private *priv = dev_get_drvdata(dev);
1862 	bool is_sec1 = has_ftr_sec1(priv);
1863 	int sg_count;
1864 
1865 	/* first DWORD empty */
1866 
1867 	/* hash context in */
1868 	if (!req_ctx->first || req_ctx->swinit) {
1869 		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1870 					      req_ctx->hw_context_size,
1871 					      req_ctx->hw_context,
1872 					      DMA_TO_DEVICE);
1873 		req_ctx->swinit = 0;
1874 	}
1875 	/* Indicate next op is not the first. */
1876 	req_ctx->first = 0;
1877 
1878 	/* HMAC key */
1879 	if (ctx->keylen)
1880 		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1881 			       is_sec1);
1882 
1883 	if (is_sec1 && req_ctx->nbuf)
1884 		length -= req_ctx->nbuf;
1885 
1886 	sg_count = edesc->src_nents ?: 1;
1887 	if (is_sec1 && sg_count > 1)
1888 		sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1889 				   edesc->buf + sizeof(struct talitos_desc),
1890 				   length, req_ctx->nbuf);
1891 	else if (length)
1892 		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1893 				      DMA_TO_DEVICE);
1894 	/*
1895 	 * data in
1896 	 */
1897 	if (is_sec1 && req_ctx->nbuf) {
1898 		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1899 				       req_ctx->buf[req_ctx->buf_idx],
1900 				       DMA_TO_DEVICE);
1901 	} else {
1902 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1903 					  &desc->ptr[3], sg_count, offset, 0);
1904 		if (sg_count > 1)
1905 			sync_needed = true;
1906 	}
1907 
1908 	/* fifth DWORD empty */
1909 
1910 	/* hash/HMAC out -or- hash context out */
1911 	if (req_ctx->last)
1912 		map_single_talitos_ptr(dev, &desc->ptr[5],
1913 				       crypto_ahash_digestsize(tfm),
1914 				       areq->result, DMA_FROM_DEVICE);
1915 	else
1916 		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1917 					      req_ctx->hw_context_size,
1918 					      req_ctx->hw_context,
1919 					      DMA_FROM_DEVICE);
1920 
1921 	/* last DWORD empty */
1922 
1923 	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1924 		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1925 
1926 	if (is_sec1 && req_ctx->nbuf && length) {
1927 		struct talitos_desc *desc2 = desc + 1;
1928 		dma_addr_t next_desc;
1929 
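		/*
		 * Build a second descriptor chained via next_desc: desc
		 * hashes the previously buffered bytes as a CONT step with
		 * DONE notification suppressed, while desc2 hashes the
		 * scatterlist data and keeps the final PAD/notify bits.
		 */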
1930 		memset(desc2, 0, sizeof(*desc2));
1931 		desc2->hdr = desc->hdr;
1932 		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1933 		desc2->hdr1 = desc2->hdr;
1934 		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1935 		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1936 		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1937 
1938 		if (desc->ptr[1].ptr)
1939 			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1940 					 is_sec1);
1941 		else
1942 			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1943 						      req_ctx->hw_context_size,
1944 						      req_ctx->hw_context,
1945 						      DMA_TO_DEVICE);
1946 		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1947 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1948 					  &desc2->ptr[3], sg_count, offset, 0);
1949 		if (sg_count > 1)
1950 			sync_needed = true;
1951 		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1952 		if (req_ctx->last)
1953 			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1954 						      req_ctx->hw_context_size,
1955 						      req_ctx->hw_context,
1956 						      DMA_FROM_DEVICE);
1957 
1958 		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1959 					   DMA_BIDIRECTIONAL);
1960 		desc->next_desc = cpu_to_be32(next_desc);
1961 	}
1962 
1963 	if (sync_needed)
1964 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1965 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1966 
1967 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1968 	if (ret != -EINPROGRESS) {
1969 		common_nonsnoop_hash_unmap(dev, edesc, areq);
1970 		kfree(edesc);
1971 	}
1972 	return ret;
1973 }
1974 
1975 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1976 					       unsigned int nbytes)
1977 {
1978 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1979 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1980 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1981 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1982 	bool is_sec1 = has_ftr_sec1(priv);
1983 
1984 	if (is_sec1)
1985 		nbytes -= req_ctx->nbuf;
1986 
1987 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1988 				   nbytes, 0, 0, 0, areq->base.flags, false);
1989 }
1990 
1991 static int ahash_init(struct ahash_request *areq)
1992 {
1993 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1994 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1995 	struct device *dev = ctx->dev;
1996 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1997 	unsigned int size;
1998 	dma_addr_t dma;
1999 
2000 	/* Initialize the context */
2001 	req_ctx->buf_idx = 0;
2002 	req_ctx->nbuf = 0;
2003 	req_ctx->first = 1; /* first indicates h/w must init its context */
2004 	req_ctx->swinit = 0; /* assume h/w init of context */
2005 	size =	(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2006 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2007 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2008 	req_ctx->hw_context_size = size;
2009 
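	/*
	 * A map/unmap pair with no device access in between: this flushes
	 * the CPU cache over hw_context, so the later "nosync" mappings of
	 * the same buffer start from a coherent state.
	 */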
2010 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2011 			     DMA_TO_DEVICE);
2012 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2013 
2014 	return 0;
2015 }
2016 
2017 /*
2018  * on h/w without explicit sha224 support, we initialize the h/w context
2019  * manually with the sha224 initial values and tell it to run sha256;
 * sha224 is just sha256 with a different IV, truncated to 224 bits.
2020  */
2021 static int ahash_init_sha224_swinit(struct ahash_request *areq)
2022 {
2023 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2024 
2025 	req_ctx->hw_context[0] = SHA224_H0;
2026 	req_ctx->hw_context[1] = SHA224_H1;
2027 	req_ctx->hw_context[2] = SHA224_H2;
2028 	req_ctx->hw_context[3] = SHA224_H3;
2029 	req_ctx->hw_context[4] = SHA224_H4;
2030 	req_ctx->hw_context[5] = SHA224_H5;
2031 	req_ctx->hw_context[6] = SHA224_H6;
2032 	req_ctx->hw_context[7] = SHA224_H7;
2033 
2034 	/* init 64-bit count */
2035 	req_ctx->hw_context[8] = 0;
2036 	req_ctx->hw_context[9] = 0;
2037 
2038 	ahash_init(areq);
2039 	req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */
2040 
2041 	return 0;
2042 }
2043 
2044 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2045 {
2046 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2047 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2048 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2049 	struct talitos_edesc *edesc;
2050 	unsigned int blocksize =
2051 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2052 	unsigned int nbytes_to_hash;
2053 	unsigned int to_hash_later;
2054 	unsigned int nsg;
2055 	int nents;
2056 	struct device *dev = ctx->dev;
2057 	struct talitos_private *priv = dev_get_drvdata(dev);
2058 	bool is_sec1 = has_ftr_sec1(priv);
2059 	int offset = 0;
2060 	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
2061 
2062 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2063 		/* Buffer up to one whole block */
2064 		nents = sg_nents_for_len(areq->src, nbytes);
2065 		if (nents < 0) {
2066 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2067 			return nents;
2068 		}
2069 		sg_copy_to_buffer(areq->src, nents,
2070 				  ctx_buf + req_ctx->nbuf, nbytes);
2071 		req_ctx->nbuf += nbytes;
2072 		return 0;
2073 	}
2074 
2075 	/* At least (blocksize + 1) bytes are available to hash */
2076 	nbytes_to_hash = nbytes + req_ctx->nbuf;
2077 	to_hash_later = nbytes_to_hash & (blocksize - 1);
2078 
2079 	if (req_ctx->last)
2080 		to_hash_later = 0;
2081 	else if (to_hash_later)
2082 		/* There is a partial block. Hash the full block(s) now */
2083 		nbytes_to_hash -= to_hash_later;
2084 	else {
2085 		/* Keep one block buffered */
2086 		nbytes_to_hash -= blocksize;
2087 		to_hash_later = blocksize;
2088 	}
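	/*
	 * Worked example with a 64-byte blocksize: nbuf = 10 buffered bytes
	 * plus nbytes = 118 new bytes gives nbytes_to_hash = 128 and
	 * to_hash_later = 0; as this is not the last request, one block is
	 * kept back, so 64 bytes are hashed now and 64 are buffered.
	 */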
2089 
2090 	/* Chain in any previously buffered data */
2091 	if (!is_sec1 && req_ctx->nbuf) {
2092 		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2093 		sg_init_table(req_ctx->bufsl, nsg);
2094 		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2095 		if (nsg > 1)
2096 			sg_chain(req_ctx->bufsl, 2, areq->src);
2097 		req_ctx->psrc = req_ctx->bufsl;
2098 	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2099 		if (nbytes_to_hash > blocksize)
2100 			offset = blocksize - req_ctx->nbuf;
2101 		else
2102 			offset = nbytes_to_hash - req_ctx->nbuf;
2103 		nents = sg_nents_for_len(areq->src, offset);
2104 		if (nents < 0) {
2105 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2106 			return nents;
2107 		}
2108 		sg_copy_to_buffer(areq->src, nents,
2109 				  ctx_buf + req_ctx->nbuf, offset);
2110 		req_ctx->nbuf += offset;
2111 		req_ctx->psrc = areq->src;
2112 	} else
2113 		req_ctx->psrc = areq->src;
2114 
2115 	if (to_hash_later) {
2116 		nents = sg_nents_for_len(areq->src, nbytes);
2117 		if (nents < 0) {
2118 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2119 			return nents;
2120 		}
2121 		sg_pcopy_to_buffer(areq->src, nents,
2122 				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2123 				   to_hash_later,
2124 				   nbytes - to_hash_later);
2125 	}
2126 	req_ctx->to_hash_later = to_hash_later;
2127 
2128 	/* Allocate extended descriptor */
2129 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2130 	if (IS_ERR(edesc))
2131 		return PTR_ERR(edesc);
2132 
2133 	edesc->desc.hdr = ctx->desc_hdr_template;
2134 
2135 	/* On last one, request SEC to pad; otherwise continue */
2136 	if (req_ctx->last)
2137 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2138 	else
2139 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2140 
2141 	/* request SEC to INIT hash. */
2142 	if (req_ctx->first && !req_ctx->swinit)
2143 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2144 
2145 	/* When the tfm context has a keylen, it's an HMAC.
2146 	 * A first or last (ie. not middle) descriptor must request HMAC.
2147 	 */
2148 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2149 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2150 
2151 	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
2152 				    ahash_done);
2153 }
2154 
2155 static int ahash_update(struct ahash_request *areq)
2156 {
2157 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2158 
2159 	req_ctx->last = 0;
2160 
2161 	return ahash_process_req(areq, areq->nbytes);
2162 }
2163 
2164 static int ahash_final(struct ahash_request *areq)
2165 {
2166 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2167 
2168 	req_ctx->last = 1;
2169 
2170 	return ahash_process_req(areq, 0);
2171 }
2172 
2173 static int ahash_finup(struct ahash_request *areq)
2174 {
2175 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2176 
2177 	req_ctx->last = 1;
2178 
2179 	return ahash_process_req(areq, areq->nbytes);
2180 }
2181 
2182 static int ahash_digest(struct ahash_request *areq)
2183 {
2184 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2185 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2186 
2187 	ahash->init(areq);
2188 	req_ctx->last = 1;
2189 
2190 	return ahash_process_req(areq, areq->nbytes);
2191 }
2192 
2193 static int ahash_export(struct ahash_request *areq, void *out)
2194 {
2195 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2196 	struct talitos_export_state *export = out;
2197 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2198 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2199 	struct device *dev = ctx->dev;
2200 	dma_addr_t dma;
2201 
2202 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2203 			     DMA_FROM_DEVICE);
2204 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2205 
2206 	memcpy(export->hw_context, req_ctx->hw_context,
2207 	       req_ctx->hw_context_size);
2208 	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2209 	export->swinit = req_ctx->swinit;
2210 	export->first = req_ctx->first;
2211 	export->last = req_ctx->last;
2212 	export->to_hash_later = req_ctx->to_hash_later;
2213 	export->nbuf = req_ctx->nbuf;
2214 
2215 	return 0;
2216 }
2217 
2218 static int ahash_import(struct ahash_request *areq, const void *in)
2219 {
2220 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2221 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2222 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2223 	struct device *dev = ctx->dev;
2224 	const struct talitos_export_state *export = in;
2225 	unsigned int size;
2226 	dma_addr_t dma;
2227 
2228 	memset(req_ctx, 0, sizeof(*req_ctx));
2229 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2230 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2231 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2232 	req_ctx->hw_context_size = size;
2233 	memcpy(req_ctx->hw_context, export->hw_context, size);
2234 	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2235 	req_ctx->swinit = export->swinit;
2236 	req_ctx->first = export->first;
2237 	req_ctx->last = export->last;
2238 	req_ctx->to_hash_later = export->to_hash_later;
2239 	req_ctx->nbuf = export->nbuf;
2240 
2241 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2242 			     DMA_TO_DEVICE);
2243 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2244 
2245 	return 0;
2246 }
2247 
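/*
 * ahash_export()/ahash_import() above implement the crypto core's
 * partial-state save/restore; halg.statesize in driver_algs is
 * sizeof(struct talitos_export_state) to match what they copy.
 */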
2248 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2249 		   u8 *hash)
2250 {
2251 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2252 
2253 	struct scatterlist sg[1];
2254 	struct ahash_request *req;
2255 	struct crypto_wait wait;
2256 	int ret;
2257 
2258 	crypto_init_wait(&wait);
2259 
2260 	req = ahash_request_alloc(tfm, GFP_KERNEL);
2261 	if (!req)
2262 		return -ENOMEM;
2263 
2264 	/* Keep tfm keylen == 0 during hash of the long key */
2265 	ctx->keylen = 0;
2266 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2267 				   crypto_req_done, &wait);
2268 
2269 	sg_init_one(&sg[0], key, keylen);
2270 
2271 	ahash_request_set_crypt(req, sg, hash, keylen);
2272 	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2273 
2274 	ahash_request_free(req);
2275 
2276 	return ret;
2277 }
2278 
2279 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2280 			unsigned int keylen)
2281 {
2282 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2283 	struct device *dev = ctx->dev;
2284 	unsigned int blocksize =
2285 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2286 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2287 	unsigned int keysize = keylen;
2288 	u8 hash[SHA512_DIGEST_SIZE];
2289 	int ret;
2290 
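	/*
	 * Per the standard HMAC rule (RFC 2104), a key longer than the block
	 * size is replaced by its digest; keyhash() computes that digest on
	 * this same tfm with ctx->keylen temporarily zeroed.
	 */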
2291 	if (keylen <= blocksize)
2292 		memcpy(ctx->key, key, keysize);
2293 	else {
2294 		/* Must get the hash of the long key */
2295 		ret = keyhash(tfm, key, keylen, hash);
2296 
2297 		if (ret) {
2298 			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2299 			return -EINVAL;
2300 		}
2301 
2302 		keysize = digestsize;
2303 		memcpy(ctx->key, hash, digestsize);
2304 	}
2305 
2306 	if (ctx->keylen)
2307 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2308 
2309 	ctx->keylen = keysize;
2310 	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2311 
2312 	return 0;
2313 }
2314 
2316 struct talitos_alg_template {
2317 	u32 type;
2318 	u32 priority;
2319 	union {
2320 		struct crypto_alg crypto;
2321 		struct ahash_alg hash;
2322 		struct aead_alg aead;
2323 	} alg;
2324 	__be32 desc_hdr_template;
2325 };
2326 
2327 static struct talitos_alg_template driver_algs[] = {
2328 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2329 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2330 		.alg.aead = {
2331 			.base = {
2332 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2333 				.cra_driver_name = "authenc-hmac-sha1-"
2334 						   "cbc-aes-talitos",
2335 				.cra_blocksize = AES_BLOCK_SIZE,
2336 				.cra_flags = CRYPTO_ALG_ASYNC,
2337 			},
2338 			.ivsize = AES_BLOCK_SIZE,
2339 			.maxauthsize = SHA1_DIGEST_SIZE,
2340 		},
2341 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2342 			             DESC_HDR_SEL0_AESU |
2343 		                     DESC_HDR_MODE0_AESU_CBC |
2344 		                     DESC_HDR_SEL1_MDEUA |
2345 		                     DESC_HDR_MODE1_MDEU_INIT |
2346 		                     DESC_HDR_MODE1_MDEU_PAD |
2347 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2348 	},
2349 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2350 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2351 		.alg.aead = {
2352 			.base = {
2353 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2354 				.cra_driver_name = "authenc-hmac-sha1-"
2355 						   "cbc-aes-talitos",
2356 				.cra_blocksize = AES_BLOCK_SIZE,
2357 				.cra_flags = CRYPTO_ALG_ASYNC,
2358 			},
2359 			.ivsize = AES_BLOCK_SIZE,
2360 			.maxauthsize = SHA1_DIGEST_SIZE,
2361 		},
2362 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2363 				     DESC_HDR_SEL0_AESU |
2364 				     DESC_HDR_MODE0_AESU_CBC |
2365 				     DESC_HDR_SEL1_MDEUA |
2366 				     DESC_HDR_MODE1_MDEU_INIT |
2367 				     DESC_HDR_MODE1_MDEU_PAD |
2368 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2369 	},
2370 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2371 		.alg.aead = {
2372 			.base = {
2373 				.cra_name = "authenc(hmac(sha1),"
2374 					    "cbc(des3_ede))",
2375 				.cra_driver_name = "authenc-hmac-sha1-"
2376 						   "cbc-3des-talitos",
2377 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2378 				.cra_flags = CRYPTO_ALG_ASYNC,
2379 			},
2380 			.ivsize = DES3_EDE_BLOCK_SIZE,
2381 			.maxauthsize = SHA1_DIGEST_SIZE,
2382 			.setkey = aead_des3_setkey,
2383 		},
2384 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2385 			             DESC_HDR_SEL0_DEU |
2386 		                     DESC_HDR_MODE0_DEU_CBC |
2387 		                     DESC_HDR_MODE0_DEU_3DES |
2388 		                     DESC_HDR_SEL1_MDEUA |
2389 		                     DESC_HDR_MODE1_MDEU_INIT |
2390 		                     DESC_HDR_MODE1_MDEU_PAD |
2391 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2392 	},
2393 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2394 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2395 		.alg.aead = {
2396 			.base = {
2397 				.cra_name = "authenc(hmac(sha1),"
2398 					    "cbc(des3_ede))",
2399 				.cra_driver_name = "authenc-hmac-sha1-"
2400 						   "cbc-3des-talitos",
2401 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2402 				.cra_flags = CRYPTO_ALG_ASYNC,
2403 			},
2404 			.ivsize = DES3_EDE_BLOCK_SIZE,
2405 			.maxauthsize = SHA1_DIGEST_SIZE,
2406 			.setkey = aead_des3_setkey,
2407 		},
2408 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2409 				     DESC_HDR_SEL0_DEU |
2410 				     DESC_HDR_MODE0_DEU_CBC |
2411 				     DESC_HDR_MODE0_DEU_3DES |
2412 				     DESC_HDR_SEL1_MDEUA |
2413 				     DESC_HDR_MODE1_MDEU_INIT |
2414 				     DESC_HDR_MODE1_MDEU_PAD |
2415 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2416 	},
2417 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2418 		.alg.aead = {
2419 			.base = {
2420 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2421 				.cra_driver_name = "authenc-hmac-sha224-"
2422 						   "cbc-aes-talitos",
2423 				.cra_blocksize = AES_BLOCK_SIZE,
2424 				.cra_flags = CRYPTO_ALG_ASYNC,
2425 			},
2426 			.ivsize = AES_BLOCK_SIZE,
2427 			.maxauthsize = SHA224_DIGEST_SIZE,
2428 		},
2429 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2430 				     DESC_HDR_SEL0_AESU |
2431 				     DESC_HDR_MODE0_AESU_CBC |
2432 				     DESC_HDR_SEL1_MDEUA |
2433 				     DESC_HDR_MODE1_MDEU_INIT |
2434 				     DESC_HDR_MODE1_MDEU_PAD |
2435 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2436 	},
2437 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2438 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2439 		.alg.aead = {
2440 			.base = {
2441 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2442 				.cra_driver_name = "authenc-hmac-sha224-"
2443 						   "cbc-aes-talitos",
2444 				.cra_blocksize = AES_BLOCK_SIZE,
2445 				.cra_flags = CRYPTO_ALG_ASYNC,
2446 			},
2447 			.ivsize = AES_BLOCK_SIZE,
2448 			.maxauthsize = SHA224_DIGEST_SIZE,
2449 		},
2450 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2451 				     DESC_HDR_SEL0_AESU |
2452 				     DESC_HDR_MODE0_AESU_CBC |
2453 				     DESC_HDR_SEL1_MDEUA |
2454 				     DESC_HDR_MODE1_MDEU_INIT |
2455 				     DESC_HDR_MODE1_MDEU_PAD |
2456 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2457 	},
2458 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2459 		.alg.aead = {
2460 			.base = {
2461 				.cra_name = "authenc(hmac(sha224),"
2462 					    "cbc(des3_ede))",
2463 				.cra_driver_name = "authenc-hmac-sha224-"
2464 						   "cbc-3des-talitos",
2465 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2466 				.cra_flags = CRYPTO_ALG_ASYNC,
2467 			},
2468 			.ivsize = DES3_EDE_BLOCK_SIZE,
2469 			.maxauthsize = SHA224_DIGEST_SIZE,
2470 			.setkey = aead_des3_setkey,
2471 		},
2472 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2473 			             DESC_HDR_SEL0_DEU |
2474 		                     DESC_HDR_MODE0_DEU_CBC |
2475 		                     DESC_HDR_MODE0_DEU_3DES |
2476 		                     DESC_HDR_SEL1_MDEUA |
2477 		                     DESC_HDR_MODE1_MDEU_INIT |
2478 		                     DESC_HDR_MODE1_MDEU_PAD |
2479 		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2480 	},
2481 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2482 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2483 		.alg.aead = {
2484 			.base = {
2485 				.cra_name = "authenc(hmac(sha224),"
2486 					    "cbc(des3_ede))",
2487 				.cra_driver_name = "authenc-hmac-sha224-"
2488 						   "cbc-3des-talitos",
2489 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2490 				.cra_flags = CRYPTO_ALG_ASYNC,
2491 			},
2492 			.ivsize = DES3_EDE_BLOCK_SIZE,
2493 			.maxauthsize = SHA224_DIGEST_SIZE,
2494 			.setkey = aead_des3_setkey,
2495 		},
2496 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2497 				     DESC_HDR_SEL0_DEU |
2498 				     DESC_HDR_MODE0_DEU_CBC |
2499 				     DESC_HDR_MODE0_DEU_3DES |
2500 				     DESC_HDR_SEL1_MDEUA |
2501 				     DESC_HDR_MODE1_MDEU_INIT |
2502 				     DESC_HDR_MODE1_MDEU_PAD |
2503 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2504 	},
2505 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2506 		.alg.aead = {
2507 			.base = {
2508 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2509 				.cra_driver_name = "authenc-hmac-sha256-"
2510 						   "cbc-aes-talitos",
2511 				.cra_blocksize = AES_BLOCK_SIZE,
2512 				.cra_flags = CRYPTO_ALG_ASYNC,
2513 			},
2514 			.ivsize = AES_BLOCK_SIZE,
2515 			.maxauthsize = SHA256_DIGEST_SIZE,
2516 		},
2517 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2518 			             DESC_HDR_SEL0_AESU |
2519 		                     DESC_HDR_MODE0_AESU_CBC |
2520 		                     DESC_HDR_SEL1_MDEUA |
2521 		                     DESC_HDR_MODE1_MDEU_INIT |
2522 		                     DESC_HDR_MODE1_MDEU_PAD |
2523 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2524 	},
2525 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2526 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2527 		.alg.aead = {
2528 			.base = {
2529 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2530 				.cra_driver_name = "authenc-hmac-sha256-"
2531 						   "cbc-aes-talitos",
2532 				.cra_blocksize = AES_BLOCK_SIZE,
2533 				.cra_flags = CRYPTO_ALG_ASYNC,
2534 			},
2535 			.ivsize = AES_BLOCK_SIZE,
2536 			.maxauthsize = SHA256_DIGEST_SIZE,
2537 		},
2538 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2539 				     DESC_HDR_SEL0_AESU |
2540 				     DESC_HDR_MODE0_AESU_CBC |
2541 				     DESC_HDR_SEL1_MDEUA |
2542 				     DESC_HDR_MODE1_MDEU_INIT |
2543 				     DESC_HDR_MODE1_MDEU_PAD |
2544 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2545 	},
2546 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2547 		.alg.aead = {
2548 			.base = {
2549 				.cra_name = "authenc(hmac(sha256),"
2550 					    "cbc(des3_ede))",
2551 				.cra_driver_name = "authenc-hmac-sha256-"
2552 						   "cbc-3des-talitos",
2553 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2554 				.cra_flags = CRYPTO_ALG_ASYNC,
2555 			},
2556 			.ivsize = DES3_EDE_BLOCK_SIZE,
2557 			.maxauthsize = SHA256_DIGEST_SIZE,
2558 			.setkey = aead_des3_setkey,
2559 		},
2560 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2561 			             DESC_HDR_SEL0_DEU |
2562 		                     DESC_HDR_MODE0_DEU_CBC |
2563 		                     DESC_HDR_MODE0_DEU_3DES |
2564 		                     DESC_HDR_SEL1_MDEUA |
2565 		                     DESC_HDR_MODE1_MDEU_INIT |
2566 		                     DESC_HDR_MODE1_MDEU_PAD |
2567 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2568 	},
2569 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2570 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2571 		.alg.aead = {
2572 			.base = {
2573 				.cra_name = "authenc(hmac(sha256),"
2574 					    "cbc(des3_ede))",
2575 				.cra_driver_name = "authenc-hmac-sha256-"
2576 						   "cbc-3des-talitos",
2577 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2578 				.cra_flags = CRYPTO_ALG_ASYNC,
2579 			},
2580 			.ivsize = DES3_EDE_BLOCK_SIZE,
2581 			.maxauthsize = SHA256_DIGEST_SIZE,
2582 			.setkey = aead_des3_setkey,
2583 		},
2584 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2585 				     DESC_HDR_SEL0_DEU |
2586 				     DESC_HDR_MODE0_DEU_CBC |
2587 				     DESC_HDR_MODE0_DEU_3DES |
2588 				     DESC_HDR_SEL1_MDEUA |
2589 				     DESC_HDR_MODE1_MDEU_INIT |
2590 				     DESC_HDR_MODE1_MDEU_PAD |
2591 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2592 	},
2593 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2594 		.alg.aead = {
2595 			.base = {
2596 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2597 				.cra_driver_name = "authenc-hmac-sha384-"
2598 						   "cbc-aes-talitos",
2599 				.cra_blocksize = AES_BLOCK_SIZE,
2600 				.cra_flags = CRYPTO_ALG_ASYNC,
2601 			},
2602 			.ivsize = AES_BLOCK_SIZE,
2603 			.maxauthsize = SHA384_DIGEST_SIZE,
2604 		},
2605 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2606 			             DESC_HDR_SEL0_AESU |
2607 		                     DESC_HDR_MODE0_AESU_CBC |
2608 		                     DESC_HDR_SEL1_MDEUB |
2609 		                     DESC_HDR_MODE1_MDEU_INIT |
2610 		                     DESC_HDR_MODE1_MDEU_PAD |
2611 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2612 	},
2613 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2614 		.alg.aead = {
2615 			.base = {
2616 				.cra_name = "authenc(hmac(sha384),"
2617 					    "cbc(des3_ede))",
2618 				.cra_driver_name = "authenc-hmac-sha384-"
2619 						   "cbc-3des-talitos",
2620 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2621 				.cra_flags = CRYPTO_ALG_ASYNC,
2622 			},
2623 			.ivsize = DES3_EDE_BLOCK_SIZE,
2624 			.maxauthsize = SHA384_DIGEST_SIZE,
2625 			.setkey = aead_des3_setkey,
2626 		},
2627 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2628 			             DESC_HDR_SEL0_DEU |
2629 		                     DESC_HDR_MODE0_DEU_CBC |
2630 		                     DESC_HDR_MODE0_DEU_3DES |
2631 		                     DESC_HDR_SEL1_MDEUB |
2632 		                     DESC_HDR_MODE1_MDEU_INIT |
2633 		                     DESC_HDR_MODE1_MDEU_PAD |
2634 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2635 	},
2636 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2637 		.alg.aead = {
2638 			.base = {
2639 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2640 				.cra_driver_name = "authenc-hmac-sha512-"
2641 						   "cbc-aes-talitos",
2642 				.cra_blocksize = AES_BLOCK_SIZE,
2643 				.cra_flags = CRYPTO_ALG_ASYNC,
2644 			},
2645 			.ivsize = AES_BLOCK_SIZE,
2646 			.maxauthsize = SHA512_DIGEST_SIZE,
2647 		},
2648 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2649 			             DESC_HDR_SEL0_AESU |
2650 		                     DESC_HDR_MODE0_AESU_CBC |
2651 		                     DESC_HDR_SEL1_MDEUB |
2652 		                     DESC_HDR_MODE1_MDEU_INIT |
2653 		                     DESC_HDR_MODE1_MDEU_PAD |
2654 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2655 	},
2656 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2657 		.alg.aead = {
2658 			.base = {
2659 				.cra_name = "authenc(hmac(sha512),"
2660 					    "cbc(des3_ede))",
2661 				.cra_driver_name = "authenc-hmac-sha512-"
2662 						   "cbc-3des-talitos",
2663 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2664 				.cra_flags = CRYPTO_ALG_ASYNC,
2665 			},
2666 			.ivsize = DES3_EDE_BLOCK_SIZE,
2667 			.maxauthsize = SHA512_DIGEST_SIZE,
2668 			.setkey = aead_des3_setkey,
2669 		},
2670 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2671 			             DESC_HDR_SEL0_DEU |
2672 		                     DESC_HDR_MODE0_DEU_CBC |
2673 		                     DESC_HDR_MODE0_DEU_3DES |
2674 		                     DESC_HDR_SEL1_MDEUB |
2675 		                     DESC_HDR_MODE1_MDEU_INIT |
2676 		                     DESC_HDR_MODE1_MDEU_PAD |
2677 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2678 	},
2679 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2680 		.alg.aead = {
2681 			.base = {
2682 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2683 				.cra_driver_name = "authenc-hmac-md5-"
2684 						   "cbc-aes-talitos",
2685 				.cra_blocksize = AES_BLOCK_SIZE,
2686 				.cra_flags = CRYPTO_ALG_ASYNC,
2687 			},
2688 			.ivsize = AES_BLOCK_SIZE,
2689 			.maxauthsize = MD5_DIGEST_SIZE,
2690 		},
2691 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2692 			             DESC_HDR_SEL0_AESU |
2693 		                     DESC_HDR_MODE0_AESU_CBC |
2694 		                     DESC_HDR_SEL1_MDEUA |
2695 		                     DESC_HDR_MODE1_MDEU_INIT |
2696 		                     DESC_HDR_MODE1_MDEU_PAD |
2697 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2698 	},
2699 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2700 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2701 		.alg.aead = {
2702 			.base = {
2703 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2704 				.cra_driver_name = "authenc-hmac-md5-"
2705 						   "cbc-aes-talitos",
2706 				.cra_blocksize = AES_BLOCK_SIZE,
2707 				.cra_flags = CRYPTO_ALG_ASYNC,
2708 			},
2709 			.ivsize = AES_BLOCK_SIZE,
2710 			.maxauthsize = MD5_DIGEST_SIZE,
2711 		},
2712 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2713 				     DESC_HDR_SEL0_AESU |
2714 				     DESC_HDR_MODE0_AESU_CBC |
2715 				     DESC_HDR_SEL1_MDEUA |
2716 				     DESC_HDR_MODE1_MDEU_INIT |
2717 				     DESC_HDR_MODE1_MDEU_PAD |
2718 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2719 	},
2720 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2721 		.alg.aead = {
2722 			.base = {
2723 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2724 				.cra_driver_name = "authenc-hmac-md5-"
2725 						   "cbc-3des-talitos",
2726 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2727 				.cra_flags = CRYPTO_ALG_ASYNC,
2728 			},
2729 			.ivsize = DES3_EDE_BLOCK_SIZE,
2730 			.maxauthsize = MD5_DIGEST_SIZE,
2731 			.setkey = aead_des3_setkey,
2732 		},
2733 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2734 			             DESC_HDR_SEL0_DEU |
2735 		                     DESC_HDR_MODE0_DEU_CBC |
2736 		                     DESC_HDR_MODE0_DEU_3DES |
2737 		                     DESC_HDR_SEL1_MDEUA |
2738 		                     DESC_HDR_MODE1_MDEU_INIT |
2739 		                     DESC_HDR_MODE1_MDEU_PAD |
2740 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2741 	},
2742 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2743 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2744 		.alg.aead = {
2745 			.base = {
2746 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2747 				.cra_driver_name = "authenc-hmac-md5-"
2748 						   "cbc-3des-talitos",
2749 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2750 				.cra_flags = CRYPTO_ALG_ASYNC,
2751 			},
2752 			.ivsize = DES3_EDE_BLOCK_SIZE,
2753 			.maxauthsize = MD5_DIGEST_SIZE,
2754 			.setkey = aead_des3_setkey,
2755 		},
2756 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2757 				     DESC_HDR_SEL0_DEU |
2758 				     DESC_HDR_MODE0_DEU_CBC |
2759 				     DESC_HDR_MODE0_DEU_3DES |
2760 				     DESC_HDR_SEL1_MDEUA |
2761 				     DESC_HDR_MODE1_MDEU_INIT |
2762 				     DESC_HDR_MODE1_MDEU_PAD |
2763 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2764 	},
2765 	/* ABLKCIPHER algorithms. */
2766 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2767 		.alg.crypto = {
2768 			.cra_name = "ecb(aes)",
2769 			.cra_driver_name = "ecb-aes-talitos",
2770 			.cra_blocksize = AES_BLOCK_SIZE,
2771 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2772 				     CRYPTO_ALG_ASYNC,
2773 			.cra_ablkcipher = {
2774 				.min_keysize = AES_MIN_KEY_SIZE,
2775 				.max_keysize = AES_MAX_KEY_SIZE,
2777 			}
2778 		},
2779 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2780 				     DESC_HDR_SEL0_AESU,
2781 	},
2782 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2783 		.alg.crypto = {
2784 			.cra_name = "cbc(aes)",
2785 			.cra_driver_name = "cbc-aes-talitos",
2786 			.cra_blocksize = AES_BLOCK_SIZE,
2787 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2788                                      CRYPTO_ALG_ASYNC,
2789 			.cra_ablkcipher = {
2790 				.min_keysize = AES_MIN_KEY_SIZE,
2791 				.max_keysize = AES_MAX_KEY_SIZE,
2792 				.ivsize = AES_BLOCK_SIZE,
2793 			}
2794 		},
2795 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2796 				     DESC_HDR_SEL0_AESU |
2797 				     DESC_HDR_MODE0_AESU_CBC,
2798 	},
2799 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2800 		.alg.crypto = {
2801 			.cra_name = "ctr(aes)",
2802 			.cra_driver_name = "ctr-aes-talitos",
2803 			.cra_blocksize = 1, /* CTR is a stream mode */
2804 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2805 				     CRYPTO_ALG_ASYNC,
2806 			.cra_ablkcipher = {
2807 				.min_keysize = AES_MIN_KEY_SIZE,
2808 				.max_keysize = AES_MAX_KEY_SIZE,
2809 				.ivsize = AES_BLOCK_SIZE,
2810 			}
2811 		},
2812 		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2813 				     DESC_HDR_SEL0_AESU |
2814 				     DESC_HDR_MODE0_AESU_CTR,
2815 	},
2816 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2817 		.alg.crypto = {
2818 			.cra_name = "ecb(des)",
2819 			.cra_driver_name = "ecb-des-talitos",
2820 			.cra_blocksize = DES_BLOCK_SIZE,
2821 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2822 				     CRYPTO_ALG_ASYNC,
2823 			.cra_ablkcipher = {
2824 				.min_keysize = DES_KEY_SIZE,
2825 				.max_keysize = DES_KEY_SIZE,
2827 				.setkey = ablkcipher_des_setkey,
2828 			}
2829 		},
2830 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2831 				     DESC_HDR_SEL0_DEU,
2832 	},
2833 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2834 		.alg.crypto = {
2835 			.cra_name = "cbc(des)",
2836 			.cra_driver_name = "cbc-des-talitos",
2837 			.cra_blocksize = DES_BLOCK_SIZE,
2838 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2839 				     CRYPTO_ALG_ASYNC,
2840 			.cra_ablkcipher = {
2841 				.min_keysize = DES_KEY_SIZE,
2842 				.max_keysize = DES_KEY_SIZE,
2843 				.ivsize = DES_BLOCK_SIZE,
2844 				.setkey = ablkcipher_des_setkey,
2845 			}
2846 		},
2847 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2848 				     DESC_HDR_SEL0_DEU |
2849 				     DESC_HDR_MODE0_DEU_CBC,
2850 	},
2851 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2852 		.alg.crypto = {
2853 			.cra_name = "ecb(des3_ede)",
2854 			.cra_driver_name = "ecb-3des-talitos",
2855 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2856 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2857 				     CRYPTO_ALG_ASYNC,
2858 			.cra_ablkcipher = {
2859 				.min_keysize = DES3_EDE_KEY_SIZE,
2860 				.max_keysize = DES3_EDE_KEY_SIZE,
2862 				.setkey = ablkcipher_des3_setkey,
2863 			}
2864 		},
2865 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2866 				     DESC_HDR_SEL0_DEU |
2867 				     DESC_HDR_MODE0_DEU_3DES,
2868 	},
2869 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2870 		.alg.crypto = {
2871 			.cra_name = "cbc(des3_ede)",
2872 			.cra_driver_name = "cbc-3des-talitos",
2873 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2874 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2875                                      CRYPTO_ALG_ASYNC,
2876 			.cra_ablkcipher = {
2877 				.min_keysize = DES3_EDE_KEY_SIZE,
2878 				.max_keysize = DES3_EDE_KEY_SIZE,
2879 				.ivsize = DES3_EDE_BLOCK_SIZE,
2880 				.setkey = ablkcipher_des3_setkey,
2881 			}
2882 		},
2883 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2884 			             DESC_HDR_SEL0_DEU |
2885 		                     DESC_HDR_MODE0_DEU_CBC |
2886 		                     DESC_HDR_MODE0_DEU_3DES,
2887 	},
2888 	/* AHASH algorithms. */
2889 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2890 		.alg.hash = {
2891 			.halg.digestsize = MD5_DIGEST_SIZE,
2892 			.halg.statesize = sizeof(struct talitos_export_state),
2893 			.halg.base = {
2894 				.cra_name = "md5",
2895 				.cra_driver_name = "md5-talitos",
2896 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2897 				.cra_flags = CRYPTO_ALG_ASYNC,
2898 			}
2899 		},
2900 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2901 				     DESC_HDR_SEL0_MDEUA |
2902 				     DESC_HDR_MODE0_MDEU_MD5,
2903 	},
2904 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2905 		.alg.hash = {
2906 			.halg.digestsize = SHA1_DIGEST_SIZE,
2907 			.halg.statesize = sizeof(struct talitos_export_state),
2908 			.halg.base = {
2909 				.cra_name = "sha1",
2910 				.cra_driver_name = "sha1-talitos",
2911 				.cra_blocksize = SHA1_BLOCK_SIZE,
2912 				.cra_flags = CRYPTO_ALG_ASYNC,
2913 			}
2914 		},
2915 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2916 				     DESC_HDR_SEL0_MDEUA |
2917 				     DESC_HDR_MODE0_MDEU_SHA1,
2918 	},
2919 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2920 		.alg.hash = {
2921 			.halg.digestsize = SHA224_DIGEST_SIZE,
2922 			.halg.statesize = sizeof(struct talitos_export_state),
2923 			.halg.base = {
2924 				.cra_name = "sha224",
2925 				.cra_driver_name = "sha224-talitos",
2926 				.cra_blocksize = SHA224_BLOCK_SIZE,
2927 				.cra_flags = CRYPTO_ALG_ASYNC,
2928 			}
2929 		},
2930 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2931 				     DESC_HDR_SEL0_MDEUA |
2932 				     DESC_HDR_MODE0_MDEU_SHA224,
2933 	},
2934 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2935 		.alg.hash = {
2936 			.halg.digestsize = SHA256_DIGEST_SIZE,
2937 			.halg.statesize = sizeof(struct talitos_export_state),
2938 			.halg.base = {
2939 				.cra_name = "sha256",
2940 				.cra_driver_name = "sha256-talitos",
2941 				.cra_blocksize = SHA256_BLOCK_SIZE,
2942 				.cra_flags = CRYPTO_ALG_ASYNC,
2943 			}
2944 		},
2945 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2946 				     DESC_HDR_SEL0_MDEUA |
2947 				     DESC_HDR_MODE0_MDEU_SHA256,
2948 	},
2949 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2950 		.alg.hash = {
2951 			.halg.digestsize = SHA384_DIGEST_SIZE,
2952 			.halg.statesize = sizeof(struct talitos_export_state),
2953 			.halg.base = {
2954 				.cra_name = "sha384",
2955 				.cra_driver_name = "sha384-talitos",
2956 				.cra_blocksize = SHA384_BLOCK_SIZE,
2957 				.cra_flags = CRYPTO_ALG_ASYNC,
2958 			}
2959 		},
2960 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2961 				     DESC_HDR_SEL0_MDEUB |
2962 				     DESC_HDR_MODE0_MDEUB_SHA384,
2963 	},
2964 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2965 		.alg.hash = {
2966 			.halg.digestsize = SHA512_DIGEST_SIZE,
2967 			.halg.statesize = sizeof(struct talitos_export_state),
2968 			.halg.base = {
2969 				.cra_name = "sha512",
2970 				.cra_driver_name = "sha512-talitos",
2971 				.cra_blocksize = SHA512_BLOCK_SIZE,
2972 				.cra_flags = CRYPTO_ALG_ASYNC,
2973 			}
2974 		},
2975 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2976 				     DESC_HDR_SEL0_MDEUB |
2977 				     DESC_HDR_MODE0_MDEUB_SHA512,
2978 	},
2979 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2980 		.alg.hash = {
2981 			.halg.digestsize = MD5_DIGEST_SIZE,
2982 			.halg.statesize = sizeof(struct talitos_export_state),
2983 			.halg.base = {
2984 				.cra_name = "hmac(md5)",
2985 				.cra_driver_name = "hmac-md5-talitos",
2986 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2987 				.cra_flags = CRYPTO_ALG_ASYNC,
2988 			}
2989 		},
2990 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2991 				     DESC_HDR_SEL0_MDEUA |
2992 				     DESC_HDR_MODE0_MDEU_MD5,
2993 	},
2994 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2995 		.alg.hash = {
2996 			.halg.digestsize = SHA1_DIGEST_SIZE,
2997 			.halg.statesize = sizeof(struct talitos_export_state),
2998 			.halg.base = {
2999 				.cra_name = "hmac(sha1)",
3000 				.cra_driver_name = "hmac-sha1-talitos",
3001 				.cra_blocksize = SHA1_BLOCK_SIZE,
3002 				.cra_flags = CRYPTO_ALG_ASYNC,
3003 			}
3004 		},
3005 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3006 				     DESC_HDR_SEL0_MDEUA |
3007 				     DESC_HDR_MODE0_MDEU_SHA1,
3008 	},
3009 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3010 		.alg.hash = {
3011 			.halg.digestsize = SHA224_DIGEST_SIZE,
3012 			.halg.statesize = sizeof(struct talitos_export_state),
3013 			.halg.base = {
3014 				.cra_name = "hmac(sha224)",
3015 				.cra_driver_name = "hmac-sha224-talitos",
3016 				.cra_blocksize = SHA224_BLOCK_SIZE,
3017 				.cra_flags = CRYPTO_ALG_ASYNC,
3018 			}
3019 		},
3020 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3021 				     DESC_HDR_SEL0_MDEUA |
3022 				     DESC_HDR_MODE0_MDEU_SHA224,
3023 	},
3024 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3025 		.alg.hash = {
3026 			.halg.digestsize = SHA256_DIGEST_SIZE,
3027 			.halg.statesize = sizeof(struct talitos_export_state),
3028 			.halg.base = {
3029 				.cra_name = "hmac(sha256)",
3030 				.cra_driver_name = "hmac-sha256-talitos",
3031 				.cra_blocksize = SHA256_BLOCK_SIZE,
3032 				.cra_flags = CRYPTO_ALG_ASYNC,
3033 			}
3034 		},
3035 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3036 				     DESC_HDR_SEL0_MDEUA |
3037 				     DESC_HDR_MODE0_MDEU_SHA256,
3038 	},
3039 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3040 		.alg.hash = {
3041 			.halg.digestsize = SHA384_DIGEST_SIZE,
3042 			.halg.statesize = sizeof(struct talitos_export_state),
3043 			.halg.base = {
3044 				.cra_name = "hmac(sha384)",
3045 				.cra_driver_name = "hmac-sha384-talitos",
3046 				.cra_blocksize = SHA384_BLOCK_SIZE,
3047 				.cra_flags = CRYPTO_ALG_ASYNC,
3048 			}
3049 		},
3050 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3051 				     DESC_HDR_SEL0_MDEUB |
3052 				     DESC_HDR_MODE0_MDEUB_SHA384,
3053 	},
3054 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3055 		.alg.hash = {
3056 			.halg.digestsize = SHA512_DIGEST_SIZE,
3057 			.halg.statesize = sizeof(struct talitos_export_state),
3058 			.halg.base = {
3059 				.cra_name = "hmac(sha512)",
3060 				.cra_driver_name = "hmac-sha512-talitos",
3061 				.cra_blocksize = SHA512_BLOCK_SIZE,
3062 				.cra_flags = CRYPTO_ALG_ASYNC,
3063 			}
3064 		},
3065 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3066 				     DESC_HDR_SEL0_MDEUB |
3067 				     DESC_HDR_MODE0_MDEUB_SHA512,
3068 	}
3069 };
3070 
3071 struct talitos_crypto_alg {
3072 	struct list_head entry;
3073 	struct device *dev;
3074 	struct talitos_alg_template algt;
3075 };
3076 
3077 static int talitos_init_common(struct talitos_ctx *ctx,
3078 			       struct talitos_crypto_alg *talitos_alg)
3079 {
3080 	struct talitos_private *priv;
3081 
3082 	/* update context with ptr to dev */
3083 	ctx->dev = talitos_alg->dev;
3084 
3085 	/* assign SEC channel to tfm in round-robin fashion */
3086 	priv = dev_get_drvdata(ctx->dev);
3087 	ctx->ch = atomic_inc_return(&priv->last_chan) &
3088 		  (priv->num_channels - 1);
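	/*
	 * The AND mask acts as a modulo only because talitos_probe()
	 * rejects devices whose fsl,num-channels is not a power of two.
	 */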
3089 
3090 	/* copy descriptor header template value */
3091 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3092 
3093 	/* select done notification */
3094 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3095 
3096 	return 0;
3097 }
3098 
3099 static int talitos_cra_init(struct crypto_tfm *tfm)
3100 {
3101 	struct crypto_alg *alg = tfm->__crt_alg;
3102 	struct talitos_crypto_alg *talitos_alg;
3103 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3104 
3105 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3106 		talitos_alg = container_of(__crypto_ahash_alg(alg),
3107 					   struct talitos_crypto_alg,
3108 					   algt.alg.hash);
3109 	else
3110 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
3111 					   algt.alg.crypto);
3112 
3113 	return talitos_init_common(ctx, talitos_alg);
3114 }
3115 
3116 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3117 {
3118 	struct aead_alg *alg = crypto_aead_alg(tfm);
3119 	struct talitos_crypto_alg *talitos_alg;
3120 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3121 
3122 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3123 				   algt.alg.aead);
3124 
3125 	return talitos_init_common(ctx, talitos_alg);
3126 }
3127 
3128 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3129 {
3130 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3131 
3132 	talitos_cra_init(tfm);
3133 
3134 	ctx->keylen = 0;
3135 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3136 				 sizeof(struct talitos_ahash_req_ctx));
3137 
3138 	return 0;
3139 }
3140 
3141 static void talitos_cra_exit(struct crypto_tfm *tfm)
3142 {
3143 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3144 	struct device *dev = ctx->dev;
3145 
3146 	if (ctx->keylen)
3147 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3148 }
3149 
3150 /*
3151  * given the alg's descriptor header template, determine whether the
3152  * descriptor type and the required primary/secondary execution units
3153  * match the h/w capabilities described in the device tree node.
3154  */
3155 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3156 {
3157 	struct talitos_private *priv = dev_get_drvdata(dev);
3158 	int ret;
3159 
3160 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3161 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3162 
3163 	if (SECONDARY_EU(desc_hdr_template))
3164 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3165 		              & priv->exec_units);
3166 
3167 	return ret;
3168 }
3169 
3170 static int talitos_remove(struct platform_device *ofdev)
3171 {
3172 	struct device *dev = &ofdev->dev;
3173 	struct talitos_private *priv = dev_get_drvdata(dev);
3174 	struct talitos_crypto_alg *t_alg, *n;
3175 	int i;
3176 
3177 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3178 		switch (t_alg->algt.type) {
3179 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
3180 			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
3181 		case CRYPTO_ALG_TYPE_AEAD:
3182 			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
3183 		case CRYPTO_ALG_TYPE_AHASH:
3184 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3185 			break;
3186 		}
3187 		list_del(&t_alg->entry);
3188 	}
3189 
3190 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3191 		talitos_unregister_rng(dev);
3192 
3193 	for (i = 0; i < 2; i++)
3194 		if (priv->irq[i]) {
3195 			free_irq(priv->irq[i], dev);
3196 			irq_dispose_mapping(priv->irq[i]);
3197 		}
3198 
3199 	tasklet_kill(&priv->done_task[0]);
3200 	if (priv->irq[1])
3201 		tasklet_kill(&priv->done_task[1]);
3202 
3203 	return 0;
3204 }
3205 
3206 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3207 						    struct talitos_alg_template
3208 						           *template)
3209 {
3210 	struct talitos_private *priv = dev_get_drvdata(dev);
3211 	struct talitos_crypto_alg *t_alg;
3212 	struct crypto_alg *alg;
3213 
3214 	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3215 			     GFP_KERNEL);
3216 	if (!t_alg)
3217 		return ERR_PTR(-ENOMEM);
3218 
3219 	t_alg->algt = *template;
3220 
3221 	switch (t_alg->algt.type) {
3222 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
3223 		alg = &t_alg->algt.alg.crypto;
3224 		alg->cra_init = talitos_cra_init;
3225 		alg->cra_exit = talitos_cra_exit;
3226 		alg->cra_type = &crypto_ablkcipher_type;
3227 		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
3228 					     ablkcipher_setkey;
3229 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3230 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3231 		break;
3232 	case CRYPTO_ALG_TYPE_AEAD:
3233 		alg = &t_alg->algt.alg.aead.base;
3234 		alg->cra_exit = talitos_cra_exit;
3235 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3236 		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3237 					      aead_setkey;
3238 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3239 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3240 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3241 		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3242 			devm_kfree(dev, t_alg);
3243 			return ERR_PTR(-ENOTSUPP);
3244 		}
3245 		break;
3246 	case CRYPTO_ALG_TYPE_AHASH:
3247 		alg = &t_alg->algt.alg.hash.halg.base;
3248 		alg->cra_init = talitos_cra_init_ahash;
3249 		alg->cra_exit = talitos_cra_exit;
3250 		t_alg->algt.alg.hash.init = ahash_init;
3251 		t_alg->algt.alg.hash.update = ahash_update;
3252 		t_alg->algt.alg.hash.final = ahash_final;
3253 		t_alg->algt.alg.hash.finup = ahash_finup;
3254 		t_alg->algt.alg.hash.digest = ahash_digest;
3255 		if (!strncmp(alg->cra_name, "hmac", 4))
3256 			t_alg->algt.alg.hash.setkey = ahash_setkey;
3257 		t_alg->algt.alg.hash.import = ahash_import;
3258 		t_alg->algt.alg.hash.export = ahash_export;
3259 
3260 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3261 		    !strncmp(alg->cra_name, "hmac", 4)) {
3262 			devm_kfree(dev, t_alg);
3263 			return ERR_PTR(-ENOTSUPP);
3264 		}
3265 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3266 		    (!strcmp(alg->cra_name, "sha224") ||
3267 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3268 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3269 			t_alg->algt.desc_hdr_template =
3270 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3271 					DESC_HDR_SEL0_MDEUA |
3272 					DESC_HDR_MODE0_MDEU_SHA256;
3273 		}
3274 		break;
3275 	default:
3276 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3277 		devm_kfree(dev, t_alg);
3278 		return ERR_PTR(-EINVAL);
3279 	}
3280 
3281 	alg->cra_module = THIS_MODULE;
3282 	if (t_alg->algt.priority)
3283 		alg->cra_priority = t_alg->algt.priority;
3284 	else
3285 		alg->cra_priority = TALITOS_CRA_PRIORITY;
3286 	alg->cra_alignmask = 0;
3287 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3288 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3289 
3290 	t_alg->dev = dev;
3291 
3292 	return t_alg;
3293 }
3294 
3295 static int talitos_probe_irq(struct platform_device *ofdev)
3296 {
3297 	struct device *dev = &ofdev->dev;
3298 	struct device_node *np = ofdev->dev.of_node;
3299 	struct talitos_private *priv = dev_get_drvdata(dev);
3300 	int err;
3301 	bool is_sec1 = has_ftr_sec1(priv);
3302 
3303 	priv->irq[0] = irq_of_parse_and_map(np, 0);
3304 	if (!priv->irq[0]) {
3305 		dev_err(dev, "failed to map irq\n");
3306 		return -EINVAL;
3307 	}
3308 	if (is_sec1) {
3309 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3310 				  dev_driver_string(dev), dev);
3311 		goto primary_out;
3312 	}
3313 
3314 	priv->irq[1] = irq_of_parse_and_map(np, 1);
3315 
3316 	/* get the primary irq line */
3317 	if (!priv->irq[1]) {
3318 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3319 				  dev_driver_string(dev), dev);
3320 		goto primary_out;
3321 	}
3322 
3323 	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3324 			  dev_driver_string(dev), dev);
3325 	if (err)
3326 		goto primary_out;
3327 
3328 	/* get the secondary irq line */
3329 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3330 			  dev_driver_string(dev), dev);
3331 	if (err) {
3332 		dev_err(dev, "failed to request secondary irq\n");
3333 		irq_dispose_mapping(priv->irq[1]);
3334 		priv->irq[1] = 0;
3335 	}
3336 
3337 	return err;
3338 
3339 primary_out:
3340 	if (err) {
3341 		dev_err(dev, "failed to request primary irq\n");
3342 		irq_dispose_mapping(priv->irq[0]);
3343 		priv->irq[0] = 0;
3344 	}
3345 
3346 	return err;
3347 }
3348 
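/*
 * Main probe routine: map the register window, read the engine's
 * capabilities from the device tree, wire up interrupts and done
 * tasklets, allocate per-channel request fifos, reset the hardware,
 * and finally register the hwrng and every algorithm the available
 * execution units can back.
 */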
3349 static int talitos_probe(struct platform_device *ofdev)
3350 {
3351 	struct device *dev = &ofdev->dev;
3352 	struct device_node *np = ofdev->dev.of_node;
3353 	struct talitos_private *priv;
3354 	int i, err;
3355 	int stride;
3356 	struct resource *res;
3357 
3358 	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3359 	if (!priv)
3360 		return -ENOMEM;
3361 
3362 	INIT_LIST_HEAD(&priv->alg_list);
3363 
3364 	dev_set_drvdata(dev, priv);
3365 
3366 	priv->ofdev = ofdev;
3367 
3368 	spin_lock_init(&priv->reg_lock);
3369 
3370 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3371 	if (!res)
3372 		return -ENXIO;
3373 	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3374 	if (!priv->reg) {
3375 		dev_err(dev, "failed to ioremap\n");
3376 		err = -ENOMEM;
3377 		goto err_out;
3378 	}
3379 
3380 	/* get SEC version capabilities from device tree */
3381 	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3382 	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3383 	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3384 	of_property_read_u32(np, "fsl,descriptor-types-mask",
3385 			     &priv->desc_types);
3386 
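	/*
	 * The four properties above come from the SEC node of the SoC
	 * device tree.  An illustrative node (the values are examples
	 * only; real ones are part-specific, see the fsl-sec2 binding):
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.0";
	 *		reg = <0x30000 0x10000>;
	 *		interrupts = <11 0x8>;
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0x7e>;
	 *		fsl,descriptor-types-mask = <0x01010ebf>;
	 *	};
	 */
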
3387 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3388 	    !priv->exec_units || !priv->desc_types) {
3389 		dev_err(dev, "invalid property data in device tree node\n");
3390 		err = -EINVAL;
3391 		goto err_out;
3392 	}
3393 
3394 	if (of_device_is_compatible(np, "fsl,sec3.0"))
3395 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3396 
3397 	if (of_device_is_compatible(np, "fsl,sec2.1"))
3398 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3399 				  TALITOS_FTR_SHA224_HWINIT |
3400 				  TALITOS_FTR_HMAC_OK;
3401 
3402 	if (of_device_is_compatible(np, "fsl,sec1.0"))
3403 		priv->features |= TALITOS_FTR_SEC1;
3404 
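	/* per-version execution-unit register offsets and channel stride */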
3405 	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3406 		priv->reg_deu = priv->reg + TALITOS12_DEU;
3407 		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3408 		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3409 		stride = TALITOS1_CH_STRIDE;
3410 	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3411 		priv->reg_deu = priv->reg + TALITOS10_DEU;
3412 		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3413 		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3414 		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3415 		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3416 		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3417 		stride = TALITOS1_CH_STRIDE;
3418 	} else {
3419 		priv->reg_deu = priv->reg + TALITOS2_DEU;
3420 		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3421 		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3422 		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3423 		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3424 		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3425 		priv->reg_keu = priv->reg + TALITOS2_KEU;
3426 		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3427 		stride = TALITOS2_CH_STRIDE;
3428 	}
3429 
3430 	err = talitos_probe_irq(ofdev);
3431 	if (err)
3432 		goto err_out;
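	/* pick the done tasklet matching the irq wiring chosen above */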
3433 
3434 	if (of_device_is_compatible(np, "fsl,sec1.0")) {
3435 		if (priv->num_channels == 1)
3436 			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3437 				     (unsigned long)dev);
3438 		else
3439 			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3440 				     (unsigned long)dev);
3441 	} else {
3442 		if (priv->irq[1]) {
3443 			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3444 				     (unsigned long)dev);
3445 			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3446 				     (unsigned long)dev);
3447 		} else if (priv->num_channels == 1) {
3448 			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3449 				     (unsigned long)dev);
3450 		} else {
3451 			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3452 				     (unsigned long)dev);
3453 		}
3454 	}
3455 
3456 	priv->chan = devm_kcalloc(dev,
3457 				  priv->num_channels,
3458 				  sizeof(struct talitos_channel),
3459 				  GFP_KERNEL);
3460 	if (!priv->chan) {
3461 		dev_err(dev, "failed to allocate channel management space\n");
3462 		err = -ENOMEM;
3463 		goto err_out;
3464 	}
3465 
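	/*
	 * Round the fifo length up to a power of two so the head and
	 * tail indices can wrap with a simple mask.
	 */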
3466 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3467 
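	/* per-channel setup: register window, locks, request fifo */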
3468 	for (i = 0; i < priv->num_channels; i++) {
3469 		priv->chan[i].reg = priv->reg + stride * (i + 1);
3470 		if (!priv->irq[1] || !(i & 1))
3471 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3472 
3473 		spin_lock_init(&priv->chan[i].head_lock);
3474 		spin_lock_init(&priv->chan[i].tail_lock);
3475 
3476 		priv->chan[i].fifo = devm_kcalloc(dev,
3477 						priv->fifo_len,
3478 						sizeof(struct talitos_request),
3479 						GFP_KERNEL);
3480 		if (!priv->chan[i].fifo) {
3481 			dev_err(dev, "failed to allocate request fifo %d\n", i);
3482 			err = -ENOMEM;
3483 			goto err_out;
3484 		}
3485 
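		/*
		 * Start at -(chfifo_len - 1): the counter reaches zero with
		 * one h/w fifo slot still free, and talitos_submit backs
		 * off once an increment takes it positive.
		 */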
3486 		atomic_set(&priv->chan[i].submit_count,
3487 			   -(priv->chfifo_len - 1));
3488 	}
3489 
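	/* the SEC supports 36-bit DMA addressing */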
3490 	dma_set_mask(dev, DMA_BIT_MASK(36));
3491 
3492 	/* reset and initialize the h/w */
3493 	err = init_device(dev);
3494 	if (err) {
3495 		dev_err(dev, "failed to initialize device\n");
3496 		goto err_out;
3497 	}
3498 
3499 	/* register the RNG, if available */
3500 	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3501 		err = talitos_register_rng(dev);
3502 		if (err) {
3503 			dev_err(dev, "failed to register hwrng: %d\n", err);
3504 			goto err_out;
3505 		}
3506 		dev_info(dev, "hwrng\n");
3507 	}
3508 
3509 	/* register crypto algorithms the device supports */
3510 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3511 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3512 			struct talitos_crypto_alg *t_alg;
3513 			struct crypto_alg *alg = NULL;
3514 
3515 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3516 			if (IS_ERR(t_alg)) {
3517 				err = PTR_ERR(t_alg);
3518 				if (err == -ENOTSUPP)
3519 					continue;
3520 				goto err_out;
3521 			}
3522 
3523 			switch (t_alg->algt.type) {
3524 			case CRYPTO_ALG_TYPE_ABLKCIPHER:
3525 				err = crypto_register_alg(
3526 						&t_alg->algt.alg.crypto);
3527 				alg = &t_alg->algt.alg.crypto;
3528 				break;
3529 
3530 			case CRYPTO_ALG_TYPE_AEAD:
3531 				err = crypto_register_aead(
3532 					&t_alg->algt.alg.aead);
3533 				alg = &t_alg->algt.alg.aead.base;
3534 				break;
3535 
3536 			case CRYPTO_ALG_TYPE_AHASH:
3537 				err = crypto_register_ahash(
3538 						&t_alg->algt.alg.hash);
3539 				alg = &t_alg->algt.alg.hash.halg.base;
3540 				break;
3541 			}
3542 			if (err) {
3543 				dev_err(dev, "%s alg registration failed\n",
3544 					alg->cra_driver_name);
3545 				devm_kfree(dev, t_alg);
3546 			} else
3547 				list_add_tail(&t_alg->entry, &priv->alg_list);
3548 		}
3549 	}
3550 	if (!list_empty(&priv->alg_list))
3551 		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3552 			 (char *)of_get_property(np, "compatible", NULL));
3553 
3554 	return 0;
3555 
3556 err_out:
3557 	talitos_remove(ofdev);
3558 
3559 	return err;
3560 }
3561 
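/*
 * Compatible strings are gated by Kconfig so the driver can be built
 * with only SEC1 (CONFIG_CRYPTO_DEV_TALITOS1) or only SEC2/3
 * (CONFIG_CRYPTO_DEV_TALITOS2) support.
 */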
3562 static const struct of_device_id talitos_match[] = {
3563 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3564 	{
3565 		.compatible = "fsl,sec1.0",
3566 	},
3567 #endif
3568 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3569 	{
3570 		.compatible = "fsl,sec2.0",
3571 	},
3572 #endif
3573 	{},
3574 };
3575 MODULE_DEVICE_TABLE(of, talitos_match);
3576 
3577 static struct platform_driver talitos_driver = {
3578 	.driver = {
3579 		.name = "talitos",
3580 		.of_match_table = talitos_match,
3581 	},
3582 	.probe = talitos_probe,
3583 	.remove = talitos_remove,
3584 };
3585 
3586 module_platform_driver(talitos_driver);
3587 
3588 MODULE_LICENSE("GPL");
3589 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3590 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
3591