1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * talitos - Freescale Integrated Security Engine (SEC) device driver
4  *
5  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6  *
7  * Scatterlist Crypto API glue code copied from files with the following:
8  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9  *
10  * Crypto algorithm registration code copied from hifn driver:
11  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12  * All rights reserved.
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/crypto.h>
21 #include <linux/hw_random.h>
22 #include <linux/of_address.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_platform.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/io.h>
27 #include <linux/spinlock.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/slab.h>
30 
31 #include <crypto/algapi.h>
32 #include <crypto/aes.h>
33 #include <crypto/des.h>
34 #include <crypto/sha.h>
35 #include <crypto/md5.h>
36 #include <crypto/internal/aead.h>
37 #include <crypto/authenc.h>
38 #include <crypto/skcipher.h>
39 #include <crypto/hash.h>
40 #include <crypto/internal/hash.h>
41 #include <crypto/scatterwalk.h>
42 
43 #include "talitos.h"
44 
45 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
46 			   unsigned int len, bool is_sec1)
47 {
48 	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
49 	if (is_sec1) {
50 		ptr->len1 = cpu_to_be16(len);
51 	} else {
52 		ptr->len = cpu_to_be16(len);
53 		ptr->eptr = upper_32_bits(dma_addr);
54 	}
55 }
56 
57 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
58 			     struct talitos_ptr *src_ptr, bool is_sec1)
59 {
60 	dst_ptr->ptr = src_ptr->ptr;
61 	if (is_sec1) {
62 		dst_ptr->len1 = src_ptr->len1;
63 	} else {
64 		dst_ptr->len = src_ptr->len;
65 		dst_ptr->eptr = src_ptr->eptr;
66 	}
67 }
68 
69 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
70 					   bool is_sec1)
71 {
72 	if (is_sec1)
73 		return be16_to_cpu(ptr->len1);
74 	else
75 		return be16_to_cpu(ptr->len);
76 }
77 
78 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
79 				   bool is_sec1)
80 {
81 	if (!is_sec1)
82 		ptr->j_extent = val;
83 }
84 
85 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
86 {
87 	if (!is_sec1)
88 		ptr->j_extent |= val;
89 }
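
/*
 * Example (illustrative, values hypothetical): on SEC2+ a 36-bit bus
 * address such as 0x2_3456_7890 is split across the descriptor pointer:
 *
 *	to_talitos_ptr(ptr, 0x234567890ULL, 64, false);
 *	// ptr->ptr  == cpu_to_be32(0x34567890)	(low 32 bits)
 *	// ptr->eptr == 0x2			(bits 35:32)
 *	// ptr->len  == cpu_to_be16(64)
 *
 * SEC1 is a 32-bit part: only ptr and the short len1 field are written.
 */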
90 
91 /*
92  * map a single virtually-contiguous buffer to an h/w descriptor pointer
93  */
94 static void __map_single_talitos_ptr(struct device *dev,
95 				     struct talitos_ptr *ptr,
96 				     unsigned int len, void *data,
97 				     enum dma_data_direction dir,
98 				     unsigned long attrs)
99 {
100 	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
101 	struct talitos_private *priv = dev_get_drvdata(dev);
102 	bool is_sec1 = has_ftr_sec1(priv);
103 
104 	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
105 }
106 
107 static void map_single_talitos_ptr(struct device *dev,
108 				   struct talitos_ptr *ptr,
109 				   unsigned int len, void *data,
110 				   enum dma_data_direction dir)
111 {
112 	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
113 }
114 
115 static void map_single_talitos_ptr_nosync(struct device *dev,
116 					  struct talitos_ptr *ptr,
117 					  unsigned int len, void *data,
118 					  enum dma_data_direction dir)
119 {
120 	__map_single_talitos_ptr(dev, ptr, len, data, dir,
121 				 DMA_ATTR_SKIP_CPU_SYNC);
122 }
123 
124 /*
125  * unmap the single (contiguous) buffer behind an h/w descriptor pointer
126  */
127 static void unmap_single_talitos_ptr(struct device *dev,
128 				     struct talitos_ptr *ptr,
129 				     enum dma_data_direction dir)
130 {
131 	struct talitos_private *priv = dev_get_drvdata(dev);
132 	bool is_sec1 = has_ftr_sec1(priv);
133 
134 	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135 			 from_talitos_ptr_len(ptr, is_sec1), dir);
136 }
137 
138 static int reset_channel(struct device *dev, int ch)
139 {
140 	struct talitos_private *priv = dev_get_drvdata(dev);
141 	unsigned int timeout = TALITOS_TIMEOUT;
142 	bool is_sec1 = has_ftr_sec1(priv);
143 
144 	if (is_sec1) {
145 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146 			  TALITOS1_CCCR_LO_RESET);
147 
148 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149 			TALITOS1_CCCR_LO_RESET) && --timeout)
150 			cpu_relax();
151 	} else {
152 		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153 			  TALITOS2_CCCR_RESET);
154 
155 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156 			TALITOS2_CCCR_RESET) && --timeout)
157 			cpu_relax();
158 	}
159 
160 	if (timeout == 0) {
161 		dev_err(dev, "failed to reset channel %d\n", ch);
162 		return -EIO;
163 	}
164 
165 	/* set 36-bit addressing, done writeback enable and done IRQ enable */
166 	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
167 		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
168 	/* enable chaining descriptors */
169 	if (is_sec1)
170 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
171 			  TALITOS_CCCR_LO_NE);
172 
173 	/* and ICCR writeback, if available */
174 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
175 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
176 		          TALITOS_CCCR_LO_IWSE);
177 
178 	return 0;
179 }
180 
181 static int reset_device(struct device *dev)
182 {
183 	struct talitos_private *priv = dev_get_drvdata(dev);
184 	unsigned int timeout = TALITOS_TIMEOUT;
185 	bool is_sec1 = has_ftr_sec1(priv);
186 	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
187 
188 	setbits32(priv->reg + TALITOS_MCR, mcr);
189 
190 	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
191 	       && --timeout)
192 		cpu_relax();
193 
194 	if (priv->irq[1]) {
195 		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
196 		setbits32(priv->reg + TALITOS_MCR, mcr);
197 	}
198 
199 	if (timeout == 0) {
200 		dev_err(dev, "failed to reset device\n");
201 		return -EIO;
202 	}
203 
204 	return 0;
205 }
206 
207 /*
208  * Reset and initialize the device
209  */
210 static int init_device(struct device *dev)
211 {
212 	struct talitos_private *priv = dev_get_drvdata(dev);
213 	int ch, err;
214 	bool is_sec1 = has_ftr_sec1(priv);
215 
216 	/*
217 	 * Master reset
218 	 * errata documentation: warning: certain SEC interrupts
219 	 * are not fully cleared by writing the MCR:SWR bit,
220 	 * set bit twice to completely reset
221 	 */
222 	err = reset_device(dev);
223 	if (err)
224 		return err;
225 
226 	err = reset_device(dev);
227 	if (err)
228 		return err;
229 
230 	/* reset channels */
231 	for (ch = 0; ch < priv->num_channels; ch++) {
232 		err = reset_channel(dev, ch);
233 		if (err)
234 			return err;
235 	}
236 
237 	/* enable channel done and error interrupts */
238 	if (is_sec1) {
239 		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
240 		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
241 		/* disable parity error check in DEU (seemingly erroneous test vectors trip it) */
242 		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
243 	} else {
244 		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
245 		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
246 	}
247 
248 	/* disable integrity check error interrupts (use writeback instead) */
249 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
250 		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
251 		          TALITOS_MDEUICR_LO_ICE);
252 
253 	return 0;
254 }
255 
256 /**
257  * talitos_submit - submits a descriptor to the device for processing
258  * @dev:	the SEC device to be used
259  * @ch:		the SEC device channel to be used
260  * @desc:	the descriptor to be processed by the device
261  * @callback:	whom to call when processing is complete
262  * @context:	a handle for use by caller (optional)
263  *
264  * desc must contain valid dma-mapped (bus physical) address pointers.
265  * callback must check err and the feedback field in the descriptor
266  * header for the device processing status.
267  */
268 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
269 			  void (*callback)(struct device *dev,
270 					   struct talitos_desc *desc,
271 					   void *context, int error),
272 			  void *context)
273 {
274 	struct talitos_private *priv = dev_get_drvdata(dev);
275 	struct talitos_request *request;
276 	unsigned long flags;
277 	int head;
278 	bool is_sec1 = has_ftr_sec1(priv);
279 
280 	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
281 
282 	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
283 		/* h/w fifo is full */
284 		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
285 		return -EAGAIN;
286 	}
287 
288 	head = priv->chan[ch].head;
289 	request = &priv->chan[ch].fifo[head];
290 
291 	/* map descriptor and save caller data */
292 	if (is_sec1) {
293 		desc->hdr1 = desc->hdr;
294 		request->dma_desc = dma_map_single(dev, &desc->hdr1,
295 						   TALITOS_DESC_SIZE,
296 						   DMA_BIDIRECTIONAL);
297 	} else {
298 		request->dma_desc = dma_map_single(dev, desc,
299 						   TALITOS_DESC_SIZE,
300 						   DMA_BIDIRECTIONAL);
301 	}
302 	request->callback = callback;
303 	request->context = context;
304 
305 	/* increment fifo head */
306 	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
307 
308 	smp_wmb();
309 	request->desc = desc;
310 
311 	/* GO! */
312 	wmb();
313 	out_be32(priv->chan[ch].reg + TALITOS_FF,
314 		 upper_32_bits(request->dma_desc));
315 	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
316 		 lower_32_bits(request->dma_desc));
317 
318 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
319 
320 	return -EINPROGRESS;
321 }
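
/*
 * Illustrative caller sketch (not part of this driver; my_done() and
 * handle_submit_failure() are hypothetical):
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		// error is 0 on success; the descriptor header carries
 *		// the device's done/feedback bits
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, my_req);
 *	if (err != -EINPROGRESS)
 *		handle_submit_failure(err); // -EAGAIN: channel fifo full
 */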
322 
323 static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
324 {
325 	struct talitos_edesc *edesc;
326 
327 	if (!is_sec1)
328 		return request->desc->hdr;
329 
330 	if (!request->desc->next_desc)
331 		return request->desc->hdr1;
332 
333 	edesc = container_of(request->desc, struct talitos_edesc, desc);
334 
335 	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
336 }
337 
338 /*
339  * process completed requests; report any error status via the callback
340  */
341 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
342 {
343 	struct talitos_private *priv = dev_get_drvdata(dev);
344 	struct talitos_request *request, saved_req;
345 	unsigned long flags;
346 	int tail, status;
347 	bool is_sec1 = has_ftr_sec1(priv);
348 
349 	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
350 
351 	tail = priv->chan[ch].tail;
352 	while (priv->chan[ch].fifo[tail].desc) {
353 		__be32 hdr;
354 
355 		request = &priv->chan[ch].fifo[tail];
356 
357 		/* descriptors with their done bits set don't get the error */
358 		rmb();
359 		hdr = get_request_hdr(request, is_sec1);
360 
361 		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
362 			status = 0;
363 		else
364 			if (!error)
365 				break;
366 			else
367 				status = error;
368 
369 		dma_unmap_single(dev, request->dma_desc,
370 				 TALITOS_DESC_SIZE,
371 				 DMA_BIDIRECTIONAL);
372 
373 		/* copy entries so we can call callback outside lock */
374 		saved_req.desc = request->desc;
375 		saved_req.callback = request->callback;
376 		saved_req.context = request->context;
377 
378 		/* release request entry in fifo */
379 		smp_wmb();
380 		request->desc = NULL;
381 
382 		/* increment fifo tail */
383 		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
384 
385 		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
386 
387 		atomic_dec(&priv->chan[ch].submit_count);
388 
389 		saved_req.callback(dev, saved_req.desc, saved_req.context,
390 				   status);
391 		/* channel may resume processing in single desc error case */
392 		if (error && !reset_ch && status == error)
393 			return;
394 		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
395 		tail = priv->chan[ch].tail;
396 	}
397 
398 	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
399 }
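
/*
 * The per-channel fifo is a single-producer (talitos_submit(), under
 * head_lock) / single-consumer (flush_channel(), under tail_lock) ring;
 * the index arithmetic assumes fifo_len is a power of two. A sketch
 * with fifo_len = 16:
 *
 *	head = 15;
 *	head = (head + 1) & (16 - 1);	// head == 0, wrapped around
 *
 * request->desc doubles as the occupancy flag: the producer publishes
 * it after smp_wmb(), and the consumer NULLs it before advancing tail.
 */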
400 
401 /*
402  * process completed requests for channels that have done status
403  */
404 #define DEF_TALITOS1_DONE(name, ch_done_mask)				\
405 static void talitos1_done_##name(unsigned long data)			\
406 {									\
407 	struct device *dev = (struct device *)data;			\
408 	struct talitos_private *priv = dev_get_drvdata(dev);		\
409 	unsigned long flags;						\
410 									\
411 	if (ch_done_mask & 0x10000000)					\
412 		flush_channel(dev, 0, 0, 0);				\
413 	if (ch_done_mask & 0x40000000)					\
414 		flush_channel(dev, 1, 0, 0);				\
415 	if (ch_done_mask & 0x00010000)					\
416 		flush_channel(dev, 2, 0, 0);				\
417 	if (ch_done_mask & 0x00040000)					\
418 		flush_channel(dev, 3, 0, 0);				\
419 									\
420 	/* At this point, all completed channels have been processed */	\
421 	/* Unmask done interrupts for channels completed later on. */	\
422 	spin_lock_irqsave(&priv->reg_lock, flags);			\
423 	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
424 	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
425 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
426 }
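
/*
 * Note the inverted mask polarity relative to SEC2: on SEC1 a set IMR
 * bit masks the interrupt (init_device() enables with clrbits32()), so
 * done interrupts are unmasked here with clrbits32(), whereas the SEC2
 * variant below unmasks with setbits32().
 */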
427 
428 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
429 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
430 
431 #define DEF_TALITOS2_DONE(name, ch_done_mask)				\
432 static void talitos2_done_##name(unsigned long data)			\
433 {									\
434 	struct device *dev = (struct device *)data;			\
435 	struct talitos_private *priv = dev_get_drvdata(dev);		\
436 	unsigned long flags;						\
437 									\
438 	if (ch_done_mask & 1)						\
439 		flush_channel(dev, 0, 0, 0);				\
440 	if (ch_done_mask & (1 << 2))					\
441 		flush_channel(dev, 1, 0, 0);				\
442 	if (ch_done_mask & (1 << 4))					\
443 		flush_channel(dev, 2, 0, 0);				\
444 	if (ch_done_mask & (1 << 6))					\
445 		flush_channel(dev, 3, 0, 0);				\
446 									\
447 	/* At this point, all completed channels have been processed */	\
448 	/* Unmask done interrupts for channels completed later on. */	\
449 	spin_lock_irqsave(&priv->reg_lock, flags);			\
450 	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
451 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
452 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
453 }
454 
455 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
456 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
457 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
458 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
459 
460 /*
461  * locate current (offending) descriptor
462  */
463 static u32 current_desc_hdr(struct device *dev, int ch)
464 {
465 	struct talitos_private *priv = dev_get_drvdata(dev);
466 	int tail, iter;
467 	dma_addr_t cur_desc;
468 
469 	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
470 	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
471 
472 	if (!cur_desc) {
473 		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
474 		return 0;
475 	}
476 
477 	tail = priv->chan[ch].tail;
478 
479 	iter = tail;
480 	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
481 	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
482 		iter = (iter + 1) & (priv->fifo_len - 1);
483 		if (iter == tail) {
484 			dev_err(dev, "couldn't locate current descriptor\n");
485 			return 0;
486 		}
487 	}
488 
489 	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
490 		struct talitos_edesc *edesc;
491 
492 		edesc = container_of(priv->chan[ch].fifo[iter].desc,
493 				     struct talitos_edesc, desc);
494 		return ((struct talitos_desc *)
495 			(edesc->buf + edesc->dma_len))->hdr;
496 	}
497 
498 	return priv->chan[ch].fifo[iter].desc->hdr;
499 }
500 
501 /*
502  * user diagnostics; report root cause of error based on execution unit status
503  */
504 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
505 {
506 	struct talitos_private *priv = dev_get_drvdata(dev);
507 	int i;
508 
509 	if (!desc_hdr)
510 		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
511 
512 	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
513 	case DESC_HDR_SEL0_AFEU:
514 		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
515 			in_be32(priv->reg_afeu + TALITOS_EUISR),
516 			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
517 		break;
518 	case DESC_HDR_SEL0_DEU:
519 		dev_err(dev, "DEUISR 0x%08x_%08x\n",
520 			in_be32(priv->reg_deu + TALITOS_EUISR),
521 			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
522 		break;
523 	case DESC_HDR_SEL0_MDEUA:
524 	case DESC_HDR_SEL0_MDEUB:
525 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
526 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
527 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
528 		break;
529 	case DESC_HDR_SEL0_RNG:
530 		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
531 			in_be32(priv->reg_rngu + TALITOS_ISR),
532 			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
533 		break;
534 	case DESC_HDR_SEL0_PKEU:
535 		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
536 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
537 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
538 		break;
539 	case DESC_HDR_SEL0_AESU:
540 		dev_err(dev, "AESUISR 0x%08x_%08x\n",
541 			in_be32(priv->reg_aesu + TALITOS_EUISR),
542 			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
543 		break;
544 	case DESC_HDR_SEL0_CRCU:
545 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
546 			in_be32(priv->reg_crcu + TALITOS_EUISR),
547 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
548 		break;
549 	case DESC_HDR_SEL0_KEU:
550 		dev_err(dev, "KEUISR 0x%08x_%08x\n",
551 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
552 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
553 		break;
554 	}
555 
556 	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
557 	case DESC_HDR_SEL1_MDEUA:
558 	case DESC_HDR_SEL1_MDEUB:
559 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
560 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
561 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
562 		break;
563 	case DESC_HDR_SEL1_CRCU:
564 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
565 			in_be32(priv->reg_crcu + TALITOS_EUISR),
566 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
567 		break;
568 	}
569 
570 	for (i = 0; i < 8; i++)
571 		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
572 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
573 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
574 }
575 
576 /*
577  * recover from error interrupts
578  */
579 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
580 {
581 	struct talitos_private *priv = dev_get_drvdata(dev);
582 	unsigned int timeout = TALITOS_TIMEOUT;
583 	int ch, error, reset_dev = 0;
584 	u32 v_lo;
585 	bool is_sec1 = has_ftr_sec1(priv);
586 	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
587 
588 	for (ch = 0; ch < priv->num_channels; ch++) {
589 		/* skip channels without errors */
590 		if (is_sec1) {
591 			/* bits 29, 31, 17, 19 */
592 			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
593 				continue;
594 		} else {
595 			if (!(isr & (1 << (ch * 2 + 1))))
596 				continue;
597 		}
598 
599 		error = -EINVAL;
600 
601 		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
602 
603 		if (v_lo & TALITOS_CCPSR_LO_DOF) {
604 			dev_err(dev, "double fetch fifo overflow error\n");
605 			error = -EAGAIN;
606 			reset_ch = 1;
607 		}
608 		if (v_lo & TALITOS_CCPSR_LO_SOF) {
609 			/* h/w dropped descriptor */
610 			dev_err(dev, "single fetch fifo overflow error\n");
611 			error = -EAGAIN;
612 		}
613 		if (v_lo & TALITOS_CCPSR_LO_MDTE)
614 			dev_err(dev, "master data transfer error\n");
615 		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
616 			dev_err(dev, is_sec1 ? "pointer not complete error\n"
617 					     : "s/g data length zero error\n");
618 		if (v_lo & TALITOS_CCPSR_LO_FPZ)
619 			dev_err(dev, is_sec1 ? "parity error\n"
620 					     : "fetch pointer zero error\n");
621 		if (v_lo & TALITOS_CCPSR_LO_IDH)
622 			dev_err(dev, "illegal descriptor header error\n");
623 		if (v_lo & TALITOS_CCPSR_LO_IEU)
624 			dev_err(dev, is_sec1 ? "static assignment error\n"
625 					     : "invalid exec unit error\n");
626 		if (v_lo & TALITOS_CCPSR_LO_EU)
627 			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
628 		if (!is_sec1) {
629 			if (v_lo & TALITOS_CCPSR_LO_GB)
630 				dev_err(dev, "gather boundary error\n");
631 			if (v_lo & TALITOS_CCPSR_LO_GRL)
632 				dev_err(dev, "gather return/length error\n");
633 			if (v_lo & TALITOS_CCPSR_LO_SB)
634 				dev_err(dev, "scatter boundary error\n");
635 			if (v_lo & TALITOS_CCPSR_LO_SRL)
636 				dev_err(dev, "scatter return/length error\n");
637 		}
638 
639 		flush_channel(dev, ch, error, reset_ch);
640 
641 		if (reset_ch) {
642 			reset_channel(dev, ch);
643 		} else {
644 			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
645 				  TALITOS2_CCCR_CONT);
646 			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
647 			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
648 			       TALITOS2_CCCR_CONT) && --timeout)
649 				cpu_relax();
650 			if (timeout == 0) {
651 				dev_err(dev, "failed to restart channel %d\n",
652 					ch);
653 				reset_dev = 1;
654 			}
655 		}
656 	}
657 	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
658 	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
659 		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
660 			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
661 				isr, isr_lo);
662 		else
663 			dev_err(dev, "done overflow, internal timeout, or "
664 				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
665 
666 		/* purge request queues */
667 		for (ch = 0; ch < priv->num_channels; ch++)
668 			flush_channel(dev, ch, -EIO, 1);
669 
670 		/* reset and reinitialize the device */
671 		init_device(dev);
672 	}
673 }
674 
675 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
676 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
677 {									       \
678 	struct device *dev = data;					       \
679 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
680 	u32 isr, isr_lo;						       \
681 	unsigned long flags;						       \
682 									       \
683 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
684 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
685 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
686 	/* Acknowledge interrupt */					       \
687 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
688 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
689 									       \
690 	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
691 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
692 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
693 	}								       \
694 	else {								       \
695 		if (likely(isr & ch_done_mask)) {			       \
696 			/* mask further done interrupts. */		       \
697 			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
698 			/* done_task will unmask done interrupts at exit */    \
699 			tasklet_schedule(&priv->done_task[tlet]);	       \
700 		}							       \
701 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
702 	}								       \
703 									       \
704 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
705 								IRQ_NONE;      \
706 }
707 
708 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
709 
710 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
711 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
712 {									       \
713 	struct device *dev = data;					       \
714 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
715 	u32 isr, isr_lo;						       \
716 	unsigned long flags;						       \
717 									       \
718 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
719 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
720 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
721 	/* Acknowledge interrupt */					       \
722 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
723 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
724 									       \
725 	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
726 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
727 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
728 	}								       \
729 	else {								       \
730 		if (likely(isr & ch_done_mask)) {			       \
731 			/* mask further done interrupts. */		       \
732 			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
733 			/* done_task will unmask done interrupts at exit */    \
734 			tasklet_schedule(&priv->done_task[tlet]);	       \
735 		}							       \
736 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
737 	}								       \
738 									       \
739 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
740 								IRQ_NONE;      \
741 }
742 
743 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
744 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
745 		       0)
746 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
747 		       1)
748 
749 /*
750  * hwrng
751  */
752 static int talitos_rng_data_present(struct hwrng *rng, int wait)
753 {
754 	struct device *dev = (struct device *)rng->priv;
755 	struct talitos_private *priv = dev_get_drvdata(dev);
756 	u32 ofl;
757 	int i;
758 
759 	for (i = 0; i < 20; i++) {
760 		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
761 		      TALITOS_RNGUSR_LO_OFL;
762 		if (ofl || !wait)
763 			break;
764 		udelay(10);
765 	}
766 
767 	return !!ofl;
768 }
769 
770 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
771 {
772 	struct device *dev = (struct device *)rng->priv;
773 	struct talitos_private *priv = dev_get_drvdata(dev);
774 
775 	/* rng fifo requires 64-bit accesses */
776 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
777 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
778 
779 	return sizeof(u32);
780 }
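
/*
 * The two reads above pop one 64-bit word from the RNGU output fifo,
 * but only the low half survives in *data, so each call yields 32 of
 * the 64 bits fetched. Sketch (fifo word hypothetical):
 *
 *	u32 v;
 *	talitos_rng_data_read(rng, &v);	// fifo word 0x0123456789abcdef
 *					// => v == 0x89abcdef
 */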
781 
782 static int talitos_rng_init(struct hwrng *rng)
783 {
784 	struct device *dev = (struct device *)rng->priv;
785 	struct talitos_private *priv = dev_get_drvdata(dev);
786 	unsigned int timeout = TALITOS_TIMEOUT;
787 
788 	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
789 	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
790 		 & TALITOS_RNGUSR_LO_RD)
791 	       && --timeout)
792 		cpu_relax();
793 	if (timeout == 0) {
794 		dev_err(dev, "failed to reset rng hw\n");
795 		return -ENODEV;
796 	}
797 
798 	/* start generating */
799 	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
800 
801 	return 0;
802 }
803 
804 static int talitos_register_rng(struct device *dev)
805 {
806 	struct talitos_private *priv = dev_get_drvdata(dev);
807 	int err;
808 
809 	priv->rng.name		= dev_driver_string(dev);
810 	priv->rng.init		= talitos_rng_init;
811 	priv->rng.data_present	= talitos_rng_data_present;
812 	priv->rng.data_read	= talitos_rng_data_read;
813 	priv->rng.priv		= (unsigned long)dev;
814 
815 	err = hwrng_register(&priv->rng);
816 	if (!err)
817 		priv->rng_registered = true;
818 
819 	return err;
820 }
821 
822 static void talitos_unregister_rng(struct device *dev)
823 {
824 	struct talitos_private *priv = dev_get_drvdata(dev);
825 
826 	if (!priv->rng_registered)
827 		return;
828 
829 	hwrng_unregister(&priv->rng);
830 	priv->rng_registered = false;
831 }
832 
833 /*
834  * crypto alg
835  */
836 #define TALITOS_CRA_PRIORITY		3000
837 /*
838  * Defines a priority for doing AEAD with descriptors type
839  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
840  */
841 #define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
842 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
843 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
844 #else
845 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
846 #endif
847 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
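
/*
 * A worked instance of the bound above: with SEC2+ support compiled in,
 * TALITOS_MAX_KEY_SIZE = AES_MAX_KEY_SIZE (32) + SHA512_BLOCK_SIZE (128)
 * = 160 bytes, sized for the concatenated authkey||enckey that
 * aead_setkey() lays out below; a SEC1-only build caps at 32 + 64 = 96.
 */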
848 
849 struct talitos_ctx {
850 	struct device *dev;
851 	int ch;
852 	__be32 desc_hdr_template;
853 	u8 key[TALITOS_MAX_KEY_SIZE];
854 	u8 iv[TALITOS_MAX_IV_LENGTH];
855 	dma_addr_t dma_key;
856 	unsigned int keylen;
857 	unsigned int enckeylen;
858 	unsigned int authkeylen;
859 };
860 
861 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
862 #define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
863 
864 struct talitos_ahash_req_ctx {
865 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
866 	unsigned int hw_context_size;
867 	u8 buf[2][HASH_MAX_BLOCK_SIZE];
868 	int buf_idx;
869 	unsigned int swinit;
870 	unsigned int first;
871 	unsigned int last;
872 	unsigned int to_hash_later;
873 	unsigned int nbuf;
874 	struct scatterlist bufsl[2];
875 	struct scatterlist *psrc;
876 };
877 
878 struct talitos_export_state {
879 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
880 	u8 buf[HASH_MAX_BLOCK_SIZE];
881 	unsigned int swinit;
882 	unsigned int first;
883 	unsigned int last;
884 	unsigned int to_hash_later;
885 	unsigned int nbuf;
886 };
887 
888 static int aead_setkey(struct crypto_aead *authenc,
889 		       const u8 *key, unsigned int keylen)
890 {
891 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
892 	struct device *dev = ctx->dev;
893 	struct crypto_authenc_keys keys;
894 
895 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
896 		goto badkey;
897 
898 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
899 		goto badkey;
900 
901 	if (ctx->keylen)
902 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
903 
904 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
905 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
906 
907 	ctx->keylen = keys.authkeylen + keys.enckeylen;
908 	ctx->enckeylen = keys.enckeylen;
909 	ctx->authkeylen = keys.authkeylen;
910 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
911 				      DMA_TO_DEVICE);
912 
913 	memzero_explicit(&keys, sizeof(keys));
914 	return 0;
915 
916 badkey:
917 	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
918 	memzero_explicit(&keys, sizeof(keys));
919 	return -EINVAL;
920 }
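
/*
 * crypto_authenc_extractkeys() above parses the netlink-style blob that
 * callers pass to crypto_aead_setkey() for authenc algorithms: an
 * rtattr of type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian
 * enckeylen, followed by the auth key bytes, then the cipher key bytes.
 * A hedged sketch of building such a blob (buffer and length names
 * hypothetical):
 *
 *	struct rtattr *rta = (struct rtattr *)blob;
 *	struct crypto_authenc_key_param *param;
 *
 *	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
 *	rta->rta_len = RTA_LENGTH(sizeof(*param));
 *	param = RTA_DATA(rta);
 *	param->enckeylen = cpu_to_be32(enckeylen);
 *	memcpy(blob + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
 *	memcpy(blob + RTA_SPACE(sizeof(*param)) + authkeylen,
 *	       enckey, enckeylen);
 */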
921 
922 static int aead_des3_setkey(struct crypto_aead *authenc,
923 			    const u8 *key, unsigned int keylen)
924 {
925 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
926 	struct device *dev = ctx->dev;
927 	struct crypto_authenc_keys keys;
928 	u32 flags;
929 	int err;
930 
931 	err = crypto_authenc_extractkeys(&keys, key, keylen);
932 	if (unlikely(err))
933 		goto badkey;
934 
935 	err = -EINVAL;
936 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
937 		goto badkey;
938 
939 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
940 		goto badkey;
941 
942 	flags = crypto_aead_get_flags(authenc);
943 	err = __des3_verify_key(&flags, keys.enckey);
944 	if (unlikely(err)) {
945 		crypto_aead_set_flags(authenc, flags);
946 		goto out;
947 	}
948 
949 	if (ctx->keylen)
950 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
951 
952 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
953 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
954 
955 	ctx->keylen = keys.authkeylen + keys.enckeylen;
956 	ctx->enckeylen = keys.enckeylen;
957 	ctx->authkeylen = keys.authkeylen;
958 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
959 				      DMA_TO_DEVICE);
960 
961 out:
962 	memzero_explicit(&keys, sizeof(keys));
963 	return err;
964 
965 badkey:
966 	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
967 	goto out;
968 }
969 
970 static void talitos_sg_unmap(struct device *dev,
971 			     struct talitos_edesc *edesc,
972 			     struct scatterlist *src,
973 			     struct scatterlist *dst,
974 			     unsigned int len, unsigned int offset)
975 {
976 	struct talitos_private *priv = dev_get_drvdata(dev);
977 	bool is_sec1 = has_ftr_sec1(priv);
978 	unsigned int src_nents = edesc->src_nents ? : 1;
979 	unsigned int dst_nents = edesc->dst_nents ? : 1;
980 
981 	if (is_sec1 && dst && dst_nents > 1) {
982 		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
983 					   len, DMA_FROM_DEVICE);
984 		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
985 				     offset);
986 	}
987 	if (src != dst) {
988 		if (src_nents == 1 || !is_sec1)
989 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
990 
991 		if (dst && (dst_nents == 1 || !is_sec1))
992 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
993 	} else if (src_nents == 1 || !is_sec1) {
994 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
995 	}
996 }
997 
998 static void ipsec_esp_unmap(struct device *dev,
999 			    struct talitos_edesc *edesc,
1000 			    struct aead_request *areq, bool encrypt)
1001 {
1002 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1003 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1004 	unsigned int ivsize = crypto_aead_ivsize(aead);
1005 	unsigned int authsize = crypto_aead_authsize(aead);
1006 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1007 	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
1008 	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
1009 
1010 	if (is_ipsec_esp)
1011 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1012 					 DMA_FROM_DEVICE);
1013 	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1014 
1015 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1016 			 cryptlen + authsize, areq->assoclen);
1017 
1018 	if (edesc->dma_len)
1019 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1020 				 DMA_BIDIRECTIONAL);
1021 
1022 	if (!is_ipsec_esp) {
1023 		unsigned int dst_nents = edesc->dst_nents ? : 1;
1024 
1025 		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1026 				   areq->assoclen + cryptlen - ivsize);
1027 	}
1028 }
1029 
1030 /*
1031  * ipsec_esp descriptor callbacks
1032  */
1033 static void ipsec_esp_encrypt_done(struct device *dev,
1034 				   struct talitos_desc *desc, void *context,
1035 				   int err)
1036 {
1037 	struct aead_request *areq = context;
1038 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1039 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1040 	struct talitos_edesc *edesc;
1041 
1042 	edesc = container_of(desc, struct talitos_edesc, desc);
1043 
1044 	ipsec_esp_unmap(dev, edesc, areq, true);
1045 
1046 	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1047 
1048 	kfree(edesc);
1049 
1050 	aead_request_complete(areq, err);
1051 }
1052 
1053 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1054 					  struct talitos_desc *desc,
1055 					  void *context, int err)
1056 {
1057 	struct aead_request *req = context;
1058 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1059 	unsigned int authsize = crypto_aead_authsize(authenc);
1060 	struct talitos_edesc *edesc;
1061 	char *oicv, *icv;
1062 
1063 	edesc = container_of(desc, struct talitos_edesc, desc);
1064 
1065 	ipsec_esp_unmap(dev, edesc, req, false);
1066 
1067 	if (!err) {
1068 		/* auth check */
1069 		oicv = edesc->buf + edesc->dma_len;
1070 		icv = oicv - authsize;
1071 
1072 		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1073 	}
1074 
1075 	kfree(edesc);
1076 
1077 	aead_request_complete(req, err);
1078 }
1079 
1080 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1081 					  struct talitos_desc *desc,
1082 					  void *context, int err)
1083 {
1084 	struct aead_request *req = context;
1085 	struct talitos_edesc *edesc;
1086 
1087 	edesc = container_of(desc, struct talitos_edesc, desc);
1088 
1089 	ipsec_esp_unmap(dev, edesc, req, false);
1090 
1091 	/* check ICV auth status */
1092 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1093 		     DESC_HDR_LO_ICCR1_PASS))
1094 		err = -EBADMSG;
1095 
1096 	kfree(edesc);
1097 
1098 	aead_request_complete(req, err);
1099 }
1100 
1101 /*
1102  * convert scatterlist to SEC h/w link table format
1103  * stop at cryptlen bytes
1104  */
1105 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1106 				 unsigned int offset, int datalen, int elen,
1107 				 struct talitos_ptr *link_tbl_ptr)
1108 {
1109 	int n_sg = elen ? sg_count + 1 : sg_count;
1110 	int count = 0;
1111 	int cryptlen = datalen + elen;
1112 
1113 	while (cryptlen && sg && n_sg--) {
1114 		unsigned int len = sg_dma_len(sg);
1115 
1116 		if (offset >= len) {
1117 			offset -= len;
1118 			goto next;
1119 		}
1120 
1121 		len -= offset;
1122 
1123 		if (len > cryptlen)
1124 			len = cryptlen;
1125 
1126 		if (datalen > 0 && len > datalen) {
1127 			to_talitos_ptr(link_tbl_ptr + count,
1128 				       sg_dma_address(sg) + offset, datalen, 0);
1129 			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1130 			count++;
1131 			len -= datalen;
1132 			offset += datalen;
1133 		}
1134 		to_talitos_ptr(link_tbl_ptr + count,
1135 			       sg_dma_address(sg) + offset, len, 0);
1136 		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1137 		count++;
1138 		cryptlen -= len;
1139 		datalen -= len;
1140 		offset = 0;
1141 
1142 next:
1143 		sg = sg_next(sg);
1144 	}
1145 
1146 	/* tag end of link table */
1147 	if (count > 0)
1148 		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1149 				       DESC_PTR_LNKTBL_RET, 0);
1150 
1151 	return count;
1152 }
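
/*
 * Example (illustrative): two DMA-mapped segments of 1500 and 548 bytes
 * covering a 2048-byte payload become two link-table entries, with the
 * return bit tagged onto the last one:
 *
 *	n = sg_to_link_tbl_offset(sg, 2, 0, 2048, 0, tbl);
 *	// n == 2
 *	// tbl[0]: addr = seg0, len = 1500
 *	// tbl[1]: addr = seg1, len = 548, j_extent |= DESC_PTR_LNKTBL_RET
 */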
1153 
1154 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1155 			      unsigned int len, struct talitos_edesc *edesc,
1156 			      struct talitos_ptr *ptr, int sg_count,
1157 			      unsigned int offset, int tbl_off, int elen,
1158 			      bool force)
1159 {
1160 	struct talitos_private *priv = dev_get_drvdata(dev);
1161 	bool is_sec1 = has_ftr_sec1(priv);
1162 
1163 	if (!src) {
1164 		to_talitos_ptr(ptr, 0, 0, is_sec1);
1165 		return 1;
1166 	}
1167 	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1168 	if (sg_count == 1 && !force) {
1169 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1170 		return sg_count;
1171 	}
1172 	if (is_sec1) {
1173 		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1174 		return sg_count;
1175 	}
1176 	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1177 					 &edesc->link_tbl[tbl_off]);
1178 	if (sg_count == 1 && !force) {
1179 		/* Only one segment now, so no link tbl needed */
1180 		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1181 		return sg_count;
1182 	}
1183 	to_talitos_ptr(ptr, edesc->dma_link_tbl +
1184 			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1185 	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1186 
1187 	return sg_count;
1188 }
1189 
1190 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1191 			  unsigned int len, struct talitos_edesc *edesc,
1192 			  struct talitos_ptr *ptr, int sg_count,
1193 			  unsigned int offset, int tbl_off)
1194 {
1195 	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1196 				  tbl_off, 0, false);
1197 }
1198 
1199 /*
1200  * fill in and submit ipsec_esp descriptor
1201  */
1202 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1203 		     bool encrypt,
1204 		     void (*callback)(struct device *dev,
1205 				      struct talitos_desc *desc,
1206 				      void *context, int error))
1207 {
1208 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1209 	unsigned int authsize = crypto_aead_authsize(aead);
1210 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1211 	struct device *dev = ctx->dev;
1212 	struct talitos_desc *desc = &edesc->desc;
1213 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1214 	unsigned int ivsize = crypto_aead_ivsize(aead);
1215 	int tbl_off = 0;
1216 	int sg_count, ret;
1217 	int elen = 0;
1218 	bool sync_needed = false;
1219 	struct talitos_private *priv = dev_get_drvdata(dev);
1220 	bool is_sec1 = has_ftr_sec1(priv);
1221 	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1222 	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1223 	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1224 	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
1225 
1226 	/* hmac key */
1227 	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1228 
1229 	sg_count = edesc->src_nents ?: 1;
1230 	if (is_sec1 && sg_count > 1)
1231 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1232 				  areq->assoclen + cryptlen);
1233 	else
1234 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1235 				      (areq->src == areq->dst) ?
1236 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1237 
1238 	/* hmac data */
1239 	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1240 			     &desc->ptr[1], sg_count, 0, tbl_off);
1241 
1242 	if (ret > 1) {
1243 		tbl_off += ret;
1244 		sync_needed = true;
1245 	}
1246 
1247 	/* cipher iv */
1248 	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1249 
1250 	/* cipher key */
1251 	to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1252 		       ctx->enckeylen, is_sec1);
1253 
1254 	/*
1255 	 * cipher in
1256 	 * map and adjust cipher len to aead request cryptlen.
1257 	 * extent is bytes of HMAC postpended to ciphertext,
1258 	 * typically 12 for ipsec
1259 	 */
1260 	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1261 		elen = authsize;
1262 
1263 	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1264 				 sg_count, areq->assoclen, tbl_off, elen,
1265 				 false);
1266 
1267 	if (ret > 1) {
1268 		tbl_off += ret;
1269 		sync_needed = true;
1270 	}
1271 
1272 	/* cipher out */
1273 	if (areq->src != areq->dst) {
1274 		sg_count = edesc->dst_nents ? : 1;
1275 		if (!is_sec1 || sg_count == 1)
1276 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1277 	}
1278 
1279 	if (is_ipsec_esp && encrypt)
1280 		elen = authsize;
1281 	else
1282 		elen = 0;
1283 	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1284 				 sg_count, areq->assoclen, tbl_off, elen,
1285 				 is_ipsec_esp && !encrypt);
1286 	tbl_off += ret;
1287 
1288 	if (!encrypt && is_ipsec_esp) {
1289 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1290 
1291 		/* Add an entry to the link table for ICV data */
1292 		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1293 		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1294 
1295 		/* icv data follows link tables */
1296 		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1297 		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1298 		sync_needed = true;
1299 	} else if (!encrypt) {
1300 		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1301 		sync_needed = true;
1302 	} else if (!is_ipsec_esp) {
1303 		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1304 			       sg_count, areq->assoclen + cryptlen, tbl_off);
1305 	}
1306 
1307 	/* iv out */
1308 	if (is_ipsec_esp)
1309 		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1310 				       DMA_FROM_DEVICE);
1311 
1312 	if (sync_needed)
1313 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1314 					   edesc->dma_len,
1315 					   DMA_BIDIRECTIONAL);
1316 
1317 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1318 	if (ret != -EINPROGRESS) {
1319 		ipsec_esp_unmap(dev, edesc, areq, encrypt);
1320 		kfree(edesc);
1321 	}
1322 	return ret;
1323 }
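
/*
 * For reference, the seven pointer slots filled in above map onto the
 * descriptor as follows (IPSEC_ESP layout; the HSNA type swaps the iv
 * and cipher key slots):
 *
 *	ptr[0]	hmac key	ptr[4]	cipher in (+ optional ICV extent)
 *	ptr[1]	hmac/assoc data	ptr[5]	cipher out
 *	ptr[2]	cipher iv	ptr[6]	iv out, or ICV in/out for the
 *	ptr[3]	cipher key		sw-auth and HSNA cases
 */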
1324 
1325 /*
1326  * allocate and map the extended descriptor
1327  */
1328 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1329 						 struct scatterlist *src,
1330 						 struct scatterlist *dst,
1331 						 u8 *iv,
1332 						 unsigned int assoclen,
1333 						 unsigned int cryptlen,
1334 						 unsigned int authsize,
1335 						 unsigned int ivsize,
1336 						 int icv_stashing,
1337 						 u32 cryptoflags,
1338 						 bool encrypt)
1339 {
1340 	struct talitos_edesc *edesc;
1341 	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1342 	dma_addr_t iv_dma = 0;
1343 	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1344 		      GFP_ATOMIC;
1345 	struct talitos_private *priv = dev_get_drvdata(dev);
1346 	bool is_sec1 = has_ftr_sec1(priv);
1347 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1348 
1349 	if (cryptlen + authsize > max_len) {
1350 		dev_err(dev, "length exceeds h/w max limit\n");
1351 		return ERR_PTR(-EINVAL);
1352 	}
1353 
1354 	if (!dst || dst == src) {
1355 		src_len = assoclen + cryptlen + authsize;
1356 		src_nents = sg_nents_for_len(src, src_len);
1357 		if (src_nents < 0) {
1358 			dev_err(dev, "Invalid number of src SG.\n");
1359 			return ERR_PTR(-EINVAL);
1360 		}
1361 		src_nents = (src_nents == 1) ? 0 : src_nents;
1362 		dst_nents = dst ? src_nents : 0;
1363 		dst_len = 0;
1364 	} else { /* dst && dst != src */
1365 		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1366 		src_nents = sg_nents_for_len(src, src_len);
1367 		if (src_nents < 0) {
1368 			dev_err(dev, "Invalid number of src SG.\n");
1369 			return ERR_PTR(-EINVAL);
1370 		}
1371 		src_nents = (src_nents == 1) ? 0 : src_nents;
1372 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1373 		dst_nents = sg_nents_for_len(dst, dst_len);
1374 		if (dst_nents < 0) {
1375 			dev_err(dev, "Invalid number of dst SG.\n");
1376 			return ERR_PTR(-EINVAL);
1377 		}
1378 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1379 	}
1380 
1381 	/*
1382 	 * allocate space for base edesc plus the link tables,
1383 	 * allowing for two separate entries for AD and generated ICV (+ 2),
1384 	 * and space for two sets of ICVs (stashed and generated)
1385 	 */
1386 	alloc_len = sizeof(struct talitos_edesc);
1387 	if (src_nents || dst_nents || !encrypt) {
1388 		if (is_sec1)
1389 			dma_len = (src_nents ? src_len : 0) +
1390 				  (dst_nents ? dst_len : 0) + authsize;
1391 		else
1392 			dma_len = (src_nents + dst_nents + 2) *
1393 				  sizeof(struct talitos_ptr) + authsize;
1394 		alloc_len += dma_len;
1395 	} else {
1396 		dma_len = 0;
1397 	}
1398 	alloc_len += icv_stashing ? authsize : 0;
1399 
1400 	/* if it's an ahash, add space for a second desc next to the first one */
1401 	if (is_sec1 && !dst)
1402 		alloc_len += sizeof(struct talitos_desc);
1403 	alloc_len += ivsize;
1404 
1405 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1406 	if (!edesc)
1407 		return ERR_PTR(-ENOMEM);
1408 	if (ivsize) {
1409 		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1410 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1411 	}
1412 	memset(&edesc->desc, 0, sizeof(edesc->desc));
1413 
1414 	edesc->src_nents = src_nents;
1415 	edesc->dst_nents = dst_nents;
1416 	edesc->iv_dma = iv_dma;
1417 	edesc->dma_len = dma_len;
1418 	if (dma_len)
1419 		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1420 						     edesc->dma_len,
1421 						     DMA_BIDIRECTIONAL);
1422 
1423 	return edesc;
1424 }
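
/*
 * Worked example (SEC2+, hypothetical sizes): an AEAD decrypt with
 * src == dst spanning three segments, with a 16-byte ICV and ICV
 * stashing requested, gives src_nents = dst_nents = 3, so
 *
 *	dma_len   = (3 + 3 + 2) * sizeof(struct talitos_ptr) + 16;
 *	alloc_len = sizeof(struct talitos_edesc) + dma_len
 *		    + 16 (stashed ICV) + ivsize;
 *
 * i.e. the link tables, both ICV copies and the IV all share one
 * GFP_DMA allocation that is mapped bidirectionally.
 */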
1425 
1426 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1427 					      int icv_stashing, bool encrypt)
1428 {
1429 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1430 	unsigned int authsize = crypto_aead_authsize(authenc);
1431 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1432 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1433 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1434 
1435 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1436 				   iv, areq->assoclen, cryptlen,
1437 				   authsize, ivsize, icv_stashing,
1438 				   areq->base.flags, encrypt);
1439 }
1440 
1441 static int aead_encrypt(struct aead_request *req)
1442 {
1443 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1444 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1445 	struct talitos_edesc *edesc;
1446 
1447 	/* allocate extended descriptor */
1448 	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1449 	if (IS_ERR(edesc))
1450 		return PTR_ERR(edesc);
1451 
1452 	/* set encrypt */
1453 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1454 
1455 	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1456 }
1457 
1458 static int aead_decrypt(struct aead_request *req)
1459 {
1460 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1461 	unsigned int authsize = crypto_aead_authsize(authenc);
1462 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1463 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1464 	struct talitos_edesc *edesc;
1465 	void *icvdata;
1466 
1467 	/* allocate extended descriptor */
1468 	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1469 	if (IS_ERR(edesc))
1470 		return PTR_ERR(edesc);
1471 
1472 	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1473 	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1474 	    ((!edesc->src_nents && !edesc->dst_nents) ||
1475 	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1476 
1477 		/* decrypt and check the ICV */
1478 		edesc->desc.hdr = ctx->desc_hdr_template |
1479 				  DESC_HDR_DIR_INBOUND |
1480 				  DESC_HDR_MODE1_MDEU_CICV;
1481 
1482 		/* reset integrity check result bits */
1483 
1484 		return ipsec_esp(edesc, req, false,
1485 				 ipsec_esp_decrypt_hwauth_done);
1486 	}
1487 
1488 	/* Have to check the ICV with software */
1489 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1490 
1491 	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1492 	icvdata = edesc->buf + edesc->dma_len;
1493 
1494 	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1495 			   req->assoclen + req->cryptlen - authsize);
1496 
1497 	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1498 }
1499 
1500 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1501 			     const u8 *key, unsigned int keylen)
1502 {
1503 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1504 	struct device *dev = ctx->dev;
1505 
1506 	if (ctx->keylen)
1507 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1508 
1509 	memcpy(&ctx->key, key, keylen);
1510 	ctx->keylen = keylen;
1511 
1512 	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1513 
1514 	return 0;
1515 }
1516 
1517 static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1518 				 const u8 *key, unsigned int keylen)
1519 {
1520 	u32 tmp[DES_EXPKEY_WORDS];
1521 
1522 	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1523 		     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
1524 	    !des_ekey(tmp, key)) {
1525 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1526 		return -EINVAL;
1527 	}
1528 
1529 	return ablkcipher_setkey(cipher, key, keylen);
1530 }
1531 
1532 static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1533 				  const u8 *key, unsigned int keylen)
1534 {
1535 	u32 flags;
1536 	int err;
1537 
1538 	flags = crypto_ablkcipher_get_flags(cipher);
1539 	err = __des3_verify_key(&flags, key);
1540 	if (unlikely(err)) {
1541 		crypto_ablkcipher_set_flags(cipher, flags);
1542 		return err;
1543 	}
1544 
1545 	return ablkcipher_setkey(cipher, key, keylen);
1546 }
1547 
1548 static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1549 				  const u8 *key, unsigned int keylen)
1550 {
1551 	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1552 	    keylen == AES_KEYSIZE_256)
1553 		return ablkcipher_setkey(cipher, key, keylen);
1554 
1555 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1556 
1557 	return -EINVAL;
1558 }
1559 
1560 static void common_nonsnoop_unmap(struct device *dev,
1561 				  struct talitos_edesc *edesc,
1562 				  struct ablkcipher_request *areq)
1563 {
1564 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1565 
1566 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1567 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1568 
1569 	if (edesc->dma_len)
1570 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1571 				 DMA_BIDIRECTIONAL);
1572 }
1573 
1574 static void ablkcipher_done(struct device *dev,
1575 			    struct talitos_desc *desc, void *context,
1576 			    int err)
1577 {
1578 	struct ablkcipher_request *areq = context;
1579 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1580 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1581 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1582 	struct talitos_edesc *edesc;
1583 
1584 	edesc = container_of(desc, struct talitos_edesc, desc);
1585 
1586 	common_nonsnoop_unmap(dev, edesc, areq);
1587 	memcpy(areq->info, ctx->iv, ivsize);
1588 
1589 	kfree(edesc);
1590 
1591 	areq->base.complete(&areq->base, err);
1592 }
1593 
1594 static int common_nonsnoop(struct talitos_edesc *edesc,
1595 			   struct ablkcipher_request *areq,
1596 			   void (*callback) (struct device *dev,
1597 					     struct talitos_desc *desc,
1598 					     void *context, int error))
1599 {
1600 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1601 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1602 	struct device *dev = ctx->dev;
1603 	struct talitos_desc *desc = &edesc->desc;
1604 	unsigned int cryptlen = areq->nbytes;
1605 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1606 	int sg_count, ret;
1607 	bool sync_needed = false;
1608 	struct talitos_private *priv = dev_get_drvdata(dev);
1609 	bool is_sec1 = has_ftr_sec1(priv);
1610 
1611 	/* first DWORD empty */
1612 
1613 	/* cipher iv */
1614 	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1615 
1616 	/* cipher key */
1617 	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1618 
1619 	sg_count = edesc->src_nents ?: 1;
1620 	if (is_sec1 && sg_count > 1)
1621 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1622 				  cryptlen);
1623 	else
1624 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1625 				      (areq->src == areq->dst) ?
1626 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1627 	/*
1628 	 * cipher in
1629 	 */
1630 	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1631 				  &desc->ptr[3], sg_count, 0, 0);
1632 	if (sg_count > 1)
1633 		sync_needed = true;
1634 
1635 	/* cipher out */
1636 	if (areq->src != areq->dst) {
1637 		sg_count = edesc->dst_nents ? : 1;
1638 		if (!is_sec1 || sg_count == 1)
1639 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1640 	}
1641 
1642 	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1643 			     sg_count, 0, (edesc->src_nents + 1));
1644 	if (ret > 1)
1645 		sync_needed = true;
1646 
1647 	/* iv out */
1648 	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1649 			       DMA_FROM_DEVICE);
1650 
1651 	/* last DWORD empty */
1652 
1653 	if (sync_needed)
1654 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1655 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1656 
1657 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1658 	if (ret != -EINPROGRESS) {
1659 		common_nonsnoop_unmap(dev, edesc, areq);
1660 		kfree(edesc);
1661 	}
1662 	return ret;
1663 }
1664 
1665 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1666 						    areq, bool encrypt)
1667 {
1668 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1669 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1670 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1671 
1672 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1673 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1674 				   areq->base.flags, encrypt);
1675 }
1676 
1677 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1678 {
1679 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1680 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1681 	struct talitos_edesc *edesc;
1682 	unsigned int blocksize =
1683 			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1684 
1685 	if (!areq->nbytes)
1686 		return 0;
1687 
1688 	if (areq->nbytes % blocksize)
1689 		return -EINVAL;
1690 
1691 	/* allocate extended descriptor */
1692 	edesc = ablkcipher_edesc_alloc(areq, true);
1693 	if (IS_ERR(edesc))
1694 		return PTR_ERR(edesc);
1695 
1696 	/* set encrypt */
1697 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1698 
1699 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1700 }
1701 
1702 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1703 {
1704 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1705 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1706 	struct talitos_edesc *edesc;
1707 	unsigned int blocksize =
1708 			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1709 
1710 	if (!areq->nbytes)
1711 		return 0;
1712 
1713 	if (areq->nbytes % blocksize)
1714 		return -EINVAL;
1715 
1716 	/* allocate extended descriptor */
1717 	edesc = ablkcipher_edesc_alloc(areq, false);
1718 	if (IS_ERR(edesc))
1719 		return PTR_ERR(edesc);
1720 
1721 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1722 
1723 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1724 }
1725 
1726 static void common_nonsnoop_hash_unmap(struct device *dev,
1727 				       struct talitos_edesc *edesc,
1728 				       struct ahash_request *areq)
1729 {
1730 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1731 	struct talitos_private *priv = dev_get_drvdata(dev);
1732 	bool is_sec1 = has_ftr_sec1(priv);
1733 	struct talitos_desc *desc = &edesc->desc;
1734 	struct talitos_desc *desc2 = (struct talitos_desc *)
1735 				     (edesc->buf + edesc->dma_len);
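	/*
	 * desc2 is the chained second descriptor that SEC1 hashing may have
	 * built in edesc->buf just past the link tables; see the is_sec1
	 * chaining block in common_nonsnoop_hash().
	 */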
1736 
1737 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1738 	if (desc->next_desc &&
1739 	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
1740 		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1741 
1742 	if (req_ctx->psrc)
1743 		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1744 
1745 	/* When using hashctx-in, must unmap it. */
1746 	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1747 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1748 					 DMA_TO_DEVICE);
1749 	else if (desc->next_desc)
1750 		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1751 					 DMA_TO_DEVICE);
1752 
1753 	if (is_sec1 && req_ctx->nbuf)
1754 		unmap_single_talitos_ptr(dev, &desc->ptr[3],
1755 					 DMA_TO_DEVICE);
1756 
1757 	if (edesc->dma_len)
1758 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1759 				 DMA_BIDIRECTIONAL);
1760 
1761 	if (edesc->desc.next_desc)
1762 		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1763 				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1764 }
1765 
1766 static void ahash_done(struct device *dev,
1767 		       struct talitos_desc *desc, void *context,
1768 		       int err)
1769 {
1770 	struct ahash_request *areq = context;
1771 	struct talitos_edesc *edesc =
1772 		 container_of(desc, struct talitos_edesc, desc);
1773 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1774 
1775 	if (!req_ctx->last && req_ctx->to_hash_later) {
1776 		/* Position any partial block for next update/final/finup */
1777 		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1778 		req_ctx->nbuf = req_ctx->to_hash_later;
1779 	}
1780 	common_nonsnoop_hash_unmap(dev, edesc, areq);
1781 
1782 	kfree(edesc);
1783 
1784 	areq->base.complete(&areq->base, err);
1785 }
1786 
1787 /*
1788  * SEC1 doesn't like hashing a 0-sized message, so we do the padding
1789  * ourselves and submit a padded block
1790  */
1791 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1792 			       struct talitos_edesc *edesc,
1793 			       struct talitos_ptr *ptr)
1794 {
1795 	static u8 padded_hash[64] = {
1796 		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1797 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1798 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1799 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1800 	};
1801 
1802 	pr_err_once("Bug in SEC1, padding ourselves\n");
1803 	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1804 	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1805 			       (char *)padded_hash, DMA_TO_DEVICE);
1806 }
1807 
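/*
 * Fill in and submit the hash descriptor.  Pointer slot usage, as set up
 * below: ptr[1] = hash context in (unless this is the first op with h/w
 * init), ptr[2] = HMAC key, ptr[3] = data in, ptr[5] = digest or context
 * out; slots 0, 4 and 6 stay empty for this descriptor type.
 */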
1808 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1809 				struct ahash_request *areq, unsigned int length,
1810 				void (*callback) (struct device *dev,
1811 						  struct talitos_desc *desc,
1812 						  void *context, int error))
1813 {
1814 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1815 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1816 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1817 	struct device *dev = ctx->dev;
1818 	struct talitos_desc *desc = &edesc->desc;
1819 	int ret;
1820 	bool sync_needed = false;
1821 	struct talitos_private *priv = dev_get_drvdata(dev);
1822 	bool is_sec1 = has_ftr_sec1(priv);
1823 	int sg_count;
1824 
1825 	/* first DWORD empty */
1826 
1827 	/* hash context in */
1828 	if (!req_ctx->first || req_ctx->swinit) {
1829 		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1830 					      req_ctx->hw_context_size,
1831 					      req_ctx->hw_context,
1832 					      DMA_TO_DEVICE);
1833 		req_ctx->swinit = 0;
1834 	}
1835 	/* Indicate next op is not the first. */
1836 	req_ctx->first = 0;
1837 
1838 	/* HMAC key */
1839 	if (ctx->keylen)
1840 		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1841 			       is_sec1);
1842 
1843 	if (is_sec1 && req_ctx->nbuf)
1844 		length -= req_ctx->nbuf;
1845 
1846 	sg_count = edesc->src_nents ?: 1;
1847 	if (is_sec1 && sg_count > 1)
1848 		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1849 	else if (length)
1850 		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1851 				      DMA_TO_DEVICE);
1852 	/*
1853 	 * data in
1854 	 */
1855 	if (is_sec1 && req_ctx->nbuf) {
1856 		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1857 				       req_ctx->buf[req_ctx->buf_idx],
1858 				       DMA_TO_DEVICE);
1859 	} else {
1860 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1861 					  &desc->ptr[3], sg_count, 0, 0);
1862 		if (sg_count > 1)
1863 			sync_needed = true;
1864 	}
1865 
1866 	/* fifth DWORD empty */
1867 
1868 	/* hash/HMAC out -or- hash context out */
1869 	if (req_ctx->last)
1870 		map_single_talitos_ptr(dev, &desc->ptr[5],
1871 				       crypto_ahash_digestsize(tfm),
1872 				       areq->result, DMA_FROM_DEVICE);
1873 	else
1874 		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1875 					      req_ctx->hw_context_size,
1876 					      req_ctx->hw_context,
1877 					      DMA_FROM_DEVICE);
1878 
1879 	/* last DWORD empty */
1880 
1881 	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1882 		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1883 
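	/*
	 * SEC1 cannot hash the previously buffered bytes and the new
	 * scatterlist data with a single descriptor (ptr[3] already carries
	 * the buffered block), so build a second descriptor for the
	 * remaining data and chain it through next_desc.
	 */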
1884 	if (is_sec1 && req_ctx->nbuf && length) {
1885 		struct talitos_desc *desc2 = (struct talitos_desc *)
1886 					     (edesc->buf + edesc->dma_len);
1887 		dma_addr_t next_desc;
1888 
1889 		memset(desc2, 0, sizeof(*desc2));
1890 		desc2->hdr = desc->hdr;
1891 		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1892 		desc2->hdr1 = desc2->hdr;
1893 		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1894 		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1895 		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1896 
1897 		if (desc->ptr[1].ptr)
1898 			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1899 					 is_sec1);
1900 		else
1901 			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1902 						      req_ctx->hw_context_size,
1903 						      req_ctx->hw_context,
1904 						      DMA_TO_DEVICE);
1905 		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1906 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1907 					  &desc2->ptr[3], sg_count, 0, 0);
1908 		if (sg_count > 1)
1909 			sync_needed = true;
1910 		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1911 		if (req_ctx->last)
1912 			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1913 						      req_ctx->hw_context_size,
1914 						      req_ctx->hw_context,
1915 						      DMA_FROM_DEVICE);
1916 
1917 		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1918 					   DMA_BIDIRECTIONAL);
1919 		desc->next_desc = cpu_to_be32(next_desc);
1920 	}
1921 
1922 	if (sync_needed)
1923 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1924 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1925 
1926 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1927 	if (ret != -EINPROGRESS) {
1928 		common_nonsnoop_hash_unmap(dev, edesc, areq);
1929 		kfree(edesc);
1930 	}
1931 	return ret;
1932 }
1933 
1934 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1935 					       unsigned int nbytes)
1936 {
1937 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1938 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1939 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1940 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1941 	bool is_sec1 = has_ftr_sec1(priv);
1942 
1943 	if (is_sec1)
1944 		nbytes -= req_ctx->nbuf;
1945 
1946 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1947 				   nbytes, 0, 0, 0, areq->base.flags, false);
1948 }
1949 
1950 static int ahash_init(struct ahash_request *areq)
1951 {
1952 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1953 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1954 	struct device *dev = ctx->dev;
1955 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1956 	unsigned int size;
1957 	dma_addr_t dma;
1958 
1959 	/* Initialize the context */
1960 	req_ctx->buf_idx = 0;
1961 	req_ctx->nbuf = 0;
1962 	req_ctx->first = 1; /* first indicates h/w must init its context */
1963 	req_ctx->swinit = 0; /* assume h/w init of context */
1964 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1965 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1966 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1967 	req_ctx->hw_context_size = size;
1968 
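	/*
	 * Map/unmap round trip on hw_context: presumably this is done to
	 * flush stale cachelines so that the later _nosync mappings of the
	 * context stay coherent with the device.
	 */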
1969 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1970 			     DMA_TO_DEVICE);
1971 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1972 
1973 	return 0;
1974 }
1975 
1976 /*
1977  * on h/w without explicit sha224 support, we initialize h/w context
1978  * manually with sha224 constants, and tell it to run sha256.
1979  */
1980 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1981 {
1982 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1983 
1984 	req_ctx->hw_context[0] = SHA224_H0;
1985 	req_ctx->hw_context[1] = SHA224_H1;
1986 	req_ctx->hw_context[2] = SHA224_H2;
1987 	req_ctx->hw_context[3] = SHA224_H3;
1988 	req_ctx->hw_context[4] = SHA224_H4;
1989 	req_ctx->hw_context[5] = SHA224_H5;
1990 	req_ctx->hw_context[6] = SHA224_H6;
1991 	req_ctx->hw_context[7] = SHA224_H7;
1992 
1993 	/* init 64-bit count */
1994 	req_ctx->hw_context[8] = 0;
1995 	req_ctx->hw_context[9] = 0;
1996 
1997 	ahash_init(areq);
1998 	req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */
1999 
2000 	return 0;
2001 }
2002 
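/*
 * Core update/final/finup handler.  Data accumulates in a request-local
 * buffer until more than one block is available; mid-stream, a partial
 * block (or one whole block, if the data is block-aligned) is always held
 * back in to_hash_later so that the concluding request can ask the h/w to
 * pad, and only whole blocks are submitted in between.
 */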
2003 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2004 {
2005 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2006 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2007 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2008 	struct talitos_edesc *edesc;
2009 	unsigned int blocksize =
2010 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2011 	unsigned int nbytes_to_hash;
2012 	unsigned int to_hash_later;
2013 	unsigned int nsg;
2014 	int nents;
2015 	struct device *dev = ctx->dev;
2016 	struct talitos_private *priv = dev_get_drvdata(dev);
2017 	bool is_sec1 = has_ftr_sec1(priv);
2018 	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
2019 
2020 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2021 		/* Buffer up to one whole block */
2022 		nents = sg_nents_for_len(areq->src, nbytes);
2023 		if (nents < 0) {
2024 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2025 			return nents;
2026 		}
2027 		sg_copy_to_buffer(areq->src, nents,
2028 				  ctx_buf + req_ctx->nbuf, nbytes);
2029 		req_ctx->nbuf += nbytes;
2030 		return 0;
2031 	}
2032 
2033 	/* At least (blocksize + 1) bytes are available to hash */
2034 	nbytes_to_hash = nbytes + req_ctx->nbuf;
2035 	to_hash_later = nbytes_to_hash & (blocksize - 1);
2036 
2037 	if (req_ctx->last)
2038 		to_hash_later = 0;
2039 	else if (to_hash_later)
2040 		/* There is a partial block. Hash the full block(s) now */
2041 		nbytes_to_hash -= to_hash_later;
2042 	else {
2043 		/* Keep one block buffered */
2044 		nbytes_to_hash -= blocksize;
2045 		to_hash_later = blocksize;
2046 	}
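	/*
	 * Example: with blocksize 64, nbuf = 10 buffered and nbytes = 120
	 * new, nbytes_to_hash = 130 and to_hash_later = 130 & 63 = 2, so
	 * 128 bytes are hashed now and 2 are re-buffered.  The bitmask is
	 * valid because all supported block sizes are powers of two.
	 */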
2047 
2048 	/* Chain in any previously buffered data */
2049 	if (!is_sec1 && req_ctx->nbuf) {
2050 		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2051 		sg_init_table(req_ctx->bufsl, nsg);
2052 		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2053 		if (nsg > 1)
2054 			sg_chain(req_ctx->bufsl, 2, areq->src);
2055 		req_ctx->psrc = req_ctx->bufsl;
2056 	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2057 		int offset;
2058 
2059 		if (nbytes_to_hash > blocksize)
2060 			offset = blocksize - req_ctx->nbuf;
2061 		else
2062 			offset = nbytes_to_hash - req_ctx->nbuf;
2063 		nents = sg_nents_for_len(areq->src, offset);
2064 		if (nents < 0) {
2065 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2066 			return nents;
2067 		}
2068 		sg_copy_to_buffer(areq->src, nents,
2069 				  ctx_buf + req_ctx->nbuf, offset);
2070 		req_ctx->nbuf += offset;
2071 		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2072 						 offset);
2073 	} else
2074 		req_ctx->psrc = areq->src;
2075 
2076 	if (to_hash_later) {
2077 		nents = sg_nents_for_len(areq->src, nbytes);
2078 		if (nents < 0) {
2079 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2080 			return nents;
2081 		}
2082 		sg_pcopy_to_buffer(areq->src, nents,
2083 				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2084 				   to_hash_later,
2085 				   nbytes - to_hash_later);
2086 	}
2087 	req_ctx->to_hash_later = to_hash_later;
2088 
2089 	/* Allocate extended descriptor */
2090 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2091 	if (IS_ERR(edesc))
2092 		return PTR_ERR(edesc);
2093 
2094 	edesc->desc.hdr = ctx->desc_hdr_template;
2095 
2096 	/* On last one, request SEC to pad; otherwise continue */
2097 	if (req_ctx->last)
2098 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2099 	else
2100 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2101 
2102 	/* request SEC to INIT hash. */
2103 	if (req_ctx->first && !req_ctx->swinit)
2104 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2105 
2106 	/* When the tfm context has a keylen, it's an HMAC.
2107 	 * A first or last (i.e. not middle) descriptor must request HMAC.
2108 	 */
2109 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2110 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2111 
2112 	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2113 }
2114 
2115 static int ahash_update(struct ahash_request *areq)
2116 {
2117 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2118 
2119 	req_ctx->last = 0;
2120 
2121 	return ahash_process_req(areq, areq->nbytes);
2122 }
2123 
2124 static int ahash_final(struct ahash_request *areq)
2125 {
2126 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2127 
2128 	req_ctx->last = 1;
2129 
2130 	return ahash_process_req(areq, 0);
2131 }
2132 
2133 static int ahash_finup(struct ahash_request *areq)
2134 {
2135 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2136 
2137 	req_ctx->last = 1;
2138 
2139 	return ahash_process_req(areq, areq->nbytes);
2140 }
2141 
2142 static int ahash_digest(struct ahash_request *areq)
2143 {
2144 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2145 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2146 
2147 	ahash->init(areq);
2148 	req_ctx->last = 1;
2149 
2150 	return ahash_process_req(areq, areq->nbytes);
2151 }
2152 
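/*
 * Export the request state (h/w context, buffered bytes and flags) so a
 * partial hash can be suspended and later resumed via ahash_import(); the
 * map/unmap pair below apparently serves to make the device-written
 * context visible to the CPU before it is copied out.
 */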
2153 static int ahash_export(struct ahash_request *areq, void *out)
2154 {
2155 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2156 	struct talitos_export_state *export = out;
2157 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2158 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2159 	struct device *dev = ctx->dev;
2160 	dma_addr_t dma;
2161 
2162 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2163 			     DMA_FROM_DEVICE);
2164 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2165 
2166 	memcpy(export->hw_context, req_ctx->hw_context,
2167 	       req_ctx->hw_context_size);
2168 	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2169 	export->swinit = req_ctx->swinit;
2170 	export->first = req_ctx->first;
2171 	export->last = req_ctx->last;
2172 	export->to_hash_later = req_ctx->to_hash_later;
2173 	export->nbuf = req_ctx->nbuf;
2174 
2175 	return 0;
2176 }
2177 
2178 static int ahash_import(struct ahash_request *areq, const void *in)
2179 {
2180 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2181 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2182 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2183 	struct device *dev = ctx->dev;
2184 	const struct talitos_export_state *export = in;
2185 	unsigned int size;
2186 	dma_addr_t dma;
2187 
2188 	memset(req_ctx, 0, sizeof(*req_ctx));
2189 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2190 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2191 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2192 	req_ctx->hw_context_size = size;
2193 	memcpy(req_ctx->hw_context, export->hw_context, size);
2194 	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2195 	req_ctx->swinit = export->swinit;
2196 	req_ctx->first = export->first;
2197 	req_ctx->last = export->last;
2198 	req_ctx->to_hash_later = export->to_hash_later;
2199 	req_ctx->nbuf = export->nbuf;
2200 
2201 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2202 			     DMA_TO_DEVICE);
2203 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2204 
2205 	return 0;
2206 }
2207 
2208 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2209 		   u8 *hash)
2210 {
2211 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2212 
2213 	struct scatterlist sg[1];
2214 	struct ahash_request *req;
2215 	struct crypto_wait wait;
2216 	int ret;
2217 
2218 	crypto_init_wait(&wait);
2219 
2220 	req = ahash_request_alloc(tfm, GFP_KERNEL);
2221 	if (!req)
2222 		return -ENOMEM;
2223 
2224 	/* Keep tfm keylen == 0 during hash of the long key */
2225 	ctx->keylen = 0;
2226 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2227 				   crypto_req_done, &wait);
2228 
2229 	sg_init_one(&sg[0], key, keylen);
2230 
2231 	ahash_request_set_crypt(req, sg, hash, keylen);
2232 	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2233 
2234 	ahash_request_free(req);
2235 
2236 	return ret;
2237 }
2238 
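/*
 * Standard HMAC key preparation (RFC 2104): a key longer than the block
 * size is first digested down to digestsize via keyhash(), a shorter key
 * is used as-is; the result is then DMA-mapped for use in descriptors.
 */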
2239 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2240 			unsigned int keylen)
2241 {
2242 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2243 	struct device *dev = ctx->dev;
2244 	unsigned int blocksize =
2245 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2246 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2247 	unsigned int keysize = keylen;
2248 	u8 hash[SHA512_DIGEST_SIZE];
2249 	int ret;
2250 
2251 	if (keylen <= blocksize)
2252 		memcpy(ctx->key, key, keysize);
2253 	else {
2254 		/* Must get the hash of the long key */
2255 		ret = keyhash(tfm, key, keylen, hash);
2256 
2257 		if (ret) {
2258 			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2259 			return -EINVAL;
2260 		}
2261 
2262 		keysize = digestsize;
2263 		memcpy(ctx->key, hash, digestsize);
2264 	}
2265 
2266 	if (ctx->keylen)
2267 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2268 
2269 	ctx->keylen = keysize;
2270 	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2271 
2272 	return 0;
2273 }
2274 
2275 
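/*
 * Each driver_algs[] entry below pairs a crypto API algorithm definition
 * with the descriptor header template selecting the execution unit(s) and
 * mode bits that implement it.
 */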
2276 struct talitos_alg_template {
2277 	u32 type;
2278 	u32 priority;
2279 	union {
2280 		struct crypto_alg crypto;
2281 		struct ahash_alg hash;
2282 		struct aead_alg aead;
2283 	} alg;
2284 	__be32 desc_hdr_template;
2285 };
2286 
2287 static struct talitos_alg_template driver_algs[] = {
2288 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2289 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2290 		.alg.aead = {
2291 			.base = {
2292 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2293 				.cra_driver_name = "authenc-hmac-sha1-"
2294 						   "cbc-aes-talitos",
2295 				.cra_blocksize = AES_BLOCK_SIZE,
2296 				.cra_flags = CRYPTO_ALG_ASYNC,
2297 			},
2298 			.ivsize = AES_BLOCK_SIZE,
2299 			.maxauthsize = SHA1_DIGEST_SIZE,
2300 		},
2301 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2302 			             DESC_HDR_SEL0_AESU |
2303 		                     DESC_HDR_MODE0_AESU_CBC |
2304 		                     DESC_HDR_SEL1_MDEUA |
2305 		                     DESC_HDR_MODE1_MDEU_INIT |
2306 		                     DESC_HDR_MODE1_MDEU_PAD |
2307 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2308 	},
2309 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2310 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2311 		.alg.aead = {
2312 			.base = {
2313 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2314 				.cra_driver_name = "authenc-hmac-sha1-"
2315 						   "cbc-aes-talitos-hsna",
2316 				.cra_blocksize = AES_BLOCK_SIZE,
2317 				.cra_flags = CRYPTO_ALG_ASYNC,
2318 			},
2319 			.ivsize = AES_BLOCK_SIZE,
2320 			.maxauthsize = SHA1_DIGEST_SIZE,
2321 		},
2322 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2323 				     DESC_HDR_SEL0_AESU |
2324 				     DESC_HDR_MODE0_AESU_CBC |
2325 				     DESC_HDR_SEL1_MDEUA |
2326 				     DESC_HDR_MODE1_MDEU_INIT |
2327 				     DESC_HDR_MODE1_MDEU_PAD |
2328 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2329 	},
2330 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2331 		.alg.aead = {
2332 			.base = {
2333 				.cra_name = "authenc(hmac(sha1),"
2334 					    "cbc(des3_ede))",
2335 				.cra_driver_name = "authenc-hmac-sha1-"
2336 						   "cbc-3des-talitos",
2337 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2338 				.cra_flags = CRYPTO_ALG_ASYNC,
2339 			},
2340 			.ivsize = DES3_EDE_BLOCK_SIZE,
2341 			.maxauthsize = SHA1_DIGEST_SIZE,
2342 			.setkey = aead_des3_setkey,
2343 		},
2344 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2345 			             DESC_HDR_SEL0_DEU |
2346 		                     DESC_HDR_MODE0_DEU_CBC |
2347 		                     DESC_HDR_MODE0_DEU_3DES |
2348 		                     DESC_HDR_SEL1_MDEUA |
2349 		                     DESC_HDR_MODE1_MDEU_INIT |
2350 		                     DESC_HDR_MODE1_MDEU_PAD |
2351 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2352 	},
2353 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2354 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2355 		.alg.aead = {
2356 			.base = {
2357 				.cra_name = "authenc(hmac(sha1),"
2358 					    "cbc(des3_ede))",
2359 				.cra_driver_name = "authenc-hmac-sha1-"
2360 						   "cbc-3des-talitos-hsna",
2361 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2362 				.cra_flags = CRYPTO_ALG_ASYNC,
2363 			},
2364 			.ivsize = DES3_EDE_BLOCK_SIZE,
2365 			.maxauthsize = SHA1_DIGEST_SIZE,
2366 			.setkey = aead_des3_setkey,
2367 		},
2368 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2369 				     DESC_HDR_SEL0_DEU |
2370 				     DESC_HDR_MODE0_DEU_CBC |
2371 				     DESC_HDR_MODE0_DEU_3DES |
2372 				     DESC_HDR_SEL1_MDEUA |
2373 				     DESC_HDR_MODE1_MDEU_INIT |
2374 				     DESC_HDR_MODE1_MDEU_PAD |
2375 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2376 	},
2377 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2378 		.alg.aead = {
2379 			.base = {
2380 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2381 				.cra_driver_name = "authenc-hmac-sha224-"
2382 						   "cbc-aes-talitos",
2383 				.cra_blocksize = AES_BLOCK_SIZE,
2384 				.cra_flags = CRYPTO_ALG_ASYNC,
2385 			},
2386 			.ivsize = AES_BLOCK_SIZE,
2387 			.maxauthsize = SHA224_DIGEST_SIZE,
2388 		},
2389 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2390 				     DESC_HDR_SEL0_AESU |
2391 				     DESC_HDR_MODE0_AESU_CBC |
2392 				     DESC_HDR_SEL1_MDEUA |
2393 				     DESC_HDR_MODE1_MDEU_INIT |
2394 				     DESC_HDR_MODE1_MDEU_PAD |
2395 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2396 	},
2397 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2398 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2399 		.alg.aead = {
2400 			.base = {
2401 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2402 				.cra_driver_name = "authenc-hmac-sha224-"
2403 						   "cbc-aes-talitos-hsna",
2404 				.cra_blocksize = AES_BLOCK_SIZE,
2405 				.cra_flags = CRYPTO_ALG_ASYNC,
2406 			},
2407 			.ivsize = AES_BLOCK_SIZE,
2408 			.maxauthsize = SHA224_DIGEST_SIZE,
2409 		},
2410 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2411 				     DESC_HDR_SEL0_AESU |
2412 				     DESC_HDR_MODE0_AESU_CBC |
2413 				     DESC_HDR_SEL1_MDEUA |
2414 				     DESC_HDR_MODE1_MDEU_INIT |
2415 				     DESC_HDR_MODE1_MDEU_PAD |
2416 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2417 	},
2418 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2419 		.alg.aead = {
2420 			.base = {
2421 				.cra_name = "authenc(hmac(sha224),"
2422 					    "cbc(des3_ede))",
2423 				.cra_driver_name = "authenc-hmac-sha224-"
2424 						   "cbc-3des-talitos",
2425 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2426 				.cra_flags = CRYPTO_ALG_ASYNC,
2427 			},
2428 			.ivsize = DES3_EDE_BLOCK_SIZE,
2429 			.maxauthsize = SHA224_DIGEST_SIZE,
2430 			.setkey = aead_des3_setkey,
2431 		},
2432 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2433 			             DESC_HDR_SEL0_DEU |
2434 		                     DESC_HDR_MODE0_DEU_CBC |
2435 		                     DESC_HDR_MODE0_DEU_3DES |
2436 		                     DESC_HDR_SEL1_MDEUA |
2437 		                     DESC_HDR_MODE1_MDEU_INIT |
2438 		                     DESC_HDR_MODE1_MDEU_PAD |
2439 		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2440 	},
2441 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2442 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2443 		.alg.aead = {
2444 			.base = {
2445 				.cra_name = "authenc(hmac(sha224),"
2446 					    "cbc(des3_ede))",
2447 				.cra_driver_name = "authenc-hmac-sha224-"
2448 						   "cbc-3des-talitos-hsna",
2449 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2450 				.cra_flags = CRYPTO_ALG_ASYNC,
2451 			},
2452 			.ivsize = DES3_EDE_BLOCK_SIZE,
2453 			.maxauthsize = SHA224_DIGEST_SIZE,
2454 			.setkey = aead_des3_setkey,
2455 		},
2456 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2457 				     DESC_HDR_SEL0_DEU |
2458 				     DESC_HDR_MODE0_DEU_CBC |
2459 				     DESC_HDR_MODE0_DEU_3DES |
2460 				     DESC_HDR_SEL1_MDEUA |
2461 				     DESC_HDR_MODE1_MDEU_INIT |
2462 				     DESC_HDR_MODE1_MDEU_PAD |
2463 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2464 	},
2465 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2466 		.alg.aead = {
2467 			.base = {
2468 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2469 				.cra_driver_name = "authenc-hmac-sha256-"
2470 						   "cbc-aes-talitos",
2471 				.cra_blocksize = AES_BLOCK_SIZE,
2472 				.cra_flags = CRYPTO_ALG_ASYNC,
2473 			},
2474 			.ivsize = AES_BLOCK_SIZE,
2475 			.maxauthsize = SHA256_DIGEST_SIZE,
2476 		},
2477 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2478 			             DESC_HDR_SEL0_AESU |
2479 		                     DESC_HDR_MODE0_AESU_CBC |
2480 		                     DESC_HDR_SEL1_MDEUA |
2481 		                     DESC_HDR_MODE1_MDEU_INIT |
2482 		                     DESC_HDR_MODE1_MDEU_PAD |
2483 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2484 	},
2485 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2486 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2487 		.alg.aead = {
2488 			.base = {
2489 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2490 				.cra_driver_name = "authenc-hmac-sha256-"
2491 						   "cbc-aes-talitos-hsna",
2492 				.cra_blocksize = AES_BLOCK_SIZE,
2493 				.cra_flags = CRYPTO_ALG_ASYNC,
2494 			},
2495 			.ivsize = AES_BLOCK_SIZE,
2496 			.maxauthsize = SHA256_DIGEST_SIZE,
2497 		},
2498 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2499 				     DESC_HDR_SEL0_AESU |
2500 				     DESC_HDR_MODE0_AESU_CBC |
2501 				     DESC_HDR_SEL1_MDEUA |
2502 				     DESC_HDR_MODE1_MDEU_INIT |
2503 				     DESC_HDR_MODE1_MDEU_PAD |
2504 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2505 	},
2506 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2507 		.alg.aead = {
2508 			.base = {
2509 				.cra_name = "authenc(hmac(sha256),"
2510 					    "cbc(des3_ede))",
2511 				.cra_driver_name = "authenc-hmac-sha256-"
2512 						   "cbc-3des-talitos",
2513 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2514 				.cra_flags = CRYPTO_ALG_ASYNC,
2515 			},
2516 			.ivsize = DES3_EDE_BLOCK_SIZE,
2517 			.maxauthsize = SHA256_DIGEST_SIZE,
2518 			.setkey = aead_des3_setkey,
2519 		},
2520 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2521 			             DESC_HDR_SEL0_DEU |
2522 		                     DESC_HDR_MODE0_DEU_CBC |
2523 		                     DESC_HDR_MODE0_DEU_3DES |
2524 		                     DESC_HDR_SEL1_MDEUA |
2525 		                     DESC_HDR_MODE1_MDEU_INIT |
2526 		                     DESC_HDR_MODE1_MDEU_PAD |
2527 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2528 	},
2529 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2530 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2531 		.alg.aead = {
2532 			.base = {
2533 				.cra_name = "authenc(hmac(sha256),"
2534 					    "cbc(des3_ede))",
2535 				.cra_driver_name = "authenc-hmac-sha256-"
2536 						   "cbc-3des-talitos-hsna",
2537 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2538 				.cra_flags = CRYPTO_ALG_ASYNC,
2539 			},
2540 			.ivsize = DES3_EDE_BLOCK_SIZE,
2541 			.maxauthsize = SHA256_DIGEST_SIZE,
2542 			.setkey = aead_des3_setkey,
2543 		},
2544 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2545 				     DESC_HDR_SEL0_DEU |
2546 				     DESC_HDR_MODE0_DEU_CBC |
2547 				     DESC_HDR_MODE0_DEU_3DES |
2548 				     DESC_HDR_SEL1_MDEUA |
2549 				     DESC_HDR_MODE1_MDEU_INIT |
2550 				     DESC_HDR_MODE1_MDEU_PAD |
2551 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2552 	},
2553 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2554 		.alg.aead = {
2555 			.base = {
2556 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2557 				.cra_driver_name = "authenc-hmac-sha384-"
2558 						   "cbc-aes-talitos",
2559 				.cra_blocksize = AES_BLOCK_SIZE,
2560 				.cra_flags = CRYPTO_ALG_ASYNC,
2561 			},
2562 			.ivsize = AES_BLOCK_SIZE,
2563 			.maxauthsize = SHA384_DIGEST_SIZE,
2564 		},
2565 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2566 			             DESC_HDR_SEL0_AESU |
2567 		                     DESC_HDR_MODE0_AESU_CBC |
2568 		                     DESC_HDR_SEL1_MDEUB |
2569 		                     DESC_HDR_MODE1_MDEU_INIT |
2570 		                     DESC_HDR_MODE1_MDEU_PAD |
2571 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2572 	},
2573 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2574 		.alg.aead = {
2575 			.base = {
2576 				.cra_name = "authenc(hmac(sha384),"
2577 					    "cbc(des3_ede))",
2578 				.cra_driver_name = "authenc-hmac-sha384-"
2579 						   "cbc-3des-talitos",
2580 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2581 				.cra_flags = CRYPTO_ALG_ASYNC,
2582 			},
2583 			.ivsize = DES3_EDE_BLOCK_SIZE,
2584 			.maxauthsize = SHA384_DIGEST_SIZE,
2585 			.setkey = aead_des3_setkey,
2586 		},
2587 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2588 			             DESC_HDR_SEL0_DEU |
2589 		                     DESC_HDR_MODE0_DEU_CBC |
2590 		                     DESC_HDR_MODE0_DEU_3DES |
2591 		                     DESC_HDR_SEL1_MDEUB |
2592 		                     DESC_HDR_MODE1_MDEU_INIT |
2593 		                     DESC_HDR_MODE1_MDEU_PAD |
2594 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2595 	},
2596 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2597 		.alg.aead = {
2598 			.base = {
2599 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2600 				.cra_driver_name = "authenc-hmac-sha512-"
2601 						   "cbc-aes-talitos",
2602 				.cra_blocksize = AES_BLOCK_SIZE,
2603 				.cra_flags = CRYPTO_ALG_ASYNC,
2604 			},
2605 			.ivsize = AES_BLOCK_SIZE,
2606 			.maxauthsize = SHA512_DIGEST_SIZE,
2607 		},
2608 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2609 			             DESC_HDR_SEL0_AESU |
2610 		                     DESC_HDR_MODE0_AESU_CBC |
2611 		                     DESC_HDR_SEL1_MDEUB |
2612 		                     DESC_HDR_MODE1_MDEU_INIT |
2613 		                     DESC_HDR_MODE1_MDEU_PAD |
2614 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2615 	},
2616 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2617 		.alg.aead = {
2618 			.base = {
2619 				.cra_name = "authenc(hmac(sha512),"
2620 					    "cbc(des3_ede))",
2621 				.cra_driver_name = "authenc-hmac-sha512-"
2622 						   "cbc-3des-talitos",
2623 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2624 				.cra_flags = CRYPTO_ALG_ASYNC,
2625 			},
2626 			.ivsize = DES3_EDE_BLOCK_SIZE,
2627 			.maxauthsize = SHA512_DIGEST_SIZE,
2628 			.setkey = aead_des3_setkey,
2629 		},
2630 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2631 			             DESC_HDR_SEL0_DEU |
2632 		                     DESC_HDR_MODE0_DEU_CBC |
2633 		                     DESC_HDR_MODE0_DEU_3DES |
2634 		                     DESC_HDR_SEL1_MDEUB |
2635 		                     DESC_HDR_MODE1_MDEU_INIT |
2636 		                     DESC_HDR_MODE1_MDEU_PAD |
2637 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2638 	},
2639 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2640 		.alg.aead = {
2641 			.base = {
2642 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2643 				.cra_driver_name = "authenc-hmac-md5-"
2644 						   "cbc-aes-talitos",
2645 				.cra_blocksize = AES_BLOCK_SIZE,
2646 				.cra_flags = CRYPTO_ALG_ASYNC,
2647 			},
2648 			.ivsize = AES_BLOCK_SIZE,
2649 			.maxauthsize = MD5_DIGEST_SIZE,
2650 		},
2651 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2652 			             DESC_HDR_SEL0_AESU |
2653 		                     DESC_HDR_MODE0_AESU_CBC |
2654 		                     DESC_HDR_SEL1_MDEUA |
2655 		                     DESC_HDR_MODE1_MDEU_INIT |
2656 		                     DESC_HDR_MODE1_MDEU_PAD |
2657 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2658 	},
2659 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2660 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2661 		.alg.aead = {
2662 			.base = {
2663 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2664 				.cra_driver_name = "authenc-hmac-md5-"
2665 						   "cbc-aes-talitos-hsna",
2666 				.cra_blocksize = AES_BLOCK_SIZE,
2667 				.cra_flags = CRYPTO_ALG_ASYNC,
2668 			},
2669 			.ivsize = AES_BLOCK_SIZE,
2670 			.maxauthsize = MD5_DIGEST_SIZE,
2671 		},
2672 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2673 				     DESC_HDR_SEL0_AESU |
2674 				     DESC_HDR_MODE0_AESU_CBC |
2675 				     DESC_HDR_SEL1_MDEUA |
2676 				     DESC_HDR_MODE1_MDEU_INIT |
2677 				     DESC_HDR_MODE1_MDEU_PAD |
2678 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2679 	},
2680 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2681 		.alg.aead = {
2682 			.base = {
2683 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2684 				.cra_driver_name = "authenc-hmac-md5-"
2685 						   "cbc-3des-talitos",
2686 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2687 				.cra_flags = CRYPTO_ALG_ASYNC,
2688 			},
2689 			.ivsize = DES3_EDE_BLOCK_SIZE,
2690 			.maxauthsize = MD5_DIGEST_SIZE,
2691 			.setkey = aead_des3_setkey,
2692 		},
2693 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2694 			             DESC_HDR_SEL0_DEU |
2695 		                     DESC_HDR_MODE0_DEU_CBC |
2696 		                     DESC_HDR_MODE0_DEU_3DES |
2697 		                     DESC_HDR_SEL1_MDEUA |
2698 		                     DESC_HDR_MODE1_MDEU_INIT |
2699 		                     DESC_HDR_MODE1_MDEU_PAD |
2700 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2701 	},
2702 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2703 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2704 		.alg.aead = {
2705 			.base = {
2706 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2707 				.cra_driver_name = "authenc-hmac-md5-"
2708 						   "cbc-3des-talitos-hsna",
2709 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2710 				.cra_flags = CRYPTO_ALG_ASYNC,
2711 			},
2712 			.ivsize = DES3_EDE_BLOCK_SIZE,
2713 			.maxauthsize = MD5_DIGEST_SIZE,
2714 			.setkey = aead_des3_setkey,
2715 		},
2716 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2717 				     DESC_HDR_SEL0_DEU |
2718 				     DESC_HDR_MODE0_DEU_CBC |
2719 				     DESC_HDR_MODE0_DEU_3DES |
2720 				     DESC_HDR_SEL1_MDEUA |
2721 				     DESC_HDR_MODE1_MDEU_INIT |
2722 				     DESC_HDR_MODE1_MDEU_PAD |
2723 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2724 	},
2725 	/* ABLKCIPHER algorithms. */
2726 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2727 		.alg.crypto = {
2728 			.cra_name = "ecb(aes)",
2729 			.cra_driver_name = "ecb-aes-talitos",
2730 			.cra_blocksize = AES_BLOCK_SIZE,
2731 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2732 				     CRYPTO_ALG_ASYNC,
2733 			.cra_ablkcipher = {
2734 				.min_keysize = AES_MIN_KEY_SIZE,
2735 				.max_keysize = AES_MAX_KEY_SIZE,
2736 				.setkey = ablkcipher_aes_setkey,
2737 			}
2738 		},
2739 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2740 				     DESC_HDR_SEL0_AESU,
2741 	},
2742 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2743 		.alg.crypto = {
2744 			.cra_name = "cbc(aes)",
2745 			.cra_driver_name = "cbc-aes-talitos",
2746 			.cra_blocksize = AES_BLOCK_SIZE,
2747 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2748 				     CRYPTO_ALG_ASYNC,
2749 			.cra_ablkcipher = {
2750 				.min_keysize = AES_MIN_KEY_SIZE,
2751 				.max_keysize = AES_MAX_KEY_SIZE,
2752 				.ivsize = AES_BLOCK_SIZE,
2753 				.setkey = ablkcipher_aes_setkey,
2754 			}
2755 		},
2756 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2757 				     DESC_HDR_SEL0_AESU |
2758 				     DESC_HDR_MODE0_AESU_CBC,
2759 	},
2760 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2761 		.alg.crypto = {
2762 			.cra_name = "ctr(aes)",
2763 			.cra_driver_name = "ctr-aes-talitos",
2764 			.cra_blocksize = 1,
2765 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2766 				     CRYPTO_ALG_ASYNC,
2767 			.cra_ablkcipher = {
2768 				.min_keysize = AES_MIN_KEY_SIZE,
2769 				.max_keysize = AES_MAX_KEY_SIZE,
2770 				.ivsize = AES_BLOCK_SIZE,
2771 				.setkey = ablkcipher_aes_setkey,
2772 			}
2773 		},
2774 		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2775 				     DESC_HDR_SEL0_AESU |
2776 				     DESC_HDR_MODE0_AESU_CTR,
2777 	},
2778 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2779 		.alg.crypto = {
2780 			.cra_name = "ecb(des)",
2781 			.cra_driver_name = "ecb-des-talitos",
2782 			.cra_blocksize = DES_BLOCK_SIZE,
2783 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2784 				     CRYPTO_ALG_ASYNC,
2785 			.cra_ablkcipher = {
2786 				.min_keysize = DES_KEY_SIZE,
2787 				.max_keysize = DES_KEY_SIZE,
2788 				.setkey = ablkcipher_des_setkey,
2789 			}
2790 		},
2791 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2792 				     DESC_HDR_SEL0_DEU,
2793 	},
2794 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2795 		.alg.crypto = {
2796 			.cra_name = "cbc(des)",
2797 			.cra_driver_name = "cbc-des-talitos",
2798 			.cra_blocksize = DES_BLOCK_SIZE,
2799 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2800 				     CRYPTO_ALG_ASYNC,
2801 			.cra_ablkcipher = {
2802 				.min_keysize = DES_KEY_SIZE,
2803 				.max_keysize = DES_KEY_SIZE,
2804 				.ivsize = DES_BLOCK_SIZE,
2805 				.setkey = ablkcipher_des_setkey,
2806 			}
2807 		},
2808 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2809 				     DESC_HDR_SEL0_DEU |
2810 				     DESC_HDR_MODE0_DEU_CBC,
2811 	},
2812 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2813 		.alg.crypto = {
2814 			.cra_name = "ecb(des3_ede)",
2815 			.cra_driver_name = "ecb-3des-talitos",
2816 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2817 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2818 				     CRYPTO_ALG_ASYNC,
2819 			.cra_ablkcipher = {
2820 				.min_keysize = DES3_EDE_KEY_SIZE,
2821 				.max_keysize = DES3_EDE_KEY_SIZE,
2822 				.setkey = ablkcipher_des3_setkey,
2823 			}
2824 		},
2825 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2826 				     DESC_HDR_SEL0_DEU |
2827 				     DESC_HDR_MODE0_DEU_3DES,
2828 	},
2829 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2830 		.alg.crypto = {
2831 			.cra_name = "cbc(des3_ede)",
2832 			.cra_driver_name = "cbc-3des-talitos",
2833 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2834 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2835 				     CRYPTO_ALG_ASYNC,
2836 			.cra_ablkcipher = {
2837 				.min_keysize = DES3_EDE_KEY_SIZE,
2838 				.max_keysize = DES3_EDE_KEY_SIZE,
2839 				.ivsize = DES3_EDE_BLOCK_SIZE,
2840 				.setkey = ablkcipher_des3_setkey,
2841 			}
2842 		},
2843 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2844 			             DESC_HDR_SEL0_DEU |
2845 		                     DESC_HDR_MODE0_DEU_CBC |
2846 		                     DESC_HDR_MODE0_DEU_3DES,
2847 	},
2848 	/* AHASH algorithms. */
2849 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2850 		.alg.hash = {
2851 			.halg.digestsize = MD5_DIGEST_SIZE,
2852 			.halg.statesize = sizeof(struct talitos_export_state),
2853 			.halg.base = {
2854 				.cra_name = "md5",
2855 				.cra_driver_name = "md5-talitos",
2856 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2857 				.cra_flags = CRYPTO_ALG_ASYNC,
2858 			}
2859 		},
2860 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2861 				     DESC_HDR_SEL0_MDEUA |
2862 				     DESC_HDR_MODE0_MDEU_MD5,
2863 	},
2864 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2865 		.alg.hash = {
2866 			.halg.digestsize = SHA1_DIGEST_SIZE,
2867 			.halg.statesize = sizeof(struct talitos_export_state),
2868 			.halg.base = {
2869 				.cra_name = "sha1",
2870 				.cra_driver_name = "sha1-talitos",
2871 				.cra_blocksize = SHA1_BLOCK_SIZE,
2872 				.cra_flags = CRYPTO_ALG_ASYNC,
2873 			}
2874 		},
2875 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2876 				     DESC_HDR_SEL0_MDEUA |
2877 				     DESC_HDR_MODE0_MDEU_SHA1,
2878 	},
2879 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2880 		.alg.hash = {
2881 			.halg.digestsize = SHA224_DIGEST_SIZE,
2882 			.halg.statesize = sizeof(struct talitos_export_state),
2883 			.halg.base = {
2884 				.cra_name = "sha224",
2885 				.cra_driver_name = "sha224-talitos",
2886 				.cra_blocksize = SHA224_BLOCK_SIZE,
2887 				.cra_flags = CRYPTO_ALG_ASYNC,
2888 			}
2889 		},
2890 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2891 				     DESC_HDR_SEL0_MDEUA |
2892 				     DESC_HDR_MODE0_MDEU_SHA224,
2893 	},
2894 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2895 		.alg.hash = {
2896 			.halg.digestsize = SHA256_DIGEST_SIZE,
2897 			.halg.statesize = sizeof(struct talitos_export_state),
2898 			.halg.base = {
2899 				.cra_name = "sha256",
2900 				.cra_driver_name = "sha256-talitos",
2901 				.cra_blocksize = SHA256_BLOCK_SIZE,
2902 				.cra_flags = CRYPTO_ALG_ASYNC,
2903 			}
2904 		},
2905 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2906 				     DESC_HDR_SEL0_MDEUA |
2907 				     DESC_HDR_MODE0_MDEU_SHA256,
2908 	},
2909 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2910 		.alg.hash = {
2911 			.halg.digestsize = SHA384_DIGEST_SIZE,
2912 			.halg.statesize = sizeof(struct talitos_export_state),
2913 			.halg.base = {
2914 				.cra_name = "sha384",
2915 				.cra_driver_name = "sha384-talitos",
2916 				.cra_blocksize = SHA384_BLOCK_SIZE,
2917 				.cra_flags = CRYPTO_ALG_ASYNC,
2918 			}
2919 		},
2920 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2921 				     DESC_HDR_SEL0_MDEUB |
2922 				     DESC_HDR_MODE0_MDEUB_SHA384,
2923 	},
2924 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2925 		.alg.hash = {
2926 			.halg.digestsize = SHA512_DIGEST_SIZE,
2927 			.halg.statesize = sizeof(struct talitos_export_state),
2928 			.halg.base = {
2929 				.cra_name = "sha512",
2930 				.cra_driver_name = "sha512-talitos",
2931 				.cra_blocksize = SHA512_BLOCK_SIZE,
2932 				.cra_flags = CRYPTO_ALG_ASYNC,
2933 			}
2934 		},
2935 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2936 				     DESC_HDR_SEL0_MDEUB |
2937 				     DESC_HDR_MODE0_MDEUB_SHA512,
2938 	},
2939 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2940 		.alg.hash = {
2941 			.halg.digestsize = MD5_DIGEST_SIZE,
2942 			.halg.statesize = sizeof(struct talitos_export_state),
2943 			.halg.base = {
2944 				.cra_name = "hmac(md5)",
2945 				.cra_driver_name = "hmac-md5-talitos",
2946 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2947 				.cra_flags = CRYPTO_ALG_ASYNC,
2948 			}
2949 		},
2950 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2951 				     DESC_HDR_SEL0_MDEUA |
2952 				     DESC_HDR_MODE0_MDEU_MD5,
2953 	},
2954 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2955 		.alg.hash = {
2956 			.halg.digestsize = SHA1_DIGEST_SIZE,
2957 			.halg.statesize = sizeof(struct talitos_export_state),
2958 			.halg.base = {
2959 				.cra_name = "hmac(sha1)",
2960 				.cra_driver_name = "hmac-sha1-talitos",
2961 				.cra_blocksize = SHA1_BLOCK_SIZE,
2962 				.cra_flags = CRYPTO_ALG_ASYNC,
2963 			}
2964 		},
2965 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2966 				     DESC_HDR_SEL0_MDEUA |
2967 				     DESC_HDR_MODE0_MDEU_SHA1,
2968 	},
2969 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2970 		.alg.hash = {
2971 			.halg.digestsize = SHA224_DIGEST_SIZE,
2972 			.halg.statesize = sizeof(struct talitos_export_state),
2973 			.halg.base = {
2974 				.cra_name = "hmac(sha224)",
2975 				.cra_driver_name = "hmac-sha224-talitos",
2976 				.cra_blocksize = SHA224_BLOCK_SIZE,
2977 				.cra_flags = CRYPTO_ALG_ASYNC,
2978 			}
2979 		},
2980 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2981 				     DESC_HDR_SEL0_MDEUA |
2982 				     DESC_HDR_MODE0_MDEU_SHA224,
2983 	},
2984 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2985 		.alg.hash = {
2986 			.halg.digestsize = SHA256_DIGEST_SIZE,
2987 			.halg.statesize = sizeof(struct talitos_export_state),
2988 			.halg.base = {
2989 				.cra_name = "hmac(sha256)",
2990 				.cra_driver_name = "hmac-sha256-talitos",
2991 				.cra_blocksize = SHA256_BLOCK_SIZE,
2992 				.cra_flags = CRYPTO_ALG_ASYNC,
2993 			}
2994 		},
2995 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2996 				     DESC_HDR_SEL0_MDEUA |
2997 				     DESC_HDR_MODE0_MDEU_SHA256,
2998 	},
2999 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3000 		.alg.hash = {
3001 			.halg.digestsize = SHA384_DIGEST_SIZE,
3002 			.halg.statesize = sizeof(struct talitos_export_state),
3003 			.halg.base = {
3004 				.cra_name = "hmac(sha384)",
3005 				.cra_driver_name = "hmac-sha384-talitos",
3006 				.cra_blocksize = SHA384_BLOCK_SIZE,
3007 				.cra_flags = CRYPTO_ALG_ASYNC,
3008 			}
3009 		},
3010 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3011 				     DESC_HDR_SEL0_MDEUB |
3012 				     DESC_HDR_MODE0_MDEUB_SHA384,
3013 	},
3014 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3015 		.alg.hash = {
3016 			.halg.digestsize = SHA512_DIGEST_SIZE,
3017 			.halg.statesize = sizeof(struct talitos_export_state),
3018 			.halg.base = {
3019 				.cra_name = "hmac(sha512)",
3020 				.cra_driver_name = "hmac-sha512-talitos",
3021 				.cra_blocksize = SHA512_BLOCK_SIZE,
3022 				.cra_flags = CRYPTO_ALG_ASYNC,
3023 			}
3024 		},
3025 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3026 				     DESC_HDR_SEL0_MDEUB |
3027 				     DESC_HDR_MODE0_MDEUB_SHA512,
3028 	}
3029 };
3030 
3031 struct talitos_crypto_alg {
3032 	struct list_head entry;
3033 	struct device *dev;
3034 	struct talitos_alg_template algt;
3035 };
3036 
3037 static int talitos_init_common(struct talitos_ctx *ctx,
3038 			       struct talitos_crypto_alg *talitos_alg)
3039 {
3040 	struct talitos_private *priv;
3041 
3042 	/* update context with ptr to dev */
3043 	ctx->dev = talitos_alg->dev;
3044 
3045 	/* assign SEC channel to tfm in round-robin fashion */
3046 	priv = dev_get_drvdata(ctx->dev);
3047 	ctx->ch = atomic_inc_return(&priv->last_chan) &
3048 		  (priv->num_channels - 1);
3049 
3050 	/* copy descriptor header template value */
3051 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3052 
3053 	/* select done notification */
3054 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3055 
3056 	return 0;
3057 }
3058 
3059 static int talitos_cra_init(struct crypto_tfm *tfm)
3060 {
3061 	struct crypto_alg *alg = tfm->__crt_alg;
3062 	struct talitos_crypto_alg *talitos_alg;
3063 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3064 
3065 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3066 		talitos_alg = container_of(__crypto_ahash_alg(alg),
3067 					   struct talitos_crypto_alg,
3068 					   algt.alg.hash);
3069 	else
3070 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
3071 					   algt.alg.crypto);
3072 
3073 	return talitos_init_common(ctx, talitos_alg);
3074 }
3075 
3076 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3077 {
3078 	struct aead_alg *alg = crypto_aead_alg(tfm);
3079 	struct talitos_crypto_alg *talitos_alg;
3080 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3081 
3082 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3083 				   algt.alg.aead);
3084 
3085 	return talitos_init_common(ctx, talitos_alg);
3086 }
3087 
3088 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3089 {
3090 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3091 
3092 	talitos_cra_init(tfm);
3093 
3094 	ctx->keylen = 0;
3095 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3096 				 sizeof(struct talitos_ahash_req_ctx));
3097 
3098 	return 0;
3099 }
3100 
3101 static void talitos_cra_exit(struct crypto_tfm *tfm)
3102 {
3103 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3104 	struct device *dev = ctx->dev;
3105 
3106 	if (ctx->keylen)
3107 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3108 }
3109 
3110 /*
3111  * given the alg's descriptor header template, determine whether descriptor
3112  * type and primary/secondary execution units required match the hw
3113  * capabilities description provided in the device tree node.
3114  */
3115 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3116 {
3117 	struct talitos_private *priv = dev_get_drvdata(dev);
3118 	int ret;
3119 
3120 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3121 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3122 
3123 	if (SECONDARY_EU(desc_hdr_template))
3124 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3125 		              & priv->exec_units);
3126 
3127 	return ret;
3128 }
3129 
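/*
 * Unregister all algorithms and the RNG, release the interrupt lines,
 * then kill the done tasklets.
 */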
3130 static int talitos_remove(struct platform_device *ofdev)
3131 {
3132 	struct device *dev = &ofdev->dev;
3133 	struct talitos_private *priv = dev_get_drvdata(dev);
3134 	struct talitos_crypto_alg *t_alg, *n;
3135 	int i;
3136 
3137 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3138 		switch (t_alg->algt.type) {
3139 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
3140 			break;
3141 		case CRYPTO_ALG_TYPE_AEAD:
3142 			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
3143 		case CRYPTO_ALG_TYPE_AHASH:
3144 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3145 			break;
3146 		}
3147 		list_del(&t_alg->entry);
3148 	}
3149 
3150 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3151 		talitos_unregister_rng(dev);
3152 
3153 	for (i = 0; i < 2; i++)
3154 		if (priv->irq[i]) {
3155 			free_irq(priv->irq[i], dev);
3156 			irq_dispose_mapping(priv->irq[i]);
3157 		}
3158 
3159 	tasklet_kill(&priv->done_task[0]);
3160 	if (priv->irq[1])
3161 		tasklet_kill(&priv->done_task[1]);
3162 
3163 	return 0;
3164 }
3165 
3166 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3167 						    struct talitos_alg_template
3168 						           *template)
3169 {
3170 	struct talitos_private *priv = dev_get_drvdata(dev);
3171 	struct talitos_crypto_alg *t_alg;
3172 	struct crypto_alg *alg;
3173 
3174 	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3175 			     GFP_KERNEL);
3176 	if (!t_alg)
3177 		return ERR_PTR(-ENOMEM);
3178 
3179 	t_alg->algt = *template;
3180 
3181 	switch (t_alg->algt.type) {
3182 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
3183 		alg = &t_alg->algt.alg.crypto;
3184 		alg->cra_init = talitos_cra_init;
3185 		alg->cra_exit = talitos_cra_exit;
3186 		alg->cra_type = &crypto_ablkcipher_type;
3187 		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
3188 					     ablkcipher_setkey;
3189 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3190 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3191 		break;
3192 	case CRYPTO_ALG_TYPE_AEAD:
3193 		alg = &t_alg->algt.alg.aead.base;
3194 		alg->cra_exit = talitos_cra_exit;
3195 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3196 		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3197 					      aead_setkey;
3198 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3199 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3200 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3201 		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3202 			devm_kfree(dev, t_alg);
3203 			return ERR_PTR(-ENOTSUPP);
3204 		}
3205 		break;
3206 	case CRYPTO_ALG_TYPE_AHASH:
3207 		alg = &t_alg->algt.alg.hash.halg.base;
3208 		alg->cra_init = talitos_cra_init_ahash;
3209 		alg->cra_exit = talitos_cra_exit;
3210 		t_alg->algt.alg.hash.init = ahash_init;
3211 		t_alg->algt.alg.hash.update = ahash_update;
3212 		t_alg->algt.alg.hash.final = ahash_final;
3213 		t_alg->algt.alg.hash.finup = ahash_finup;
3214 		t_alg->algt.alg.hash.digest = ahash_digest;
3215 		if (!strncmp(alg->cra_name, "hmac", 4))
3216 			t_alg->algt.alg.hash.setkey = ahash_setkey;
3217 		t_alg->algt.alg.hash.import = ahash_import;
3218 		t_alg->algt.alg.hash.export = ahash_export;
3219 
3220 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3221 		    !strncmp(alg->cra_name, "hmac", 4)) {
3222 			devm_kfree(dev, t_alg);
3223 			return ERR_PTR(-ENOTSUPP);
3224 		}
3225 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3226 		    (!strcmp(alg->cra_name, "sha224") ||
3227 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3228 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3229 			t_alg->algt.desc_hdr_template =
3230 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3231 					DESC_HDR_SEL0_MDEUA |
3232 					DESC_HDR_MODE0_MDEU_SHA256;
3233 		}
3234 		break;
3235 	default:
3236 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3237 		devm_kfree(dev, t_alg);
3238 		return ERR_PTR(-EINVAL);
3239 	}
3240 
3241 	alg->cra_module = THIS_MODULE;
3242 	if (t_alg->algt.priority)
3243 		alg->cra_priority = t_alg->algt.priority;
3244 	else
3245 		alg->cra_priority = TALITOS_CRA_PRIORITY;
3246 	if (has_ftr_sec1(priv))
3247 		alg->cra_alignmask = 3;
3248 	else
3249 		alg->cra_alignmask = 0;
3250 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3251 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3252 
3253 	t_alg->dev = dev;
3254 
3255 	return t_alg;
3256 }
3257 
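/*
 * SEC1 exposes a single interrupt for all channels.  SEC2+ may provide
 * either one combined line or two lines split between channels 0/2 and
 * 1/3; if the second line cannot be mapped, the combined handler is
 * installed on the first one.
 */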
3258 static int talitos_probe_irq(struct platform_device *ofdev)
3259 {
3260 	struct device *dev = &ofdev->dev;
3261 	struct device_node *np = ofdev->dev.of_node;
3262 	struct talitos_private *priv = dev_get_drvdata(dev);
3263 	int err;
3264 	bool is_sec1 = has_ftr_sec1(priv);
3265 
3266 	priv->irq[0] = irq_of_parse_and_map(np, 0);
3267 	if (!priv->irq[0]) {
3268 		dev_err(dev, "failed to map irq\n");
3269 		return -EINVAL;
3270 	}
3271 	if (is_sec1) {
3272 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3273 				  dev_driver_string(dev), dev);
3274 		goto primary_out;
3275 	}
3276 
3277 	priv->irq[1] = irq_of_parse_and_map(np, 1);
3278 
3279 	/* get the primary irq line */
3280 	if (!priv->irq[1]) {
3281 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3282 				  dev_driver_string(dev), dev);
3283 		goto primary_out;
3284 	}
3285 
3286 	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3287 			  dev_driver_string(dev), dev);
3288 	if (err)
3289 		goto primary_out;
3290 
3291 	/* get the secondary irq line */
3292 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3293 			  dev_driver_string(dev), dev);
3294 	if (err) {
3295 		dev_err(dev, "failed to request secondary irq\n");
3296 		irq_dispose_mapping(priv->irq[1]);
3297 		priv->irq[1] = 0;
3298 	}
3299 
3300 	return err;
3301 
3302 primary_out:
3303 	if (err) {
3304 		dev_err(dev, "failed to request primary irq\n");
3305 		irq_dispose_mapping(priv->irq[0]);
3306 		priv->irq[0] = 0;
3307 	}
3308 
3309 	return err;
3310 }
3311 
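/*
 * Probe order: map the register block, read SEC capabilities from the
 * device tree, derive feature flags and per-variant register offsets,
 * then set up interrupts, done tasklets and per-channel state.
 */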
3312 static int talitos_probe(struct platform_device *ofdev)
3313 {
3314 	struct device *dev = &ofdev->dev;
3315 	struct device_node *np = ofdev->dev.of_node;
3316 	struct talitos_private *priv;
3317 	int i, err;
3318 	int stride;
3319 	struct resource *res;
3320 
3321 	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3322 	if (!priv)
3323 		return -ENOMEM;
3324 
3325 	INIT_LIST_HEAD(&priv->alg_list);
3326 
3327 	dev_set_drvdata(dev, priv);
3328 
3329 	priv->ofdev = ofdev;
3330 
3331 	spin_lock_init(&priv->reg_lock);
3332 
3333 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3334 	if (!res)
3335 		return -ENXIO;
3336 	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3337 	if (!priv->reg) {
3338 		dev_err(dev, "failed to ioremap registers\n");
3339 		err = -ENOMEM;
3340 		goto err_out;
3341 	}

	/* get SEC version capabilities from device tree */
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",
			     &priv->desc_types);

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

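	/*
	 * The execution-unit register blocks sit at different offsets on
	 * SEC1.x and SEC2+, and the two families use different channel
	 * strides, so derive the layout from the compatible string.
	 */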
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

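	/*
	 * Match the done-processing tasklet(s) to the IRQ wiring set up
	 * above: two tasklets (ch0/2 and ch1/3) when two IRQ lines are
	 * in use, otherwise a single tasklet covering one or all
	 * channels.
	 */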
	if (has_ftr_sec1(priv)) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}

	priv->chan = devm_kcalloc(dev,
				  priv->num_channels,
				  sizeof(struct talitos_channel),
				  GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

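	/*
	 * Round the per-channel request fifo up to a power of two so the
	 * head/tail indices can wrap with a simple (fifo_len - 1) mask.
	 */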
	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev,
						priv->fifo_len,
						sizeof(struct talitos_request),
						GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

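		/*
		 * Bias the submit counter negative: the submit path
		 * increments it per request (and completion decrements
		 * it), refusing new work once it reaches zero, which
		 * caps outstanding requests at chfifo_len - 1.
		 */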
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

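	/*
	 * The SEC generates 36-bit DMA addresses, so widen the default
	 * 32-bit DMA mask accordingly.
	 */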
	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err) {
		dev_err(dev, "failed to set dma mask\n");
		goto err_out;
	}

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

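/*
 * Illustrative device tree node for this driver (property names match
 * those read in talitos_probe(); the values below are only examples):
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <29 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */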
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");