/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

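/*
 * struct talitos_ptr is the h/w pointer format shared by both device
 * generations: SEC1 stores a 16-bit length in len1 next to the 32-bit
 * address, while SEC2+ uses len plus an eptr byte carrying the upper
 * address bits for 36-bit addressing and a j_extent byte for link
 * table tags.  The helpers below hide the difference behind is_sec1.
 */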
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
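
/*
 * Minimal submission sketch (illustrative only; my_done_cb and req are
 * placeholder names, not driver symbols):
 *
 *	err = talitos_submit(dev, ctx->ch, desc, my_done_cb, req);
 *	if (err != -EINPROGRESS)
 *		... unmap the descriptor and propagate err ...
 *
 * -EINPROGRESS means the descriptor was queued; -EAGAIN means the
 * channel fifo was full and the caller should retry or back off.
 * Submission takes head_lock while completion takes tail_lock, so a
 * channel can be fed and drained concurrently.
 */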

/*
 * process completed requests, notifying the callback of any error on
 * descriptors that did not complete
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if (!is_sec1)
			hdr = request->desc->hdr;
		else if (request->desc->next_desc)
			hdr = (request->desc + 1)->hdr1;
		else
			hdr = request->desc->hdr1;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
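/*
 * Channel done bits differ per generation: SEC1 scatters them across
 * the ISR (ch0 bit 28, ch1 bit 30, ch2 bit 16, ch3 bit 18, matching
 * the masks below), while SEC2+ puts channel n's done bit at 2n and
 * its error bit at 2n + 1 (see talitos_error()).
 */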
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);			\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);			\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);			\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);			\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
		return (priv->chan[ch].fifo[iter].desc + 1)->hdr;

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

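/*
 * Note the IMR polarity difference between generations: on SEC1 a set
 * IMR bit masks a source (the done tasklets unmask with clrbits32() and
 * the ISRs mask with setbits32()), while on SEC2+ a set bit enables it,
 * so the same mask/unmask dance uses the opposite accessors.
 */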
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
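/*
 * hw_random core callbacks: data_present polls the output fifo level
 * for at most 20 * 10us before giving up, and data_read drains one
 * fifo entry; both words are read to satisfy the 64-bit access
 * requirement, but only the low word is handed back to the caller.
 */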
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev);
	priv->rng.init		= talitos_rng_init;
	priv->rng.data_present	= talitos_rng_data_present;
	priv->rng.data_read	= talitos_rng_data_read;
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

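	/* ctx->key layout: authentication key first, then encryption key,
	 * dma-mapped below as one contiguous blob
	 */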
	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
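
/*
 * Memory behind a talitos_edesc, as laid out by talitos_edesc_alloc():
 * the fixed fields and h/w descriptor are followed either by the SEC2+
 * link tables plus two ICVs, or by the SEC1 bounce buffer:
 *
 *	SEC2+: [link_tbl: src_nents + dst_nents + 2 ptrs][stashed ICV][generated ICV]
 *	SEC1:  [buf: bounce copy of src data, then dst data]
 */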

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + areq->cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		if (is_sec1)
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			if (is_sec1)
				/* the ICV sits at the end of the SEC1
				 * bounce buffer, matching the placement
				 * used by ipsec_esp_encrypt_done()
				 */
				oicv = (char *)edesc->buf +
				       req->assoclen + req->cryptlen;
			else
				oicv = (char *)
				       &edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
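/*
 * Worked example: a 3-segment scatterlist covering cryptlen bytes yields
 * three link table entries, each carrying one segment's dma address and
 * length; the last entry is tagged DESC_PTR_LNKTBL_RETURN to mark the
 * end of the table.  The parent descriptor pointer then references the
 * table with DESC_PTR_LNKTBL_JUMP set (see talitos_sg_map_ext() below).
 */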
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RETURN, 0);

	return count;
}

static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
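/*
 * Pointer slot usage for the AEAD descriptor built below (slots 2 and 3
 * swap between the IPSEC_ESP and HSNA descriptor types):
 *	ptr[0] hmac key		ptr[1] hmac (assoc) data
 *	ptr[2]/ptr[3] cipher iv and cipher key
 *	ptr[4] cipher in	ptr[5] cipher out (+ ICV extent)
 *	ptr[6] ICV data out (HSNA) or iv out (IPSEC_ESP)
 */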
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC appended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
			     sg_count, areq->assoclen, tbl_off);

	if (is_ipsec_esp)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	/* ICV data */
	if (ret > 1) {
		tbl_off += ret;
		edesc->icv_ool = true;
		sync_needed = true;

		if (is_ipsec_esp) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       authsize, is_sec1);
		} else {
			dma_addr_t addr = edesc->dma_link_tbl;

			if (is_sec1)
				addr += areq->assoclen + cryptlen;
			else
				addr += sizeof(struct talitos_ptr) * tbl_off;

			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
		}
	} else if (!is_ipsec_esp) {
		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
				     &desc->ptr[6], sg_count, areq->assoclen +
							      cryptlen,
				     tbl_off);
		if (ret > 1) {
			tbl_off += ret;
			edesc->icv_ool = true;
			sync_needed = true;
		} else {
			edesc->icv_ool = false;
		}
	} else {
		edesc->icv_ool = false;
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * allocate and map the extended descriptor
 */
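/*
 * Sizing sketch for the SEC2+ branch below: with src_nents = 3,
 * dst_nents = 2 and authsize = 16, dma_len = (3 + 2 + 2) *
 * sizeof(struct talitos_ptr) + 2 * 16, i.e. room for both link tables,
 * the two spare entries for AD and the generated ICV, plus space for
 * the stashed and generated ICVs themselves.  On SEC1, dma_len is
 * simply the bounce buffer size (src_len + dst_len).
 */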
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
	void *err;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		err = ERR_PTR(-ENOMEM);
		goto error_sg;
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len) {
		void *addr = &edesc->link_tbl[0];

		if (is_sec1 && !dst)
			addr += sizeof(struct talitos_desc);
		edesc->dma_link_tbl = dma_map_single(dev, addr,
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);
	}
	return edesc;
error_sg:
	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	return err;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

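	/*
	 * Prefer the h/w ICV check when the engine has it and the request
	 * either needs no link tables or the h/w can account for the ICV
	 * extent in the link table length; otherwise stash the received
	 * ICV here and compare it in ipsec_esp_decrypt_swauth_done().
	 */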
	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen > TALITOS_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_WEAK_KEY) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

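/*
 * Pointer slot usage for the common, non-snooping ablkcipher descriptor
 * built below: ptr[0] empty, ptr[1] iv in, ptr[2] cipher key,
 * ptr[3] cipher in, ptr[4] cipher out, ptr[5] iv out, ptr[6] empty.
 */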
1593 static int common_nonsnoop(struct talitos_edesc *edesc,
1594 			   struct ablkcipher_request *areq,
1595 			   void (*callback) (struct device *dev,
1596 					     struct talitos_desc *desc,
1597 					     void *context, int error))
1598 {
1599 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1600 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1601 	struct device *dev = ctx->dev;
1602 	struct talitos_desc *desc = &edesc->desc;
1603 	unsigned int cryptlen = areq->nbytes;
1604 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1605 	int sg_count, ret;
1606 	bool sync_needed = false;
1607 	struct talitos_private *priv = dev_get_drvdata(dev);
1608 	bool is_sec1 = has_ftr_sec1(priv);
1609 
1610 	/* first DWORD empty */
1611 
1612 	/* cipher iv */
1613 	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1614 
1615 	/* cipher key */
1616 	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1617 
1618 	sg_count = edesc->src_nents ?: 1;
1619 	if (is_sec1 && sg_count > 1)
1620 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1621 				  cryptlen);
1622 	else
1623 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1624 				      (areq->src == areq->dst) ?
1625 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1626 	/*
1627 	 * cipher in
1628 	 */
1629 	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1630 				  &desc->ptr[3], sg_count, 0, 0);
1631 	if (sg_count > 1)
1632 		sync_needed = true;
1633 
1634 	/* cipher out */
1635 	if (areq->src != areq->dst) {
1636 		sg_count = edesc->dst_nents ? : 1;
1637 		if (!is_sec1 || sg_count == 1)
1638 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1639 	}
1640 
1641 	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1642 			     sg_count, 0, (edesc->src_nents + 1));
1643 	if (ret > 1)
1644 		sync_needed = true;
1645 
1646 	/* iv out */
1647 	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1648 			       DMA_FROM_DEVICE);
1649 
1650 	/* last DWORD empty */
1651 
1652 	if (sync_needed)
1653 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1654 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1655 
1656 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1657 	if (ret != -EINPROGRESS) {
1658 		common_nonsnoop_unmap(dev, edesc, areq);
1659 		kfree(edesc);
1660 	}
1661 	return ret;
1662 }
1663 
1664 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1665 						    areq, bool encrypt)
1666 {
1667 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1668 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1669 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1670 
1671 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1672 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1673 				   areq->base.flags, encrypt);
1674 }
1675 
1676 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1677 {
1678 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1679 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1680 	struct talitos_edesc *edesc;
1681 
1682 	/* allocate extended descriptor */
1683 	edesc = ablkcipher_edesc_alloc(areq, true);
1684 	if (IS_ERR(edesc))
1685 		return PTR_ERR(edesc);
1686 
1687 	/* set encrypt */
1688 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1689 
1690 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1691 }
1692 
1693 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1694 {
1695 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1696 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1697 	struct talitos_edesc *edesc;
1698 
1699 	/* allocate extended descriptor */
1700 	edesc = ablkcipher_edesc_alloc(areq, false);
1701 	if (IS_ERR(edesc))
1702 		return PTR_ERR(edesc);
1703 
1704 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1705 
1706 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1707 }
1708 
1709 static void common_nonsnoop_hash_unmap(struct device *dev,
1710 				       struct talitos_edesc *edesc,
1711 				       struct ahash_request *areq)
1712 {
1713 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1714 	struct talitos_private *priv = dev_get_drvdata(dev);
1715 	bool is_sec1 = has_ftr_sec1(priv);
1716 	struct talitos_desc *desc = &edesc->desc;
1717 	struct talitos_desc *desc2 = desc + 1;
1718 
1719 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1720 	if (desc->next_desc &&
1721 	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
1722 		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1723 
1724 	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1725 
1726 	/* When using hashctx-in, must unmap it. */
1727 	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1728 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1729 					 DMA_TO_DEVICE);
1730 	else if (desc->next_desc)
1731 		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1732 					 DMA_TO_DEVICE);
1733 
1734 	if (is_sec1 && req_ctx->nbuf)
1735 		unmap_single_talitos_ptr(dev, &desc->ptr[3],
1736 					 DMA_TO_DEVICE);
1737 
1738 	if (edesc->dma_len)
1739 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1740 				 DMA_BIDIRECTIONAL);
1741 
1742 	if (edesc->desc.next_desc)
1743 		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1744 				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1745 }
1746 
1747 static void ahash_done(struct device *dev,
1748 		       struct talitos_desc *desc, void *context,
1749 		       int err)
1750 {
1751 	struct ahash_request *areq = context;
1752 	struct talitos_edesc *edesc =
1753 		 container_of(desc, struct talitos_edesc, desc);
1754 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1755 
1756 	if (!req_ctx->last && req_ctx->to_hash_later) {
1757 		/* Position any partial block for next update/final/finup */
1758 		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1759 		req_ctx->nbuf = req_ctx->to_hash_later;
1760 	}
1761 	common_nonsnoop_hash_unmap(dev, edesc, areq);
1762 
1763 	kfree(edesc);
1764 
1765 	areq->base.complete(&areq->base, err);
1766 }
1767 
1768 /*
1769  * SEC1 doesn't like hashing a zero-sized message, so we do the
1770  * padding ourselves and submit a pre-padded block
1771  */
1772 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1773 			       struct talitos_edesc *edesc,
1774 			       struct talitos_ptr *ptr)
1775 {
1776 	static u8 padded_hash[64] = {
1777 		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1778 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1779 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1780 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1781 	};
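	/*
	 * A pre-padded empty-message block in the MD5/SHA format: a single
	 * 0x80 end-of-message marker followed by zeros, with the trailing
	 * length field left at zero, so hashing it with h/w padding
	 * disabled yields the digest of a zero-length input.
	 */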
1782 
1783 	pr_err_once("Bug in SEC1, padding ourselves\n");
1784 	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1785 	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1786 			       (char *)padded_hash, DMA_TO_DEVICE);
1787 }
1788 
1789 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1790 				struct ahash_request *areq, unsigned int length,
1791 				unsigned int offset,
1792 				void (*callback) (struct device *dev,
1793 						  struct talitos_desc *desc,
1794 						  void *context, int error))
1795 {
1796 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1797 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1798 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1799 	struct device *dev = ctx->dev;
1800 	struct talitos_desc *desc = &edesc->desc;
1801 	int ret;
1802 	bool sync_needed = false;
1803 	struct talitos_private *priv = dev_get_drvdata(dev);
1804 	bool is_sec1 = has_ftr_sec1(priv);
1805 	int sg_count;
1806 
1807 	/* first DWORD empty */
1808 
1809 	/* hash context in */
1810 	if (!req_ctx->first || req_ctx->swinit) {
1811 		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1812 					      req_ctx->hw_context_size,
1813 					      req_ctx->hw_context,
1814 					      DMA_TO_DEVICE);
1815 		req_ctx->swinit = 0;
1816 	}
1817 	/* Indicate next op is not the first. */
1818 	req_ctx->first = 0;
1819 
1820 	/* HMAC key */
1821 	if (ctx->keylen)
1822 		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1823 			       is_sec1);
1824 
1825 	if (is_sec1 && req_ctx->nbuf)
1826 		length -= req_ctx->nbuf;
1827 
1828 	sg_count = edesc->src_nents ?: 1;
1829 	if (is_sec1 && sg_count > 1)
1830 		sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1831 				   edesc->buf + sizeof(struct talitos_desc),
1832 				   length, req_ctx->nbuf);
1833 	else if (length)
1834 		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1835 				      DMA_TO_DEVICE);
1836 	/* data in */
1839 	if (is_sec1 && req_ctx->nbuf) {
1840 		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1841 				       req_ctx->buf[req_ctx->buf_idx],
1842 				       DMA_TO_DEVICE);
1843 	} else {
1844 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1845 					  &desc->ptr[3], sg_count, offset, 0);
1846 		if (sg_count > 1)
1847 			sync_needed = true;
1848 	}
1849 
1850 	/* fifth DWORD empty */
1851 
1852 	/* hash/HMAC out -or- hash context out */
1853 	if (req_ctx->last)
1854 		map_single_talitos_ptr(dev, &desc->ptr[5],
1855 				       crypto_ahash_digestsize(tfm),
1856 				       areq->result, DMA_FROM_DEVICE);
1857 	else
1858 		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1859 					      req_ctx->hw_context_size,
1860 					      req_ctx->hw_context,
1861 					      DMA_FROM_DEVICE);
1862 
1863 	/* last DWORD empty */
1864 
1865 	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1866 		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1867 
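	/*
	 * SEC1 cannot scatter/gather, so when both buffered bytes and new
	 * data must be hashed, chain a second descriptor: the first one
	 * hashes the buffered bytes without padding or completion notify,
	 * and the chained one continues over the new data without
	 * re-initializing the hash context.
	 */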
1868 	if (is_sec1 && req_ctx->nbuf && length) {
1869 		struct talitos_desc *desc2 = desc + 1;
1870 		dma_addr_t next_desc;
1871 
1872 		memset(desc2, 0, sizeof(*desc2));
1873 		desc2->hdr = desc->hdr;
1874 		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1875 		desc2->hdr1 = desc2->hdr;
1876 		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1877 		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1878 		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1879 
1880 		if (desc->ptr[1].ptr)
1881 			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1882 					 is_sec1);
1883 		else
1884 			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1885 						      req_ctx->hw_context_size,
1886 						      req_ctx->hw_context,
1887 						      DMA_TO_DEVICE);
1888 		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1889 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1890 					  &desc2->ptr[3], sg_count, offset, 0);
1891 		if (sg_count > 1)
1892 			sync_needed = true;
1893 		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1894 		if (req_ctx->last)
1895 			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1896 						      req_ctx->hw_context_size,
1897 						      req_ctx->hw_context,
1898 						      DMA_FROM_DEVICE);
1899 
1900 		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1901 					   DMA_BIDIRECTIONAL);
1902 		desc->next_desc = cpu_to_be32(next_desc);
1903 	}
1904 
1905 	if (sync_needed)
1906 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1907 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1908 
1909 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1910 	if (ret != -EINPROGRESS) {
1911 		common_nonsnoop_hash_unmap(dev, edesc, areq);
1912 		kfree(edesc);
1913 	}
1914 	return ret;
1915 }
1916 
1917 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1918 					       unsigned int nbytes)
1919 {
1920 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1921 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1922 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1923 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1924 	bool is_sec1 = has_ftr_sec1(priv);
1925 
1926 	if (is_sec1)
1927 		nbytes -= req_ctx->nbuf;
1928 
1929 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1930 				   nbytes, 0, 0, 0, areq->base.flags, false);
1931 }
1932 
1933 static int ahash_init(struct ahash_request *areq)
1934 {
1935 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1936 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1937 	struct device *dev = ctx->dev;
1938 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1939 	unsigned int size;
1940 	dma_addr_t dma;
1941 
1942 	/* Initialize the context */
1943 	req_ctx->buf_idx = 0;
1944 	req_ctx->nbuf = 0;
1945 	req_ctx->first = 1; /* first indicates h/w must init its context */
1946 	req_ctx->swinit = 0; /* assume h/w init of context */
1947 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1948 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1949 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1950 	req_ctx->hw_context_size = size;
1951 
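	/*
	 * This map/unmap pair only flushes the CPU-initialized context out
	 * of the cache; later descriptors reference hw_context through
	 * map_single_talitos_ptr_nosync(), which skips that sync.
	 */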
1952 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1953 			     DMA_TO_DEVICE);
1954 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1955 
1956 	return 0;
1957 }
1958 
1959 /*
1960  * On h/w without explicit sha224 support, we initialize the h/w context
1961  * manually with the sha224 initial hash values and tell it to run sha256.
1962  */
1963 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1964 {
1965 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1966 
1967 	req_ctx->hw_context[0] = SHA224_H0;
1968 	req_ctx->hw_context[1] = SHA224_H1;
1969 	req_ctx->hw_context[2] = SHA224_H2;
1970 	req_ctx->hw_context[3] = SHA224_H3;
1971 	req_ctx->hw_context[4] = SHA224_H4;
1972 	req_ctx->hw_context[5] = SHA224_H5;
1973 	req_ctx->hw_context[6] = SHA224_H6;
1974 	req_ctx->hw_context[7] = SHA224_H7;
1975 
1976 	/* init 64-bit count */
1977 	req_ctx->hw_context[8] = 0;
1978 	req_ctx->hw_context[9] = 0;
1979 
1980 	ahash_init(areq);
1981 	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1982 
1983 	return 0;
1984 }
1985 
1986 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1987 {
1988 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1989 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1990 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1991 	struct talitos_edesc *edesc;
1992 	unsigned int blocksize =
1993 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1994 	unsigned int nbytes_to_hash;
1995 	unsigned int to_hash_later;
1996 	unsigned int nsg;
1997 	int nents;
1998 	struct device *dev = ctx->dev;
1999 	struct talitos_private *priv = dev_get_drvdata(dev);
2000 	bool is_sec1 = has_ftr_sec1(priv);
2001 	int offset = 0;
2002 	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
2003 
2004 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2005 		/* Buffer up to one whole block */
2006 		nents = sg_nents_for_len(areq->src, nbytes);
2007 		if (nents < 0) {
2008 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2009 			return nents;
2010 		}
2011 		sg_copy_to_buffer(areq->src, nents,
2012 				  ctx_buf + req_ctx->nbuf, nbytes);
2013 		req_ctx->nbuf += nbytes;
2014 		return 0;
2015 	}
2016 
2017 	/*
	 * Unless this is the final request, at least (blocksize + 1)
	 * bytes are now available to hash.
	 */
2018 	nbytes_to_hash = nbytes + req_ctx->nbuf;
2019 	to_hash_later = nbytes_to_hash & (blocksize - 1);
2020 
2021 	if (req_ctx->last)
2022 		to_hash_later = 0;
2023 	else if (to_hash_later)
2024 		/* There is a partial block. Hash the full block(s) now */
2025 		nbytes_to_hash -= to_hash_later;
2026 	else {
2027 		/* Keep one block buffered */
2028 		nbytes_to_hash -= blocksize;
2029 		to_hash_later = blocksize;
2030 	}
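	/*
	 * Example: with a 64-byte blocksize, 10 bytes already buffered and
	 * a 100-byte update, nbytes_to_hash = 110 and to_hash_later =
	 * 110 % 64 = 46, so 64 bytes are hashed now and 46 are carried
	 * over for the next update/final/finup.
	 */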
2031 
2032 	/*
	 * Chain in any previously buffered data.  SEC2+ prepends the
	 * buffer as an extra scatterlist entry; SEC1 has no
	 * scatter/gather, so instead the buffer is topped up to a full
	 * block and the rest of areq->src is hashed starting at 'offset'.
	 */
2033 	if (!is_sec1 && req_ctx->nbuf) {
2034 		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2035 		sg_init_table(req_ctx->bufsl, nsg);
2036 		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2037 		if (nsg > 1)
2038 			sg_chain(req_ctx->bufsl, 2, areq->src);
2039 		req_ctx->psrc = req_ctx->bufsl;
2040 	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2041 		if (nbytes_to_hash > blocksize)
2042 			offset = blocksize - req_ctx->nbuf;
2043 		else
2044 			offset = nbytes_to_hash - req_ctx->nbuf;
2045 		nents = sg_nents_for_len(areq->src, offset);
2046 		if (nents < 0) {
2047 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2048 			return nents;
2049 		}
2050 		sg_copy_to_buffer(areq->src, nents,
2051 				  ctx_buf + req_ctx->nbuf, offset);
2052 		req_ctx->nbuf += offset;
2053 		req_ctx->psrc = areq->src;
2054 	} else {
2055 		req_ctx->psrc = areq->src;
	}
2056 
2057 	if (to_hash_later) {
2058 		nents = sg_nents_for_len(areq->src, nbytes);
2059 		if (nents < 0) {
2060 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2061 			return nents;
2062 		}
2063 		sg_pcopy_to_buffer(areq->src, nents,
2064 				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2065 				   to_hash_later,
2066 				   nbytes - to_hash_later);
2067 	}
2068 	req_ctx->to_hash_later = to_hash_later;
2069 
2070 	/* Allocate extended descriptor */
2071 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2072 	if (IS_ERR(edesc))
2073 		return PTR_ERR(edesc);
2074 
2075 	edesc->desc.hdr = ctx->desc_hdr_template;
2076 
2077 	/* On last one, request SEC to pad; otherwise continue */
2078 	if (req_ctx->last)
2079 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2080 	else
2081 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2082 
2083 	/* request SEC to INIT hash. */
2084 	if (req_ctx->first && !req_ctx->swinit)
2085 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2086 
2087 	/* When the tfm context has a keylen, it's an HMAC.
2088 	 * A first or last (i.e. not middle) descriptor must request HMAC.
2089 	 */
2090 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2091 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2092 
2093 	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
2094 				    ahash_done);
2095 }
2096 
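/*
 * The four entry points below differ only in how they drive
 * ahash_process_req(): update hashes areq->nbytes with last = 0, final
 * flushes the buffered remainder with last = 1 and no new data, finup
 * does both at once, and digest is an init followed by a finup.
 */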
2097 static int ahash_update(struct ahash_request *areq)
2098 {
2099 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2100 
2101 	req_ctx->last = 0;
2102 
2103 	return ahash_process_req(areq, areq->nbytes);
2104 }
2105 
2106 static int ahash_final(struct ahash_request *areq)
2107 {
2108 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2109 
2110 	req_ctx->last = 1;
2111 
2112 	return ahash_process_req(areq, 0);
2113 }
2114 
2115 static int ahash_finup(struct ahash_request *areq)
2116 {
2117 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2118 
2119 	req_ctx->last = 1;
2120 
2121 	return ahash_process_req(areq, areq->nbytes);
2122 }
2123 
2124 static int ahash_digest(struct ahash_request *areq)
2125 {
2126 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2127 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2128 
2129 	ahash->init(areq);
2130 	req_ctx->last = 1;
2131 
2132 	return ahash_process_req(areq, areq->nbytes);
2133 }
2134 
2135 static int ahash_export(struct ahash_request *areq, void *out)
2136 {
2137 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2138 	struct talitos_export_state *export = out;
2139 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2140 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2141 	struct device *dev = ctx->dev;
2142 	dma_addr_t dma;
2143 
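	/*
	 * This map/unmap pair only invalidates the cache, so the CPU reads
	 * the hash context that the SEC wrote back through the earlier
	 * _nosync mapping.
	 */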
2144 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2145 			     DMA_FROM_DEVICE);
2146 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2147 
2148 	memcpy(export->hw_context, req_ctx->hw_context,
2149 	       req_ctx->hw_context_size);
2150 	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2151 	export->swinit = req_ctx->swinit;
2152 	export->first = req_ctx->first;
2153 	export->last = req_ctx->last;
2154 	export->to_hash_later = req_ctx->to_hash_later;
2155 	export->nbuf = req_ctx->nbuf;
2156 
2157 	return 0;
2158 }
2159 
2160 static int ahash_import(struct ahash_request *areq, const void *in)
2161 {
2162 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2163 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2164 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2165 	struct device *dev = ctx->dev;
2166 	const struct talitos_export_state *export = in;
2167 	unsigned int size;
2168 	dma_addr_t dma;
2169 
2170 	memset(req_ctx, 0, sizeof(*req_ctx));
2171 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2172 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2173 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2174 	req_ctx->hw_context_size = size;
2175 	memcpy(req_ctx->hw_context, export->hw_context, size);
2176 	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2177 	req_ctx->swinit = export->swinit;
2178 	req_ctx->first = export->first;
2179 	req_ctx->last = export->last;
2180 	req_ctx->to_hash_later = export->to_hash_later;
2181 	req_ctx->nbuf = export->nbuf;
2182 
2183 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2184 			     DMA_TO_DEVICE);
2185 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2186 
2187 	return 0;
2188 }
2189 
2190 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2191 		   u8 *hash)
2192 {
2193 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2195 	struct scatterlist sg[1];
2196 	struct ahash_request *req;
2197 	struct crypto_wait wait;
2198 	int ret;
2199 
2200 	crypto_init_wait(&wait);
2201 
2202 	req = ahash_request_alloc(tfm, GFP_KERNEL);
2203 	if (!req)
2204 		return -ENOMEM;
2205 
2206 	/* Keep tfm keylen == 0 during hash of the long key */
2207 	ctx->keylen = 0;
2208 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2209 				   crypto_req_done, &wait);
2210 
2211 	sg_init_one(&sg[0], key, keylen);
2212 
2213 	ahash_request_set_crypt(req, sg, hash, keylen);
2214 	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2215 
2216 	ahash_request_free(req);
2217 
2218 	return ret;
2219 }
2220 
2221 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2222 			unsigned int keylen)
2223 {
2224 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2225 	struct device *dev = ctx->dev;
2226 	unsigned int blocksize =
2227 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2228 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2229 	unsigned int keysize = keylen;
2230 	u8 hash[SHA512_DIGEST_SIZE];
2231 	int ret;
2232 
2233 	if (keylen <= blocksize) {
2234 		memcpy(ctx->key, key, keysize);
2235 	} else {
2236 		/*
		 * Per the HMAC convention (RFC 2104), a key longer than the
		 * block size is replaced by its hash.
		 */
2237 		ret = keyhash(tfm, key, keylen, hash);
2238 
2239 		if (ret) {
2240 			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2241 			return -EINVAL;
2242 		}
2243 
2244 		keysize = digestsize;
2245 		memcpy(ctx->key, hash, digestsize);
2246 	}
2247 
2248 	if (ctx->keylen)
2249 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2250 
2251 	ctx->keylen = keysize;
2252 	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2253 
2254 	return 0;
2255 }
2256 
2258 struct talitos_alg_template {
2259 	u32 type;
2260 	u32 priority;
2261 	union {
2262 		struct crypto_alg crypto;
2263 		struct ahash_alg hash;
2264 		struct aead_alg aead;
2265 	} alg;
2266 	__be32 desc_hdr_template;
2267 };
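/*
 * Each template below pairs a crypto API algorithm definition with the
 * desc_hdr_template selecting the descriptor type, execution unit(s) and
 * mode bits; hw_supports() checks that header against the capabilities
 * advertised in the device tree before the algorithm is registered.
 */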
2268 
2269 static struct talitos_alg_template driver_algs[] = {
2270 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2271 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2272 		.alg.aead = {
2273 			.base = {
2274 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2275 				.cra_driver_name = "authenc-hmac-sha1-"
2276 						   "cbc-aes-talitos",
2277 				.cra_blocksize = AES_BLOCK_SIZE,
2278 				.cra_flags = CRYPTO_ALG_ASYNC,
2279 			},
2280 			.ivsize = AES_BLOCK_SIZE,
2281 			.maxauthsize = SHA1_DIGEST_SIZE,
2282 		},
2283 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2284 			             DESC_HDR_SEL0_AESU |
2285 		                     DESC_HDR_MODE0_AESU_CBC |
2286 		                     DESC_HDR_SEL1_MDEUA |
2287 		                     DESC_HDR_MODE1_MDEU_INIT |
2288 		                     DESC_HDR_MODE1_MDEU_PAD |
2289 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2290 	},
2291 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2292 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2293 		.alg.aead = {
2294 			.base = {
2295 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2296 				.cra_driver_name = "authenc-hmac-sha1-"
2297 						   "cbc-aes-talitos",
2298 				.cra_blocksize = AES_BLOCK_SIZE,
2299 				.cra_flags = CRYPTO_ALG_ASYNC,
2300 			},
2301 			.ivsize = AES_BLOCK_SIZE,
2302 			.maxauthsize = SHA1_DIGEST_SIZE,
2303 		},
2304 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2305 				     DESC_HDR_SEL0_AESU |
2306 				     DESC_HDR_MODE0_AESU_CBC |
2307 				     DESC_HDR_SEL1_MDEUA |
2308 				     DESC_HDR_MODE1_MDEU_INIT |
2309 				     DESC_HDR_MODE1_MDEU_PAD |
2310 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2311 	},
2312 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2313 		.alg.aead = {
2314 			.base = {
2315 				.cra_name = "authenc(hmac(sha1),"
2316 					    "cbc(des3_ede))",
2317 				.cra_driver_name = "authenc-hmac-sha1-"
2318 						   "cbc-3des-talitos",
2319 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2320 				.cra_flags = CRYPTO_ALG_ASYNC,
2321 			},
2322 			.ivsize = DES3_EDE_BLOCK_SIZE,
2323 			.maxauthsize = SHA1_DIGEST_SIZE,
2324 		},
2325 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2326 			             DESC_HDR_SEL0_DEU |
2327 		                     DESC_HDR_MODE0_DEU_CBC |
2328 		                     DESC_HDR_MODE0_DEU_3DES |
2329 		                     DESC_HDR_SEL1_MDEUA |
2330 		                     DESC_HDR_MODE1_MDEU_INIT |
2331 		                     DESC_HDR_MODE1_MDEU_PAD |
2332 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2333 	},
2334 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2335 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2336 		.alg.aead = {
2337 			.base = {
2338 				.cra_name = "authenc(hmac(sha1),"
2339 					    "cbc(des3_ede))",
2340 				.cra_driver_name = "authenc-hmac-sha1-"
2341 						   "cbc-3des-talitos",
2342 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2343 				.cra_flags = CRYPTO_ALG_ASYNC,
2344 			},
2345 			.ivsize = DES3_EDE_BLOCK_SIZE,
2346 			.maxauthsize = SHA1_DIGEST_SIZE,
2347 		},
2348 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2349 				     DESC_HDR_SEL0_DEU |
2350 				     DESC_HDR_MODE0_DEU_CBC |
2351 				     DESC_HDR_MODE0_DEU_3DES |
2352 				     DESC_HDR_SEL1_MDEUA |
2353 				     DESC_HDR_MODE1_MDEU_INIT |
2354 				     DESC_HDR_MODE1_MDEU_PAD |
2355 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2356 	},
2357 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2358 		.alg.aead = {
2359 			.base = {
2360 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2361 				.cra_driver_name = "authenc-hmac-sha224-"
2362 						   "cbc-aes-talitos",
2363 				.cra_blocksize = AES_BLOCK_SIZE,
2364 				.cra_flags = CRYPTO_ALG_ASYNC,
2365 			},
2366 			.ivsize = AES_BLOCK_SIZE,
2367 			.maxauthsize = SHA224_DIGEST_SIZE,
2368 		},
2369 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2370 				     DESC_HDR_SEL0_AESU |
2371 				     DESC_HDR_MODE0_AESU_CBC |
2372 				     DESC_HDR_SEL1_MDEUA |
2373 				     DESC_HDR_MODE1_MDEU_INIT |
2374 				     DESC_HDR_MODE1_MDEU_PAD |
2375 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2376 	},
2377 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2378 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2379 		.alg.aead = {
2380 			.base = {
2381 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2382 				.cra_driver_name = "authenc-hmac-sha224-"
2383 						   "cbc-aes-talitos",
2384 				.cra_blocksize = AES_BLOCK_SIZE,
2385 				.cra_flags = CRYPTO_ALG_ASYNC,
2386 			},
2387 			.ivsize = AES_BLOCK_SIZE,
2388 			.maxauthsize = SHA224_DIGEST_SIZE,
2389 		},
2390 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2391 				     DESC_HDR_SEL0_AESU |
2392 				     DESC_HDR_MODE0_AESU_CBC |
2393 				     DESC_HDR_SEL1_MDEUA |
2394 				     DESC_HDR_MODE1_MDEU_INIT |
2395 				     DESC_HDR_MODE1_MDEU_PAD |
2396 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2397 	},
2398 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2399 		.alg.aead = {
2400 			.base = {
2401 				.cra_name = "authenc(hmac(sha224),"
2402 					    "cbc(des3_ede))",
2403 				.cra_driver_name = "authenc-hmac-sha224-"
2404 						   "cbc-3des-talitos",
2405 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2406 				.cra_flags = CRYPTO_ALG_ASYNC,
2407 			},
2408 			.ivsize = DES3_EDE_BLOCK_SIZE,
2409 			.maxauthsize = SHA224_DIGEST_SIZE,
2410 		},
2411 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2412 			             DESC_HDR_SEL0_DEU |
2413 		                     DESC_HDR_MODE0_DEU_CBC |
2414 		                     DESC_HDR_MODE0_DEU_3DES |
2415 		                     DESC_HDR_SEL1_MDEUA |
2416 		                     DESC_HDR_MODE1_MDEU_INIT |
2417 		                     DESC_HDR_MODE1_MDEU_PAD |
2418 		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2419 	},
2420 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2421 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2422 		.alg.aead = {
2423 			.base = {
2424 				.cra_name = "authenc(hmac(sha224),"
2425 					    "cbc(des3_ede))",
2426 				.cra_driver_name = "authenc-hmac-sha224-"
2427 						   "cbc-3des-talitos",
2428 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2429 				.cra_flags = CRYPTO_ALG_ASYNC,
2430 			},
2431 			.ivsize = DES3_EDE_BLOCK_SIZE,
2432 			.maxauthsize = SHA224_DIGEST_SIZE,
2433 		},
2434 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2435 				     DESC_HDR_SEL0_DEU |
2436 				     DESC_HDR_MODE0_DEU_CBC |
2437 				     DESC_HDR_MODE0_DEU_3DES |
2438 				     DESC_HDR_SEL1_MDEUA |
2439 				     DESC_HDR_MODE1_MDEU_INIT |
2440 				     DESC_HDR_MODE1_MDEU_PAD |
2441 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2442 	},
2443 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2444 		.alg.aead = {
2445 			.base = {
2446 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2447 				.cra_driver_name = "authenc-hmac-sha256-"
2448 						   "cbc-aes-talitos",
2449 				.cra_blocksize = AES_BLOCK_SIZE,
2450 				.cra_flags = CRYPTO_ALG_ASYNC,
2451 			},
2452 			.ivsize = AES_BLOCK_SIZE,
2453 			.maxauthsize = SHA256_DIGEST_SIZE,
2454 		},
2455 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2456 			             DESC_HDR_SEL0_AESU |
2457 		                     DESC_HDR_MODE0_AESU_CBC |
2458 		                     DESC_HDR_SEL1_MDEUA |
2459 		                     DESC_HDR_MODE1_MDEU_INIT |
2460 		                     DESC_HDR_MODE1_MDEU_PAD |
2461 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2462 	},
2463 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2464 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2465 		.alg.aead = {
2466 			.base = {
2467 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2468 				.cra_driver_name = "authenc-hmac-sha256-"
2469 						   "cbc-aes-talitos",
2470 				.cra_blocksize = AES_BLOCK_SIZE,
2471 				.cra_flags = CRYPTO_ALG_ASYNC,
2472 			},
2473 			.ivsize = AES_BLOCK_SIZE,
2474 			.maxauthsize = SHA256_DIGEST_SIZE,
2475 		},
2476 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2477 				     DESC_HDR_SEL0_AESU |
2478 				     DESC_HDR_MODE0_AESU_CBC |
2479 				     DESC_HDR_SEL1_MDEUA |
2480 				     DESC_HDR_MODE1_MDEU_INIT |
2481 				     DESC_HDR_MODE1_MDEU_PAD |
2482 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2483 	},
2484 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2485 		.alg.aead = {
2486 			.base = {
2487 				.cra_name = "authenc(hmac(sha256),"
2488 					    "cbc(des3_ede))",
2489 				.cra_driver_name = "authenc-hmac-sha256-"
2490 						   "cbc-3des-talitos",
2491 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2492 				.cra_flags = CRYPTO_ALG_ASYNC,
2493 			},
2494 			.ivsize = DES3_EDE_BLOCK_SIZE,
2495 			.maxauthsize = SHA256_DIGEST_SIZE,
2496 		},
2497 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2498 			             DESC_HDR_SEL0_DEU |
2499 		                     DESC_HDR_MODE0_DEU_CBC |
2500 		                     DESC_HDR_MODE0_DEU_3DES |
2501 		                     DESC_HDR_SEL1_MDEUA |
2502 		                     DESC_HDR_MODE1_MDEU_INIT |
2503 		                     DESC_HDR_MODE1_MDEU_PAD |
2504 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2505 	},
2506 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2507 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2508 		.alg.aead = {
2509 			.base = {
2510 				.cra_name = "authenc(hmac(sha256),"
2511 					    "cbc(des3_ede))",
2512 				.cra_driver_name = "authenc-hmac-sha256-"
2513 						   "cbc-3des-talitos",
2514 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2515 				.cra_flags = CRYPTO_ALG_ASYNC,
2516 			},
2517 			.ivsize = DES3_EDE_BLOCK_SIZE,
2518 			.maxauthsize = SHA256_DIGEST_SIZE,
2519 		},
2520 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2521 				     DESC_HDR_SEL0_DEU |
2522 				     DESC_HDR_MODE0_DEU_CBC |
2523 				     DESC_HDR_MODE0_DEU_3DES |
2524 				     DESC_HDR_SEL1_MDEUA |
2525 				     DESC_HDR_MODE1_MDEU_INIT |
2526 				     DESC_HDR_MODE1_MDEU_PAD |
2527 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2528 	},
2529 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2530 		.alg.aead = {
2531 			.base = {
2532 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2533 				.cra_driver_name = "authenc-hmac-sha384-"
2534 						   "cbc-aes-talitos",
2535 				.cra_blocksize = AES_BLOCK_SIZE,
2536 				.cra_flags = CRYPTO_ALG_ASYNC,
2537 			},
2538 			.ivsize = AES_BLOCK_SIZE,
2539 			.maxauthsize = SHA384_DIGEST_SIZE,
2540 		},
2541 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2542 			             DESC_HDR_SEL0_AESU |
2543 		                     DESC_HDR_MODE0_AESU_CBC |
2544 		                     DESC_HDR_SEL1_MDEUB |
2545 		                     DESC_HDR_MODE1_MDEU_INIT |
2546 		                     DESC_HDR_MODE1_MDEU_PAD |
2547 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2548 	},
2549 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2550 		.alg.aead = {
2551 			.base = {
2552 				.cra_name = "authenc(hmac(sha384),"
2553 					    "cbc(des3_ede))",
2554 				.cra_driver_name = "authenc-hmac-sha384-"
2555 						   "cbc-3des-talitos",
2556 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2557 				.cra_flags = CRYPTO_ALG_ASYNC,
2558 			},
2559 			.ivsize = DES3_EDE_BLOCK_SIZE,
2560 			.maxauthsize = SHA384_DIGEST_SIZE,
2561 		},
2562 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2563 			             DESC_HDR_SEL0_DEU |
2564 		                     DESC_HDR_MODE0_DEU_CBC |
2565 		                     DESC_HDR_MODE0_DEU_3DES |
2566 		                     DESC_HDR_SEL1_MDEUB |
2567 		                     DESC_HDR_MODE1_MDEU_INIT |
2568 		                     DESC_HDR_MODE1_MDEU_PAD |
2569 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2570 	},
2571 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2572 		.alg.aead = {
2573 			.base = {
2574 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2575 				.cra_driver_name = "authenc-hmac-sha512-"
2576 						   "cbc-aes-talitos",
2577 				.cra_blocksize = AES_BLOCK_SIZE,
2578 				.cra_flags = CRYPTO_ALG_ASYNC,
2579 			},
2580 			.ivsize = AES_BLOCK_SIZE,
2581 			.maxauthsize = SHA512_DIGEST_SIZE,
2582 		},
2583 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2584 			             DESC_HDR_SEL0_AESU |
2585 		                     DESC_HDR_MODE0_AESU_CBC |
2586 		                     DESC_HDR_SEL1_MDEUB |
2587 		                     DESC_HDR_MODE1_MDEU_INIT |
2588 		                     DESC_HDR_MODE1_MDEU_PAD |
2589 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2590 	},
2591 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2592 		.alg.aead = {
2593 			.base = {
2594 				.cra_name = "authenc(hmac(sha512),"
2595 					    "cbc(des3_ede))",
2596 				.cra_driver_name = "authenc-hmac-sha512-"
2597 						   "cbc-3des-talitos",
2598 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2599 				.cra_flags = CRYPTO_ALG_ASYNC,
2600 			},
2601 			.ivsize = DES3_EDE_BLOCK_SIZE,
2602 			.maxauthsize = SHA512_DIGEST_SIZE,
2603 		},
2604 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2605 			             DESC_HDR_SEL0_DEU |
2606 		                     DESC_HDR_MODE0_DEU_CBC |
2607 		                     DESC_HDR_MODE0_DEU_3DES |
2608 		                     DESC_HDR_SEL1_MDEUB |
2609 		                     DESC_HDR_MODE1_MDEU_INIT |
2610 		                     DESC_HDR_MODE1_MDEU_PAD |
2611 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2612 	},
2613 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2614 		.alg.aead = {
2615 			.base = {
2616 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2617 				.cra_driver_name = "authenc-hmac-md5-"
2618 						   "cbc-aes-talitos",
2619 				.cra_blocksize = AES_BLOCK_SIZE,
2620 				.cra_flags = CRYPTO_ALG_ASYNC,
2621 			},
2622 			.ivsize = AES_BLOCK_SIZE,
2623 			.maxauthsize = MD5_DIGEST_SIZE,
2624 		},
2625 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2626 			             DESC_HDR_SEL0_AESU |
2627 		                     DESC_HDR_MODE0_AESU_CBC |
2628 		                     DESC_HDR_SEL1_MDEUA |
2629 		                     DESC_HDR_MODE1_MDEU_INIT |
2630 		                     DESC_HDR_MODE1_MDEU_PAD |
2631 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2632 	},
2633 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2634 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2635 		.alg.aead = {
2636 			.base = {
2637 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2638 				.cra_driver_name = "authenc-hmac-md5-"
2639 						   "cbc-aes-talitos",
2640 				.cra_blocksize = AES_BLOCK_SIZE,
2641 				.cra_flags = CRYPTO_ALG_ASYNC,
2642 			},
2643 			.ivsize = AES_BLOCK_SIZE,
2644 			.maxauthsize = MD5_DIGEST_SIZE,
2645 		},
2646 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2647 				     DESC_HDR_SEL0_AESU |
2648 				     DESC_HDR_MODE0_AESU_CBC |
2649 				     DESC_HDR_SEL1_MDEUA |
2650 				     DESC_HDR_MODE1_MDEU_INIT |
2651 				     DESC_HDR_MODE1_MDEU_PAD |
2652 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2653 	},
2654 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2655 		.alg.aead = {
2656 			.base = {
2657 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2658 				.cra_driver_name = "authenc-hmac-md5-"
2659 						   "cbc-3des-talitos",
2660 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2661 				.cra_flags = CRYPTO_ALG_ASYNC,
2662 			},
2663 			.ivsize = DES3_EDE_BLOCK_SIZE,
2664 			.maxauthsize = MD5_DIGEST_SIZE,
2665 		},
2666 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2667 			             DESC_HDR_SEL0_DEU |
2668 		                     DESC_HDR_MODE0_DEU_CBC |
2669 		                     DESC_HDR_MODE0_DEU_3DES |
2670 		                     DESC_HDR_SEL1_MDEUA |
2671 		                     DESC_HDR_MODE1_MDEU_INIT |
2672 		                     DESC_HDR_MODE1_MDEU_PAD |
2673 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2674 	},
2675 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2676 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2677 		.alg.aead = {
2678 			.base = {
2679 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2680 				.cra_driver_name = "authenc-hmac-md5-"
2681 						   "cbc-3des-talitos",
2682 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2683 				.cra_flags = CRYPTO_ALG_ASYNC,
2684 			},
2685 			.ivsize = DES3_EDE_BLOCK_SIZE,
2686 			.maxauthsize = MD5_DIGEST_SIZE,
2687 		},
2688 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2689 				     DESC_HDR_SEL0_DEU |
2690 				     DESC_HDR_MODE0_DEU_CBC |
2691 				     DESC_HDR_MODE0_DEU_3DES |
2692 				     DESC_HDR_SEL1_MDEUA |
2693 				     DESC_HDR_MODE1_MDEU_INIT |
2694 				     DESC_HDR_MODE1_MDEU_PAD |
2695 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2696 	},
2697 	/* ABLKCIPHER algorithms. */
2698 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2699 		.alg.crypto = {
2700 			.cra_name = "ecb(aes)",
2701 			.cra_driver_name = "ecb-aes-talitos",
2702 			.cra_blocksize = AES_BLOCK_SIZE,
2703 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2704 				     CRYPTO_ALG_ASYNC,
2705 			.cra_ablkcipher = {
2706 				.min_keysize = AES_MIN_KEY_SIZE,
2707 				.max_keysize = AES_MAX_KEY_SIZE,
2709 			}
2710 		},
2711 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2712 				     DESC_HDR_SEL0_AESU,
2713 	},
2714 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2715 		.alg.crypto = {
2716 			.cra_name = "cbc(aes)",
2717 			.cra_driver_name = "cbc-aes-talitos",
2718 			.cra_blocksize = AES_BLOCK_SIZE,
2719 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2720 				     CRYPTO_ALG_ASYNC,
2721 			.cra_ablkcipher = {
2722 				.min_keysize = AES_MIN_KEY_SIZE,
2723 				.max_keysize = AES_MAX_KEY_SIZE,
2724 				.ivsize = AES_BLOCK_SIZE,
2725 			}
2726 		},
2727 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2728 				     DESC_HDR_SEL0_AESU |
2729 				     DESC_HDR_MODE0_AESU_CBC,
2730 	},
2731 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2732 		.alg.crypto = {
2733 			.cra_name = "ctr(aes)",
2734 			.cra_driver_name = "ctr-aes-talitos",
2735 			.cra_blocksize = 1, /* CTR is a stream cipher */
2736 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2737 				     CRYPTO_ALG_ASYNC,
2738 			.cra_ablkcipher = {
2739 				.min_keysize = AES_MIN_KEY_SIZE,
2740 				.max_keysize = AES_MAX_KEY_SIZE,
2741 				.ivsize = AES_BLOCK_SIZE,
2742 			}
2743 		},
2744 		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2745 				     DESC_HDR_SEL0_AESU |
2746 				     DESC_HDR_MODE0_AESU_CTR,
2747 	},
2748 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2749 		.alg.crypto = {
2750 			.cra_name = "ecb(des)",
2751 			.cra_driver_name = "ecb-des-talitos",
2752 			.cra_blocksize = DES_BLOCK_SIZE,
2753 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2754 				     CRYPTO_ALG_ASYNC,
2755 			.cra_ablkcipher = {
2756 				.min_keysize = DES_KEY_SIZE,
2757 				.max_keysize = DES_KEY_SIZE,
2759 			}
2760 		},
2761 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2762 				     DESC_HDR_SEL0_DEU,
2763 	},
2764 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2765 		.alg.crypto = {
2766 			.cra_name = "cbc(des)",
2767 			.cra_driver_name = "cbc-des-talitos",
2768 			.cra_blocksize = DES_BLOCK_SIZE,
2769 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2770 				     CRYPTO_ALG_ASYNC,
2771 			.cra_ablkcipher = {
2772 				.min_keysize = DES_KEY_SIZE,
2773 				.max_keysize = DES_KEY_SIZE,
2774 				.ivsize = DES_BLOCK_SIZE,
2775 			}
2776 		},
2777 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2778 				     DESC_HDR_SEL0_DEU |
2779 				     DESC_HDR_MODE0_DEU_CBC,
2780 	},
2781 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2782 		.alg.crypto = {
2783 			.cra_name = "ecb(des3_ede)",
2784 			.cra_driver_name = "ecb-3des-talitos",
2785 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2786 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2787 				     CRYPTO_ALG_ASYNC,
2788 			.cra_ablkcipher = {
2789 				.min_keysize = DES3_EDE_KEY_SIZE,
2790 				.max_keysize = DES3_EDE_KEY_SIZE,
2792 			}
2793 		},
2794 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2795 				     DESC_HDR_SEL0_DEU |
2796 				     DESC_HDR_MODE0_DEU_3DES,
2797 	},
2798 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2799 		.alg.crypto = {
2800 			.cra_name = "cbc(des3_ede)",
2801 			.cra_driver_name = "cbc-3des-talitos",
2802 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2803 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2804 				     CRYPTO_ALG_ASYNC,
2805 			.cra_ablkcipher = {
2806 				.min_keysize = DES3_EDE_KEY_SIZE,
2807 				.max_keysize = DES3_EDE_KEY_SIZE,
2808 				.ivsize = DES3_EDE_BLOCK_SIZE,
2809 			}
2810 		},
2811 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2812 			             DESC_HDR_SEL0_DEU |
2813 		                     DESC_HDR_MODE0_DEU_CBC |
2814 		                     DESC_HDR_MODE0_DEU_3DES,
2815 	},
2816 	/* AHASH algorithms. */
2817 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2818 		.alg.hash = {
2819 			.halg.digestsize = MD5_DIGEST_SIZE,
2820 			.halg.statesize = sizeof(struct talitos_export_state),
2821 			.halg.base = {
2822 				.cra_name = "md5",
2823 				.cra_driver_name = "md5-talitos",
2824 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2825 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2826 					     CRYPTO_ALG_ASYNC,
2827 			}
2828 		},
2829 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2830 				     DESC_HDR_SEL0_MDEUA |
2831 				     DESC_HDR_MODE0_MDEU_MD5,
2832 	},
2833 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2834 		.alg.hash = {
2835 			.halg.digestsize = SHA1_DIGEST_SIZE,
2836 			.halg.statesize = sizeof(struct talitos_export_state),
2837 			.halg.base = {
2838 				.cra_name = "sha1",
2839 				.cra_driver_name = "sha1-talitos",
2840 				.cra_blocksize = SHA1_BLOCK_SIZE,
2841 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2842 					     CRYPTO_ALG_ASYNC,
2843 			}
2844 		},
2845 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2846 				     DESC_HDR_SEL0_MDEUA |
2847 				     DESC_HDR_MODE0_MDEU_SHA1,
2848 	},
2849 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2850 		.alg.hash = {
2851 			.halg.digestsize = SHA224_DIGEST_SIZE,
2852 			.halg.statesize = sizeof(struct talitos_export_state),
2853 			.halg.base = {
2854 				.cra_name = "sha224",
2855 				.cra_driver_name = "sha224-talitos",
2856 				.cra_blocksize = SHA224_BLOCK_SIZE,
2857 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2858 					     CRYPTO_ALG_ASYNC,
2859 			}
2860 		},
2861 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2862 				     DESC_HDR_SEL0_MDEUA |
2863 				     DESC_HDR_MODE0_MDEU_SHA224,
2864 	},
2865 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2866 		.alg.hash = {
2867 			.halg.digestsize = SHA256_DIGEST_SIZE,
2868 			.halg.statesize = sizeof(struct talitos_export_state),
2869 			.halg.base = {
2870 				.cra_name = "sha256",
2871 				.cra_driver_name = "sha256-talitos",
2872 				.cra_blocksize = SHA256_BLOCK_SIZE,
2873 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2874 					     CRYPTO_ALG_ASYNC,
2875 			}
2876 		},
2877 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2878 				     DESC_HDR_SEL0_MDEUA |
2879 				     DESC_HDR_MODE0_MDEU_SHA256,
2880 	},
2881 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2882 		.alg.hash = {
2883 			.halg.digestsize = SHA384_DIGEST_SIZE,
2884 			.halg.statesize = sizeof(struct talitos_export_state),
2885 			.halg.base = {
2886 				.cra_name = "sha384",
2887 				.cra_driver_name = "sha384-talitos",
2888 				.cra_blocksize = SHA384_BLOCK_SIZE,
2889 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2890 					     CRYPTO_ALG_ASYNC,
2891 			}
2892 		},
2893 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2894 				     DESC_HDR_SEL0_MDEUB |
2895 				     DESC_HDR_MODE0_MDEUB_SHA384,
2896 	},
2897 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2898 		.alg.hash = {
2899 			.halg.digestsize = SHA512_DIGEST_SIZE,
2900 			.halg.statesize = sizeof(struct talitos_export_state),
2901 			.halg.base = {
2902 				.cra_name = "sha512",
2903 				.cra_driver_name = "sha512-talitos",
2904 				.cra_blocksize = SHA512_BLOCK_SIZE,
2905 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2906 					     CRYPTO_ALG_ASYNC,
2907 			}
2908 		},
2909 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2910 				     DESC_HDR_SEL0_MDEUB |
2911 				     DESC_HDR_MODE0_MDEUB_SHA512,
2912 	},
2913 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2914 		.alg.hash = {
2915 			.halg.digestsize = MD5_DIGEST_SIZE,
2916 			.halg.statesize = sizeof(struct talitos_export_state),
2917 			.halg.base = {
2918 				.cra_name = "hmac(md5)",
2919 				.cra_driver_name = "hmac-md5-talitos",
2920 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2921 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2922 					     CRYPTO_ALG_ASYNC,
2923 			}
2924 		},
2925 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2926 				     DESC_HDR_SEL0_MDEUA |
2927 				     DESC_HDR_MODE0_MDEU_MD5,
2928 	},
2929 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2930 		.alg.hash = {
2931 			.halg.digestsize = SHA1_DIGEST_SIZE,
2932 			.halg.statesize = sizeof(struct talitos_export_state),
2933 			.halg.base = {
2934 				.cra_name = "hmac(sha1)",
2935 				.cra_driver_name = "hmac-sha1-talitos",
2936 				.cra_blocksize = SHA1_BLOCK_SIZE,
2937 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2938 					     CRYPTO_ALG_ASYNC,
2939 			}
2940 		},
2941 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2942 				     DESC_HDR_SEL0_MDEUA |
2943 				     DESC_HDR_MODE0_MDEU_SHA1,
2944 	},
2945 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2946 		.alg.hash = {
2947 			.halg.digestsize = SHA224_DIGEST_SIZE,
2948 			.halg.statesize = sizeof(struct talitos_export_state),
2949 			.halg.base = {
2950 				.cra_name = "hmac(sha224)",
2951 				.cra_driver_name = "hmac-sha224-talitos",
2952 				.cra_blocksize = SHA224_BLOCK_SIZE,
2953 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2954 					     CRYPTO_ALG_ASYNC,
2955 			}
2956 		},
2957 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2958 				     DESC_HDR_SEL0_MDEUA |
2959 				     DESC_HDR_MODE0_MDEU_SHA224,
2960 	},
2961 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2962 		.alg.hash = {
2963 			.halg.digestsize = SHA256_DIGEST_SIZE,
2964 			.halg.statesize = sizeof(struct talitos_export_state),
2965 			.halg.base = {
2966 				.cra_name = "hmac(sha256)",
2967 				.cra_driver_name = "hmac-sha256-talitos",
2968 				.cra_blocksize = SHA256_BLOCK_SIZE,
2969 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2970 					     CRYPTO_ALG_ASYNC,
2971 			}
2972 		},
2973 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2974 				     DESC_HDR_SEL0_MDEUA |
2975 				     DESC_HDR_MODE0_MDEU_SHA256,
2976 	},
2977 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2978 		.alg.hash = {
2979 			.halg.digestsize = SHA384_DIGEST_SIZE,
2980 			.halg.statesize = sizeof(struct talitos_export_state),
2981 			.halg.base = {
2982 				.cra_name = "hmac(sha384)",
2983 				.cra_driver_name = "hmac-sha384-talitos",
2984 				.cra_blocksize = SHA384_BLOCK_SIZE,
2985 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2986 					     CRYPTO_ALG_ASYNC,
2987 			}
2988 		},
2989 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2990 				     DESC_HDR_SEL0_MDEUB |
2991 				     DESC_HDR_MODE0_MDEUB_SHA384,
2992 	},
2993 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2994 		.alg.hash = {
2995 			.halg.digestsize = SHA512_DIGEST_SIZE,
2996 			.halg.statesize = sizeof(struct talitos_export_state),
2997 			.halg.base = {
2998 				.cra_name = "hmac(sha512)",
2999 				.cra_driver_name = "hmac-sha512-talitos",
3000 				.cra_blocksize = SHA512_BLOCK_SIZE,
3001 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
3002 					     CRYPTO_ALG_ASYNC,
3003 			}
3004 		},
3005 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3006 				     DESC_HDR_SEL0_MDEUB |
3007 				     DESC_HDR_MODE0_MDEUB_SHA512,
3008 	}
3009 };
3010 
3011 struct talitos_crypto_alg {
3012 	struct list_head entry;
3013 	struct device *dev;
3014 	struct talitos_alg_template algt;
3015 };
3016 
3017 static int talitos_init_common(struct talitos_ctx *ctx,
3018 			       struct talitos_crypto_alg *talitos_alg)
3019 {
3020 	struct talitos_private *priv;
3021 
3022 	/* update context with ptr to dev */
3023 	ctx->dev = talitos_alg->dev;
3024 
3025 	/* assign SEC channel to tfm in round-robin fashion */
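	/*
	 * num_channels is validated as a power of two at probe time, so
	 * masking with (num_channels - 1) is equivalent to taking the
	 * counter modulo num_channels.
	 */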
3026 	priv = dev_get_drvdata(ctx->dev);
3027 	ctx->ch = atomic_inc_return(&priv->last_chan) &
3028 		  (priv->num_channels - 1);
3029 
3030 	/* copy descriptor header template value */
3031 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3032 
3033 	/* select done notification */
3034 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3035 
3036 	return 0;
3037 }
3038 
3039 static int talitos_cra_init(struct crypto_tfm *tfm)
3040 {
3041 	struct crypto_alg *alg = tfm->__crt_alg;
3042 	struct talitos_crypto_alg *talitos_alg;
3043 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3044 
3045 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3046 		talitos_alg = container_of(__crypto_ahash_alg(alg),
3047 					   struct talitos_crypto_alg,
3048 					   algt.alg.hash);
3049 	else
3050 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
3051 					   algt.alg.crypto);
3052 
3053 	return talitos_init_common(ctx, talitos_alg);
3054 }
3055 
3056 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3057 {
3058 	struct aead_alg *alg = crypto_aead_alg(tfm);
3059 	struct talitos_crypto_alg *talitos_alg;
3060 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3061 
3062 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3063 				   algt.alg.aead);
3064 
3065 	return talitos_init_common(ctx, talitos_alg);
3066 }
3067 
3068 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3069 {
3070 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3071 
3072 	talitos_cra_init(tfm);
3073 
3074 	ctx->keylen = 0;
3075 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3076 				 sizeof(struct talitos_ahash_req_ctx));
3077 
3078 	return 0;
3079 }
3080 
3081 static void talitos_cra_exit(struct crypto_tfm *tfm)
3082 {
3083 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3084 	struct device *dev = ctx->dev;
3085 
3086 	if (ctx->keylen)
3087 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3088 }
3089 
3090 /*
3091  * Given the alg's descriptor header template, determine whether the
3092  * descriptor type and the primary/secondary execution units it requires
3093  * match the h/w capabilities described in the device tree node.
3094  */
3095 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3096 {
3097 	struct talitos_private *priv = dev_get_drvdata(dev);
3098 	int ret;
3099 
3100 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3101 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3102 
3103 	if (SECONDARY_EU(desc_hdr_template))
3104 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3105 		              & priv->exec_units);
3106 
3107 	return ret;
3108 }
3109 
3110 static int talitos_remove(struct platform_device *ofdev)
3111 {
3112 	struct device *dev = &ofdev->dev;
3113 	struct talitos_private *priv = dev_get_drvdata(dev);
3114 	struct talitos_crypto_alg *t_alg, *n;
3115 	int i;
3116 
3117 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3118 		switch (t_alg->algt.type) {
3119 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
3120 			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
3121 		case CRYPTO_ALG_TYPE_AEAD:
3122 			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
3123 		case CRYPTO_ALG_TYPE_AHASH:
3124 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3125 			break;
3126 		}
3127 		list_del(&t_alg->entry);
3128 	}
3129 
3130 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3131 		talitos_unregister_rng(dev);
3132 
3133 	for (i = 0; i < 2; i++) {
3134 		if (priv->irq[i]) {
3135 			free_irq(priv->irq[i], dev);
3136 			irq_dispose_mapping(priv->irq[i]);
3137 		}
	}
3138 
3139 	tasklet_kill(&priv->done_task[0]);
3140 	if (priv->irq[1])
3141 		tasklet_kill(&priv->done_task[1]);
3142 
3143 	return 0;
3144 }
3145 
3146 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3147 						    struct talitos_alg_template
3148 						           *template)
3149 {
3150 	struct talitos_private *priv = dev_get_drvdata(dev);
3151 	struct talitos_crypto_alg *t_alg;
3152 	struct crypto_alg *alg;
3153 
3154 	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3155 			     GFP_KERNEL);
3156 	if (!t_alg)
3157 		return ERR_PTR(-ENOMEM);
3158 
3159 	t_alg->algt = *template;
3160 
3161 	switch (t_alg->algt.type) {
3162 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
3163 		alg = &t_alg->algt.alg.crypto;
3164 		alg->cra_init = talitos_cra_init;
3165 		alg->cra_exit = talitos_cra_exit;
3166 		alg->cra_type = &crypto_ablkcipher_type;
3167 		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3168 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3169 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3170 		alg->cra_ablkcipher.geniv = "eseqiv";
3171 		break;
3172 	case CRYPTO_ALG_TYPE_AEAD:
3173 		alg = &t_alg->algt.alg.aead.base;
3174 		alg->cra_exit = talitos_cra_exit;
3175 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3176 		t_alg->algt.alg.aead.setkey = aead_setkey;
3177 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3178 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3179 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3180 		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3181 			devm_kfree(dev, t_alg);
3182 			return ERR_PTR(-ENOTSUPP);
3183 		}
3184 		break;
3185 	case CRYPTO_ALG_TYPE_AHASH:
3186 		alg = &t_alg->algt.alg.hash.halg.base;
3187 		alg->cra_init = talitos_cra_init_ahash;
3188 		alg->cra_exit = talitos_cra_exit;
3189 		alg->cra_type = &crypto_ahash_type;
3190 		t_alg->algt.alg.hash.init = ahash_init;
3191 		t_alg->algt.alg.hash.update = ahash_update;
3192 		t_alg->algt.alg.hash.final = ahash_final;
3193 		t_alg->algt.alg.hash.finup = ahash_finup;
3194 		t_alg->algt.alg.hash.digest = ahash_digest;
3195 		if (!strncmp(alg->cra_name, "hmac", 4))
3196 			t_alg->algt.alg.hash.setkey = ahash_setkey;
3197 		t_alg->algt.alg.hash.import = ahash_import;
3198 		t_alg->algt.alg.hash.export = ahash_export;
3199 
3200 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3201 		    !strncmp(alg->cra_name, "hmac", 4)) {
3202 			devm_kfree(dev, t_alg);
3203 			return ERR_PTR(-ENOTSUPP);
3204 		}
3205 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3206 		    (!strcmp(alg->cra_name, "sha224") ||
3207 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3208 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3209 			t_alg->algt.desc_hdr_template =
3210 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3211 					DESC_HDR_SEL0_MDEUA |
3212 					DESC_HDR_MODE0_MDEU_SHA256;
3213 		}
3214 		break;
3215 	default:
3216 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3217 		devm_kfree(dev, t_alg);
3218 		return ERR_PTR(-EINVAL);
3219 	}
3220 
3221 	alg->cra_module = THIS_MODULE;
3222 	if (t_alg->algt.priority)
3223 		alg->cra_priority = t_alg->algt.priority;
3224 	else
3225 		alg->cra_priority = TALITOS_CRA_PRIORITY;
3226 	alg->cra_alignmask = 0;
3227 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3228 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3229 
3230 	t_alg->dev = dev;
3231 
3232 	return t_alg;
3233 }
3234 
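/*
 * SEC1 exposes a single interrupt for all four channels.  SEC2+ exposes
 * either one combined interrupt or two lines, one serving channels 0/2
 * and the other channels 1/3; the secondary line is optional and the
 * driver falls back to the combined 4-channel handler without it.
 */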
3235 static int talitos_probe_irq(struct platform_device *ofdev)
3236 {
3237 	struct device *dev = &ofdev->dev;
3238 	struct device_node *np = ofdev->dev.of_node;
3239 	struct talitos_private *priv = dev_get_drvdata(dev);
3240 	int err;
3241 	bool is_sec1 = has_ftr_sec1(priv);
3242 
3243 	priv->irq[0] = irq_of_parse_and_map(np, 0);
3244 	if (!priv->irq[0]) {
3245 		dev_err(dev, "failed to map irq\n");
3246 		return -EINVAL;
3247 	}
3248 	if (is_sec1) {
3249 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3250 				  dev_driver_string(dev), dev);
3251 		goto primary_out;
3252 	}
3253 
3254 	priv->irq[1] = irq_of_parse_and_map(np, 1);
3255 
3256 	/* get the primary irq line */
3257 	if (!priv->irq[1]) {
3258 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3259 				  dev_driver_string(dev), dev);
3260 		goto primary_out;
3261 	}
3262 
3263 	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3264 			  dev_driver_string(dev), dev);
3265 	if (err)
3266 		goto primary_out;
3267 
3268 	/* get the secondary irq line */
3269 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3270 			  dev_driver_string(dev), dev);
3271 	if (err) {
3272 		dev_err(dev, "failed to request secondary irq\n");
3273 		irq_dispose_mapping(priv->irq[1]);
3274 		priv->irq[1] = 0;
3275 	}
3276 
3277 	return err;
3278 
3279 primary_out:
3280 	if (err) {
3281 		dev_err(dev, "failed to request primary irq\n");
3282 		irq_dispose_mapping(priv->irq[0]);
3283 		priv->irq[0] = 0;
3284 	}
3285 
3286 	return err;
3287 }
3288 
3289 static int talitos_probe(struct platform_device *ofdev)
3290 {
3291 	struct device *dev = &ofdev->dev;
3292 	struct device_node *np = ofdev->dev.of_node;
3293 	struct talitos_private *priv;
3294 	int i, err;
3295 	int stride;
3296 	struct resource *res;
3297 
3298 	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3299 	if (!priv)
3300 		return -ENOMEM;
3301 
3302 	INIT_LIST_HEAD(&priv->alg_list);
3303 
3304 	dev_set_drvdata(dev, priv);
3305 
3306 	priv->ofdev = ofdev;
3307 
3308 	spin_lock_init(&priv->reg_lock);
3309 
3310 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3311 	if (!res)
3312 		return -ENXIO;
3313 	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3314 	if (!priv->reg) {
3315 		dev_err(dev, "failed to ioremap\n");
3316 		err = -ENOMEM;
3317 		goto err_out;
3318 	}
3319 
3320 	/* get SEC version capabilities from device tree */
3321 	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3322 	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3323 	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3324 	of_property_read_u32(np, "fsl,descriptor-types-mask",
3325 			     &priv->desc_types);
3326 
3327 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3328 	    !priv->exec_units || !priv->desc_types) {
3329 		dev_err(dev, "invalid property data in device tree node\n");
3330 		err = -EINVAL;
3331 		goto err_out;
3332 	}
3333 
3334 	if (of_device_is_compatible(np, "fsl,sec3.0"))
3335 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3336 
3337 	if (of_device_is_compatible(np, "fsl,sec2.1"))
3338 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3339 				  TALITOS_FTR_SHA224_HWINIT |
3340 				  TALITOS_FTR_HMAC_OK;
3341 
3342 	if (of_device_is_compatible(np, "fsl,sec1.0"))
3343 		priv->features |= TALITOS_FTR_SEC1;
3344 
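	/*
	 * Per-generation register layout: the execution unit offsets and
	 * the per-channel register stride differ between SEC1 and SEC2+.
	 */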
3345 	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3346 		priv->reg_deu = priv->reg + TALITOS12_DEU;
3347 		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3348 		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3349 		stride = TALITOS1_CH_STRIDE;
3350 	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3351 		priv->reg_deu = priv->reg + TALITOS10_DEU;
3352 		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3353 		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3354 		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3355 		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3356 		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3357 		stride = TALITOS1_CH_STRIDE;
3358 	} else {
3359 		priv->reg_deu = priv->reg + TALITOS2_DEU;
3360 		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3361 		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3362 		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3363 		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3364 		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3365 		priv->reg_keu = priv->reg + TALITOS2_KEU;
3366 		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3367 		stride = TALITOS2_CH_STRIDE;
3368 	}
3369 
3370 	err = talitos_probe_irq(ofdev);
3371 	if (err)
3372 		goto err_out;
3373 
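	/* pick done tasklet(s) to match the interrupt wiring set up above */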
3374 	if (of_device_is_compatible(np, "fsl,sec1.0")) {
3375 		if (priv->num_channels == 1)
3376 			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3377 				     (unsigned long)dev);
3378 		else
3379 			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3380 				     (unsigned long)dev);
3381 	} else {
3382 		if (priv->irq[1]) {
3383 			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3384 				     (unsigned long)dev);
3385 			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3386 				     (unsigned long)dev);
3387 		} else if (priv->num_channels == 1) {
3388 			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3389 				     (unsigned long)dev);
3390 		} else {
3391 			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3392 				     (unsigned long)dev);
3393 		}
3394 	}
3395 
3396 	priv->chan = devm_kcalloc(dev,
3397 				  priv->num_channels,
3398 				  sizeof(struct talitos_channel),
3399 				  GFP_KERNEL);
3400 	if (!priv->chan) {
3401 		dev_err(dev, "failed to allocate channel management space\n");
3402 		err = -ENOMEM;
3403 		goto err_out;
3404 	}
3405 
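	/* round up to a power of two so fifo indices can wrap with a mask */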
3406 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3407 
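	/*
	 * Channel registers sit at stride * (channel + 1) from the base;
	 * channels serviced by the primary irq additionally use
	 * TALITOS_CH_BASE_OFFSET within their bank.
	 */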
3408 	for (i = 0; i < priv->num_channels; i++) {
3409 		priv->chan[i].reg = priv->reg + stride * (i + 1);
3410 		if (!priv->irq[1] || !(i & 1))
3411 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3412 
3413 		spin_lock_init(&priv->chan[i].head_lock);
3414 		spin_lock_init(&priv->chan[i].tail_lock);
3415 
3416 		priv->chan[i].fifo = devm_kcalloc(dev,
3417 						priv->fifo_len,
3418 						sizeof(struct talitos_request),
3419 						GFP_KERNEL);
3420 		if (!priv->chan[i].fifo) {
3421 			dev_err(dev, "failed to allocate request fifo %d\n", i);
3422 			err = -ENOMEM;
3423 			goto err_out;
3424 		}
3425 
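		/*
		 * Start the submit counter negative so the channel reports
		 * busy once chfifo_len - 1 requests are outstanding.
		 */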
3426 		atomic_set(&priv->chan[i].submit_count,
3427 			   -(priv->chfifo_len - 1));
3428 	}
3429 
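	/* h/w descriptors carry 36-bit bus addresses */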
3430 	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err) {
		dev_err(dev, "failed to set dma mask: %d\n", err);
		goto err_out;
	}
3431 
3432 	/* reset and initialize the h/w */
3433 	err = init_device(dev);
3434 	if (err) {
3435 		dev_err(dev, "failed to initialize device\n");
3436 		goto err_out;
3437 	}
3438 
3439 	/* register the RNG, if available */
3440 	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3441 		err = talitos_register_rng(dev);
3442 		if (err) {
3443 			dev_err(dev, "failed to register hwrng: %d\n", err);
3444 			goto err_out;
3445 		}
3446 		dev_info(dev, "hwrng\n");
3447 	}
3448 
3449 	/* register crypto algorithms the device supports */
3450 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3451 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3452 			struct talitos_crypto_alg *t_alg;
3453 			struct crypto_alg *alg = NULL;
3454 
3455 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3456 			if (IS_ERR(t_alg)) {
3457 				err = PTR_ERR(t_alg);
3458 				if (err == -ENOTSUPP)
3459 					continue;
3460 				goto err_out;
3461 			}
3462 
3463 			switch (t_alg->algt.type) {
3464 			case CRYPTO_ALG_TYPE_ABLKCIPHER:
3465 				err = crypto_register_alg(
3466 						&t_alg->algt.alg.crypto);
3467 				alg = &t_alg->algt.alg.crypto;
3468 				break;
3469 
3470 			case CRYPTO_ALG_TYPE_AEAD:
3471 				err = crypto_register_aead(
3472 					&t_alg->algt.alg.aead);
3473 				alg = &t_alg->algt.alg.aead.base;
3474 				break;
3475 
3476 			case CRYPTO_ALG_TYPE_AHASH:
3477 				err = crypto_register_ahash(
3478 						&t_alg->algt.alg.hash);
3479 				alg = &t_alg->algt.alg.hash.halg.base;
3480 				break;
3481 			}
3482 			if (err) {
3483 				dev_err(dev, "%s alg registration failed\n",
3484 					alg->cra_driver_name);
3485 				devm_kfree(dev, t_alg);
3486 			} else {
3487 				list_add_tail(&t_alg->entry, &priv->alg_list);
			}
3488 		}
3489 	}
3490 	if (!list_empty(&priv->alg_list))
3491 		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3492 			 (char *)of_get_property(np, "compatible", NULL));
3493 
3494 	return 0;
3495 
3496 err_out:
3497 	talitos_remove(ofdev);
3498 
3499 	return err;
3500 }
3501 
3502 static const struct of_device_id talitos_match[] = {
3503 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3504 	{
3505 		.compatible = "fsl,sec1.0",
3506 	},
3507 #endif
3508 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3509 	{
3510 		.compatible = "fsl,sec2.0",
3511 	},
3512 #endif
3513 	{},
3514 };
3515 MODULE_DEVICE_TABLE(of, talitos_match);
3516 
3517 static struct platform_driver talitos_driver = {
3518 	.driver = {
3519 		.name = "talitos",
3520 		.of_match_table = talitos_match,
3521 	},
3522 	.probe = talitos_probe,
3523 	.remove = talitos_remove,
3524 };
3525 
3526 module_platform_driver(talitos_driver);
3527 
3528 MODULE_LICENSE("GPL");
3529 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3530 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
3531