xref: /openbmc/linux/drivers/crypto/talitos.c (revision 1fa0a7dc)
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43 
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55 
56 #include "talitos.h"
57 
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 			   unsigned int len, bool is_sec1)
60 {
61 	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62 	if (is_sec1) {
63 		ptr->len1 = cpu_to_be16(len);
64 	} else {
65 		ptr->len = cpu_to_be16(len);
66 		ptr->eptr = upper_32_bits(dma_addr);
67 	}
68 }
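/*
 * A reference sketch of what to_talitos_ptr() packs, assuming the
 * talitos_ptr layout declared in talitos.h (SEC2+: len/j_extent/eptr,
 * SEC1: res/len1, both followed by the 32-bit ptr word).  For a 36-bit
 * bus address such as 0x9_2345_6780 on SEC2:
 *
 *	ptr->eptr = upper_32_bits(0x923456780ULL);	// 0x9, bits 35:32
 *	ptr->ptr  = cpu_to_be32(0x23456780);		// low 32 bits
 *	ptr->len  = cpu_to_be16(len);
 *
 * SEC1 has no eptr, so it keeps only the low 32 bits plus len1 and can
 * only address the first 4 GiB.
 */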
69 
70 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
71 			     struct talitos_ptr *src_ptr, bool is_sec1)
72 {
73 	dst_ptr->ptr = src_ptr->ptr;
74 	if (is_sec1) {
75 		dst_ptr->len1 = src_ptr->len1;
76 	} else {
77 		dst_ptr->len = src_ptr->len;
78 		dst_ptr->eptr = src_ptr->eptr;
79 	}
80 }
81 
82 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
83 					   bool is_sec1)
84 {
85 	if (is_sec1)
86 		return be16_to_cpu(ptr->len1);
87 	else
88 		return be16_to_cpu(ptr->len);
89 }
90 
91 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
92 				   bool is_sec1)
93 {
94 	if (!is_sec1)
95 		ptr->j_extent = val;
96 }
97 
98 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
99 {
100 	if (!is_sec1)
101 		ptr->j_extent |= val;
102 }
103 
104 /*
105  * map virtual single (contiguous) pointer to h/w descriptor pointer
106  */
107 static void __map_single_talitos_ptr(struct device *dev,
108 				     struct talitos_ptr *ptr,
109 				     unsigned int len, void *data,
110 				     enum dma_data_direction dir,
111 				     unsigned long attrs)
112 {
113 	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
114 	struct talitos_private *priv = dev_get_drvdata(dev);
115 	bool is_sec1 = has_ftr_sec1(priv);
116 
117 	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
118 }
119 
120 static void map_single_talitos_ptr(struct device *dev,
121 				   struct talitos_ptr *ptr,
122 				   unsigned int len, void *data,
123 				   enum dma_data_direction dir)
124 {
125 	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
126 }
127 
128 static void map_single_talitos_ptr_nosync(struct device *dev,
129 					  struct talitos_ptr *ptr,
130 					  unsigned int len, void *data,
131 					  enum dma_data_direction dir)
132 {
133 	__map_single_talitos_ptr(dev, ptr, len, data, dir,
134 				 DMA_ATTR_SKIP_CPU_SYNC);
135 }
136 
137 /*
138  * unmap bus single (contiguous) h/w descriptor pointer
139  */
140 static void unmap_single_talitos_ptr(struct device *dev,
141 				     struct talitos_ptr *ptr,
142 				     enum dma_data_direction dir)
143 {
144 	struct talitos_private *priv = dev_get_drvdata(dev);
145 	bool is_sec1 = has_ftr_sec1(priv);
146 
147 	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
148 			 from_talitos_ptr_len(ptr, is_sec1), dir);
149 }
150 
151 static int reset_channel(struct device *dev, int ch)
152 {
153 	struct talitos_private *priv = dev_get_drvdata(dev);
154 	unsigned int timeout = TALITOS_TIMEOUT;
155 	bool is_sec1 = has_ftr_sec1(priv);
156 
157 	if (is_sec1) {
158 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
159 			  TALITOS1_CCCR_LO_RESET);
160 
161 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
162 			TALITOS1_CCCR_LO_RESET) && --timeout)
163 			cpu_relax();
164 	} else {
165 		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
166 			  TALITOS2_CCCR_RESET);
167 
168 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
169 			TALITOS2_CCCR_RESET) && --timeout)
170 			cpu_relax();
171 	}
172 
173 	if (timeout == 0) {
174 		dev_err(dev, "failed to reset channel %d\n", ch);
175 		return -EIO;
176 	}
177 
178 	/* set 36-bit addressing, done writeback enable and done IRQ enable */
179 	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
180 		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
181 	/* enable chaining descriptors */
182 	if (is_sec1)
183 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
184 			  TALITOS_CCCR_LO_NE);
185 
186 	/* and ICCR writeback, if available */
187 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
188 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
189 		          TALITOS_CCCR_LO_IWSE);
190 
191 	return 0;
192 }
193 
194 static int reset_device(struct device *dev)
195 {
196 	struct talitos_private *priv = dev_get_drvdata(dev);
197 	unsigned int timeout = TALITOS_TIMEOUT;
198 	bool is_sec1 = has_ftr_sec1(priv);
199 	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
200 
201 	setbits32(priv->reg + TALITOS_MCR, mcr);
202 
203 	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
204 	       && --timeout)
205 		cpu_relax();
206 
207 	if (priv->irq[1]) {
208 		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
209 		setbits32(priv->reg + TALITOS_MCR, mcr);
210 	}
211 
212 	if (timeout == 0) {
213 		dev_err(dev, "failed to reset device\n");
214 		return -EIO;
215 	}
216 
217 	return 0;
218 }
219 
220 /*
221  * Reset and initialize the device
222  */
223 static int init_device(struct device *dev)
224 {
225 	struct talitos_private *priv = dev_get_drvdata(dev);
226 	int ch, err;
227 	bool is_sec1 = has_ftr_sec1(priv);
228 
229 	/*
230 	 * Master reset
231 	 * errata documentation: warning: certain SEC interrupts
232 	 * are not fully cleared by writing the MCR:SWR bit,
233 	 * set bit twice to completely reset
234 	 */
235 	err = reset_device(dev);
236 	if (err)
237 		return err;
238 
239 	err = reset_device(dev);
240 	if (err)
241 		return err;
242 
243 	/* reset channels */
244 	for (ch = 0; ch < priv->num_channels; ch++) {
245 		err = reset_channel(dev, ch);
246 		if (err)
247 			return err;
248 	}
249 
250 	/* enable channel done and error interrupts */
251 	if (is_sec1) {
252 		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
253 		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
254 		/* disable parity error check in DEU (erroneous? test vect.) */
255 		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
256 	} else {
257 		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
258 		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
259 	}
260 
261 	/* disable integrity check error interrupts (use writeback instead) */
262 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
263 		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
264 		          TALITOS_MDEUICR_LO_ICE);
265 
266 	return 0;
267 }
268 
269 /**
270  * talitos_submit - submits a descriptor to the device for processing
271  * @dev:	the SEC device to be used
272  * @ch:		the SEC device channel to be used
273  * @desc:	the descriptor to be processed by the device
274  * @callback:	whom to call when processing is complete
275  * @context:	a handle for use by caller (optional)
276  *
277  * desc must contain valid dma-mapped (bus physical) address pointers.
278  * callback must check err and feedback in descriptor header
279  * for device processing status.
280  */
281 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
282 		   void (*callback)(struct device *dev,
283 				    struct talitos_desc *desc,
284 				    void *context, int error),
285 		   void *context)
286 {
287 	struct talitos_private *priv = dev_get_drvdata(dev);
288 	struct talitos_request *request;
289 	unsigned long flags;
290 	int head;
291 	bool is_sec1 = has_ftr_sec1(priv);
292 
293 	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
294 
295 	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
296 		/* h/w fifo is full */
297 		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
298 		return -EAGAIN;
299 	}
300 
301 	head = priv->chan[ch].head;
302 	request = &priv->chan[ch].fifo[head];
303 
304 	/* map descriptor and save caller data */
305 	if (is_sec1) {
306 		desc->hdr1 = desc->hdr;
307 		request->dma_desc = dma_map_single(dev, &desc->hdr1,
308 						   TALITOS_DESC_SIZE,
309 						   DMA_BIDIRECTIONAL);
310 	} else {
311 		request->dma_desc = dma_map_single(dev, desc,
312 						   TALITOS_DESC_SIZE,
313 						   DMA_BIDIRECTIONAL);
314 	}
315 	request->callback = callback;
316 	request->context = context;
317 
318 	/* increment fifo head */
319 	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
320 
321 	smp_wmb();
322 	request->desc = desc;
323 
324 	/* GO! */
325 	wmb();
326 	out_be32(priv->chan[ch].reg + TALITOS_FF,
327 		 upper_32_bits(request->dma_desc));
328 	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
329 		 lower_32_bits(request->dma_desc));
330 
331 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
332 
333 	return -EINPROGRESS;
334 }
335 EXPORT_SYMBOL(talitos_submit);
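/*
 * A condensed sketch of the caller pattern (see ipsec_esp() and
 * common_nonsnoop() below for the real thing; my_done and my_unmap are
 * stand-in names for a caller's completion and cleanup helpers):
 *
 *	ret = talitos_submit(dev, ctx->ch, &edesc->desc, my_done, areq);
 *	if (ret != -EINPROGRESS) {
 *		// -EAGAIN means the channel fifo was full
 *		my_unmap(dev, edesc, areq);
 *		kfree(edesc);
 *	}
 *	return ret;
 *
 * my_done() later runs from a done tasklet via flush_channel() and must
 * inspect the descriptor header feedback bits for device status.
 */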
336 
337 /*
338  * process what was done, notify callback of error if not
339  */
340 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
341 {
342 	struct talitos_private *priv = dev_get_drvdata(dev);
343 	struct talitos_request *request, saved_req;
344 	unsigned long flags;
345 	int tail, status;
346 	bool is_sec1 = has_ftr_sec1(priv);
347 
348 	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
349 
350 	tail = priv->chan[ch].tail;
351 	while (priv->chan[ch].fifo[tail].desc) {
352 		__be32 hdr;
353 
354 		request = &priv->chan[ch].fifo[tail];
355 
356 		/* descriptors with their done bits set don't get the error */
357 		rmb();
358 		if (!is_sec1)
359 			hdr = request->desc->hdr;
360 		else if (request->desc->next_desc)
361 			hdr = (request->desc + 1)->hdr1;
362 		else
363 			hdr = request->desc->hdr1;
364 
365 		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
366 			status = 0;
367 		else
368 			if (!error)
369 				break;
370 			else
371 				status = error;
372 
373 		dma_unmap_single(dev, request->dma_desc,
374 				 TALITOS_DESC_SIZE,
375 				 DMA_BIDIRECTIONAL);
376 
377 		/* copy entries so we can call callback outside lock */
378 		saved_req.desc = request->desc;
379 		saved_req.callback = request->callback;
380 		saved_req.context = request->context;
381 
382 		/* release request entry in fifo */
383 		smp_wmb();
384 		request->desc = NULL;
385 
386 		/* increment fifo tail */
387 		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
388 
389 		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
390 
391 		atomic_dec(&priv->chan[ch].submit_count);
392 
393 		saved_req.callback(dev, saved_req.desc, saved_req.context,
394 				   status);
395 		/* channel may resume processing in single desc error case */
396 		if (error && !reset_ch && status == error)
397 			return;
398 		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
399 		tail = priv->chan[ch].tail;
400 	}
401 
402 	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
403 }
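/*
 * The per-channel fifo is a power-of-two ring: talitos_submit() advances
 * head, flush_channel() advances tail, both with a mask rather than a
 * modulo.  With fifo_len = 8, for example:
 *
 *	head = (head + 1) & (8 - 1);	// 7 wraps to 0, no division
 *
 * request->desc doubles as the occupancy flag: submit stores it last
 * (after smp_wmb()), flush clears it once the callback data has been
 * copied out, and submit_count keeps the h/w fifo from overfilling.
 */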
404 
405 /*
406  * process completed requests for channels that have done status
407  */
408 #define DEF_TALITOS1_DONE(name, ch_done_mask)				\
409 static void talitos1_done_##name(unsigned long data)			\
410 {									\
411 	struct device *dev = (struct device *)data;			\
412 	struct talitos_private *priv = dev_get_drvdata(dev);		\
413 	unsigned long flags;						\
414 									\
415 	if (ch_done_mask & 0x10000000)					\
416 		flush_channel(dev, 0, 0, 0);			\
417 	if (ch_done_mask & 0x40000000)					\
418 		flush_channel(dev, 1, 0, 0);			\
419 	if (ch_done_mask & 0x00010000)					\
420 		flush_channel(dev, 2, 0, 0);			\
421 	if (ch_done_mask & 0x00040000)					\
422 		flush_channel(dev, 3, 0, 0);			\
423 									\
424 	/* At this point, all completed channels have been processed */	\
425 	/* Unmask done interrupts for channels completed later on. */	\
426 	spin_lock_irqsave(&priv->reg_lock, flags);			\
427 	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
428 	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
429 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
430 }
431 
432 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
433 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
434 
435 #define DEF_TALITOS2_DONE(name, ch_done_mask)				\
436 static void talitos2_done_##name(unsigned long data)			\
437 {									\
438 	struct device *dev = (struct device *)data;			\
439 	struct talitos_private *priv = dev_get_drvdata(dev);		\
440 	unsigned long flags;						\
441 									\
442 	if (ch_done_mask & 1)						\
443 		flush_channel(dev, 0, 0, 0);				\
444 	if (ch_done_mask & (1 << 2))					\
445 		flush_channel(dev, 1, 0, 0);				\
446 	if (ch_done_mask & (1 << 4))					\
447 		flush_channel(dev, 2, 0, 0);				\
448 	if (ch_done_mask & (1 << 6))					\
449 		flush_channel(dev, 3, 0, 0);				\
450 									\
451 	/* At this point, all completed channels have been processed */	\
452 	/* Unmask done interrupts for channels completed later on. */	\
453 	spin_lock_irqsave(&priv->reg_lock, flags);			\
454 	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
455 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
456 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
457 }
458 
459 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
460 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
461 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
462 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
463 
464 /*
465  * locate current (offending) descriptor
466  */
467 static u32 current_desc_hdr(struct device *dev, int ch)
468 {
469 	struct talitos_private *priv = dev_get_drvdata(dev);
470 	int tail, iter;
471 	dma_addr_t cur_desc;
472 
473 	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
474 	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
475 
476 	if (!cur_desc) {
477 		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
478 		return 0;
479 	}
480 
481 	tail = priv->chan[ch].tail;
482 
483 	iter = tail;
484 	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
485 	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
486 		iter = (iter + 1) & (priv->fifo_len - 1);
487 		if (iter == tail) {
488 			dev_err(dev, "couldn't locate current descriptor\n");
489 			return 0;
490 		}
491 	}
492 
493 	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
494 		return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
495 
496 	return priv->chan[ch].fifo[iter].desc->hdr;
497 }
498 
499 /*
500  * user diagnostics; report root cause of error based on execution unit status
501  */
502 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
503 {
504 	struct talitos_private *priv = dev_get_drvdata(dev);
505 	int i;
506 
507 	if (!desc_hdr)
508 		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
509 
510 	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
511 	case DESC_HDR_SEL0_AFEU:
512 		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
513 			in_be32(priv->reg_afeu + TALITOS_EUISR),
514 			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
515 		break;
516 	case DESC_HDR_SEL0_DEU:
517 		dev_err(dev, "DEUISR 0x%08x_%08x\n",
518 			in_be32(priv->reg_deu + TALITOS_EUISR),
519 			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
520 		break;
521 	case DESC_HDR_SEL0_MDEUA:
522 	case DESC_HDR_SEL0_MDEUB:
523 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
524 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
525 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
526 		break;
527 	case DESC_HDR_SEL0_RNG:
528 		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
529 			in_be32(priv->reg_rngu + TALITOS_ISR),
530 			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
531 		break;
532 	case DESC_HDR_SEL0_PKEU:
533 		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
534 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
535 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
536 		break;
537 	case DESC_HDR_SEL0_AESU:
538 		dev_err(dev, "AESUISR 0x%08x_%08x\n",
539 			in_be32(priv->reg_aesu + TALITOS_EUISR),
540 			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
541 		break;
542 	case DESC_HDR_SEL0_CRCU:
543 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
544 			in_be32(priv->reg_crcu + TALITOS_EUISR),
545 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
546 		break;
547 	case DESC_HDR_SEL0_KEU:
548 		dev_err(dev, "KEUISR 0x%08x_%08x\n",
549 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
550 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
551 		break;
552 	}
553 
554 	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
555 	case DESC_HDR_SEL1_MDEUA:
556 	case DESC_HDR_SEL1_MDEUB:
557 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
558 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
559 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
560 		break;
561 	case DESC_HDR_SEL1_CRCU:
562 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
563 			in_be32(priv->reg_crcu + TALITOS_EUISR),
564 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
565 		break;
566 	}
567 
568 	for (i = 0; i < 8; i++)
569 		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
570 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
571 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
572 }
573 
574 /*
575  * recover from error interrupts
576  */
577 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
578 {
579 	struct talitos_private *priv = dev_get_drvdata(dev);
580 	unsigned int timeout = TALITOS_TIMEOUT;
581 	int ch, error, reset_dev = 0;
582 	u32 v_lo;
583 	bool is_sec1 = has_ftr_sec1(priv);
584 	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
585 
586 	for (ch = 0; ch < priv->num_channels; ch++) {
587 		/* skip channels without errors */
588 		if (is_sec1) {
589 			/* bits 29, 31, 17, 19 */
590 			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
591 				continue;
592 		} else {
593 			if (!(isr & (1 << (ch * 2 + 1))))
594 				continue;
595 		}
596 
597 		error = -EINVAL;
598 
599 		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
600 
601 		if (v_lo & TALITOS_CCPSR_LO_DOF) {
602 			dev_err(dev, "double fetch fifo overflow error\n");
603 			error = -EAGAIN;
604 			reset_ch = 1;
605 		}
606 		if (v_lo & TALITOS_CCPSR_LO_SOF) {
607 			/* h/w dropped descriptor */
608 			dev_err(dev, "single fetch fifo overflow error\n");
609 			error = -EAGAIN;
610 		}
611 		if (v_lo & TALITOS_CCPSR_LO_MDTE)
612 			dev_err(dev, "master data transfer error\n");
613 		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
614 			dev_err(dev, is_sec1 ? "pointer not complete error\n"
615 					     : "s/g data length zero error\n");
616 		if (v_lo & TALITOS_CCPSR_LO_FPZ)
617 			dev_err(dev, is_sec1 ? "parity error\n"
618 					     : "fetch pointer zero error\n");
619 		if (v_lo & TALITOS_CCPSR_LO_IDH)
620 			dev_err(dev, "illegal descriptor header error\n");
621 		if (v_lo & TALITOS_CCPSR_LO_IEU)
622 			dev_err(dev, is_sec1 ? "static assignment error\n"
623 					     : "invalid exec unit error\n");
624 		if (v_lo & TALITOS_CCPSR_LO_EU)
625 			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
626 		if (!is_sec1) {
627 			if (v_lo & TALITOS_CCPSR_LO_GB)
628 				dev_err(dev, "gather boundary error\n");
629 			if (v_lo & TALITOS_CCPSR_LO_GRL)
630 				dev_err(dev, "gather return/length error\n");
631 			if (v_lo & TALITOS_CCPSR_LO_SB)
632 				dev_err(dev, "scatter boundary error\n");
633 			if (v_lo & TALITOS_CCPSR_LO_SRL)
634 				dev_err(dev, "scatter return/length error\n");
635 		}
636 
637 		flush_channel(dev, ch, error, reset_ch);
638 
639 		if (reset_ch) {
640 			reset_channel(dev, ch);
641 		} else {
642 			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
643 				  TALITOS2_CCCR_CONT);
644 			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
645 			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
646 			       TALITOS2_CCCR_CONT) && --timeout)
647 				cpu_relax();
648 			if (timeout == 0) {
649 				dev_err(dev, "failed to restart channel %d\n",
650 					ch);
651 				reset_dev = 1;
652 			}
653 		}
654 	}
655 	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
656 	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
657 		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
658 			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
659 				isr, isr_lo);
660 		else
661 			dev_err(dev, "done overflow, internal time out, or "
662 				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
663 
664 		/* purge request queues */
665 		for (ch = 0; ch < priv->num_channels; ch++)
666 			flush_channel(dev, ch, -EIO, 1);
667 
668 		/* reset and reinitialize the device */
669 		init_device(dev);
670 	}
671 }
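/*
 * Worked example for the SEC1 error-bit expression used above,
 * 1 << (29 + (ch & 1) * 2 - (ch & 2) * 6):
 *
 *	ch = 0:  29 + 0 - 0  -> bit 29
 *	ch = 1:  29 + 2 - 0  -> bit 31
 *	ch = 2:  29 + 0 - 12 -> bit 17
 *	ch = 3:  29 + 2 - 12 -> bit 19
 *
 * matching the "bits 29, 31, 17, 19" comment.  SEC2 is the simple case:
 * channel ch reports errors on bit (ch * 2 + 1).
 */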
672 
673 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
674 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
675 {									       \
676 	struct device *dev = data;					       \
677 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
678 	u32 isr, isr_lo;						       \
679 	unsigned long flags;						       \
680 									       \
681 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
682 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
683 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
684 	/* Acknowledge interrupt */					       \
685 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
686 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
687 									       \
688 	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
689 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
690 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
691 	}								       \
692 	else {								       \
693 		if (likely(isr & ch_done_mask)) {			       \
694 			/* mask further done interrupts. */		       \
695 			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
696 			/* done_task will unmask done interrupts at exit */    \
697 			tasklet_schedule(&priv->done_task[tlet]);	       \
698 		}							       \
699 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
700 	}								       \
701 									       \
702 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
703 								IRQ_NONE;      \
704 }
705 
706 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
707 
708 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
709 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
710 {									       \
711 	struct device *dev = data;					       \
712 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
713 	u32 isr, isr_lo;						       \
714 	unsigned long flags;						       \
715 									       \
716 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
717 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
718 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
719 	/* Acknowledge interrupt */					       \
720 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
721 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
722 									       \
723 	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
724 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
725 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
726 	}								       \
727 	else {								       \
728 		if (likely(isr & ch_done_mask)) {			       \
729 			/* mask further done interrupts. */		       \
730 			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
731 			/* done_task will unmask done interrupts at exit */    \
732 			tasklet_schedule(&priv->done_task[tlet]);	       \
733 		}							       \
734 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
735 	}								       \
736 									       \
737 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
738 								IRQ_NONE;      \
739 }
740 
741 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
742 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
743 		       0)
744 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
745 		       1)
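/*
 * Each DEF_TALITOS2_INTERRUPT() expansion pastes the name into the
 * handler, e.g. DEF_TALITOS2_INTERRUPT(ch0_2, ...) produces
 *
 *	static irqreturn_t talitos2_interrupt_ch0_2(int irq, void *data);
 *
 * On dual-IRQ parts the ch0_2 handler services channels 0/2 with done
 * tasklet 0 and ch1_3 services channels 1/3 with tasklet 1, which is why
 * DEF_TALITOS2_DONE() above also comes in ch0_2/ch1_3 flavours.
 */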
746 
747 /*
748  * hwrng
749  */
750 static int talitos_rng_data_present(struct hwrng *rng, int wait)
751 {
752 	struct device *dev = (struct device *)rng->priv;
753 	struct talitos_private *priv = dev_get_drvdata(dev);
754 	u32 ofl;
755 	int i;
756 
757 	for (i = 0; i < 20; i++) {
758 		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
759 		      TALITOS_RNGUSR_LO_OFL;
760 		if (ofl || !wait)
761 			break;
762 		udelay(10);
763 	}
764 
765 	return !!ofl;
766 }
767 
768 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
769 {
770 	struct device *dev = (struct device *)rng->priv;
771 	struct talitos_private *priv = dev_get_drvdata(dev);
772 
773 	/* rng fifo requires 64-bit accesses */
774 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
775 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
776 
777 	return sizeof(u32);
778 }
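/*
 * The hwrng core drives these two legacy callbacks roughly as follows
 * (a sketch of the core's polling loop, not talitos code):
 *
 *	if (rng->data_present(rng, wait))
 *		n = rng->data_read(rng, &word);	// 4 bytes per call
 *
 * talitos_rng_data_read() reads both halves of the 64-bit EU fifo but
 * returns only the low word, honouring the 64-bit access requirement
 * while keeping the 32-bit hwrng interface.
 */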
779 
780 static int talitos_rng_init(struct hwrng *rng)
781 {
782 	struct device *dev = (struct device *)rng->priv;
783 	struct talitos_private *priv = dev_get_drvdata(dev);
784 	unsigned int timeout = TALITOS_TIMEOUT;
785 
786 	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
787 	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
788 		 & TALITOS_RNGUSR_LO_RD)
789 	       && --timeout)
790 		cpu_relax();
791 	if (timeout == 0) {
792 		dev_err(dev, "failed to reset rng hw\n");
793 		return -ENODEV;
794 	}
795 
796 	/* start generating */
797 	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
798 
799 	return 0;
800 }
801 
802 static int talitos_register_rng(struct device *dev)
803 {
804 	struct talitos_private *priv = dev_get_drvdata(dev);
805 	int err;
806 
807 	priv->rng.name		= dev_driver_string(dev);
808 	priv->rng.init		= talitos_rng_init;
809 	priv->rng.data_present	= talitos_rng_data_present;
810 	priv->rng.data_read	= talitos_rng_data_read;
811 	priv->rng.priv		= (unsigned long)dev;
812 
813 	err = hwrng_register(&priv->rng);
814 	if (!err)
815 		priv->rng_registered = true;
816 
817 	return err;
818 }
819 
820 static void talitos_unregister_rng(struct device *dev)
821 {
822 	struct talitos_private *priv = dev_get_drvdata(dev);
823 
824 	if (!priv->rng_registered)
825 		return;
826 
827 	hwrng_unregister(&priv->rng);
828 	priv->rng_registered = false;
829 }
830 
831 /*
832  * crypto alg
833  */
834 #define TALITOS_CRA_PRIORITY		3000
835 /*
836  * Defines a priority for doing AEAD with descriptors of type
837  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
838  */
839 #define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
840 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
841 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
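/*
 * Worked sizes: TALITOS_MAX_KEY_SIZE = AES_MAX_KEY_SIZE (32) +
 * SHA512_BLOCK_SIZE (128) = 160 bytes, sized for the concatenated
 * authkey || enckey blob kept in talitos_ctx below; TALITOS_MAX_IV_LENGTH
 * is 16 because AES_BLOCK_SIZE (16) > DES3_EDE_BLOCK_SIZE (8).
 */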
842 
843 struct talitos_ctx {
844 	struct device *dev;
845 	int ch;
846 	__be32 desc_hdr_template;
847 	u8 key[TALITOS_MAX_KEY_SIZE];
848 	u8 iv[TALITOS_MAX_IV_LENGTH];
849 	dma_addr_t dma_key;
850 	unsigned int keylen;
851 	unsigned int enckeylen;
852 	unsigned int authkeylen;
853 };
854 
855 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
856 #define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
857 
858 struct talitos_ahash_req_ctx {
859 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
860 	unsigned int hw_context_size;
861 	u8 buf[2][HASH_MAX_BLOCK_SIZE];
862 	int buf_idx;
863 	unsigned int swinit;
864 	unsigned int first;
865 	unsigned int last;
866 	unsigned int to_hash_later;
867 	unsigned int nbuf;
868 	struct scatterlist bufsl[2];
869 	struct scatterlist *psrc;
870 };
871 
872 struct talitos_export_state {
873 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
874 	u8 buf[HASH_MAX_BLOCK_SIZE];
875 	unsigned int swinit;
876 	unsigned int first;
877 	unsigned int last;
878 	unsigned int to_hash_later;
879 	unsigned int nbuf;
880 };
881 
882 static int aead_setkey(struct crypto_aead *authenc,
883 		       const u8 *key, unsigned int keylen)
884 {
885 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
886 	struct device *dev = ctx->dev;
887 	struct crypto_authenc_keys keys;
888 
889 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
890 		goto badkey;
891 
892 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
893 		goto badkey;
894 
895 	if (ctx->keylen)
896 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
897 
898 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
899 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
900 
901 	ctx->keylen = keys.authkeylen + keys.enckeylen;
902 	ctx->enckeylen = keys.enckeylen;
903 	ctx->authkeylen = keys.authkeylen;
904 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
905 				      DMA_TO_DEVICE);
906 
907 	memzero_explicit(&keys, sizeof(keys));
908 	return 0;
909 
910 badkey:
911 	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
912 	memzero_explicit(&keys, sizeof(keys));
913 	return -EINVAL;
914 }
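/*
 * The key blob parsed by crypto_authenc_extractkeys() above uses the
 * standard authenc layout (see crypto/authenc.c):
 *
 *	[ rtattr CRYPTO_AUTHENC_KEYA_PARAM: __be32 enckeylen ]
 *	[ keylen - enckeylen bytes of auth (HMAC) key ]
 *	[ enckeylen bytes of cipher key ]
 *
 * After extraction, ctx->key holds authkey || enckey back to back, so a
 * single DMA mapping of ctx->keylen bytes covers both halves.
 */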
915 
916 static int aead_des3_setkey(struct crypto_aead *authenc,
917 			    const u8 *key, unsigned int keylen)
918 {
919 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
920 	struct device *dev = ctx->dev;
921 	struct crypto_authenc_keys keys;
922 	u32 flags;
923 	int err;
924 
925 	err = crypto_authenc_extractkeys(&keys, key, keylen);
926 	if (unlikely(err))
927 		goto badkey;
928 
929 	err = -EINVAL;
930 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
931 		goto badkey;
932 
933 	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
934 		goto badkey;
935 
936 	flags = crypto_aead_get_flags(authenc);
937 	err = __des3_verify_key(&flags, keys.enckey);
938 	if (unlikely(err)) {
939 		crypto_aead_set_flags(authenc, flags);
940 		goto out;
941 	}
942 
943 	if (ctx->keylen)
944 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
945 
946 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
947 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
948 
949 	ctx->keylen = keys.authkeylen + keys.enckeylen;
950 	ctx->enckeylen = keys.enckeylen;
951 	ctx->authkeylen = keys.authkeylen;
952 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
953 				      DMA_TO_DEVICE);
954 
955 out:
956 	memzero_explicit(&keys, sizeof(keys));
957 	return err;
958 
959 badkey:
960 	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
961 	goto out;
962 }
963 
964 /*
965  * talitos_edesc - s/w-extended descriptor
966  * @src_nents: number of segments in input scatterlist
967  * @dst_nents: number of segments in output scatterlist
968  * @icv_ool: whether ICV is out-of-line
969  * @iv_dma: dma address of iv for checking continuity and link table
970  * @dma_len: length of dma mapped link_tbl space
971  * @dma_link_tbl: bus physical address of link_tbl/buf
972  * @desc: h/w descriptor
973  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
974  * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
975  *
976  * if decrypting (with authcheck), or either one of src_nents or dst_nents
977  * is greater than 1, an integrity check value is concatenated to the end
978  * of link_tbl data
979  */
980 struct talitos_edesc {
981 	int src_nents;
982 	int dst_nents;
983 	bool icv_ool;
984 	dma_addr_t iv_dma;
985 	int dma_len;
986 	dma_addr_t dma_link_tbl;
987 	struct talitos_desc desc;
988 	union {
989 		struct talitos_ptr link_tbl[0];
990 		u8 buf[0];
991 	};
992 };
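/*
 * Sizing example (the arithmetic is in talitos_edesc_alloc() below): on
 * SEC2, an AEAD request with src_nents = 3, dst_nents = 2 and a 12-byte
 * ICV allocates
 *
 *	alloc_len = sizeof(struct talitos_edesc)
 *		  + (3 + 2 + 2) * sizeof(struct talitos_ptr) // link tables
 *		  + 12 * 2			// stashed + generated ICV
 *		  + ivsize;			// iv copy at the tail
 *
 * On SEC1 the union is a flat bounce buffer instead, sized from the
 * actual src/dst byte lengths.
 */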
993 
994 static void talitos_sg_unmap(struct device *dev,
995 			     struct talitos_edesc *edesc,
996 			     struct scatterlist *src,
997 			     struct scatterlist *dst,
998 			     unsigned int len, unsigned int offset)
999 {
1000 	struct talitos_private *priv = dev_get_drvdata(dev);
1001 	bool is_sec1 = has_ftr_sec1(priv);
1002 	unsigned int src_nents = edesc->src_nents ? : 1;
1003 	unsigned int dst_nents = edesc->dst_nents ? : 1;
1004 
1005 	if (is_sec1 && dst && dst_nents > 1) {
1006 		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
1007 					   len, DMA_FROM_DEVICE);
1008 		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
1009 				     offset);
1010 	}
1011 	if (src != dst) {
1012 		if (src_nents == 1 || !is_sec1)
1013 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
1014 
1015 		if (dst && (dst_nents == 1 || !is_sec1))
1016 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
1017 	} else if (src_nents == 1 || !is_sec1) {
1018 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
1019 	}
1020 }
1021 
1022 static void ipsec_esp_unmap(struct device *dev,
1023 			    struct talitos_edesc *edesc,
1024 			    struct aead_request *areq)
1025 {
1026 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1027 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1028 	unsigned int ivsize = crypto_aead_ivsize(aead);
1029 	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
1030 	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
1031 
1032 	if (is_ipsec_esp)
1033 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1034 					 DMA_FROM_DEVICE);
1035 	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1036 
1037 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
1038 			 areq->assoclen);
1039 
1040 	if (edesc->dma_len)
1041 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1042 				 DMA_BIDIRECTIONAL);
1043 
1044 	if (!is_ipsec_esp) {
1045 		unsigned int dst_nents = edesc->dst_nents ? : 1;
1046 
1047 		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1048 				   areq->assoclen + areq->cryptlen - ivsize);
1049 	}
1050 }
1051 
1052 /*
1053  * ipsec_esp descriptor callbacks
1054  */
1055 static void ipsec_esp_encrypt_done(struct device *dev,
1056 				   struct talitos_desc *desc, void *context,
1057 				   int err)
1058 {
1059 	struct talitos_private *priv = dev_get_drvdata(dev);
1060 	bool is_sec1 = has_ftr_sec1(priv);
1061 	struct aead_request *areq = context;
1062 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1063 	unsigned int authsize = crypto_aead_authsize(authenc);
1064 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1065 	struct talitos_edesc *edesc;
1066 	struct scatterlist *sg;
1067 	void *icvdata;
1068 
1069 	edesc = container_of(desc, struct talitos_edesc, desc);
1070 
1071 	ipsec_esp_unmap(dev, edesc, areq);
1072 
1073 	/* copy the generated ICV to dst */
1074 	if (edesc->icv_ool) {
1075 		if (is_sec1)
1076 			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
1077 		else
1078 			icvdata = &edesc->link_tbl[edesc->src_nents +
1079 						   edesc->dst_nents + 2];
1080 		sg = sg_last(areq->dst, edesc->dst_nents);
1081 		memcpy((char *)sg_virt(sg) + sg->length - authsize,
1082 		       icvdata, authsize);
1083 	}
1084 
1085 	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1086 
1087 	kfree(edesc);
1088 
1089 	aead_request_complete(areq, err);
1090 }
1091 
1092 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1093 					  struct talitos_desc *desc,
1094 					  void *context, int err)
1095 {
1096 	struct aead_request *req = context;
1097 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1098 	unsigned int authsize = crypto_aead_authsize(authenc);
1099 	struct talitos_edesc *edesc;
1100 	struct scatterlist *sg;
1101 	char *oicv, *icv;
1102 	struct talitos_private *priv = dev_get_drvdata(dev);
1103 	bool is_sec1 = has_ftr_sec1(priv);
1104 
1105 	edesc = container_of(desc, struct talitos_edesc, desc);
1106 
1107 	ipsec_esp_unmap(dev, edesc, req);
1108 
1109 	if (!err) {
1110 		/* auth check */
1111 		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
1112 		icv = (char *)sg_virt(sg) + sg->length - authsize;
1113 
1114 		if (edesc->dma_len) {
1115 			if (is_sec1)
1116 				oicv = edesc->buf + req->assoclen +
1117 					       req->cryptlen;
1118 			else
1119 				oicv = (char *)
1120 				       &edesc->link_tbl[edesc->src_nents +
1121 							edesc->dst_nents + 2];
1122 			if (edesc->icv_ool)
1123 				icv = oicv + authsize;
1124 		} else
1125 			oicv = (char *)&edesc->link_tbl[0];
1126 
1127 		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1128 	}
1129 
1130 	kfree(edesc);
1131 
1132 	aead_request_complete(req, err);
1133 }
1134 
1135 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1136 					  struct talitos_desc *desc,
1137 					  void *context, int err)
1138 {
1139 	struct aead_request *req = context;
1140 	struct talitos_edesc *edesc;
1141 
1142 	edesc = container_of(desc, struct talitos_edesc, desc);
1143 
1144 	ipsec_esp_unmap(dev, edesc, req);
1145 
1146 	/* check ICV auth status */
1147 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1148 		     DESC_HDR_LO_ICCR1_PASS))
1149 		err = -EBADMSG;
1150 
1151 	kfree(edesc);
1152 
1153 	aead_request_complete(req, err);
1154 }
1155 
1156 /*
1157  * convert scatterlist to SEC h/w link table format
1158  * stop at cryptlen bytes
1159  */
1160 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1161 				 unsigned int offset, int cryptlen,
1162 				 struct talitos_ptr *link_tbl_ptr)
1163 {
1164 	int n_sg = sg_count;
1165 	int count = 0;
1166 
1167 	while (cryptlen && sg && n_sg--) {
1168 		unsigned int len = sg_dma_len(sg);
1169 
1170 		if (offset >= len) {
1171 			offset -= len;
1172 			goto next;
1173 		}
1174 
1175 		len -= offset;
1176 
1177 		if (len > cryptlen)
1178 			len = cryptlen;
1179 
1180 		to_talitos_ptr(link_tbl_ptr + count,
1181 			       sg_dma_address(sg) + offset, len, 0);
1182 		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1183 		count++;
1184 		cryptlen -= len;
1185 		offset = 0;
1186 
1187 next:
1188 		sg = sg_next(sg);
1189 	}
1190 
1191 	/* tag end of link table */
1192 	if (count > 0)
1193 		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1194 				       DESC_PTR_LNKTBL_RETURN, 0);
1195 
1196 	return count;
1197 }
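/*
 * Example: three 64-byte segments with offset = 16 and cryptlen = 160
 * produce
 *
 *	link_tbl[0]: sg0 + 16, len 48
 *	link_tbl[1]: sg1,      len 64
 *	link_tbl[2]: sg2,      len 48, ext = DESC_PTR_LNKTBL_RETURN
 *
 * (48 + 64 + 48 = 160); the RETURN tag on the final entry tells the SEC
 * to stop walking the table.
 */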
1198 
1199 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1200 			      unsigned int len, struct talitos_edesc *edesc,
1201 			      struct talitos_ptr *ptr, int sg_count,
1202 			      unsigned int offset, int tbl_off, int elen)
1203 {
1204 	struct talitos_private *priv = dev_get_drvdata(dev);
1205 	bool is_sec1 = has_ftr_sec1(priv);
1206 
1207 	if (!src) {
1208 		to_talitos_ptr(ptr, 0, 0, is_sec1);
1209 		return 1;
1210 	}
1211 	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1212 	if (sg_count == 1) {
1213 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1214 		return sg_count;
1215 	}
1216 	if (is_sec1) {
1217 		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1218 		return sg_count;
1219 	}
1220 	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
1221 					 &edesc->link_tbl[tbl_off]);
1222 	if (sg_count == 1) {
1223 		/* Only one segment now, so no link tbl needed */
1224 		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1225 		return sg_count;
1226 	}
1227 	to_talitos_ptr(ptr, edesc->dma_link_tbl +
1228 			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1229 	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1230 
1231 	return sg_count;
1232 }
1233 
1234 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1235 			  unsigned int len, struct talitos_edesc *edesc,
1236 			  struct talitos_ptr *ptr, int sg_count,
1237 			  unsigned int offset, int tbl_off)
1238 {
1239 	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1240 				  tbl_off, 0);
1241 }
1242 
1243 /*
1244  * fill in and submit ipsec_esp descriptor
1245  */
1246 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1247 		     void (*callback)(struct device *dev,
1248 				      struct talitos_desc *desc,
1249 				      void *context, int error))
1250 {
1251 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1252 	unsigned int authsize = crypto_aead_authsize(aead);
1253 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1254 	struct device *dev = ctx->dev;
1255 	struct talitos_desc *desc = &edesc->desc;
1256 	unsigned int cryptlen = areq->cryptlen;
1257 	unsigned int ivsize = crypto_aead_ivsize(aead);
1258 	int tbl_off = 0;
1259 	int sg_count, ret;
1260 	int elen = 0;
1261 	bool sync_needed = false;
1262 	struct talitos_private *priv = dev_get_drvdata(dev);
1263 	bool is_sec1 = has_ftr_sec1(priv);
1264 	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1265 	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1266 	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1267 
1268 	/* hmac key */
1269 	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1270 
1271 	sg_count = edesc->src_nents ?: 1;
1272 	if (is_sec1 && sg_count > 1)
1273 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1274 				  areq->assoclen + cryptlen);
1275 	else
1276 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1277 				      (areq->src == areq->dst) ?
1278 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1279 
1280 	/* hmac data */
1281 	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1282 			     &desc->ptr[1], sg_count, 0, tbl_off);
1283 
1284 	if (ret > 1) {
1285 		tbl_off += ret;
1286 		sync_needed = true;
1287 	}
1288 
1289 	/* cipher iv */
1290 	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1291 
1292 	/* cipher key */
1293 	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1294 		       ctx->enckeylen, is_sec1);
1295 
1296 	/*
1297 	 * cipher in
1298 	 * map and adjust cipher len to aead request cryptlen.
1299 	 * extent is bytes of HMAC appended to the ciphertext,
1300 	 * typically 12 for ipsec
1301 	 */
1302 	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1303 		elen = authsize;
1304 
1305 	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1306 				 sg_count, areq->assoclen, tbl_off, elen);
1307 
1308 	if (ret > 1) {
1309 		tbl_off += ret;
1310 		sync_needed = true;
1311 	}
1312 
1313 	/* cipher out */
1314 	if (areq->src != areq->dst) {
1315 		sg_count = edesc->dst_nents ? : 1;
1316 		if (!is_sec1 || sg_count == 1)
1317 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1318 	}
1319 
1320 	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1321 			     sg_count, areq->assoclen, tbl_off);
1322 
1323 	if (is_ipsec_esp)
1324 		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1325 
1326 	/* ICV data */
1327 	if (ret > 1) {
1328 		tbl_off += ret;
1329 		edesc->icv_ool = true;
1330 		sync_needed = true;
1331 
1332 		if (is_ipsec_esp) {
1333 			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1334 			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1335 				     sizeof(struct talitos_ptr) + authsize;
1336 
1337 			/* Add an entry to the link table for ICV data */
1338 			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1339 			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1340 					       is_sec1);
1341 
1342 			/* icv data follows link tables */
1343 			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1344 				       authsize, is_sec1);
1345 		} else {
1346 			dma_addr_t addr = edesc->dma_link_tbl;
1347 
1348 			if (is_sec1)
1349 				addr += areq->assoclen + cryptlen;
1350 			else
1351 				addr += sizeof(struct talitos_ptr) * tbl_off;
1352 
1353 			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
1354 		}
1355 	} else if (!is_ipsec_esp) {
1356 		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1357 				     &desc->ptr[6], sg_count, areq->assoclen +
1358 							      cryptlen,
1359 				     tbl_off);
1360 		if (ret > 1) {
1361 			tbl_off += ret;
1362 			edesc->icv_ool = true;
1363 			sync_needed = true;
1364 		} else {
1365 			edesc->icv_ool = false;
1366 		}
1367 	} else {
1368 		edesc->icv_ool = false;
1369 	}
1370 
1371 	/* iv out */
1372 	if (is_ipsec_esp)
1373 		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1374 				       DMA_FROM_DEVICE);
1375 
1376 	if (sync_needed)
1377 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1378 					   edesc->dma_len,
1379 					   DMA_BIDIRECTIONAL);
1380 
1381 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1382 	if (ret != -EINPROGRESS) {
1383 		ipsec_esp_unmap(dev, edesc, areq);
1384 		kfree(edesc);
1385 	}
1386 	return ret;
1387 }
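/*
 * Descriptor slot map filled in by ipsec_esp() above (the iv and key
 * slots swap between the IPSEC_ESP and HSNA descriptor types):
 *
 *	ptr[0] hmac key		ptr[4] cipher in (+ elen ICV extent)
 *	ptr[1] assoc/hmac data	ptr[5] cipher out (+ authsize for ESP)
 *	ptr[2] cipher iv (ESP)	ptr[6] iv out (ESP) or ICV out (HSNA)
 *	ptr[3] cipher key (ESP)
 */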
1388 
1389 /*
1390  * allocate and map the extended descriptor
1391  */
1392 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1393 						 struct scatterlist *src,
1394 						 struct scatterlist *dst,
1395 						 u8 *iv,
1396 						 unsigned int assoclen,
1397 						 unsigned int cryptlen,
1398 						 unsigned int authsize,
1399 						 unsigned int ivsize,
1400 						 int icv_stashing,
1401 						 u32 cryptoflags,
1402 						 bool encrypt)
1403 {
1404 	struct talitos_edesc *edesc;
1405 	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1406 	dma_addr_t iv_dma = 0;
1407 	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1408 		      GFP_ATOMIC;
1409 	struct talitos_private *priv = dev_get_drvdata(dev);
1410 	bool is_sec1 = has_ftr_sec1(priv);
1411 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1412 
1413 	if (cryptlen + authsize > max_len) {
1414 		dev_err(dev, "length exceeds h/w max limit\n");
1415 		return ERR_PTR(-EINVAL);
1416 	}
1417 
1418 	if (!dst || dst == src) {
1419 		src_len = assoclen + cryptlen + authsize;
1420 		src_nents = sg_nents_for_len(src, src_len);
1421 		if (src_nents < 0) {
1422 			dev_err(dev, "Invalid number of src SG.\n");
1423 			return ERR_PTR(-EINVAL);
1424 		}
1425 		src_nents = (src_nents == 1) ? 0 : src_nents;
1426 		dst_nents = dst ? src_nents : 0;
1427 		dst_len = 0;
1428 	} else { /* dst && dst != src */
1429 		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1430 		src_nents = sg_nents_for_len(src, src_len);
1431 		if (src_nents < 0) {
1432 			dev_err(dev, "Invalid number of src SG.\n");
1433 			return ERR_PTR(-EINVAL);
1434 		}
1435 		src_nents = (src_nents == 1) ? 0 : src_nents;
1436 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1437 		dst_nents = sg_nents_for_len(dst, dst_len);
1438 		if (dst_nents < 0) {
1439 			dev_err(dev, "Invalid number of dst SG.\n");
1440 			return ERR_PTR(-EINVAL);
1441 		}
1442 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1443 	}
1444 
1445 	/*
1446 	 * allocate space for base edesc plus the link tables,
1447 	 * allowing for two separate entries for AD and generated ICV (+ 2),
1448 	 * and space for two sets of ICVs (stashed and generated)
1449 	 */
1450 	alloc_len = sizeof(struct talitos_edesc);
1451 	if (src_nents || dst_nents) {
1452 		if (is_sec1)
1453 			dma_len = (src_nents ? src_len : 0) +
1454 				  (dst_nents ? dst_len : 0);
1455 		else
1456 			dma_len = (src_nents + dst_nents + 2) *
1457 				  sizeof(struct talitos_ptr) + authsize * 2;
1458 		alloc_len += dma_len;
1459 	} else {
1460 		dma_len = 0;
1461 		alloc_len += icv_stashing ? authsize : 0;
1462 	}
1463 
1464 	/* if it's an ahash, add space for a second desc next to the first one */
1465 	if (is_sec1 && !dst)
1466 		alloc_len += sizeof(struct talitos_desc);
1467 	alloc_len += ivsize;
1468 
1469 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1470 	if (!edesc)
1471 		return ERR_PTR(-ENOMEM);
1472 	if (ivsize) {
1473 		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1474 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1475 	}
1476 	memset(&edesc->desc, 0, sizeof(edesc->desc));
1477 
1478 	edesc->src_nents = src_nents;
1479 	edesc->dst_nents = dst_nents;
1480 	edesc->iv_dma = iv_dma;
1481 	edesc->dma_len = dma_len;
1482 	if (dma_len) {
1483 		void *addr = &edesc->link_tbl[0];
1484 
1485 		if (is_sec1 && !dst)
1486 			addr += sizeof(struct talitos_desc);
1487 		edesc->dma_link_tbl = dma_map_single(dev, addr,
1488 						     edesc->dma_len,
1489 						     DMA_BIDIRECTIONAL);
1490 	}
1491 	return edesc;
1492 }
1493 
1494 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1495 					      int icv_stashing, bool encrypt)
1496 {
1497 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1498 	unsigned int authsize = crypto_aead_authsize(authenc);
1499 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1500 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1501 
1502 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1503 				   iv, areq->assoclen, areq->cryptlen,
1504 				   authsize, ivsize, icv_stashing,
1505 				   areq->base.flags, encrypt);
1506 }
1507 
1508 static int aead_encrypt(struct aead_request *req)
1509 {
1510 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1511 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1512 	struct talitos_edesc *edesc;
1513 
1514 	/* allocate extended descriptor */
1515 	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1516 	if (IS_ERR(edesc))
1517 		return PTR_ERR(edesc);
1518 
1519 	/* set encrypt */
1520 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1521 
1522 	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1523 }
1524 
1525 static int aead_decrypt(struct aead_request *req)
1526 {
1527 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1528 	unsigned int authsize = crypto_aead_authsize(authenc);
1529 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1530 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1531 	struct talitos_edesc *edesc;
1532 	struct scatterlist *sg;
1533 	void *icvdata;
1534 
1535 	req->cryptlen -= authsize;
1536 
1537 	/* allocate extended descriptor */
1538 	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1539 	if (IS_ERR(edesc))
1540 		return PTR_ERR(edesc);
1541 
1542 	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1543 	    ((!edesc->src_nents && !edesc->dst_nents) ||
1544 	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1545 
1546 		/* decrypt and check the ICV */
1547 		edesc->desc.hdr = ctx->desc_hdr_template |
1548 				  DESC_HDR_DIR_INBOUND |
1549 				  DESC_HDR_MODE1_MDEU_CICV;
1550 
1551 		/* reset integrity check result bits */
1552 
1553 		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1554 	}
1555 
1556 	/* Have to check the ICV with software */
1557 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1558 
1559 	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1560 	if (edesc->dma_len)
1561 		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1562 						   edesc->dst_nents + 2];
1563 	else
1564 		icvdata = &edesc->link_tbl[0];
1565 
1566 	sg = sg_last(req->src, edesc->src_nents ? : 1);
1567 
1568 	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1569 
1570 	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1571 }
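/*
 * These entry points are reached through the generic AEAD API; a minimal
 * caller sketch using standard kernel crypto calls (not talitos-specific;
 * my_complete and my_ctx are stand-in names):
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);	// authenc-encoded blob
 *	crypto_aead_setauthsize(tfm, 12);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, my_complete, my_ctx);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, sg, sg, cryptlen, iv);
 *	err = crypto_aead_encrypt(req);	// lands in aead_encrypt() above
 *
 * err == -EINPROGRESS means the talitos channel accepted the descriptor
 * and my_complete() will finish the request from the done callback.
 */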
1572 
1573 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1574 			     const u8 *key, unsigned int keylen)
1575 {
1576 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1577 	struct device *dev = ctx->dev;
1578 
1579 	if (ctx->keylen)
1580 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1581 
1582 	memcpy(&ctx->key, key, keylen);
1583 	ctx->keylen = keylen;
1584 
1585 	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1586 
1587 	return 0;
1588 }
1589 
1590 static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1591 				 const u8 *key, unsigned int keylen)
1592 {
1593 	u32 tmp[DES_EXPKEY_WORDS];
1594 
1595 	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1596 		     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
1597 	    !des_ekey(tmp, key)) {
1598 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1599 		return -EINVAL;
1600 	}
1601 
1602 	return ablkcipher_setkey(cipher, key, keylen);
1603 }
1604 
1605 static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1606 				  const u8 *key, unsigned int keylen)
1607 {
1608 	u32 flags;
1609 	int err;
1610 
1611 	flags = crypto_ablkcipher_get_flags(cipher);
1612 	err = __des3_verify_key(&flags, key);
1613 	if (unlikely(err)) {
1614 		crypto_ablkcipher_set_flags(cipher, flags);
1615 		return err;
1616 	}
1617 
1618 	return ablkcipher_setkey(cipher, key, keylen);
1619 }
1620 
1621 static void common_nonsnoop_unmap(struct device *dev,
1622 				  struct talitos_edesc *edesc,
1623 				  struct ablkcipher_request *areq)
1624 {
1625 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1626 
1627 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1628 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1629 
1630 	if (edesc->dma_len)
1631 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1632 				 DMA_BIDIRECTIONAL);
1633 }
1634 
1635 static void ablkcipher_done(struct device *dev,
1636 			    struct talitos_desc *desc, void *context,
1637 			    int err)
1638 {
1639 	struct ablkcipher_request *areq = context;
1640 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1641 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1642 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1643 	struct talitos_edesc *edesc;
1644 
1645 	edesc = container_of(desc, struct talitos_edesc, desc);
1646 
1647 	common_nonsnoop_unmap(dev, edesc, areq);
1648 	memcpy(areq->info, ctx->iv, ivsize);
1649 
1650 	kfree(edesc);
1651 
1652 	areq->base.complete(&areq->base, err);
1653 }
1654 
1655 static int common_nonsnoop(struct talitos_edesc *edesc,
1656 			   struct ablkcipher_request *areq,
1657 			   void (*callback) (struct device *dev,
1658 					     struct talitos_desc *desc,
1659 					     void *context, int error))
1660 {
1661 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1662 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1663 	struct device *dev = ctx->dev;
1664 	struct talitos_desc *desc = &edesc->desc;
1665 	unsigned int cryptlen = areq->nbytes;
1666 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1667 	int sg_count, ret;
1668 	bool sync_needed = false;
1669 	struct talitos_private *priv = dev_get_drvdata(dev);
1670 	bool is_sec1 = has_ftr_sec1(priv);
1671 
1672 	/* first DWORD empty */
1673 
1674 	/* cipher iv */
1675 	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1676 
1677 	/* cipher key */
1678 	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1679 
1680 	sg_count = edesc->src_nents ?: 1;
1681 	if (is_sec1 && sg_count > 1)
1682 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1683 				  cryptlen);
1684 	else
1685 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1686 				      (areq->src == areq->dst) ?
1687 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1688 	/*
1689 	 * cipher in
1690 	 */
1691 	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1692 				  &desc->ptr[3], sg_count, 0, 0);
1693 	if (sg_count > 1)
1694 		sync_needed = true;
1695 
1696 	/* cipher out */
1697 	if (areq->src != areq->dst) {
1698 		sg_count = edesc->dst_nents ? : 1;
1699 		if (!is_sec1 || sg_count == 1)
1700 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1701 	}
1702 
1703 	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1704 			     sg_count, 0, (edesc->src_nents + 1));
1705 	if (ret > 1)
1706 		sync_needed = true;
1707 
1708 	/* iv out */
1709 	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1710 			       DMA_FROM_DEVICE);
1711 
1712 	/* last DWORD empty */
1713 
1714 	if (sync_needed)
1715 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1716 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1717 
1718 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1719 	if (ret != -EINPROGRESS) {
1720 		common_nonsnoop_unmap(dev, edesc, areq);
1721 		kfree(edesc);
1722 	}
1723 	return ret;
1724 }
1725 
1726 static struct talitos_edesc *
1727 ablkcipher_edesc_alloc(struct ablkcipher_request *areq, bool encrypt)
1728 {
1729 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1730 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1731 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1732 
1733 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1734 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1735 				   areq->base.flags, encrypt);
1736 }
1737 
1738 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1739 {
1740 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1741 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1742 	struct talitos_edesc *edesc;
1743 
1744 	/* allocate extended descriptor */
1745 	edesc = ablkcipher_edesc_alloc(areq, true);
1746 	if (IS_ERR(edesc))
1747 		return PTR_ERR(edesc);
1748 
1749 	/* set encrypt */
1750 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1751 
1752 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1753 }
1754 
1755 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1756 {
1757 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1758 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1759 	struct talitos_edesc *edesc;
1760 
1761 	/* allocate extended descriptor */
1762 	edesc = ablkcipher_edesc_alloc(areq, false);
1763 	if (IS_ERR(edesc))
1764 		return PTR_ERR(edesc);
1765 
1766 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1767 
1768 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1769 }
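
/*
 * A minimal sketch (not part of the driver) of how the ops above are
 * reached through the old ablkcipher API, assuming this driver's
 * "cbc-aes-talitos" instance wins priority selection; key, iv, src,
 * dst and req are placeholders:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);	  lands in ablkcipher_encrypt()
 */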
1770 
1771 static void common_nonsnoop_hash_unmap(struct device *dev,
1772 				       struct talitos_edesc *edesc,
1773 				       struct ahash_request *areq)
1774 {
1775 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1776 	struct talitos_private *priv = dev_get_drvdata(dev);
1777 	bool is_sec1 = has_ftr_sec1(priv);
1778 	struct talitos_desc *desc = &edesc->desc;
1779 	struct talitos_desc *desc2 = desc + 1;
1780 
1781 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1782 	if (desc->next_desc &&
1783 	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
1784 		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1785 
1786 	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1787 
1788 	/* When using hashctx-in, must unmap it. */
1789 	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1790 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1791 					 DMA_TO_DEVICE);
1792 	else if (desc->next_desc)
1793 		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1794 					 DMA_TO_DEVICE);
1795 
1796 	if (is_sec1 && req_ctx->nbuf)
1797 		unmap_single_talitos_ptr(dev, &desc->ptr[3],
1798 					 DMA_TO_DEVICE);
1799 
1800 	if (edesc->dma_len)
1801 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1802 				 DMA_BIDIRECTIONAL);
1803 
1804 	if (edesc->desc.next_desc)
1805 		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1806 				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1807 }
1808 
1809 static void ahash_done(struct device *dev,
1810 		       struct talitos_desc *desc, void *context,
1811 		       int err)
1812 {
1813 	struct ahash_request *areq = context;
1814 	struct talitos_edesc *edesc =
1815 		 container_of(desc, struct talitos_edesc, desc);
1816 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1817 
1818 	if (!req_ctx->last && req_ctx->to_hash_later) {
1819 		/* Position any partial block for next update/final/finup */
1820 		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1821 		req_ctx->nbuf = req_ctx->to_hash_later;
1822 	}
1823 	common_nonsnoop_hash_unmap(dev, edesc, areq);
1824 
1825 	kfree(edesc);
1826 
1827 	areq->base.complete(&areq->base, err);
1828 }
1829 
1830 /*
1831  * SEC1 doesn't like hashing zero-sized messages, so we do the
1832  * padding ourselves and submit a padded block
1833  */
1834 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1835 			       struct talitos_edesc *edesc,
1836 			       struct talitos_ptr *ptr)
1837 {
1838 	static u8 padded_hash[64] = {
1839 		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1840 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1841 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1842 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1843 	};
1844 
1845 	pr_err_once("Bug in SEC1, padding ourself\n");
1846 	pr_err_once("Bug in SEC1, padding ourselves\n");
1847 	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1848 			       (char *)padded_hash, DMA_TO_DEVICE);
1849 }
1850 
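/*
 * Fill and submit the SEC descriptor for an ahash request: ptr[1]
 * carries the hash context in (except on the very first operation),
 * ptr[2] the HMAC key if any, ptr[3] the data, and ptr[5] either the
 * final digest or the saved hash context.  On SEC1, bytes already
 * buffered in req_ctx->buf go through ptr[3] and any fresh data
 * through a second descriptor chained via next_desc.
 */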
1851 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1852 				struct ahash_request *areq, unsigned int length,
1853 				unsigned int offset,
1854 				void (*callback) (struct device *dev,
1855 						  struct talitos_desc *desc,
1856 						  void *context, int error))
1857 {
1858 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1859 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1860 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1861 	struct device *dev = ctx->dev;
1862 	struct talitos_desc *desc = &edesc->desc;
1863 	int ret;
1864 	bool sync_needed = false;
1865 	struct talitos_private *priv = dev_get_drvdata(dev);
1866 	bool is_sec1 = has_ftr_sec1(priv);
1867 	int sg_count;
1868 
1869 	/* first DWORD empty */
1870 
1871 	/* hash context in */
1872 	if (!req_ctx->first || req_ctx->swinit) {
1873 		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1874 					      req_ctx->hw_context_size,
1875 					      req_ctx->hw_context,
1876 					      DMA_TO_DEVICE);
1877 		req_ctx->swinit = 0;
1878 	}
1879 	/* Indicate next op is not the first. */
1880 	req_ctx->first = 0;
1881 
1882 	/* HMAC key */
1883 	if (ctx->keylen)
1884 		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1885 			       is_sec1);
1886 
1887 	if (is_sec1 && req_ctx->nbuf)
1888 		length -= req_ctx->nbuf;
1889 
1890 	sg_count = edesc->src_nents ?: 1;
1891 	if (is_sec1 && sg_count > 1)
1892 		sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1893 				   edesc->buf + sizeof(struct talitos_desc),
1894 				   length, req_ctx->nbuf);
1895 	else if (length)
1896 		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1897 				      DMA_TO_DEVICE);
1898 	/*
1899 	 * data in
1900 	 */
1901 	if (is_sec1 && req_ctx->nbuf) {
1902 		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1903 				       req_ctx->buf[req_ctx->buf_idx],
1904 				       DMA_TO_DEVICE);
1905 	} else {
1906 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1907 					  &desc->ptr[3], sg_count, offset, 0);
1908 		if (sg_count > 1)
1909 			sync_needed = true;
1910 	}
1911 
1912 	/* fifth DWORD empty */
1913 
1914 	/* hash/HMAC out -or- hash context out */
1915 	if (req_ctx->last)
1916 		map_single_talitos_ptr(dev, &desc->ptr[5],
1917 				       crypto_ahash_digestsize(tfm),
1918 				       areq->result, DMA_FROM_DEVICE);
1919 	else
1920 		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1921 					      req_ctx->hw_context_size,
1922 					      req_ctx->hw_context,
1923 					      DMA_FROM_DEVICE);
1924 
1925 	/* last DWORD empty */
1926 
1927 	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1928 		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1929 
1930 	if (is_sec1 && req_ctx->nbuf && length) {
1931 		struct talitos_desc *desc2 = desc + 1;
1932 		dma_addr_t next_desc;
1933 
1934 		memset(desc2, 0, sizeof(*desc2));
1935 		desc2->hdr = desc->hdr;
1936 		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1937 		desc2->hdr1 = desc2->hdr;
1938 		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1939 		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1940 		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1941 
1942 		if (desc->ptr[1].ptr)
1943 			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1944 					 is_sec1);
1945 		else
1946 			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1947 						      req_ctx->hw_context_size,
1948 						      req_ctx->hw_context,
1949 						      DMA_TO_DEVICE);
1950 		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1951 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1952 					  &desc2->ptr[3], sg_count, offset, 0);
1953 		if (sg_count > 1)
1954 			sync_needed = true;
1955 		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1956 		if (req_ctx->last)
1957 			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1958 						      req_ctx->hw_context_size,
1959 						      req_ctx->hw_context,
1960 						      DMA_FROM_DEVICE);
1961 
1962 		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1963 					   DMA_BIDIRECTIONAL);
1964 		desc->next_desc = cpu_to_be32(next_desc);
1965 	}
1966 
1967 	if (sync_needed)
1968 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1969 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1970 
1971 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1972 	if (ret != -EINPROGRESS) {
1973 		common_nonsnoop_hash_unmap(dev, edesc, areq);
1974 		kfree(edesc);
1975 	}
1976 	return ret;
1977 }
1978 
1979 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1980 					       unsigned int nbytes)
1981 {
1982 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1983 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1984 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1985 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1986 	bool is_sec1 = has_ftr_sec1(priv);
1987 
1988 	if (is_sec1)
1989 		nbytes -= req_ctx->nbuf;
1990 
1991 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1992 				   nbytes, 0, 0, 0, areq->base.flags, false);
1993 }
1994 
1995 static int ahash_init(struct ahash_request *areq)
1996 {
1997 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1998 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1999 	struct device *dev = ctx->dev;
2000 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2001 	unsigned int size;
2002 	dma_addr_t dma;
2003 
2004 	/* Initialize the context */
2005 	req_ctx->buf_idx = 0;
2006 	req_ctx->nbuf = 0;
2007 	req_ctx->first = 1; /* first indicates h/w must init its context */
2008 	req_ctx->swinit = 0; /* assume h/w init of context */
2009 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2010 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2011 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2012 	req_ctx->hw_context_size = size;
2013 
2014 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2015 			     DMA_TO_DEVICE);
2016 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2017 
2018 	return 0;
2019 }
2020 
2021 /*
2022  * on h/w without explicit sha224 support, we initialize h/w context
2023  * manually with sha224 constants, and tell it to run sha256.
2024  */
2025 static int ahash_init_sha224_swinit(struct ahash_request *areq)
2026 {
2027 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2028 
2029 	req_ctx->hw_context[0] = SHA224_H0;
2030 	req_ctx->hw_context[1] = SHA224_H1;
2031 	req_ctx->hw_context[2] = SHA224_H2;
2032 	req_ctx->hw_context[3] = SHA224_H3;
2033 	req_ctx->hw_context[4] = SHA224_H4;
2034 	req_ctx->hw_context[5] = SHA224_H5;
2035 	req_ctx->hw_context[6] = SHA224_H6;
2036 	req_ctx->hw_context[7] = SHA224_H7;
2037 
2038 	/* init 64-bit count */
2039 	req_ctx->hw_context[8] = 0;
2040 	req_ctx->hw_context[9] = 0;
2041 
2042 	ahash_init(areq);
2043 	req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */
2044 
2045 	return 0;
2046 }
2047 
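/*
 * Common engine behind update/final/finup/digest: buffer sub-block
 * tails, keep one full block in reserve on non-final updates (so the
 * SEC only pads on the very last operation), and hand whole blocks to
 * the hardware.
 */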
2048 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2049 {
2050 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2051 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2052 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2053 	struct talitos_edesc *edesc;
2054 	unsigned int blocksize =
2055 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2056 	unsigned int nbytes_to_hash;
2057 	unsigned int to_hash_later;
2058 	unsigned int nsg;
2059 	int nents;
2060 	struct device *dev = ctx->dev;
2061 	struct talitos_private *priv = dev_get_drvdata(dev);
2062 	bool is_sec1 = has_ftr_sec1(priv);
2063 	int offset = 0;
2064 	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
2065 
2066 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2067 		/* Buffer up to one whole block */
2068 		nents = sg_nents_for_len(areq->src, nbytes);
2069 		if (nents < 0) {
2070 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2071 			return nents;
2072 		}
2073 		sg_copy_to_buffer(areq->src, nents,
2074 				  ctx_buf + req_ctx->nbuf, nbytes);
2075 		req_ctx->nbuf += nbytes;
2076 		return 0;
2077 	}
2078 
2079 	/* At least (blocksize + 1) bytes are available to hash */
2080 	nbytes_to_hash = nbytes + req_ctx->nbuf;
2081 	to_hash_later = nbytes_to_hash & (blocksize - 1);
2082 
2083 	if (req_ctx->last) {
2084 		to_hash_later = 0;
2085 	} else if (to_hash_later) {
2086 		/* There is a partial block. Hash the full block(s) now */
2087 		nbytes_to_hash -= to_hash_later;
2088 	} else {
2089 		/* Keep one block buffered */
2090 		nbytes_to_hash -= blocksize;
2091 		to_hash_later = blocksize;
2092 	}
2093 
2094 	/* Chain in any previously buffered data */
2095 	if (!is_sec1 && req_ctx->nbuf) {
2096 		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2097 		sg_init_table(req_ctx->bufsl, nsg);
2098 		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2099 		if (nsg > 1)
2100 			sg_chain(req_ctx->bufsl, 2, areq->src);
2101 		req_ctx->psrc = req_ctx->bufsl;
2102 	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2103 		if (nbytes_to_hash > blocksize)
2104 			offset = blocksize - req_ctx->nbuf;
2105 		else
2106 			offset = nbytes_to_hash - req_ctx->nbuf;
2107 		nents = sg_nents_for_len(areq->src, offset);
2108 		if (nents < 0) {
2109 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2110 			return nents;
2111 		}
2112 		sg_copy_to_buffer(areq->src, nents,
2113 				  ctx_buf + req_ctx->nbuf, offset);
2114 		req_ctx->nbuf += offset;
2115 		req_ctx->psrc = areq->src;
2116 	} else {
2117 		req_ctx->psrc = areq->src;
	}
2118 
2119 	if (to_hash_later) {
2120 		nents = sg_nents_for_len(areq->src, nbytes);
2121 		if (nents < 0) {
2122 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2123 			return nents;
2124 		}
2125 		sg_pcopy_to_buffer(areq->src, nents,
2126 				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2127 				   to_hash_later,
2128 				   nbytes - to_hash_later);
2129 	}
2130 	req_ctx->to_hash_later = to_hash_later;
2131 
2132 	/* Allocate extended descriptor */
2133 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2134 	if (IS_ERR(edesc))
2135 		return PTR_ERR(edesc);
2136 
2137 	edesc->desc.hdr = ctx->desc_hdr_template;
2138 
2139 	/* On last one, request SEC to pad; otherwise continue */
2140 	if (req_ctx->last)
2141 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2142 	else
2143 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2144 
2145 	/* request SEC to INIT hash. */
2146 	if (req_ctx->first && !req_ctx->swinit)
2147 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2148 
2149 	/* When the tfm context has a keylen, it's an HMAC.
2150 	 * A first or last (i.e. not middle) descriptor must request HMAC.
2151 	 */
2152 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2153 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2154 
2155 	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
2156 				    ahash_done);
2157 }
2158 
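/*
 * The entry points below just set req_ctx->last and delegate to
 * ahash_process_req().  A minimal sketch (not part of the driver) of
 * driving them through the crypto API, assuming "sha256-talitos" wins
 * priority selection; sgl, digest, nbytes and my_done are
 * placeholders:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, NULL);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	crypto_ahash_digest(req);	  lands in ahash_digest() below
 */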
2159 static int ahash_update(struct ahash_request *areq)
2160 {
2161 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2162 
2163 	req_ctx->last = 0;
2164 
2165 	return ahash_process_req(areq, areq->nbytes);
2166 }
2167 
2168 static int ahash_final(struct ahash_request *areq)
2169 {
2170 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2171 
2172 	req_ctx->last = 1;
2173 
2174 	return ahash_process_req(areq, 0);
2175 }
2176 
2177 static int ahash_finup(struct ahash_request *areq)
2178 {
2179 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2180 
2181 	req_ctx->last = 1;
2182 
2183 	return ahash_process_req(areq, areq->nbytes);
2184 }
2185 
2186 static int ahash_digest(struct ahash_request *areq)
2187 {
2188 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2189 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2190 
2191 	ahash->init(areq);
2192 	req_ctx->last = 1;
2193 
2194 	return ahash_process_req(areq, areq->nbytes);
2195 }
2196 
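/*
 * export/import serialise in-flight hash state (hardware context plus
 * any buffered partial block) so it can be carried across requests.
 * The dma_map/dma_unmap pairs only sync hw_context with the device;
 * no mapping outlives either function.
 */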
2197 static int ahash_export(struct ahash_request *areq, void *out)
2198 {
2199 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2200 	struct talitos_export_state *export = out;
2201 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2202 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2203 	struct device *dev = ctx->dev;
2204 	dma_addr_t dma;
2205 
2206 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2207 			     DMA_FROM_DEVICE);
2208 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2209 
2210 	memcpy(export->hw_context, req_ctx->hw_context,
2211 	       req_ctx->hw_context_size);
2212 	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2213 	export->swinit = req_ctx->swinit;
2214 	export->first = req_ctx->first;
2215 	export->last = req_ctx->last;
2216 	export->to_hash_later = req_ctx->to_hash_later;
2217 	export->nbuf = req_ctx->nbuf;
2218 
2219 	return 0;
2220 }
2221 
2222 static int ahash_import(struct ahash_request *areq, const void *in)
2223 {
2224 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2225 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2226 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2227 	struct device *dev = ctx->dev;
2228 	const struct talitos_export_state *export = in;
2229 	unsigned int size;
2230 	dma_addr_t dma;
2231 
2232 	memset(req_ctx, 0, sizeof(*req_ctx));
2233 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2234 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2235 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2236 	req_ctx->hw_context_size = size;
2237 	memcpy(req_ctx->hw_context, export->hw_context, size);
2238 	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2239 	req_ctx->swinit = export->swinit;
2240 	req_ctx->first = export->first;
2241 	req_ctx->last = export->last;
2242 	req_ctx->to_hash_later = export->to_hash_later;
2243 	req_ctx->nbuf = export->nbuf;
2244 
2245 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2246 			     DMA_TO_DEVICE);
2247 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2248 
2249 	return 0;
2250 }
2251 
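/*
 * Digest a longer-than-blocksize HMAC key down to digestsize, using
 * this same tfm synchronously via crypto_wait_req().  ctx->keylen is
 * forced to 0 first so the key is hashed as plain data.
 */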
2252 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2253 		   u8 *hash)
2254 {
2255 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2257 	struct scatterlist sg[1];
2258 	struct ahash_request *req;
2259 	struct crypto_wait wait;
2260 	int ret;
2261 
2262 	crypto_init_wait(&wait);
2263 
2264 	req = ahash_request_alloc(tfm, GFP_KERNEL);
2265 	if (!req)
2266 		return -ENOMEM;
2267 
2268 	/* Keep tfm keylen == 0 during hash of the long key */
2269 	ctx->keylen = 0;
2270 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2271 				   crypto_req_done, &wait);
2272 
2273 	sg_init_one(&sg[0], key, keylen);
2274 
2275 	ahash_request_set_crypt(req, sg, hash, keylen);
2276 	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2277 
2278 	ahash_request_free(req);
2279 
2280 	return ret;
2281 }
2282 
2283 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2284 			unsigned int keylen)
2285 {
2286 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2287 	struct device *dev = ctx->dev;
2288 	unsigned int blocksize =
2289 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2290 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2291 	unsigned int keysize = keylen;
2292 	u8 hash[SHA512_DIGEST_SIZE];
2293 	int ret;
2294 
2295 	if (keylen <= blocksize) {
2296 		memcpy(ctx->key, key, keysize);
2297 	} else {
2298 		/* Must get the hash of the long key */
2299 		ret = keyhash(tfm, key, keylen, hash);
2300 
2301 		if (ret) {
2302 			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2303 			return -EINVAL;
2304 		}
2305 
2306 		keysize = digestsize;
2307 		memcpy(ctx->key, hash, digestsize);
2308 	}
2309 
2310 	if (ctx->keylen)
2311 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2312 
2313 	ctx->keylen = keysize;
2314 	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2315 
2316 	return 0;
2317 }
2318 
2320 struct talitos_alg_template {
2321 	u32 type;
2322 	u32 priority;
2323 	union {
2324 		struct crypto_alg crypto;
2325 		struct ahash_alg hash;
2326 		struct aead_alg aead;
2327 	} alg;
2328 	__be32 desc_hdr_template;
2329 };
2330 
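/*
 * Algorithm template table.  Several AEADs appear twice: once as a
 * single-pass IPSEC_ESP descriptor and once, at the lower
 * TALITOS_CRA_PRIORITY_AEAD_HSNA priority, as an HMAC_SNOOP_NO_AFEU
 * descriptor; hw_supports() later filters out whichever descriptor
 * type a given SEC cannot execute.
 */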
2331 static struct talitos_alg_template driver_algs[] = {
2332 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2333 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2334 		.alg.aead = {
2335 			.base = {
2336 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2337 				.cra_driver_name = "authenc-hmac-sha1-"
2338 						   "cbc-aes-talitos",
2339 				.cra_blocksize = AES_BLOCK_SIZE,
2340 				.cra_flags = CRYPTO_ALG_ASYNC,
2341 			},
2342 			.ivsize = AES_BLOCK_SIZE,
2343 			.maxauthsize = SHA1_DIGEST_SIZE,
2344 		},
2345 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2346 				     DESC_HDR_SEL0_AESU |
2347 				     DESC_HDR_MODE0_AESU_CBC |
2348 				     DESC_HDR_SEL1_MDEUA |
2349 				     DESC_HDR_MODE1_MDEU_INIT |
2350 				     DESC_HDR_MODE1_MDEU_PAD |
2351 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2352 	},
2353 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2354 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2355 		.alg.aead = {
2356 			.base = {
2357 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2358 				.cra_driver_name = "authenc-hmac-sha1-"
2359 						   "cbc-aes-talitos",
2360 				.cra_blocksize = AES_BLOCK_SIZE,
2361 				.cra_flags = CRYPTO_ALG_ASYNC,
2362 			},
2363 			.ivsize = AES_BLOCK_SIZE,
2364 			.maxauthsize = SHA1_DIGEST_SIZE,
2365 		},
2366 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2367 				     DESC_HDR_SEL0_AESU |
2368 				     DESC_HDR_MODE0_AESU_CBC |
2369 				     DESC_HDR_SEL1_MDEUA |
2370 				     DESC_HDR_MODE1_MDEU_INIT |
2371 				     DESC_HDR_MODE1_MDEU_PAD |
2372 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2373 	},
2374 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2375 		.alg.aead = {
2376 			.base = {
2377 				.cra_name = "authenc(hmac(sha1),"
2378 					    "cbc(des3_ede))",
2379 				.cra_driver_name = "authenc-hmac-sha1-"
2380 						   "cbc-3des-talitos",
2381 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2382 				.cra_flags = CRYPTO_ALG_ASYNC,
2383 			},
2384 			.ivsize = DES3_EDE_BLOCK_SIZE,
2385 			.maxauthsize = SHA1_DIGEST_SIZE,
2386 			.setkey = aead_des3_setkey,
2387 		},
2388 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2389 				     DESC_HDR_SEL0_DEU |
2390 				     DESC_HDR_MODE0_DEU_CBC |
2391 				     DESC_HDR_MODE0_DEU_3DES |
2392 				     DESC_HDR_SEL1_MDEUA |
2393 				     DESC_HDR_MODE1_MDEU_INIT |
2394 				     DESC_HDR_MODE1_MDEU_PAD |
2395 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2396 	},
2397 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2398 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2399 		.alg.aead = {
2400 			.base = {
2401 				.cra_name = "authenc(hmac(sha1),"
2402 					    "cbc(des3_ede))",
2403 				.cra_driver_name = "authenc-hmac-sha1-"
2404 						   "cbc-3des-talitos",
2405 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2406 				.cra_flags = CRYPTO_ALG_ASYNC,
2407 			},
2408 			.ivsize = DES3_EDE_BLOCK_SIZE,
2409 			.maxauthsize = SHA1_DIGEST_SIZE,
2410 			.setkey = aead_des3_setkey,
2411 		},
2412 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2413 				     DESC_HDR_SEL0_DEU |
2414 				     DESC_HDR_MODE0_DEU_CBC |
2415 				     DESC_HDR_MODE0_DEU_3DES |
2416 				     DESC_HDR_SEL1_MDEUA |
2417 				     DESC_HDR_MODE1_MDEU_INIT |
2418 				     DESC_HDR_MODE1_MDEU_PAD |
2419 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2420 	},
2421 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2422 		.alg.aead = {
2423 			.base = {
2424 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2425 				.cra_driver_name = "authenc-hmac-sha224-"
2426 						   "cbc-aes-talitos",
2427 				.cra_blocksize = AES_BLOCK_SIZE,
2428 				.cra_flags = CRYPTO_ALG_ASYNC,
2429 			},
2430 			.ivsize = AES_BLOCK_SIZE,
2431 			.maxauthsize = SHA224_DIGEST_SIZE,
2432 		},
2433 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2434 				     DESC_HDR_SEL0_AESU |
2435 				     DESC_HDR_MODE0_AESU_CBC |
2436 				     DESC_HDR_SEL1_MDEUA |
2437 				     DESC_HDR_MODE1_MDEU_INIT |
2438 				     DESC_HDR_MODE1_MDEU_PAD |
2439 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2440 	},
2441 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2442 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2443 		.alg.aead = {
2444 			.base = {
2445 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2446 				.cra_driver_name = "authenc-hmac-sha224-"
2447 						   "cbc-aes-talitos",
2448 				.cra_blocksize = AES_BLOCK_SIZE,
2449 				.cra_flags = CRYPTO_ALG_ASYNC,
2450 			},
2451 			.ivsize = AES_BLOCK_SIZE,
2452 			.maxauthsize = SHA224_DIGEST_SIZE,
2453 		},
2454 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2455 				     DESC_HDR_SEL0_AESU |
2456 				     DESC_HDR_MODE0_AESU_CBC |
2457 				     DESC_HDR_SEL1_MDEUA |
2458 				     DESC_HDR_MODE1_MDEU_INIT |
2459 				     DESC_HDR_MODE1_MDEU_PAD |
2460 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2461 	},
2462 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2463 		.alg.aead = {
2464 			.base = {
2465 				.cra_name = "authenc(hmac(sha224),"
2466 					    "cbc(des3_ede))",
2467 				.cra_driver_name = "authenc-hmac-sha224-"
2468 						   "cbc-3des-talitos",
2469 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2470 				.cra_flags = CRYPTO_ALG_ASYNC,
2471 			},
2472 			.ivsize = DES3_EDE_BLOCK_SIZE,
2473 			.maxauthsize = SHA224_DIGEST_SIZE,
2474 			.setkey = aead_des3_setkey,
2475 		},
2476 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2477 				     DESC_HDR_SEL0_DEU |
2478 				     DESC_HDR_MODE0_DEU_CBC |
2479 				     DESC_HDR_MODE0_DEU_3DES |
2480 				     DESC_HDR_SEL1_MDEUA |
2481 				     DESC_HDR_MODE1_MDEU_INIT |
2482 				     DESC_HDR_MODE1_MDEU_PAD |
2483 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2484 	},
2485 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2486 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2487 		.alg.aead = {
2488 			.base = {
2489 				.cra_name = "authenc(hmac(sha224),"
2490 					    "cbc(des3_ede))",
2491 				.cra_driver_name = "authenc-hmac-sha224-"
2492 						   "cbc-3des-talitos",
2493 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2494 				.cra_flags = CRYPTO_ALG_ASYNC,
2495 			},
2496 			.ivsize = DES3_EDE_BLOCK_SIZE,
2497 			.maxauthsize = SHA224_DIGEST_SIZE,
2498 			.setkey = aead_des3_setkey,
2499 		},
2500 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2501 				     DESC_HDR_SEL0_DEU |
2502 				     DESC_HDR_MODE0_DEU_CBC |
2503 				     DESC_HDR_MODE0_DEU_3DES |
2504 				     DESC_HDR_SEL1_MDEUA |
2505 				     DESC_HDR_MODE1_MDEU_INIT |
2506 				     DESC_HDR_MODE1_MDEU_PAD |
2507 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2508 	},
2509 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2510 		.alg.aead = {
2511 			.base = {
2512 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2513 				.cra_driver_name = "authenc-hmac-sha256-"
2514 						   "cbc-aes-talitos",
2515 				.cra_blocksize = AES_BLOCK_SIZE,
2516 				.cra_flags = CRYPTO_ALG_ASYNC,
2517 			},
2518 			.ivsize = AES_BLOCK_SIZE,
2519 			.maxauthsize = SHA256_DIGEST_SIZE,
2520 		},
2521 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2522 				     DESC_HDR_SEL0_AESU |
2523 				     DESC_HDR_MODE0_AESU_CBC |
2524 				     DESC_HDR_SEL1_MDEUA |
2525 				     DESC_HDR_MODE1_MDEU_INIT |
2526 				     DESC_HDR_MODE1_MDEU_PAD |
2527 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2528 	},
2529 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2530 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2531 		.alg.aead = {
2532 			.base = {
2533 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2534 				.cra_driver_name = "authenc-hmac-sha256-"
2535 						   "cbc-aes-talitos",
2536 				.cra_blocksize = AES_BLOCK_SIZE,
2537 				.cra_flags = CRYPTO_ALG_ASYNC,
2538 			},
2539 			.ivsize = AES_BLOCK_SIZE,
2540 			.maxauthsize = SHA256_DIGEST_SIZE,
2541 		},
2542 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2543 				     DESC_HDR_SEL0_AESU |
2544 				     DESC_HDR_MODE0_AESU_CBC |
2545 				     DESC_HDR_SEL1_MDEUA |
2546 				     DESC_HDR_MODE1_MDEU_INIT |
2547 				     DESC_HDR_MODE1_MDEU_PAD |
2548 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2549 	},
2550 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2551 		.alg.aead = {
2552 			.base = {
2553 				.cra_name = "authenc(hmac(sha256),"
2554 					    "cbc(des3_ede))",
2555 				.cra_driver_name = "authenc-hmac-sha256-"
2556 						   "cbc-3des-talitos",
2557 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2558 				.cra_flags = CRYPTO_ALG_ASYNC,
2559 			},
2560 			.ivsize = DES3_EDE_BLOCK_SIZE,
2561 			.maxauthsize = SHA256_DIGEST_SIZE,
2562 			.setkey = aead_des3_setkey,
2563 		},
2564 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2565 				     DESC_HDR_SEL0_DEU |
2566 				     DESC_HDR_MODE0_DEU_CBC |
2567 				     DESC_HDR_MODE0_DEU_3DES |
2568 				     DESC_HDR_SEL1_MDEUA |
2569 				     DESC_HDR_MODE1_MDEU_INIT |
2570 				     DESC_HDR_MODE1_MDEU_PAD |
2571 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2572 	},
2573 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2574 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2575 		.alg.aead = {
2576 			.base = {
2577 				.cra_name = "authenc(hmac(sha256),"
2578 					    "cbc(des3_ede))",
2579 				.cra_driver_name = "authenc-hmac-sha256-"
2580 						   "cbc-3des-talitos",
2581 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2582 				.cra_flags = CRYPTO_ALG_ASYNC,
2583 			},
2584 			.ivsize = DES3_EDE_BLOCK_SIZE,
2585 			.maxauthsize = SHA256_DIGEST_SIZE,
2586 			.setkey = aead_des3_setkey,
2587 		},
2588 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2589 				     DESC_HDR_SEL0_DEU |
2590 				     DESC_HDR_MODE0_DEU_CBC |
2591 				     DESC_HDR_MODE0_DEU_3DES |
2592 				     DESC_HDR_SEL1_MDEUA |
2593 				     DESC_HDR_MODE1_MDEU_INIT |
2594 				     DESC_HDR_MODE1_MDEU_PAD |
2595 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2596 	},
2597 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2598 		.alg.aead = {
2599 			.base = {
2600 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2601 				.cra_driver_name = "authenc-hmac-sha384-"
2602 						   "cbc-aes-talitos",
2603 				.cra_blocksize = AES_BLOCK_SIZE,
2604 				.cra_flags = CRYPTO_ALG_ASYNC,
2605 			},
2606 			.ivsize = AES_BLOCK_SIZE,
2607 			.maxauthsize = SHA384_DIGEST_SIZE,
2608 		},
2609 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2610 				     DESC_HDR_SEL0_AESU |
2611 				     DESC_HDR_MODE0_AESU_CBC |
2612 				     DESC_HDR_SEL1_MDEUB |
2613 				     DESC_HDR_MODE1_MDEU_INIT |
2614 				     DESC_HDR_MODE1_MDEU_PAD |
2615 				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2616 	},
2617 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2618 		.alg.aead = {
2619 			.base = {
2620 				.cra_name = "authenc(hmac(sha384),"
2621 					    "cbc(des3_ede))",
2622 				.cra_driver_name = "authenc-hmac-sha384-"
2623 						   "cbc-3des-talitos",
2624 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2625 				.cra_flags = CRYPTO_ALG_ASYNC,
2626 			},
2627 			.ivsize = DES3_EDE_BLOCK_SIZE,
2628 			.maxauthsize = SHA384_DIGEST_SIZE,
2629 			.setkey = aead_des3_setkey,
2630 		},
2631 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2632 				     DESC_HDR_SEL0_DEU |
2633 				     DESC_HDR_MODE0_DEU_CBC |
2634 				     DESC_HDR_MODE0_DEU_3DES |
2635 				     DESC_HDR_SEL1_MDEUB |
2636 				     DESC_HDR_MODE1_MDEU_INIT |
2637 				     DESC_HDR_MODE1_MDEU_PAD |
2638 				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2639 	},
2640 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2641 		.alg.aead = {
2642 			.base = {
2643 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2644 				.cra_driver_name = "authenc-hmac-sha512-"
2645 						   "cbc-aes-talitos",
2646 				.cra_blocksize = AES_BLOCK_SIZE,
2647 				.cra_flags = CRYPTO_ALG_ASYNC,
2648 			},
2649 			.ivsize = AES_BLOCK_SIZE,
2650 			.maxauthsize = SHA512_DIGEST_SIZE,
2651 		},
2652 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2653 				     DESC_HDR_SEL0_AESU |
2654 				     DESC_HDR_MODE0_AESU_CBC |
2655 				     DESC_HDR_SEL1_MDEUB |
2656 				     DESC_HDR_MODE1_MDEU_INIT |
2657 				     DESC_HDR_MODE1_MDEU_PAD |
2658 				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2659 	},
2660 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2661 		.alg.aead = {
2662 			.base = {
2663 				.cra_name = "authenc(hmac(sha512),"
2664 					    "cbc(des3_ede))",
2665 				.cra_driver_name = "authenc-hmac-sha512-"
2666 						   "cbc-3des-talitos",
2667 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2668 				.cra_flags = CRYPTO_ALG_ASYNC,
2669 			},
2670 			.ivsize = DES3_EDE_BLOCK_SIZE,
2671 			.maxauthsize = SHA512_DIGEST_SIZE,
2672 			.setkey = aead_des3_setkey,
2673 		},
2674 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2675 				     DESC_HDR_SEL0_DEU |
2676 				     DESC_HDR_MODE0_DEU_CBC |
2677 				     DESC_HDR_MODE0_DEU_3DES |
2678 				     DESC_HDR_SEL1_MDEUB |
2679 				     DESC_HDR_MODE1_MDEU_INIT |
2680 				     DESC_HDR_MODE1_MDEU_PAD |
2681 				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2682 	},
2683 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2684 		.alg.aead = {
2685 			.base = {
2686 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2687 				.cra_driver_name = "authenc-hmac-md5-"
2688 						   "cbc-aes-talitos",
2689 				.cra_blocksize = AES_BLOCK_SIZE,
2690 				.cra_flags = CRYPTO_ALG_ASYNC,
2691 			},
2692 			.ivsize = AES_BLOCK_SIZE,
2693 			.maxauthsize = MD5_DIGEST_SIZE,
2694 		},
2695 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2696 				     DESC_HDR_SEL0_AESU |
2697 				     DESC_HDR_MODE0_AESU_CBC |
2698 				     DESC_HDR_SEL1_MDEUA |
2699 				     DESC_HDR_MODE1_MDEU_INIT |
2700 				     DESC_HDR_MODE1_MDEU_PAD |
2701 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2702 	},
2703 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2704 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2705 		.alg.aead = {
2706 			.base = {
2707 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2708 				.cra_driver_name = "authenc-hmac-md5-"
2709 						   "cbc-aes-talitos",
2710 				.cra_blocksize = AES_BLOCK_SIZE,
2711 				.cra_flags = CRYPTO_ALG_ASYNC,
2712 			},
2713 			.ivsize = AES_BLOCK_SIZE,
2714 			.maxauthsize = MD5_DIGEST_SIZE,
2715 		},
2716 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2717 				     DESC_HDR_SEL0_AESU |
2718 				     DESC_HDR_MODE0_AESU_CBC |
2719 				     DESC_HDR_SEL1_MDEUA |
2720 				     DESC_HDR_MODE1_MDEU_INIT |
2721 				     DESC_HDR_MODE1_MDEU_PAD |
2722 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2723 	},
2724 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2725 		.alg.aead = {
2726 			.base = {
2727 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2728 				.cra_driver_name = "authenc-hmac-md5-"
2729 						   "cbc-3des-talitos",
2730 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2731 				.cra_flags = CRYPTO_ALG_ASYNC,
2732 			},
2733 			.ivsize = DES3_EDE_BLOCK_SIZE,
2734 			.maxauthsize = MD5_DIGEST_SIZE,
2735 			.setkey = aead_des3_setkey,
2736 		},
2737 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2738 				     DESC_HDR_SEL0_DEU |
2739 				     DESC_HDR_MODE0_DEU_CBC |
2740 				     DESC_HDR_MODE0_DEU_3DES |
2741 				     DESC_HDR_SEL1_MDEUA |
2742 				     DESC_HDR_MODE1_MDEU_INIT |
2743 				     DESC_HDR_MODE1_MDEU_PAD |
2744 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2745 	},
2746 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2747 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2748 		.alg.aead = {
2749 			.base = {
2750 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2751 				.cra_driver_name = "authenc-hmac-md5-"
2752 						   "cbc-3des-talitos",
2753 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2754 				.cra_flags = CRYPTO_ALG_ASYNC,
2755 			},
2756 			.ivsize = DES3_EDE_BLOCK_SIZE,
2757 			.maxauthsize = MD5_DIGEST_SIZE,
2758 			.setkey = aead_des3_setkey,
2759 		},
2760 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2761 				     DESC_HDR_SEL0_DEU |
2762 				     DESC_HDR_MODE0_DEU_CBC |
2763 				     DESC_HDR_MODE0_DEU_3DES |
2764 				     DESC_HDR_SEL1_MDEUA |
2765 				     DESC_HDR_MODE1_MDEU_INIT |
2766 				     DESC_HDR_MODE1_MDEU_PAD |
2767 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2768 	},
2769 	/* ABLKCIPHER algorithms. */
2770 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2771 		.alg.crypto = {
2772 			.cra_name = "ecb(aes)",
2773 			.cra_driver_name = "ecb-aes-talitos",
2774 			.cra_blocksize = AES_BLOCK_SIZE,
2775 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2776 				     CRYPTO_ALG_ASYNC,
2777 			.cra_ablkcipher = {
2778 				.min_keysize = AES_MIN_KEY_SIZE,
2779 				.max_keysize = AES_MAX_KEY_SIZE,
2780 				.ivsize = AES_BLOCK_SIZE,
2781 			}
2782 		},
2783 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2784 				     DESC_HDR_SEL0_AESU,
2785 	},
2786 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2787 		.alg.crypto = {
2788 			.cra_name = "cbc(aes)",
2789 			.cra_driver_name = "cbc-aes-talitos",
2790 			.cra_blocksize = AES_BLOCK_SIZE,
2791 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2792 				     CRYPTO_ALG_ASYNC,
2793 			.cra_ablkcipher = {
2794 				.min_keysize = AES_MIN_KEY_SIZE,
2795 				.max_keysize = AES_MAX_KEY_SIZE,
2796 				.ivsize = AES_BLOCK_SIZE,
2797 			}
2798 		},
2799 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2800 				     DESC_HDR_SEL0_AESU |
2801 				     DESC_HDR_MODE0_AESU_CBC,
2802 	},
2803 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2804 		.alg.crypto = {
2805 			.cra_name = "ctr(aes)",
2806 			.cra_driver_name = "ctr-aes-talitos",
2807 			.cra_blocksize = AES_BLOCK_SIZE,
2808 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2809 				     CRYPTO_ALG_ASYNC,
2810 			.cra_ablkcipher = {
2811 				.min_keysize = AES_MIN_KEY_SIZE,
2812 				.max_keysize = AES_MAX_KEY_SIZE,
2813 				.ivsize = AES_BLOCK_SIZE,
2814 			}
2815 		},
2816 		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2817 				     DESC_HDR_SEL0_AESU |
2818 				     DESC_HDR_MODE0_AESU_CTR,
2819 	},
2820 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2821 		.alg.crypto = {
2822 			.cra_name = "ecb(des)",
2823 			.cra_driver_name = "ecb-des-talitos",
2824 			.cra_blocksize = DES_BLOCK_SIZE,
2825 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2826 				     CRYPTO_ALG_ASYNC,
2827 			.cra_ablkcipher = {
2828 				.min_keysize = DES_KEY_SIZE,
2829 				.max_keysize = DES_KEY_SIZE,
2830 				.ivsize = DES_BLOCK_SIZE,
2831 				.setkey = ablkcipher_des_setkey,
2832 			}
2833 		},
2834 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2835 				     DESC_HDR_SEL0_DEU,
2836 	},
2837 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2838 		.alg.crypto = {
2839 			.cra_name = "cbc(des)",
2840 			.cra_driver_name = "cbc-des-talitos",
2841 			.cra_blocksize = DES_BLOCK_SIZE,
2842 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2843 				     CRYPTO_ALG_ASYNC,
2844 			.cra_ablkcipher = {
2845 				.min_keysize = DES_KEY_SIZE,
2846 				.max_keysize = DES_KEY_SIZE,
2847 				.ivsize = DES_BLOCK_SIZE,
2848 				.setkey = ablkcipher_des_setkey,
2849 			}
2850 		},
2851 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2852 				     DESC_HDR_SEL0_DEU |
2853 				     DESC_HDR_MODE0_DEU_CBC,
2854 	},
2855 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2856 		.alg.crypto = {
2857 			.cra_name = "ecb(des3_ede)",
2858 			.cra_driver_name = "ecb-3des-talitos",
2859 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2860 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2861 				     CRYPTO_ALG_ASYNC,
2862 			.cra_ablkcipher = {
2863 				.min_keysize = DES3_EDE_KEY_SIZE,
2864 				.max_keysize = DES3_EDE_KEY_SIZE,
2865 				.ivsize = DES3_EDE_BLOCK_SIZE,
2866 				.setkey = ablkcipher_des3_setkey,
2867 			}
2868 		},
2869 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2870 				     DESC_HDR_SEL0_DEU |
2871 				     DESC_HDR_MODE0_DEU_3DES,
2872 	},
2873 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2874 		.alg.crypto = {
2875 			.cra_name = "cbc(des3_ede)",
2876 			.cra_driver_name = "cbc-3des-talitos",
2877 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2878 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2879 				     CRYPTO_ALG_ASYNC,
2880 			.cra_ablkcipher = {
2881 				.min_keysize = DES3_EDE_KEY_SIZE,
2882 				.max_keysize = DES3_EDE_KEY_SIZE,
2883 				.ivsize = DES3_EDE_BLOCK_SIZE,
2884 				.setkey = ablkcipher_des3_setkey,
2885 			}
2886 		},
2887 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2888 				     DESC_HDR_SEL0_DEU |
2889 				     DESC_HDR_MODE0_DEU_CBC |
2890 				     DESC_HDR_MODE0_DEU_3DES,
2891 	},
2892 	/* AHASH algorithms. */
2893 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2894 		.alg.hash = {
2895 			.halg.digestsize = MD5_DIGEST_SIZE,
2896 			.halg.statesize = sizeof(struct talitos_export_state),
2897 			.halg.base = {
2898 				.cra_name = "md5",
2899 				.cra_driver_name = "md5-talitos",
2900 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2901 				.cra_flags = CRYPTO_ALG_ASYNC,
2902 			}
2903 		},
2904 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2905 				     DESC_HDR_SEL0_MDEUA |
2906 				     DESC_HDR_MODE0_MDEU_MD5,
2907 	},
2908 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2909 		.alg.hash = {
2910 			.halg.digestsize = SHA1_DIGEST_SIZE,
2911 			.halg.statesize = sizeof(struct talitos_export_state),
2912 			.halg.base = {
2913 				.cra_name = "sha1",
2914 				.cra_driver_name = "sha1-talitos",
2915 				.cra_blocksize = SHA1_BLOCK_SIZE,
2916 				.cra_flags = CRYPTO_ALG_ASYNC,
2917 			}
2918 		},
2919 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2920 				     DESC_HDR_SEL0_MDEUA |
2921 				     DESC_HDR_MODE0_MDEU_SHA1,
2922 	},
2923 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2924 		.alg.hash = {
2925 			.halg.digestsize = SHA224_DIGEST_SIZE,
2926 			.halg.statesize = sizeof(struct talitos_export_state),
2927 			.halg.base = {
2928 				.cra_name = "sha224",
2929 				.cra_driver_name = "sha224-talitos",
2930 				.cra_blocksize = SHA224_BLOCK_SIZE,
2931 				.cra_flags = CRYPTO_ALG_ASYNC,
2932 			}
2933 		},
2934 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2935 				     DESC_HDR_SEL0_MDEUA |
2936 				     DESC_HDR_MODE0_MDEU_SHA224,
2937 	},
2938 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2939 		.alg.hash = {
2940 			.halg.digestsize = SHA256_DIGEST_SIZE,
2941 			.halg.statesize = sizeof(struct talitos_export_state),
2942 			.halg.base = {
2943 				.cra_name = "sha256",
2944 				.cra_driver_name = "sha256-talitos",
2945 				.cra_blocksize = SHA256_BLOCK_SIZE,
2946 				.cra_flags = CRYPTO_ALG_ASYNC,
2947 			}
2948 		},
2949 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2950 				     DESC_HDR_SEL0_MDEUA |
2951 				     DESC_HDR_MODE0_MDEU_SHA256,
2952 	},
2953 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2954 		.alg.hash = {
2955 			.halg.digestsize = SHA384_DIGEST_SIZE,
2956 			.halg.statesize = sizeof(struct talitos_export_state),
2957 			.halg.base = {
2958 				.cra_name = "sha384",
2959 				.cra_driver_name = "sha384-talitos",
2960 				.cra_blocksize = SHA384_BLOCK_SIZE,
2961 				.cra_flags = CRYPTO_ALG_ASYNC,
2962 			}
2963 		},
2964 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2965 				     DESC_HDR_SEL0_MDEUB |
2966 				     DESC_HDR_MODE0_MDEUB_SHA384,
2967 	},
2968 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2969 		.alg.hash = {
2970 			.halg.digestsize = SHA512_DIGEST_SIZE,
2971 			.halg.statesize = sizeof(struct talitos_export_state),
2972 			.halg.base = {
2973 				.cra_name = "sha512",
2974 				.cra_driver_name = "sha512-talitos",
2975 				.cra_blocksize = SHA512_BLOCK_SIZE,
2976 				.cra_flags = CRYPTO_ALG_ASYNC,
2977 			}
2978 		},
2979 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2980 				     DESC_HDR_SEL0_MDEUB |
2981 				     DESC_HDR_MODE0_MDEUB_SHA512,
2982 	},
2983 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2984 		.alg.hash = {
2985 			.halg.digestsize = MD5_DIGEST_SIZE,
2986 			.halg.statesize = sizeof(struct talitos_export_state),
2987 			.halg.base = {
2988 				.cra_name = "hmac(md5)",
2989 				.cra_driver_name = "hmac-md5-talitos",
2990 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2991 				.cra_flags = CRYPTO_ALG_ASYNC,
2992 			}
2993 		},
2994 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2995 				     DESC_HDR_SEL0_MDEUA |
2996 				     DESC_HDR_MODE0_MDEU_MD5,
2997 	},
2998 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2999 		.alg.hash = {
3000 			.halg.digestsize = SHA1_DIGEST_SIZE,
3001 			.halg.statesize = sizeof(struct talitos_export_state),
3002 			.halg.base = {
3003 				.cra_name = "hmac(sha1)",
3004 				.cra_driver_name = "hmac-sha1-talitos",
3005 				.cra_blocksize = SHA1_BLOCK_SIZE,
3006 				.cra_flags = CRYPTO_ALG_ASYNC,
3007 			}
3008 		},
3009 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3010 				     DESC_HDR_SEL0_MDEUA |
3011 				     DESC_HDR_MODE0_MDEU_SHA1,
3012 	},
3013 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3014 		.alg.hash = {
3015 			.halg.digestsize = SHA224_DIGEST_SIZE,
3016 			.halg.statesize = sizeof(struct talitos_export_state),
3017 			.halg.base = {
3018 				.cra_name = "hmac(sha224)",
3019 				.cra_driver_name = "hmac-sha224-talitos",
3020 				.cra_blocksize = SHA224_BLOCK_SIZE,
3021 				.cra_flags = CRYPTO_ALG_ASYNC,
3022 			}
3023 		},
3024 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3025 				     DESC_HDR_SEL0_MDEUA |
3026 				     DESC_HDR_MODE0_MDEU_SHA224,
3027 	},
3028 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3029 		.alg.hash = {
3030 			.halg.digestsize = SHA256_DIGEST_SIZE,
3031 			.halg.statesize = sizeof(struct talitos_export_state),
3032 			.halg.base = {
3033 				.cra_name = "hmac(sha256)",
3034 				.cra_driver_name = "hmac-sha256-talitos",
3035 				.cra_blocksize = SHA256_BLOCK_SIZE,
3036 				.cra_flags = CRYPTO_ALG_ASYNC,
3037 			}
3038 		},
3039 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3040 				     DESC_HDR_SEL0_MDEUA |
3041 				     DESC_HDR_MODE0_MDEU_SHA256,
3042 	},
3043 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3044 		.alg.hash = {
3045 			.halg.digestsize = SHA384_DIGEST_SIZE,
3046 			.halg.statesize = sizeof(struct talitos_export_state),
3047 			.halg.base = {
3048 				.cra_name = "hmac(sha384)",
3049 				.cra_driver_name = "hmac-sha384-talitos",
3050 				.cra_blocksize = SHA384_BLOCK_SIZE,
3051 				.cra_flags = CRYPTO_ALG_ASYNC,
3052 			}
3053 		},
3054 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3055 				     DESC_HDR_SEL0_MDEUB |
3056 				     DESC_HDR_MODE0_MDEUB_SHA384,
3057 	},
3058 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3059 		.alg.hash = {
3060 			.halg.digestsize = SHA512_DIGEST_SIZE,
3061 			.halg.statesize = sizeof(struct talitos_export_state),
3062 			.halg.base = {
3063 				.cra_name = "hmac(sha512)",
3064 				.cra_driver_name = "hmac-sha512-talitos",
3065 				.cra_blocksize = SHA512_BLOCK_SIZE,
3066 				.cra_flags = CRYPTO_ALG_ASYNC,
3067 			}
3068 		},
3069 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3070 				     DESC_HDR_SEL0_MDEUB |
3071 				     DESC_HDR_MODE0_MDEUB_SHA512,
3072 	}
3073 };
3074 
3075 struct talitos_crypto_alg {
3076 	struct list_head entry;
3077 	struct device *dev;
3078 	struct talitos_alg_template algt;
3079 };
3080 
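/*
 * Per-tfm setup shared by all algorithm types: record the device,
 * assign a SEC channel round-robin (num_channels is a power of two,
 * so the mask suffices) and stash the descriptor header template with
 * done notification enabled.
 */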
3081 static int talitos_init_common(struct talitos_ctx *ctx,
3082 			       struct talitos_crypto_alg *talitos_alg)
3083 {
3084 	struct talitos_private *priv;
3085 
3086 	/* update context with ptr to dev */
3087 	ctx->dev = talitos_alg->dev;
3088 
3089 	/* assign SEC channel to tfm in round-robin fashion */
3090 	priv = dev_get_drvdata(ctx->dev);
3091 	ctx->ch = atomic_inc_return(&priv->last_chan) &
3092 		  (priv->num_channels - 1);
3093 
3094 	/* copy descriptor header template value */
3095 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3096 
3097 	/* select done notification */
3098 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3099 
3100 	return 0;
3101 }
3102 
3103 static int talitos_cra_init(struct crypto_tfm *tfm)
3104 {
3105 	struct crypto_alg *alg = tfm->__crt_alg;
3106 	struct talitos_crypto_alg *talitos_alg;
3107 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3108 
3109 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3110 		talitos_alg = container_of(__crypto_ahash_alg(alg),
3111 					   struct talitos_crypto_alg,
3112 					   algt.alg.hash);
3113 	else
3114 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
3115 					   algt.alg.crypto);
3116 
3117 	return talitos_init_common(ctx, talitos_alg);
3118 }
3119 
3120 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3121 {
3122 	struct aead_alg *alg = crypto_aead_alg(tfm);
3123 	struct talitos_crypto_alg *talitos_alg;
3124 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3125 
3126 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3127 				   algt.alg.aead);
3128 
3129 	return talitos_init_common(ctx, talitos_alg);
3130 }
3131 
3132 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3133 {
3134 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3135 
3136 	talitos_cra_init(tfm);
3137 
3138 	ctx->keylen = 0;
3139 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3140 				 sizeof(struct talitos_ahash_req_ctx));
3141 
3142 	return 0;
3143 }
3144 
3145 static void talitos_cra_exit(struct crypto_tfm *tfm)
3146 {
3147 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3148 	struct device *dev = ctx->dev;
3149 
3150 	if (ctx->keylen)
3151 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3152 }
3153 
3154 /*
3155  * given the alg's descriptor header template, determine whether descriptor
3156  * type and primary/secondary execution units required match the hw
3157  * capabilities description provided in the device tree node.
3158  */
3159 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3160 {
3161 	struct talitos_private *priv = dev_get_drvdata(dev);
3162 	int ret;
3163 
3164 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3165 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3166 
3167 	if (SECONDARY_EU(desc_hdr_template))
3168 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3169 		              & priv->exec_units);
3170 
3171 	return ret;
3172 }
3173 
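/*
 * Undo talitos_probe(): unregister all algorithms and, if the part
 * has one, the RNG, then release the IRQs and kill the done tasklets.
 * devm takes care of the allocations and the register mapping.
 */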
3174 static int talitos_remove(struct platform_device *ofdev)
3175 {
3176 	struct device *dev = &ofdev->dev;
3177 	struct talitos_private *priv = dev_get_drvdata(dev);
3178 	struct talitos_crypto_alg *t_alg, *n;
3179 	int i;
3180 
3181 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3182 		switch (t_alg->algt.type) {
3183 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
3184 			break;
3185 		case CRYPTO_ALG_TYPE_AEAD:
3186 			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
3187 		case CRYPTO_ALG_TYPE_AHASH:
3188 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3189 			break;
3190 		}
3191 		list_del(&t_alg->entry);
3192 	}
3193 
3194 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3195 		talitos_unregister_rng(dev);
3196 
3197 	for (i = 0; i < 2; i++)
3198 		if (priv->irq[i]) {
3199 			free_irq(priv->irq[i], dev);
3200 			irq_dispose_mapping(priv->irq[i]);
3201 		}
3202 
3203 	tasklet_kill(&priv->done_task[0]);
3204 	if (priv->irq[1])
3205 		tasklet_kill(&priv->done_task[1]);
3206 
3207 	return 0;
3208 }
3209 
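/*
 * Instantiate one driver_algs[] entry: copy the template, wire up the
 * type-specific ops, and apply feature quirks: no HMAC without
 * TALITOS_FTR_HMAC_OK, and sha224 emulated as sha256 with a
 * software-initialised context on parts lacking SHA224_HWINIT.
 */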
3210 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3211 						    struct talitos_alg_template
3212 						           *template)
3213 {
3214 	struct talitos_private *priv = dev_get_drvdata(dev);
3215 	struct talitos_crypto_alg *t_alg;
3216 	struct crypto_alg *alg;
3217 
3218 	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3219 			     GFP_KERNEL);
3220 	if (!t_alg)
3221 		return ERR_PTR(-ENOMEM);
3222 
3223 	t_alg->algt = *template;
3224 
3225 	switch (t_alg->algt.type) {
3226 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
3227 		alg = &t_alg->algt.alg.crypto;
3228 		alg->cra_init = talitos_cra_init;
3229 		alg->cra_exit = talitos_cra_exit;
3230 		alg->cra_type = &crypto_ablkcipher_type;
3231 		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
3232 					     ablkcipher_setkey;
3233 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3234 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3235 		break;
3236 	case CRYPTO_ALG_TYPE_AEAD:
3237 		alg = &t_alg->algt.alg.aead.base;
3238 		alg->cra_exit = talitos_cra_exit;
3239 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3240 		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3241 					      aead_setkey;
3242 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3243 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3244 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3245 		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3246 			devm_kfree(dev, t_alg);
3247 			return ERR_PTR(-ENOTSUPP);
3248 		}
3249 		break;
3250 	case CRYPTO_ALG_TYPE_AHASH:
3251 		alg = &t_alg->algt.alg.hash.halg.base;
3252 		alg->cra_init = talitos_cra_init_ahash;
3253 		alg->cra_exit = talitos_cra_exit;
3254 		t_alg->algt.alg.hash.init = ahash_init;
3255 		t_alg->algt.alg.hash.update = ahash_update;
3256 		t_alg->algt.alg.hash.final = ahash_final;
3257 		t_alg->algt.alg.hash.finup = ahash_finup;
3258 		t_alg->algt.alg.hash.digest = ahash_digest;
3259 		if (!strncmp(alg->cra_name, "hmac", 4))
3260 			t_alg->algt.alg.hash.setkey = ahash_setkey;
3261 		t_alg->algt.alg.hash.import = ahash_import;
3262 		t_alg->algt.alg.hash.export = ahash_export;
3263 
3264 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3265 		    !strncmp(alg->cra_name, "hmac", 4)) {
3266 			devm_kfree(dev, t_alg);
3267 			return ERR_PTR(-ENOTSUPP);
3268 		}
3269 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3270 		    (!strcmp(alg->cra_name, "sha224") ||
3271 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3272 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3273 			t_alg->algt.desc_hdr_template =
3274 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3275 					DESC_HDR_SEL0_MDEUA |
3276 					DESC_HDR_MODE0_MDEU_SHA256;
3277 		}
3278 		break;
3279 	default:
3280 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3281 		devm_kfree(dev, t_alg);
3282 		return ERR_PTR(-EINVAL);
3283 	}
3284 
3285 	alg->cra_module = THIS_MODULE;
3286 	if (t_alg->algt.priority)
3287 		alg->cra_priority = t_alg->algt.priority;
3288 	else
3289 		alg->cra_priority = TALITOS_CRA_PRIORITY;
3290 	alg->cra_alignmask = 0;
3291 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3292 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3293 
3294 	t_alg->dev = dev;
3295 
3296 	return t_alg;
3297 }
3298 
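/*
 * Map and request interrupts.  SEC1 uses a single IRQ for all four
 * channels; SEC2+ may provide two lines, one for channels 0/2 and one
 * for 1/3, falling back to a single four-channel handler when the
 * second line is missing.
 */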
3299 static int talitos_probe_irq(struct platform_device *ofdev)
3300 {
3301 	struct device *dev = &ofdev->dev;
3302 	struct device_node *np = ofdev->dev.of_node;
3303 	struct talitos_private *priv = dev_get_drvdata(dev);
3304 	int err;
3305 	bool is_sec1 = has_ftr_sec1(priv);
3306 
3307 	priv->irq[0] = irq_of_parse_and_map(np, 0);
3308 	if (!priv->irq[0]) {
3309 		dev_err(dev, "failed to map irq\n");
3310 		return -EINVAL;
3311 	}
3312 	if (is_sec1) {
3313 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3314 				  dev_driver_string(dev), dev);
3315 		goto primary_out;
3316 	}
3317 
3318 	priv->irq[1] = irq_of_parse_and_map(np, 1);
3319 
3320 	/* get the primary irq line */
3321 	if (!priv->irq[1]) {
3322 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3323 				  dev_driver_string(dev), dev);
3324 		goto primary_out;
3325 	}
3326 
3327 	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3328 			  dev_driver_string(dev), dev);
3329 	if (err)
3330 		goto primary_out;
3331 
3332 	/* request the secondary irq line for channels 1 and 3 */
3333 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3334 			  dev_driver_string(dev), dev);
3335 	if (err) {
3336 		dev_err(dev, "failed to request secondary irq\n");
3337 		irq_dispose_mapping(priv->irq[1]);
3338 		priv->irq[1] = 0;
3339 	}
3340 
3341 	return err;
3342 
3343 primary_out:
3344 	if (err) {
3345 		dev_err(dev, "failed to request primary irq\n");
3346 		irq_dispose_mapping(priv->irq[0]);
3347 		priv->irq[0] = 0;
3348 	}
3349 
3350 	return err;
3351 }
3352 
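/*
 * Probe: map the controller registers, read capabilities from the device
 * tree, derive feature flags and the register layout from the compatible
 * string, wire up irqs and done-tasklets, allocate per-channel request
 * fifos, reset the h/w, then register the RNG and every algorithm the
 * execution units support.
 */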
3353 static int talitos_probe(struct platform_device *ofdev)
3354 {
3355 	struct device *dev = &ofdev->dev;
3356 	struct device_node *np = ofdev->dev.of_node;
3357 	struct talitos_private *priv;
3358 	int i, err;
3359 	int stride;
3360 	struct resource *res;
3361 
3362 	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3363 	if (!priv)
3364 		return -ENOMEM;
3365 
3366 	INIT_LIST_HEAD(&priv->alg_list);
3367 
3368 	dev_set_drvdata(dev, priv);
3369 
3370 	priv->ofdev = ofdev;
3371 
3372 	spin_lock_init(&priv->reg_lock);
3373 
3374 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3375 	if (!res)
3376 		return -ENXIO;
3377 	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3378 	if (!priv->reg) {
3379 		dev_err(dev, "failed to ioremap registers\n");
3380 		err = -ENOMEM;
3381 		goto err_out;
3382 	}
3383 
3384 	/* get SEC version capabilities from device tree */
3385 	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3386 	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3387 	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3388 	of_property_read_u32(np, "fsl,descriptor-types-mask",
3389 			     &priv->desc_types);
3390 
3391 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3392 	    !priv->exec_units || !priv->desc_types) {
3393 		dev_err(dev, "invalid property data in device tree node\n");
3394 		err = -EINVAL;
3395 		goto err_out;
3396 	}
3397 
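	/*
	 * Capabilities that are not plain device tree properties are
	 * inferred from the compatible string: link-table length semantics
	 * on SEC 3.0; h/w ICV checking, SHA-224 h/w init and working HMAC
	 * on SEC 2.1 (and parts listing it as a fallback compatible); and
	 * the SEC1 family flag.
	 */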
3398 	if (of_device_is_compatible(np, "fsl,sec3.0"))
3399 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3400 
3401 	if (of_device_is_compatible(np, "fsl,sec2.1"))
3402 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3403 				  TALITOS_FTR_SHA224_HWINIT |
3404 				  TALITOS_FTR_HMAC_OK;
3405 
3406 	if (of_device_is_compatible(np, "fsl,sec1.0"))
3407 		priv->features |= TALITOS_FTR_SEC1;
3408 
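	/*
	 * Execution-unit register offsets and the per-channel register
	 * stride depend on the register map generation: only the DEU, AESU
	 * and MDEU are mapped on SEC 1.2, while SEC 1.0 and SEC 2.x expose
	 * progressively more units.
	 */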
3409 	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3410 		priv->reg_deu = priv->reg + TALITOS12_DEU;
3411 		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3412 		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3413 		stride = TALITOS1_CH_STRIDE;
3414 	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3415 		priv->reg_deu = priv->reg + TALITOS10_DEU;
3416 		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3417 		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3418 		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3419 		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3420 		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3421 		stride = TALITOS1_CH_STRIDE;
3422 	} else {
3423 		priv->reg_deu = priv->reg + TALITOS2_DEU;
3424 		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3425 		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3426 		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3427 		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3428 		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3429 		priv->reg_keu = priv->reg + TALITOS2_KEU;
3430 		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3431 		stride = TALITOS2_CH_STRIDE;
3432 	}
3433 
3434 	err = talitos_probe_irq(ofdev);
3435 	if (err)
3436 		goto err_out;
3437 
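	/*
	 * Pick done-processing tasklets matching the interrupt wiring set
	 * up above: single channel, a ch0/2 + ch1/3 split when two irq
	 * lines exist, or one tasklet for all four channels.
	 */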
3438 	if (of_device_is_compatible(np, "fsl,sec1.0")) {
3439 		if (priv->num_channels == 1)
3440 			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3441 				     (unsigned long)dev);
3442 		else
3443 			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3444 				     (unsigned long)dev);
3445 	} else {
3446 		if (priv->irq[1]) {
3447 			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3448 				     (unsigned long)dev);
3449 			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3450 				     (unsigned long)dev);
3451 		} else if (priv->num_channels == 1) {
3452 			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3453 				     (unsigned long)dev);
3454 		} else {
3455 			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3456 				     (unsigned long)dev);
3457 		}
3458 	}
3459 
3460 	priv->chan = devm_kcalloc(dev,
3461 				  priv->num_channels,
3462 				  sizeof(struct talitos_channel),
3463 				  GFP_KERNEL);
3464 	if (!priv->chan) {
3465 		dev_err(dev, "failed to allocate channel management space\n");
3466 		err = -ENOMEM;
3467 		goto err_out;
3468 	}
3469 
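	/*
	 * The request fifo is managed as a ring whose indices wrap with a
	 * simple power-of-two mask, so round the advertised fifo length up
	 * to a power of two.
	 */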
3470 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3471 
3472 	for (i = 0; i < priv->num_channels; i++) {
3473 		priv->chan[i].reg = priv->reg + stride * (i + 1);
3474 		if (!priv->irq[1] || !(i & 1))
3475 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3476 
3477 		spin_lock_init(&priv->chan[i].head_lock);
3478 		spin_lock_init(&priv->chan[i].tail_lock);
3479 
3480 		priv->chan[i].fifo = devm_kcalloc(dev,
3481 						priv->fifo_len,
3482 						sizeof(struct talitos_request),
3483 						GFP_KERNEL);
3484 		if (!priv->chan[i].fifo) {
3485 			dev_err(dev, "failed to allocate request fifo %d\n", i);
3486 			err = -ENOMEM;
3487 			goto err_out;
3488 		}
3489 
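		/*
		 * Prime submit_count so that the atomic_inc_return() in the
		 * submit path turns positive, and the channel reports busy,
		 * once chfifo_len - 1 requests are outstanding.
		 */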
3490 		atomic_set(&priv->chan[i].submit_count,
3491 			   -(priv->chfifo_len - 1));
3492 	}
3493 
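	/*
	 * The SEC addresses up to 36 bits of physical memory; the upper
	 * bits travel in the extended-pointer (eptr) field of its h/w
	 * descriptor pointers.
	 */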
3494 	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err) {
		dev_err(dev, "failed to set DMA mask: %d\n", err);
		goto err_out;
	}
3495 
3496 	/* reset and initialize the h/w */
3497 	err = init_device(dev);
3498 	if (err) {
3499 		dev_err(dev, "failed to initialize device\n");
3500 		goto err_out;
3501 	}
3502 
3503 	/* register the RNG, if available */
3504 	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3505 		err = talitos_register_rng(dev);
3506 		if (err) {
3507 			dev_err(dev, "failed to register hwrng: %d\n", err);
3508 			goto err_out;
3509 		} else {
3510 			dev_info(dev, "hwrng registered\n");
		}
3511 	}
3512 
3513 	/* register crypto algorithms the device supports */
3514 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3515 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3516 			struct talitos_crypto_alg *t_alg;
3517 			struct crypto_alg *alg = NULL;
3518 
3519 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3520 			if (IS_ERR(t_alg)) {
3521 				err = PTR_ERR(t_alg);
3522 				if (err == -ENOTSUPP)
3523 					continue;
3524 				goto err_out;
3525 			}
3526 
3527 			switch (t_alg->algt.type) {
3528 			case CRYPTO_ALG_TYPE_ABLKCIPHER:
3529 				err = crypto_register_alg(
3530 						&t_alg->algt.alg.crypto);
3531 				alg = &t_alg->algt.alg.crypto;
3532 				break;
3533 
3534 			case CRYPTO_ALG_TYPE_AEAD:
3535 				err = crypto_register_aead(
3536 					&t_alg->algt.alg.aead);
3537 				alg = &t_alg->algt.alg.aead.base;
3538 				break;
3539 
3540 			case CRYPTO_ALG_TYPE_AHASH:
3541 				err = crypto_register_ahash(
3542 						&t_alg->algt.alg.hash);
3543 				alg = &t_alg->algt.alg.hash.halg.base;
3544 				break;
3545 			}
3546 			if (err) {
3547 				dev_err(dev, "%s alg registration failed\n",
3548 					alg->cra_driver_name);
3549 				devm_kfree(dev, t_alg);
3550 			} else {
3551 				list_add_tail(&t_alg->entry, &priv->alg_list);
			}
3552 		}
3553 	}
3554 	if (!list_empty(&priv->alg_list))
3555 		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3556 			 (char *)of_get_property(np, "compatible", NULL));
3557 
3558 	return 0;
3559 
3560 err_out:
3561 	talitos_remove(ofdev);
3562 
3563 	return err;
3564 }
3565 
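/*
 * Only the base compatible of each SEC generation is matched here; device
 * trees for newer parts typically list "fsl,sec1.0" or "fsl,sec2.0" as a
 * fallback compatible, so these two entries cover the whole family.
 */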
3566 static const struct of_device_id talitos_match[] = {
3567 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3568 	{
3569 		.compatible = "fsl,sec1.0",
3570 	},
3571 #endif
3572 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3573 	{
3574 		.compatible = "fsl,sec2.0",
3575 	},
3576 #endif
3577 	{},
3578 };
3579 MODULE_DEVICE_TABLE(of, talitos_match);
3580 
3581 static struct platform_driver talitos_driver = {
3582 	.driver = {
3583 		.name = "talitos",
3584 		.of_match_table = talitos_match,
3585 	},
3586 	.probe = talitos_probe,
3587 	.remove = talitos_remove,
3588 };
3589 
3590 module_platform_driver(talitos_driver);
3591 
3592 MODULE_LICENSE("GPL");
3593 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3594 MODULE_DESCRIPTION("Freescale Integrated Security Engine (SEC) driver");
3595