xref: /openbmc/linux/drivers/crypto/talitos.c (revision ed4bc189)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * talitos - Freescale Integrated Security Engine (SEC) device driver
4  *
5  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6  *
7  * Scatterlist Crypto API glue code copied from files with the following:
8  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9  *
10  * Crypto algorithm registration code copied from hifn driver:
11  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12  * All rights reserved.
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/crypto.h>
21 #include <linux/hw_random.h>
22 #include <linux/of_address.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_platform.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/io.h>
27 #include <linux/spinlock.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/slab.h>
30 
31 #include <crypto/algapi.h>
32 #include <crypto/aes.h>
33 #include <crypto/internal/des.h>
34 #include <crypto/sha.h>
35 #include <crypto/md5.h>
36 #include <crypto/internal/aead.h>
37 #include <crypto/authenc.h>
38 #include <crypto/internal/skcipher.h>
39 #include <crypto/hash.h>
40 #include <crypto/internal/hash.h>
41 #include <crypto/scatterwalk.h>
42 
43 #include "talitos.h"
44 
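/*
 * write a dma address and length into a h/w descriptor pointer;
 * SEC1 uses a 16-bit len1 field and 32-bit pointers only, while
 * SEC2+ adds eptr to carry the upper address bits for 36-bit
 * addressing (enabled via TALITOS_CCCR_LO_EAE in reset_channel())
 */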
45 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
46 			   unsigned int len, bool is_sec1)
47 {
48 	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
49 	if (is_sec1) {
50 		ptr->len1 = cpu_to_be16(len);
51 	} else {
52 		ptr->len = cpu_to_be16(len);
53 		ptr->eptr = upper_32_bits(dma_addr);
54 	}
55 }
56 
57 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
58 			     struct talitos_ptr *src_ptr, bool is_sec1)
59 {
60 	dst_ptr->ptr = src_ptr->ptr;
61 	if (is_sec1) {
62 		dst_ptr->len1 = src_ptr->len1;
63 	} else {
64 		dst_ptr->len = src_ptr->len;
65 		dst_ptr->eptr = src_ptr->eptr;
66 	}
67 }
68 
69 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
70 					   bool is_sec1)
71 {
72 	if (is_sec1)
73 		return be16_to_cpu(ptr->len1);
74 	else
75 		return be16_to_cpu(ptr->len);
76 }
77 
78 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
79 				   bool is_sec1)
80 {
81 	if (!is_sec1)
82 		ptr->j_extent = val;
83 }
84 
85 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
86 {
87 	if (!is_sec1)
88 		ptr->j_extent |= val;
89 }
90 
91 /*
92  * map virtual single (contiguous) pointer to h/w descriptor pointer
93  */
94 static void __map_single_talitos_ptr(struct device *dev,
95 				     struct talitos_ptr *ptr,
96 				     unsigned int len, void *data,
97 				     enum dma_data_direction dir,
98 				     unsigned long attrs)
99 {
100 	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
101 	struct talitos_private *priv = dev_get_drvdata(dev);
102 	bool is_sec1 = has_ftr_sec1(priv);
103 
104 	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
105 }
106 
107 static void map_single_talitos_ptr(struct device *dev,
108 				   struct talitos_ptr *ptr,
109 				   unsigned int len, void *data,
110 				   enum dma_data_direction dir)
111 {
112 	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
113 }
114 
115 static void map_single_talitos_ptr_nosync(struct device *dev,
116 					  struct talitos_ptr *ptr,
117 					  unsigned int len, void *data,
118 					  enum dma_data_direction dir)
119 {
120 	__map_single_talitos_ptr(dev, ptr, len, data, dir,
121 				 DMA_ATTR_SKIP_CPU_SYNC);
122 }
123 
124 /*
125  * unmap bus single (contiguous) h/w descriptor pointer
126  */
127 static void unmap_single_talitos_ptr(struct device *dev,
128 				     struct talitos_ptr *ptr,
129 				     enum dma_data_direction dir)
130 {
131 	struct talitos_private *priv = dev_get_drvdata(dev);
132 	bool is_sec1 = has_ftr_sec1(priv);
133 
134 	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135 			 from_talitos_ptr_len(ptr, is_sec1), dir);
136 }
137 
138 static int reset_channel(struct device *dev, int ch)
139 {
140 	struct talitos_private *priv = dev_get_drvdata(dev);
141 	unsigned int timeout = TALITOS_TIMEOUT;
142 	bool is_sec1 = has_ftr_sec1(priv);
143 
144 	if (is_sec1) {
145 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146 			  TALITOS1_CCCR_LO_RESET);
147 
148 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149 			TALITOS1_CCCR_LO_RESET) && --timeout)
150 			cpu_relax();
151 	} else {
152 		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153 			  TALITOS2_CCCR_RESET);
154 
155 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156 			TALITOS2_CCCR_RESET) && --timeout)
157 			cpu_relax();
158 	}
159 
160 	if (timeout == 0) {
161 		dev_err(dev, "failed to reset channel %d\n", ch);
162 		return -EIO;
163 	}
164 
165 	/* set 36-bit addressing, done writeback enable and done IRQ enable */
166 	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
167 		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
168 	/* enable chaining descriptors */
169 	if (is_sec1)
170 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
171 			  TALITOS_CCCR_LO_NE);
172 
173 	/* and ICCR writeback, if available */
174 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
175 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
176 		          TALITOS_CCCR_LO_IWSE);
177 
178 	return 0;
179 }
180 
181 static int reset_device(struct device *dev)
182 {
183 	struct talitos_private *priv = dev_get_drvdata(dev);
184 	unsigned int timeout = TALITOS_TIMEOUT;
185 	bool is_sec1 = has_ftr_sec1(priv);
186 	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
187 
188 	setbits32(priv->reg + TALITOS_MCR, mcr);
189 
190 	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
191 	       && --timeout)
192 		cpu_relax();
193 
194 	if (priv->irq[1]) {
195 		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
196 		setbits32(priv->reg + TALITOS_MCR, mcr);
197 	}
198 
199 	if (timeout == 0) {
200 		dev_err(dev, "failed to reset device\n");
201 		return -EIO;
202 	}
203 
204 	return 0;
205 }
206 
207 /*
208  * Reset and initialize the device
209  */
210 static int init_device(struct device *dev)
211 {
212 	struct talitos_private *priv = dev_get_drvdata(dev);
213 	int ch, err;
214 	bool is_sec1 = has_ftr_sec1(priv);
215 
216 	/*
217 	 * Master reset
218 	 * errata documentation: warning: certain SEC interrupts
219 	 * are not fully cleared by writing the MCR:SWR bit,
220 	 * set bit twice to completely reset
221 	 */
222 	err = reset_device(dev);
223 	if (err)
224 		return err;
225 
226 	err = reset_device(dev);
227 	if (err)
228 		return err;
229 
230 	/* reset channels */
231 	for (ch = 0; ch < priv->num_channels; ch++) {
232 		err = reset_channel(dev, ch);
233 		if (err)
234 			return err;
235 	}
236 
237 	/* enable channel done and error interrupts */
238 	if (is_sec1) {
239 		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
240 		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
241 		/* disable parity error check in DEU (test vectors appear erroneous) */
242 		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
243 	} else {
244 		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
245 		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
246 	}
247 
248 	/* disable integrity check error interrupts (use writeback instead) */
249 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
250 		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
251 		          TALITOS_MDEUICR_LO_ICE);
252 
253 	return 0;
254 }
255 
256 /**
257  * talitos_submit - submits a descriptor to the device for processing
258  * @dev:	the SEC device to be used
259  * @ch:		the SEC device channel to be used
260  * @desc:	the descriptor to be processed by the device
261  * @callback:	whom to call when processing is complete
262  * @context:	a handle for use by caller (optional)
263  *
264  * desc must contain valid dma-mapped (bus physical) address pointers.
265  * callback must check err and feedback in descriptor header
266  * for device processing status.
267  */
268 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
269 			  void (*callback)(struct device *dev,
270 					   struct talitos_desc *desc,
271 					   void *context, int error),
272 			  void *context)
273 {
274 	struct talitos_private *priv = dev_get_drvdata(dev);
275 	struct talitos_request *request;
276 	unsigned long flags;
277 	int head;
278 	bool is_sec1 = has_ftr_sec1(priv);
279 
280 	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
281 
282 	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
283 		/* h/w fifo is full */
284 		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
285 		return -EAGAIN;
286 	}
287 
288 	head = priv->chan[ch].head;
289 	request = &priv->chan[ch].fifo[head];
290 
291 	/* map descriptor and save caller data */
292 	if (is_sec1) {
293 		desc->hdr1 = desc->hdr;
294 		request->dma_desc = dma_map_single(dev, &desc->hdr1,
295 						   TALITOS_DESC_SIZE,
296 						   DMA_BIDIRECTIONAL);
297 	} else {
298 		request->dma_desc = dma_map_single(dev, desc,
299 						   TALITOS_DESC_SIZE,
300 						   DMA_BIDIRECTIONAL);
301 	}
302 	request->callback = callback;
303 	request->context = context;
304 
305 	/* increment fifo head */
306 	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
307 
308 	smp_wmb();
309 	request->desc = desc;
310 
311 	/* GO! */
312 	wmb();
313 	out_be32(priv->chan[ch].reg + TALITOS_FF,
314 		 upper_32_bits(request->dma_desc));
315 	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
316 		 lower_32_bits(request->dma_desc));
317 
318 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
319 
320 	return -EINPROGRESS;
321 }
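
/*
 * Illustrative sketch (not code from this driver): a typical caller
 * treats -EINPROGRESS as success and anything else as failure, e.g.
 *
 *	ret = talitos_submit(dev, ctx->ch, &edesc->desc, my_done_cb, req);
 *	if (ret != -EINPROGRESS) {
 *		// unmap/free the extended descriptor; -EAGAIN means the
 *		// channel fifo was full and the request may be retried
 *	}
 *
 * where my_done_cb is a placeholder completion callback.
 */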
322 
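/*
 * fetch the header of a completed request; on SEC1, chained requests
 * keep the final header in the second descriptor placed at
 * edesc->buf + edesc->dma_len rather than in the submitted one
 */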
323 static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
324 {
325 	struct talitos_edesc *edesc;
326 
327 	if (!is_sec1)
328 		return request->desc->hdr;
329 
330 	if (!request->desc->next_desc)
331 		return request->desc->hdr1;
332 
333 	edesc = container_of(request->desc, struct talitos_edesc, desc);
334 
335 	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
336 }
337 
338 /*
339  * process completed requests; pass any channel error to the callbacks of those that did not finish
340  */
341 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
342 {
343 	struct talitos_private *priv = dev_get_drvdata(dev);
344 	struct talitos_request *request, saved_req;
345 	unsigned long flags;
346 	int tail, status;
347 	bool is_sec1 = has_ftr_sec1(priv);
348 
349 	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
350 
351 	tail = priv->chan[ch].tail;
352 	while (priv->chan[ch].fifo[tail].desc) {
353 		__be32 hdr;
354 
355 		request = &priv->chan[ch].fifo[tail];
356 
357 		/* descriptors with their done bits set don't get the error */
358 		rmb();
359 		hdr = get_request_hdr(request, is_sec1);
360 
361 		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
362 			status = 0;
363 		else
364 			if (!error)
365 				break;
366 			else
367 				status = error;
368 
369 		dma_unmap_single(dev, request->dma_desc,
370 				 TALITOS_DESC_SIZE,
371 				 DMA_BIDIRECTIONAL);
372 
373 		/* copy entries so we can call callback outside lock */
374 		saved_req.desc = request->desc;
375 		saved_req.callback = request->callback;
376 		saved_req.context = request->context;
377 
378 		/* release request entry in fifo */
379 		smp_wmb();
380 		request->desc = NULL;
381 
382 		/* increment fifo tail */
383 		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
384 
385 		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
386 
387 		atomic_dec(&priv->chan[ch].submit_count);
388 
389 		saved_req.callback(dev, saved_req.desc, saved_req.context,
390 				   status);
391 		/* channel may resume processing in single desc error case */
392 		if (error && !reset_ch && status == error)
393 			return;
394 		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
395 		tail = priv->chan[ch].tail;
396 	}
397 
398 	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
399 }
400 
401 /*
402  * process completed requests for channels that have done status
403  */
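/*
 * SEC1 reports channel done status on ISR bits 28, 30, 16 and 18 for
 * channels 0-3, and its IMR has inverted sense: clearing a bit unmasks
 * the interrupt, hence clrbits32() here versus setbits32() in the
 * SEC2+ variant below.
 */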
404 #define DEF_TALITOS1_DONE(name, ch_done_mask)				\
405 static void talitos1_done_##name(unsigned long data)			\
406 {									\
407 	struct device *dev = (struct device *)data;			\
408 	struct talitos_private *priv = dev_get_drvdata(dev);		\
409 	unsigned long flags;						\
410 									\
411 	if (ch_done_mask & 0x10000000)					\
412 		flush_channel(dev, 0, 0, 0);				\
413 	if (ch_done_mask & 0x40000000)					\
414 		flush_channel(dev, 1, 0, 0);				\
415 	if (ch_done_mask & 0x00010000)					\
416 		flush_channel(dev, 2, 0, 0);				\
417 	if (ch_done_mask & 0x00040000)					\
418 		flush_channel(dev, 3, 0, 0);				\
419 									\
420 	/* At this point, all completed channels have been processed */	\
421 	/* Unmask done interrupts for channels completed later on. */	\
422 	spin_lock_irqsave(&priv->reg_lock, flags);			\
423 	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
424 	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
425 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
426 }
427 
428 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
429 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
430 
431 #define DEF_TALITOS2_DONE(name, ch_done_mask)				\
432 static void talitos2_done_##name(unsigned long data)			\
433 {									\
434 	struct device *dev = (struct device *)data;			\
435 	struct talitos_private *priv = dev_get_drvdata(dev);		\
436 	unsigned long flags;						\
437 									\
438 	if (ch_done_mask & 1)						\
439 		flush_channel(dev, 0, 0, 0);				\
440 	if (ch_done_mask & (1 << 2))					\
441 		flush_channel(dev, 1, 0, 0);				\
442 	if (ch_done_mask & (1 << 4))					\
443 		flush_channel(dev, 2, 0, 0);				\
444 	if (ch_done_mask & (1 << 6))					\
445 		flush_channel(dev, 3, 0, 0);				\
446 									\
447 	/* At this point, all completed channels have been processed */	\
448 	/* Unmask done interrupts for channels completed later on. */	\
449 	spin_lock_irqsave(&priv->reg_lock, flags);			\
450 	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
451 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
452 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
453 }
454 
455 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
456 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
457 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
458 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
459 
460 /*
461  * locate current (offending) descriptor
462  */
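/*
 * CDPR holds the bus address of the descriptor the channel is (or was)
 * processing; walk the s/w fifo from tail until a request matches it,
 * either directly or via a SEC1 chained next_desc.
 */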
463 static u32 current_desc_hdr(struct device *dev, int ch)
464 {
465 	struct talitos_private *priv = dev_get_drvdata(dev);
466 	int tail, iter;
467 	dma_addr_t cur_desc;
468 
469 	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
470 	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
471 
472 	if (!cur_desc) {
473 		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
474 		return 0;
475 	}
476 
477 	tail = priv->chan[ch].tail;
478 
479 	iter = tail;
480 	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
481 	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
482 		iter = (iter + 1) & (priv->fifo_len - 1);
483 		if (iter == tail) {
484 			dev_err(dev, "couldn't locate current descriptor\n");
485 			return 0;
486 		}
487 	}
488 
489 	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
490 		struct talitos_edesc *edesc;
491 
492 		edesc = container_of(priv->chan[ch].fifo[iter].desc,
493 				     struct talitos_edesc, desc);
494 		return ((struct talitos_desc *)
495 			(edesc->buf + edesc->dma_len))->hdr;
496 	}
497 
498 	return priv->chan[ch].fifo[iter].desc->hdr;
499 }
500 
501 /*
502  * user diagnostics; report root cause of error based on execution unit status
503  */
504 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
505 {
506 	struct talitos_private *priv = dev_get_drvdata(dev);
507 	int i;
508 
509 	if (!desc_hdr)
510 		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
511 
512 	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
513 	case DESC_HDR_SEL0_AFEU:
514 		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
515 			in_be32(priv->reg_afeu + TALITOS_EUISR),
516 			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
517 		break;
518 	case DESC_HDR_SEL0_DEU:
519 		dev_err(dev, "DEUISR 0x%08x_%08x\n",
520 			in_be32(priv->reg_deu + TALITOS_EUISR),
521 			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
522 		break;
523 	case DESC_HDR_SEL0_MDEUA:
524 	case DESC_HDR_SEL0_MDEUB:
525 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
526 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
527 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
528 		break;
529 	case DESC_HDR_SEL0_RNG:
530 		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
531 			in_be32(priv->reg_rngu + TALITOS_ISR),
532 			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
533 		break;
534 	case DESC_HDR_SEL0_PKEU:
535 		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
536 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
537 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
538 		break;
539 	case DESC_HDR_SEL0_AESU:
540 		dev_err(dev, "AESUISR 0x%08x_%08x\n",
541 			in_be32(priv->reg_aesu + TALITOS_EUISR),
542 			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
543 		break;
544 	case DESC_HDR_SEL0_CRCU:
545 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
546 			in_be32(priv->reg_crcu + TALITOS_EUISR),
547 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
548 		break;
549 	case DESC_HDR_SEL0_KEU:
550 		dev_err(dev, "KEUISR 0x%08x_%08x\n",
551 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
552 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
553 		break;
554 	}
555 
556 	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
557 	case DESC_HDR_SEL1_MDEUA:
558 	case DESC_HDR_SEL1_MDEUB:
559 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
560 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
561 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
562 		break;
563 	case DESC_HDR_SEL1_CRCU:
564 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
565 			in_be32(priv->reg_crcu + TALITOS_EUISR),
566 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
567 		break;
568 	}
569 
570 	for (i = 0; i < 8; i++)
571 		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
572 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
573 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
574 }
575 
576 /*
577  * recover from error interrupts
578  */
579 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
580 {
581 	struct talitos_private *priv = dev_get_drvdata(dev);
582 	unsigned int timeout = TALITOS_TIMEOUT;
583 	int ch, error, reset_dev = 0;
584 	u32 v_lo;
585 	bool is_sec1 = has_ftr_sec1(priv);
586 	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
587 
588 	for (ch = 0; ch < priv->num_channels; ch++) {
589 		/* skip channels without errors */
590 		if (is_sec1) {
591 			/* bits 29, 31, 17, 19 */
592 			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
593 				continue;
594 		} else {
595 			if (!(isr & (1 << (ch * 2 + 1))))
596 				continue;
597 		}
598 
599 		error = -EINVAL;
600 
601 		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
602 
603 		if (v_lo & TALITOS_CCPSR_LO_DOF) {
604 			dev_err(dev, "double fetch fifo overflow error\n");
605 			error = -EAGAIN;
606 			reset_ch = 1;
607 		}
608 		if (v_lo & TALITOS_CCPSR_LO_SOF) {
609 			/* h/w dropped descriptor */
610 			dev_err(dev, "single fetch fifo overflow error\n");
611 			error = -EAGAIN;
612 		}
613 		if (v_lo & TALITOS_CCPSR_LO_MDTE)
614 			dev_err(dev, "master data transfer error\n");
615 		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
616 			dev_err(dev, is_sec1 ? "pointer not complete error\n"
617 					     : "s/g data length zero error\n");
618 		if (v_lo & TALITOS_CCPSR_LO_FPZ)
619 			dev_err(dev, is_sec1 ? "parity error\n"
620 					     : "fetch pointer zero error\n");
621 		if (v_lo & TALITOS_CCPSR_LO_IDH)
622 			dev_err(dev, "illegal descriptor header error\n");
623 		if (v_lo & TALITOS_CCPSR_LO_IEU)
624 			dev_err(dev, is_sec1 ? "static assignment error\n"
625 					     : "invalid exec unit error\n");
626 		if (v_lo & TALITOS_CCPSR_LO_EU)
627 			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
628 		if (!is_sec1) {
629 			if (v_lo & TALITOS_CCPSR_LO_GB)
630 				dev_err(dev, "gather boundary error\n");
631 			if (v_lo & TALITOS_CCPSR_LO_GRL)
632 				dev_err(dev, "gather return/length error\n");
633 			if (v_lo & TALITOS_CCPSR_LO_SB)
634 				dev_err(dev, "scatter boundary error\n");
635 			if (v_lo & TALITOS_CCPSR_LO_SRL)
636 				dev_err(dev, "scatter return/length error\n");
637 		}
638 
639 		flush_channel(dev, ch, error, reset_ch);
640 
641 		if (reset_ch) {
642 			reset_channel(dev, ch);
643 		} else {
644 			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
645 				  TALITOS2_CCCR_CONT);
646 			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
647 			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
648 			       TALITOS2_CCCR_CONT) && --timeout)
649 				cpu_relax();
650 			if (timeout == 0) {
651 				dev_err(dev, "failed to restart channel %d\n",
652 					ch);
653 				reset_dev = 1;
654 			}
655 		}
656 	}
657 	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
658 	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
659 		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
660 			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
661 				isr, isr_lo);
662 		else
663 			dev_err(dev, "done overflow, internal time out, or rngu error: ISR 0x%08x_%08x\n",
664 				isr, isr_lo);
665 
666 		/* purge request queues */
667 		for (ch = 0; ch < priv->num_channels; ch++)
668 			flush_channel(dev, ch, -EIO, 1);
669 
670 		/* reset and reinitialize the device */
671 		init_device(dev);
672 	}
673 }
674 
675 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
676 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
677 {									       \
678 	struct device *dev = data;					       \
679 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
680 	u32 isr, isr_lo;						       \
681 	unsigned long flags;						       \
682 									       \
683 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
684 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
685 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
686 	/* Acknowledge interrupt */					       \
687 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
688 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
689 									       \
690 	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
691 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
692 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
693 	}								       \
694 	else {								       \
695 		if (likely(isr & ch_done_mask)) {			       \
696 			/* mask further done interrupts. */		       \
697 			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
698 			/* done_task will unmask done interrupts at exit */    \
699 			tasklet_schedule(&priv->done_task[tlet]);	       \
700 		}							       \
701 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
702 	}								       \
703 									       \
704 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
705 								IRQ_NONE;      \
706 }
707 
708 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
709 
710 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
711 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
712 {									       \
713 	struct device *dev = data;					       \
714 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
715 	u32 isr, isr_lo;						       \
716 	unsigned long flags;						       \
717 									       \
718 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
719 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
720 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
721 	/* Acknowledge interrupt */					       \
722 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
723 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
724 									       \
725 	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
726 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
727 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
728 	}								       \
729 	else {								       \
730 		if (likely(isr & ch_done_mask)) {			       \
731 			/* mask further done interrupts. */		       \
732 			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
733 			/* done_task will unmask done interrupts at exit */    \
734 			tasklet_schedule(&priv->done_task[tlet]);	       \
735 		}							       \
736 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
737 	}								       \
738 									       \
739 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
740 								IRQ_NONE;      \
741 }
742 
743 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
744 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
745 		       0)
746 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
747 		       1)
748 
749 /*
750  * hwrng
751  */
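/*
 * The RNGU presents a 64-bit output fifo; data_present polls the fifo
 * output level flag for up to roughly 200us when asked to wait.
 */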
752 static int talitos_rng_data_present(struct hwrng *rng, int wait)
753 {
754 	struct device *dev = (struct device *)rng->priv;
755 	struct talitos_private *priv = dev_get_drvdata(dev);
756 	u32 ofl;
757 	int i;
758 
759 	for (i = 0; i < 20; i++) {
760 		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
761 		      TALITOS_RNGUSR_LO_OFL;
762 		if (ofl || !wait)
763 			break;
764 		udelay(10);
765 	}
766 
767 	return !!ofl;
768 }
769 
770 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
771 {
772 	struct device *dev = (struct device *)rng->priv;
773 	struct talitos_private *priv = dev_get_drvdata(dev);
774 
775 	/* rng fifo requires 64-bit accesses: read both words, return the low one */
776 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
777 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
778 
779 	return sizeof(u32);
780 }
781 
782 static int talitos_rng_init(struct hwrng *rng)
783 {
784 	struct device *dev = (struct device *)rng->priv;
785 	struct talitos_private *priv = dev_get_drvdata(dev);
786 	unsigned int timeout = TALITOS_TIMEOUT;
787 
788 	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
789 	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
790 		 & TALITOS_RNGUSR_LO_RD)
791 	       && --timeout)
792 		cpu_relax();
793 	if (timeout == 0) {
794 		dev_err(dev, "failed to reset rng hw\n");
795 		return -ENODEV;
796 	}
797 
798 	/* start generating */
799 	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
800 
801 	return 0;
802 }
803 
804 static int talitos_register_rng(struct device *dev)
805 {
806 	struct talitos_private *priv = dev_get_drvdata(dev);
807 	int err;
808 
809 	priv->rng.name		= dev_driver_string(dev);
810 	priv->rng.init		= talitos_rng_init;
811 	priv->rng.data_present	= talitos_rng_data_present;
812 	priv->rng.data_read	= talitos_rng_data_read;
813 	priv->rng.priv		= (unsigned long)dev;
814 
815 	err = hwrng_register(&priv->rng);
816 	if (!err)
817 		priv->rng_registered = true;
818 
819 	return err;
820 }
821 
822 static void talitos_unregister_rng(struct device *dev)
823 {
824 	struct talitos_private *priv = dev_get_drvdata(dev);
825 
826 	if (!priv->rng_registered)
827 		return;
828 
829 	hwrng_unregister(&priv->rng);
830 	priv->rng_registered = false;
831 }
832 
833 /*
834  * crypto alg
835  */
836 #define TALITOS_CRA_PRIORITY		3000
837 /*
838  * Defines a priority for doing AEAD with descriptors type
839  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
840  */
841 #define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
842 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
843 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
844 #else
845 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
846 #endif
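/* for AEADs, ctx->key holds the auth key followed by the cipher key (see aead_setkey) */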
847 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
848 
849 struct talitos_ctx {
850 	struct device *dev;
851 	int ch;
852 	__be32 desc_hdr_template;
853 	u8 key[TALITOS_MAX_KEY_SIZE];
854 	u8 iv[TALITOS_MAX_IV_LENGTH];
855 	dma_addr_t dma_key;
856 	unsigned int keylen;
857 	unsigned int enckeylen;
858 	unsigned int authkeylen;
859 };
860 
861 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
862 #define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
863 
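/*
 * per-request ahash state: exported h/w context plus a double buffer
 * (buf[2]/buf_idx) that carries a partial trailing block over to the
 * next update/final/finup
 */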
864 struct talitos_ahash_req_ctx {
865 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
866 	unsigned int hw_context_size;
867 	u8 buf[2][HASH_MAX_BLOCK_SIZE];
868 	int buf_idx;
869 	unsigned int swinit;
870 	unsigned int first;
871 	unsigned int last;
872 	unsigned int to_hash_later;
873 	unsigned int nbuf;
874 	struct scatterlist bufsl[2];
875 	struct scatterlist *psrc;
876 };
877 
878 struct talitos_export_state {
879 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
880 	u8 buf[HASH_MAX_BLOCK_SIZE];
881 	unsigned int swinit;
882 	unsigned int first;
883 	unsigned int last;
884 	unsigned int to_hash_later;
885 	unsigned int nbuf;
886 };
887 
888 static int aead_setkey(struct crypto_aead *authenc,
889 		       const u8 *key, unsigned int keylen)
890 {
891 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
892 	struct device *dev = ctx->dev;
893 	struct crypto_authenc_keys keys;
894 
895 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
896 		goto badkey;
897 
898 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
899 		goto badkey;
900 
901 	if (ctx->keylen)
902 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
903 
904 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
905 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
906 
907 	ctx->keylen = keys.authkeylen + keys.enckeylen;
908 	ctx->enckeylen = keys.enckeylen;
909 	ctx->authkeylen = keys.authkeylen;
910 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
911 				      DMA_TO_DEVICE);
912 
913 	memzero_explicit(&keys, sizeof(keys));
914 	return 0;
915 
916 badkey:
917 	memzero_explicit(&keys, sizeof(keys));
918 	return -EINVAL;
919 }
920 
921 static int aead_des3_setkey(struct crypto_aead *authenc,
922 			    const u8 *key, unsigned int keylen)
923 {
924 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
925 	struct device *dev = ctx->dev;
926 	struct crypto_authenc_keys keys;
927 	int err;
928 
929 	err = crypto_authenc_extractkeys(&keys, key, keylen);
930 	if (unlikely(err))
931 		goto out;
932 
933 	err = -EINVAL;
934 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
935 		goto out;
936 
937 	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
938 	if (err)
939 		goto out;
940 
941 	if (ctx->keylen)
942 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
943 
944 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
945 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
946 
947 	ctx->keylen = keys.authkeylen + keys.enckeylen;
948 	ctx->enckeylen = keys.enckeylen;
949 	ctx->authkeylen = keys.authkeylen;
950 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
951 				      DMA_TO_DEVICE);
952 
953 out:
954 	memzero_explicit(&keys, sizeof(keys));
955 	return err;
956 }
957 
958 static void talitos_sg_unmap(struct device *dev,
959 			     struct talitos_edesc *edesc,
960 			     struct scatterlist *src,
961 			     struct scatterlist *dst,
962 			     unsigned int len, unsigned int offset)
963 {
964 	struct talitos_private *priv = dev_get_drvdata(dev);
965 	bool is_sec1 = has_ftr_sec1(priv);
966 	unsigned int src_nents = edesc->src_nents ? : 1;
967 	unsigned int dst_nents = edesc->dst_nents ? : 1;
968 
969 	if (is_sec1 && dst && dst_nents > 1) {
970 		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
971 					   len, DMA_FROM_DEVICE);
972 		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
973 				     offset);
974 	}
975 	if (src != dst) {
976 		if (src_nents == 1 || !is_sec1)
977 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
978 
979 		if (dst && (dst_nents == 1 || !is_sec1))
980 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
981 	} else if (src_nents == 1 || !is_sec1) {
982 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
983 	}
984 }
985 
986 static void ipsec_esp_unmap(struct device *dev,
987 			    struct talitos_edesc *edesc,
988 			    struct aead_request *areq, bool encrypt)
989 {
990 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
991 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
992 	unsigned int ivsize = crypto_aead_ivsize(aead);
993 	unsigned int authsize = crypto_aead_authsize(aead);
994 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
995 	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
996 	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
997 
998 	if (is_ipsec_esp)
999 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1000 					 DMA_FROM_DEVICE);
1001 	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1002 
1003 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1004 			 cryptlen + authsize, areq->assoclen);
1005 
1006 	if (edesc->dma_len)
1007 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1008 				 DMA_BIDIRECTIONAL);
1009 
1010 	if (!is_ipsec_esp) {
1011 		unsigned int dst_nents = edesc->dst_nents ? : 1;
1012 
1013 		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1014 				   areq->assoclen + cryptlen - ivsize);
1015 	}
1016 }
1017 
1018 /*
1019  * ipsec_esp descriptor callbacks
1020  */
1021 static void ipsec_esp_encrypt_done(struct device *dev,
1022 				   struct talitos_desc *desc, void *context,
1023 				   int err)
1024 {
1025 	struct aead_request *areq = context;
1026 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1027 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1028 	struct talitos_edesc *edesc;
1029 
1030 	edesc = container_of(desc, struct talitos_edesc, desc);
1031 
1032 	ipsec_esp_unmap(dev, edesc, areq, true);
1033 
1034 	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1035 
1036 	kfree(edesc);
1037 
1038 	aead_request_complete(areq, err);
1039 }
1040 
1041 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1042 					  struct talitos_desc *desc,
1043 					  void *context, int err)
1044 {
1045 	struct aead_request *req = context;
1046 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1047 	unsigned int authsize = crypto_aead_authsize(authenc);
1048 	struct talitos_edesc *edesc;
1049 	char *oicv, *icv;
1050 
1051 	edesc = container_of(desc, struct talitos_edesc, desc);
1052 
1053 	ipsec_esp_unmap(dev, edesc, req, false);
1054 
1055 	if (!err) {
1056 		/* auth check */
1057 		oicv = edesc->buf + edesc->dma_len;
1058 		icv = oicv - authsize;
1059 
1060 		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1061 	}
1062 
1063 	kfree(edesc);
1064 
1065 	aead_request_complete(req, err);
1066 }
1067 
1068 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1069 					  struct talitos_desc *desc,
1070 					  void *context, int err)
1071 {
1072 	struct aead_request *req = context;
1073 	struct talitos_edesc *edesc;
1074 
1075 	edesc = container_of(desc, struct talitos_edesc, desc);
1076 
1077 	ipsec_esp_unmap(dev, edesc, req, false);
1078 
1079 	/* check ICV auth status */
1080 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1081 		     DESC_HDR_LO_ICCR1_PASS))
1082 		err = -EBADMSG;
1083 
1084 	kfree(edesc);
1085 
1086 	aead_request_complete(req, err);
1087 }
1088 
1089 /*
1090  * convert scatterlist to SEC h/w link table format
1091  * stop at cryptlen bytes
1092  */
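/*
 * datalen is payload bytes and elen is extra extent bytes (e.g. a
 * stashed ICV) beyond it; an entry straddling the datalen boundary is
 * split in two, which is why n_sg allows one extra entry when elen != 0
 */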
1093 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1094 				 unsigned int offset, int datalen, int elen,
1095 				 struct talitos_ptr *link_tbl_ptr)
1096 {
1097 	int n_sg = elen ? sg_count + 1 : sg_count;
1098 	int count = 0;
1099 	int cryptlen = datalen + elen;
1100 
1101 	while (cryptlen && sg && n_sg--) {
1102 		unsigned int len = sg_dma_len(sg);
1103 
1104 		if (offset >= len) {
1105 			offset -= len;
1106 			goto next;
1107 		}
1108 
1109 		len -= offset;
1110 
1111 		if (len > cryptlen)
1112 			len = cryptlen;
1113 
1114 		if (datalen > 0 && len > datalen) {
1115 			to_talitos_ptr(link_tbl_ptr + count,
1116 				       sg_dma_address(sg) + offset, datalen, 0);
1117 			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1118 			count++;
1119 			len -= datalen;
1120 			offset += datalen;
1121 		}
1122 		to_talitos_ptr(link_tbl_ptr + count,
1123 			       sg_dma_address(sg) + offset, len, 0);
1124 		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1125 		count++;
1126 		cryptlen -= len;
1127 		datalen -= len;
1128 		offset = 0;
1129 
1130 next:
1131 		sg = sg_next(sg);
1132 	}
1133 
1134 	/* tag end of link table */
1135 	if (count > 0)
1136 		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1137 				       DESC_PTR_LNKTBL_RET, 0);
1138 
1139 	return count;
1140 }
1141 
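/*
 * map source data into descriptor pointer @ptr: a single segment maps
 * directly, SEC1 points at the contiguous bounce buffer, and anything
 * else gets a link table (tagged with DESC_PTR_LNKTBL_JUMP) at @tbl_off
 */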
1142 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1143 			      unsigned int len, struct talitos_edesc *edesc,
1144 			      struct talitos_ptr *ptr, int sg_count,
1145 			      unsigned int offset, int tbl_off, int elen,
1146 			      bool force)
1147 {
1148 	struct talitos_private *priv = dev_get_drvdata(dev);
1149 	bool is_sec1 = has_ftr_sec1(priv);
1150 
1151 	if (!src) {
1152 		to_talitos_ptr(ptr, 0, 0, is_sec1);
1153 		return 1;
1154 	}
1155 	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1156 	if (sg_count == 1 && !force) {
1157 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1158 		return sg_count;
1159 	}
1160 	if (is_sec1) {
1161 		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1162 		return sg_count;
1163 	}
1164 	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1165 					 &edesc->link_tbl[tbl_off]);
1166 	if (sg_count == 1 && !force) {
1167 		/* Only one segment now, so no link tbl needed */
1168 		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1169 		return sg_count;
1170 	}
1171 	to_talitos_ptr(ptr, edesc->dma_link_tbl +
1172 			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1173 	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1174 
1175 	return sg_count;
1176 }
1177 
1178 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1179 			  unsigned int len, struct talitos_edesc *edesc,
1180 			  struct talitos_ptr *ptr, int sg_count,
1181 			  unsigned int offset, int tbl_off)
1182 {
1183 	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1184 				  tbl_off, 0, false);
1185 }
1186 
1187 /*
1188  * fill in and submit ipsec_esp descriptor
1189  */
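/*
 * descriptor pointer usage, as filled in below: ptr[0] hmac key,
 * ptr[1] hmac/associated data, ptr[2]/ptr[3] cipher iv and cipher key
 * (order depends on descriptor type), ptr[4] cipher in, ptr[5] cipher
 * out, ptr[6] icv or iv out
 */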
1190 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1191 		     bool encrypt,
1192 		     void (*callback)(struct device *dev,
1193 				      struct talitos_desc *desc,
1194 				      void *context, int error))
1195 {
1196 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1197 	unsigned int authsize = crypto_aead_authsize(aead);
1198 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1199 	struct device *dev = ctx->dev;
1200 	struct talitos_desc *desc = &edesc->desc;
1201 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1202 	unsigned int ivsize = crypto_aead_ivsize(aead);
1203 	int tbl_off = 0;
1204 	int sg_count, ret;
1205 	int elen = 0;
1206 	bool sync_needed = false;
1207 	struct talitos_private *priv = dev_get_drvdata(dev);
1208 	bool is_sec1 = has_ftr_sec1(priv);
1209 	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1210 	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1211 	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1212 	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
1213 
1214 	/* hmac key */
1215 	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1216 
1217 	sg_count = edesc->src_nents ?: 1;
1218 	if (is_sec1 && sg_count > 1)
1219 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1220 				  areq->assoclen + cryptlen);
1221 	else
1222 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1223 				      (areq->src == areq->dst) ?
1224 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1225 
1226 	/* hmac data */
1227 	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1228 			     &desc->ptr[1], sg_count, 0, tbl_off);
1229 
1230 	if (ret > 1) {
1231 		tbl_off += ret;
1232 		sync_needed = true;
1233 	}
1234 
1235 	/* cipher iv */
1236 	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1237 
1238 	/* cipher key */
1239 	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1240 		       ctx->enckeylen, is_sec1);
1241 
1242 	/*
1243 	 * cipher in
1244 	 * map and adjust cipher len to aead request cryptlen.
1245 	 * extent is bytes of HMAC appended after the ciphertext,
1246 	 * typically 12 for ipsec
1247 	 */
1248 	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1249 		elen = authsize;
1250 
1251 	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1252 				 sg_count, areq->assoclen, tbl_off, elen,
1253 				 false);
1254 
1255 	if (ret > 1) {
1256 		tbl_off += ret;
1257 		sync_needed = true;
1258 	}
1259 
1260 	/* cipher out */
1261 	if (areq->src != areq->dst) {
1262 		sg_count = edesc->dst_nents ? : 1;
1263 		if (!is_sec1 || sg_count == 1)
1264 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1265 	}
1266 
1267 	if (is_ipsec_esp && encrypt)
1268 		elen = authsize;
1269 	else
1270 		elen = 0;
1271 	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1272 				 sg_count, areq->assoclen, tbl_off, elen,
1273 				 is_ipsec_esp && !encrypt);
1274 	tbl_off += ret;
1275 
1276 	if (!encrypt && is_ipsec_esp) {
1277 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1278 
1279 		/* Add an entry to the link table for ICV data */
1280 		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1281 		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1282 
1283 		/* icv data follows link tables */
1284 		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1285 		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1286 		sync_needed = true;
1287 	} else if (!encrypt) {
1288 		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1289 		sync_needed = true;
1290 	} else if (!is_ipsec_esp) {
1291 		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1292 			       sg_count, areq->assoclen + cryptlen, tbl_off);
1293 	}
1294 
1295 	/* iv out */
1296 	if (is_ipsec_esp)
1297 		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1298 				       DMA_FROM_DEVICE);
1299 
1300 	if (sync_needed)
1301 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1302 					   edesc->dma_len,
1303 					   DMA_BIDIRECTIONAL);
1304 
1305 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1306 	if (ret != -EINPROGRESS) {
1307 		ipsec_esp_unmap(dev, edesc, areq, encrypt);
1308 		kfree(edesc);
1309 	}
1310 	return ret;
1311 }
1312 
1313 /*
1314  * allocate and map the extended descriptor
1315  */
1316 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1317 						 struct scatterlist *src,
1318 						 struct scatterlist *dst,
1319 						 u8 *iv,
1320 						 unsigned int assoclen,
1321 						 unsigned int cryptlen,
1322 						 unsigned int authsize,
1323 						 unsigned int ivsize,
1324 						 int icv_stashing,
1325 						 u32 cryptoflags,
1326 						 bool encrypt)
1327 {
1328 	struct talitos_edesc *edesc;
1329 	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1330 	dma_addr_t iv_dma = 0;
1331 	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1332 		      GFP_ATOMIC;
1333 	struct talitos_private *priv = dev_get_drvdata(dev);
1334 	bool is_sec1 = has_ftr_sec1(priv);
1335 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1336 
1337 	if (cryptlen + authsize > max_len) {
1338 		dev_err(dev, "length exceeds h/w max limit\n");
1339 		return ERR_PTR(-EINVAL);
1340 	}
1341 
1342 	if (!dst || dst == src) {
1343 		src_len = assoclen + cryptlen + authsize;
1344 		src_nents = sg_nents_for_len(src, src_len);
1345 		if (src_nents < 0) {
1346 			dev_err(dev, "Invalid number of src SG.\n");
1347 			return ERR_PTR(-EINVAL);
1348 		}
1349 		src_nents = (src_nents == 1) ? 0 : src_nents;
1350 		dst_nents = dst ? src_nents : 0;
1351 		dst_len = 0;
1352 	} else { /* dst && dst != src */
1353 		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1354 		src_nents = sg_nents_for_len(src, src_len);
1355 		if (src_nents < 0) {
1356 			dev_err(dev, "Invalid number of src SG.\n");
1357 			return ERR_PTR(-EINVAL);
1358 		}
1359 		src_nents = (src_nents == 1) ? 0 : src_nents;
1360 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1361 		dst_nents = sg_nents_for_len(dst, dst_len);
1362 		if (dst_nents < 0) {
1363 			dev_err(dev, "Invalid number of dst SG.\n");
1364 			return ERR_PTR(-EINVAL);
1365 		}
1366 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1367 	}
1368 
1369 	/*
1370 	 * allocate space for base edesc plus the link tables,
1371 	 * allowing for two separate entries for AD and generated ICV (+ 2),
1372 	 * and space for two sets of ICVs (stashed and generated)
1373 	 */
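	/*
	 * on SEC1 this area is a contiguous data bounce buffer; on SEC2+
	 * it is a table of talitos_ptr link entries
	 */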
1374 	alloc_len = sizeof(struct talitos_edesc);
1375 	if (src_nents || dst_nents || !encrypt) {
1376 		if (is_sec1)
1377 			dma_len = (src_nents ? src_len : 0) +
1378 				  (dst_nents ? dst_len : 0) + authsize;
1379 		else
1380 			dma_len = (src_nents + dst_nents + 2) *
1381 				  sizeof(struct talitos_ptr) + authsize;
1382 		alloc_len += dma_len;
1383 	} else {
1384 		dma_len = 0;
1385 	}
1386 	alloc_len += icv_stashing ? authsize : 0;
1387 
1388 	/* if it's an ahash, add space for a second desc next to the first one */
1389 	if (is_sec1 && !dst)
1390 		alloc_len += sizeof(struct talitos_desc);
1391 	alloc_len += ivsize;
1392 
1393 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1394 	if (!edesc)
1395 		return ERR_PTR(-ENOMEM);
1396 	if (ivsize) {
1397 		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1398 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1399 	}
1400 	memset(&edesc->desc, 0, sizeof(edesc->desc));
1401 
1402 	edesc->src_nents = src_nents;
1403 	edesc->dst_nents = dst_nents;
1404 	edesc->iv_dma = iv_dma;
1405 	edesc->dma_len = dma_len;
1406 	if (dma_len)
1407 		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1408 						     edesc->dma_len,
1409 						     DMA_BIDIRECTIONAL);
1410 
1411 	return edesc;
1412 }
1413 
1414 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1415 					      int icv_stashing, bool encrypt)
1416 {
1417 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1418 	unsigned int authsize = crypto_aead_authsize(authenc);
1419 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1420 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1421 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1422 
1423 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1424 				   iv, areq->assoclen, cryptlen,
1425 				   authsize, ivsize, icv_stashing,
1426 				   areq->base.flags, encrypt);
1427 }
1428 
1429 static int aead_encrypt(struct aead_request *req)
1430 {
1431 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1432 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1433 	struct talitos_edesc *edesc;
1434 
1435 	/* allocate extended descriptor */
1436 	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1437 	if (IS_ERR(edesc))
1438 		return PTR_ERR(edesc);
1439 
1440 	/* set encrypt */
1441 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1442 
1443 	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1444 }
1445 
1446 static int aead_decrypt(struct aead_request *req)
1447 {
1448 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1449 	unsigned int authsize = crypto_aead_authsize(authenc);
1450 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1451 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1452 	struct talitos_edesc *edesc;
1453 	void *icvdata;
1454 
1455 	/* allocate extended descriptor */
1456 	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1457 	if (IS_ERR(edesc))
1458 		return PTR_ERR(edesc);
1459 
1460 	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1461 	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1462 	    ((!edesc->src_nents && !edesc->dst_nents) ||
1463 	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1464 
1465 		/* decrypt and check the ICV */
1466 		edesc->desc.hdr = ctx->desc_hdr_template |
1467 				  DESC_HDR_DIR_INBOUND |
1468 				  DESC_HDR_MODE1_MDEU_CICV;
1469 
1470 		/* reset integrity check result bits */
1471 
1472 		return ipsec_esp(edesc, req, false,
1473 				 ipsec_esp_decrypt_hwauth_done);
1474 	}
1475 
1476 	/* Have to check the ICV with software */
1477 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1478 
1479 	/* stash incoming ICV for later comparison with the ICV generated by the h/w */
1480 	icvdata = edesc->buf + edesc->dma_len;
1481 
1482 	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1483 			   req->assoclen + req->cryptlen - authsize);
1484 
1485 	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1486 }
1487 
1488 static int skcipher_setkey(struct crypto_skcipher *cipher,
1489 			     const u8 *key, unsigned int keylen)
1490 {
1491 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1492 	struct device *dev = ctx->dev;
1493 
1494 	if (ctx->keylen)
1495 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1496 
1497 	memcpy(&ctx->key, key, keylen);
1498 	ctx->keylen = keylen;
1499 
1500 	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1501 
1502 	return 0;
1503 }
1504 
1505 static int skcipher_des_setkey(struct crypto_skcipher *cipher,
1506 				 const u8 *key, unsigned int keylen)
1507 {
1508 	return verify_skcipher_des_key(cipher, key) ?:
1509 	       skcipher_setkey(cipher, key, keylen);
1510 }
1511 
1512 static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
1513 				  const u8 *key, unsigned int keylen)
1514 {
1515 	return verify_skcipher_des3_key(cipher, key) ?:
1516 	       skcipher_setkey(cipher, key, keylen);
1517 }
1518 
1519 static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
1520 				  const u8 *key, unsigned int keylen)
1521 {
1522 	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1523 	    keylen == AES_KEYSIZE_256)
1524 		return skcipher_setkey(cipher, key, keylen);
1525 
1526 	return -EINVAL;
1527 }
1528 
1529 static void common_nonsnoop_unmap(struct device *dev,
1530 				  struct talitos_edesc *edesc,
1531 				  struct skcipher_request *areq)
1532 {
1533 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1534 
1535 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
1536 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1537 
1538 	if (edesc->dma_len)
1539 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1540 				 DMA_BIDIRECTIONAL);
1541 }
1542 
1543 static void skcipher_done(struct device *dev,
1544 			    struct talitos_desc *desc, void *context,
1545 			    int err)
1546 {
1547 	struct skcipher_request *areq = context;
1548 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1549 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1550 	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1551 	struct talitos_edesc *edesc;
1552 
1553 	edesc = container_of(desc, struct talitos_edesc, desc);
1554 
1555 	common_nonsnoop_unmap(dev, edesc, areq);
1556 	memcpy(areq->iv, ctx->iv, ivsize);
1557 
1558 	kfree(edesc);
1559 
1560 	areq->base.complete(&areq->base, err);
1561 }
1562 
1563 static int common_nonsnoop(struct talitos_edesc *edesc,
1564 			   struct skcipher_request *areq,
1565 			   void (*callback) (struct device *dev,
1566 					     struct talitos_desc *desc,
1567 					     void *context, int error))
1568 {
1569 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1570 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1571 	struct device *dev = ctx->dev;
1572 	struct talitos_desc *desc = &edesc->desc;
1573 	unsigned int cryptlen = areq->cryptlen;
1574 	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1575 	int sg_count, ret;
1576 	bool sync_needed = false;
1577 	struct talitos_private *priv = dev_get_drvdata(dev);
1578 	bool is_sec1 = has_ftr_sec1(priv);
1579 
1580 	/* first DWORD empty */
1581 
1582 	/* cipher iv */
1583 	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1584 
1585 	/* cipher key */
1586 	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1587 
1588 	sg_count = edesc->src_nents ?: 1;
1589 	if (is_sec1 && sg_count > 1)
1590 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1591 				  cryptlen);
1592 	else
1593 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1594 				      (areq->src == areq->dst) ?
1595 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1596 	/*
1597 	 * cipher in
1598 	 */
1599 	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1600 				  &desc->ptr[3], sg_count, 0, 0);
1601 	if (sg_count > 1)
1602 		sync_needed = true;
1603 
1604 	/* cipher out */
1605 	if (areq->src != areq->dst) {
1606 		sg_count = edesc->dst_nents ? : 1;
1607 		if (!is_sec1 || sg_count == 1)
1608 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1609 	}
1610 
1611 	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1612 			     sg_count, 0, (edesc->src_nents + 1));
1613 	if (ret > 1)
1614 		sync_needed = true;
1615 
1616 	/* iv out */
1617 	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1618 			       DMA_FROM_DEVICE);
1619 
1620 	/* last DWORD empty */
1621 
1622 	if (sync_needed)
1623 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1624 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1625 
1626 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1627 	if (ret != -EINPROGRESS) {
1628 		common_nonsnoop_unmap(dev, edesc, areq);
1629 		kfree(edesc);
1630 	}
1631 	return ret;
1632 }
1633 
1634 static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *areq,
1635 						   bool encrypt)
1636 {
1637 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1638 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1639 	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1640 
1641 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1642 				   areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
1643 				   areq->base.flags, encrypt);
1644 }
1645 
1646 static int skcipher_encrypt(struct skcipher_request *areq)
1647 {
1648 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1649 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1650 	struct talitos_edesc *edesc;
1651 	unsigned int blocksize =
1652 			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1653 
1654 	if (!areq->cryptlen)
1655 		return 0;
1656 
1657 	if (areq->cryptlen % blocksize)
1658 		return -EINVAL;
1659 
1660 	/* allocate extended descriptor */
1661 	edesc = skcipher_edesc_alloc(areq, true);
1662 	if (IS_ERR(edesc))
1663 		return PTR_ERR(edesc);
1664 
1665 	/* set encrypt */
1666 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1667 
1668 	return common_nonsnoop(edesc, areq, skcipher_done);
1669 }
1670 
1671 static int skcipher_decrypt(struct skcipher_request *areq)
1672 {
1673 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1674 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1675 	struct talitos_edesc *edesc;
1676 	unsigned int blocksize =
1677 			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1678 
1679 	if (!areq->cryptlen)
1680 		return 0;
1681 
1682 	if (areq->cryptlen % blocksize)
1683 		return -EINVAL;
1684 
1685 	/* allocate extended descriptor */
1686 	edesc = skcipher_edesc_alloc(areq, false);
1687 	if (IS_ERR(edesc))
1688 		return PTR_ERR(edesc);
1689 
1690 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1691 
1692 	return common_nonsnoop(edesc, areq, skcipher_done);
1693 }
1694 
1695 static void common_nonsnoop_hash_unmap(struct device *dev,
1696 				       struct talitos_edesc *edesc,
1697 				       struct ahash_request *areq)
1698 {
1699 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1700 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1701 	struct talitos_private *priv = dev_get_drvdata(dev);
1702 	bool is_sec1 = has_ftr_sec1(priv);
1703 	struct talitos_desc *desc = &edesc->desc;
1704 	struct talitos_desc *desc2 = (struct talitos_desc *)
1705 				     (edesc->buf + edesc->dma_len);
1706 
1707 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1708 	if (desc->next_desc &&
1709 	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
1710 		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1711 	if (req_ctx->last)
1712 		memcpy(areq->result, req_ctx->hw_context,
1713 		       crypto_ahash_digestsize(tfm));
1714 
1715 	if (req_ctx->psrc)
1716 		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1717 
1718 	/* When using hashctx-in, must unmap it. */
1719 	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1720 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1721 					 DMA_TO_DEVICE);
1722 	else if (desc->next_desc)
1723 		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1724 					 DMA_TO_DEVICE);
1725 
1726 	if (is_sec1 && req_ctx->nbuf)
1727 		unmap_single_talitos_ptr(dev, &desc->ptr[3],
1728 					 DMA_TO_DEVICE);
1729 
1730 	if (edesc->dma_len)
1731 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1732 				 DMA_BIDIRECTIONAL);
1733 
1734 	if (edesc->desc.next_desc)
1735 		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1736 				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1737 }
1738 
1739 static void ahash_done(struct device *dev,
1740 		       struct talitos_desc *desc, void *context,
1741 		       int err)
1742 {
1743 	struct ahash_request *areq = context;
1744 	struct talitos_edesc *edesc =
1745 		 container_of(desc, struct talitos_edesc, desc);
1746 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1747 
1748 	if (!req_ctx->last && req_ctx->to_hash_later) {
1749 		/* Position any partial block for next update/final/finup */
1750 		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1751 		req_ctx->nbuf = req_ctx->to_hash_later;
1752 	}
1753 	common_nonsnoop_hash_unmap(dev, edesc, areq);
1754 
1755 	kfree(edesc);
1756 
1757 	areq->base.complete(&areq->base, err);
1758 }
1759 
1760 /*
1761  * SEC1 doesn't like hashing a zero-sized message, so we do the
1762  * padding ourselves and submit a single pre-padded block
1763  */
1764 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1765 			       struct talitos_edesc *edesc,
1766 			       struct talitos_ptr *ptr)
1767 {
1768 	static u8 padded_hash[64] = {
1769 		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1770 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1771 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1772 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1773 	};
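
	/*
	 * A lone 0x80 followed by zeros is exactly the MD-style padding
	 * of an empty message for the 64-byte-block digests SEC1 runs
	 * (MD5/SHA-1/SHA-256): the trailing 64-bit length field is
	 * itself all zeros.
	 */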
1774 
1775 	pr_err_once("Bug in SEC1, padding ourselves\n");
1776 	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1777 	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1778 			       (char *)padded_hash, DMA_TO_DEVICE);
1779 }
1780 
1781 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1782 				struct ahash_request *areq, unsigned int length,
1783 				void (*callback) (struct device *dev,
1784 						  struct talitos_desc *desc,
1785 						  void *context, int error))
1786 {
1787 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1788 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1789 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1790 	struct device *dev = ctx->dev;
1791 	struct talitos_desc *desc = &edesc->desc;
1792 	int ret;
1793 	bool sync_needed = false;
1794 	struct talitos_private *priv = dev_get_drvdata(dev);
1795 	bool is_sec1 = has_ftr_sec1(priv);
1796 	int sg_count;
1797 
1798 	/* first DWORD empty */
1799 
1800 	/* hash context in */
1801 	if (!req_ctx->first || req_ctx->swinit) {
1802 		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1803 					      req_ctx->hw_context_size,
1804 					      req_ctx->hw_context,
1805 					      DMA_TO_DEVICE);
1806 		req_ctx->swinit = 0;
1807 	}
1808 	/* Indicate next op is not the first. */
1809 	req_ctx->first = 0;
1810 
1811 	/* HMAC key */
1812 	if (ctx->keylen)
1813 		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1814 			       is_sec1);
1815 
1816 	if (is_sec1 && req_ctx->nbuf)
1817 		length -= req_ctx->nbuf;
1818 
1819 	sg_count = edesc->src_nents ?: 1;
1820 	if (is_sec1 && sg_count > 1)
1821 		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1822 	else if (length)
1823 		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1824 				      DMA_TO_DEVICE);
1825 	/*
1826 	 * data in
1827 	 */
1828 	if (is_sec1 && req_ctx->nbuf) {
1829 		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1830 				       req_ctx->buf[req_ctx->buf_idx],
1831 				       DMA_TO_DEVICE);
1832 	} else {
1833 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1834 					  &desc->ptr[3], sg_count, 0, 0);
1835 		if (sg_count > 1)
1836 			sync_needed = true;
1837 	}
1838 
1839 	/* fifth DWORD empty */
1840 
1841 	/* hash/HMAC out -or- hash context out */
1842 	if (req_ctx->last)
1843 		map_single_talitos_ptr(dev, &desc->ptr[5],
1844 				       crypto_ahash_digestsize(tfm),
1845 				       req_ctx->hw_context, DMA_FROM_DEVICE);
1846 	else
1847 		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1848 					      req_ctx->hw_context_size,
1849 					      req_ctx->hw_context,
1850 					      DMA_FROM_DEVICE);
1851 
1852 	/* last DWORD empty */
1853 
1854 	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1855 		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1856 
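	/*
	 * SEC1 with both previously buffered bytes (in desc->ptr[3]) and
	 * fresh request data: build a second descriptor for the new data
	 * and chain it through next_desc.  Only the chained descriptor
	 * pads (when this is the last piece) and raises the done
	 * notification.
	 */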
1857 	if (is_sec1 && req_ctx->nbuf && length) {
1858 		struct talitos_desc *desc2 = (struct talitos_desc *)
1859 					     (edesc->buf + edesc->dma_len);
1860 		dma_addr_t next_desc;
1861 
1862 		memset(desc2, 0, sizeof(*desc2));
1863 		desc2->hdr = desc->hdr;
1864 		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1865 		desc2->hdr1 = desc2->hdr;
1866 		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1867 		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1868 		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1869 
1870 		if (desc->ptr[1].ptr)
1871 			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1872 					 is_sec1);
1873 		else
1874 			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1875 						      req_ctx->hw_context_size,
1876 						      req_ctx->hw_context,
1877 						      DMA_TO_DEVICE);
1878 		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1879 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1880 					  &desc2->ptr[3], sg_count, 0, 0);
1881 		if (sg_count > 1)
1882 			sync_needed = true;
1883 		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1884 		if (req_ctx->last)
1885 			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1886 						      req_ctx->hw_context_size,
1887 						      req_ctx->hw_context,
1888 						      DMA_FROM_DEVICE);
1889 
1890 		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1891 					   DMA_BIDIRECTIONAL);
1892 		desc->next_desc = cpu_to_be32(next_desc);
1893 	}
1894 
1895 	if (sync_needed)
1896 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1897 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1898 
1899 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1900 	if (ret != -EINPROGRESS) {
1901 		common_nonsnoop_hash_unmap(dev, edesc, areq);
1902 		kfree(edesc);
1903 	}
1904 	return ret;
1905 }
1906 
1907 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1908 					       unsigned int nbytes)
1909 {
1910 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1911 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1912 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1913 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1914 	bool is_sec1 = has_ftr_sec1(priv);
1915 
1916 	if (is_sec1)
1917 		nbytes -= req_ctx->nbuf;
1918 
1919 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1920 				   nbytes, 0, 0, 0, areq->base.flags, false);
1921 }
1922 
1923 static int ahash_init(struct ahash_request *areq)
1924 {
1925 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1926 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1927 	struct device *dev = ctx->dev;
1928 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1929 	unsigned int size;
1930 	dma_addr_t dma;
1931 
1932 	/* Initialize the context */
1933 	req_ctx->buf_idx = 0;
1934 	req_ctx->nbuf = 0;
1935 	req_ctx->first = 1; /* first indicates h/w must init its context */
1936 	req_ctx->swinit = 0; /* assume h/w init of context */
1937 	size =	(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1938 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1939 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1940 	req_ctx->hw_context_size = size;
1941 
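	/*
	 * Map and immediately unmap the context once: mapping for
	 * DMA_TO_DEVICE cleans hw_context out of the CPU cache, which
	 * the later "nosync" mappings of the live context appear to
	 * rely on.
	 */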
1942 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1943 			     DMA_TO_DEVICE);
1944 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1945 
1946 	return 0;
1947 }
1948 
1949 /*
1950  * On h/w without explicit sha224 support, we initialize the h/w context
1951  * manually with the sha224 initial constants and tell it to run sha256.
1952  */
1953 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1954 {
1955 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1956 
1957 	req_ctx->hw_context[0] = SHA224_H0;
1958 	req_ctx->hw_context[1] = SHA224_H1;
1959 	req_ctx->hw_context[2] = SHA224_H2;
1960 	req_ctx->hw_context[3] = SHA224_H3;
1961 	req_ctx->hw_context[4] = SHA224_H4;
1962 	req_ctx->hw_context[5] = SHA224_H5;
1963 	req_ctx->hw_context[6] = SHA224_H6;
1964 	req_ctx->hw_context[7] = SHA224_H7;
1965 
1966 	/* init 64-bit count */
1967 	req_ctx->hw_context[8] = 0;
1968 	req_ctx->hw_context[9] = 0;
1969 
1970 	ahash_init(areq);
1971 	req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 */
1972 
1973 	return 0;
1974 }
1975 
1976 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1977 {
1978 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1979 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1980 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1981 	struct talitos_edesc *edesc;
1982 	unsigned int blocksize =
1983 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1984 	unsigned int nbytes_to_hash;
1985 	unsigned int to_hash_later;
1986 	unsigned int nsg;
1987 	int nents;
1988 	struct device *dev = ctx->dev;
1989 	struct talitos_private *priv = dev_get_drvdata(dev);
1990 	bool is_sec1 = has_ftr_sec1(priv);
1991 	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1992 
1993 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1994 		/* Buffer up to one whole block */
1995 		nents = sg_nents_for_len(areq->src, nbytes);
1996 		if (nents < 0) {
1997 			dev_err(ctx->dev, "Invalid number of src SG.\n");
1998 			return nents;
1999 		}
2000 		sg_copy_to_buffer(areq->src, nents,
2001 				  ctx_buf + req_ctx->nbuf, nbytes);
2002 		req_ctx->nbuf += nbytes;
2003 		return 0;
2004 	}
2005 
2006 	/* At least (blocksize + 1) bytes are available to hash */
2007 	nbytes_to_hash = nbytes + req_ctx->nbuf;
2008 	to_hash_later = nbytes_to_hash & (blocksize - 1);
2009 
2010 	if (req_ctx->last)
2011 		to_hash_later = 0;
2012 	else if (to_hash_later)
2013 		/* There is a partial block. Hash the full block(s) now */
2014 		nbytes_to_hash -= to_hash_later;
2015 	else {
2016 		/* Keep one block buffered */
2017 		nbytes_to_hash -= blocksize;
2018 		to_hash_later = blocksize;
2019 	}
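	/*
	 * Worked example for an update with blocksize 64: nbuf = 10
	 * buffered plus nbytes = 100 incoming gives nbytes_to_hash = 110
	 * and to_hash_later = 110 & 63 = 46, so 64 bytes are hashed now
	 * and 46 are carried over to the next call.
	 */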
2020 
2021 	/* Chain in any previously buffered data */
2022 	if (!is_sec1 && req_ctx->nbuf) {
2023 		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2024 		sg_init_table(req_ctx->bufsl, nsg);
2025 		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2026 		if (nsg > 1)
2027 			sg_chain(req_ctx->bufsl, 2, areq->src);
2028 		req_ctx->psrc = req_ctx->bufsl;
2029 	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2030 		int offset;
2031 
2032 		if (nbytes_to_hash > blocksize)
2033 			offset = blocksize - req_ctx->nbuf;
2034 		else
2035 			offset = nbytes_to_hash - req_ctx->nbuf;
2036 		nents = sg_nents_for_len(areq->src, offset);
2037 		if (nents < 0) {
2038 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2039 			return nents;
2040 		}
2041 		sg_copy_to_buffer(areq->src, nents,
2042 				  ctx_buf + req_ctx->nbuf, offset);
2043 		req_ctx->nbuf += offset;
2044 		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2045 						 offset);
2046 	} else
2047 		req_ctx->psrc = areq->src;
2048 
2049 	if (to_hash_later) {
2050 		nents = sg_nents_for_len(areq->src, nbytes);
2051 		if (nents < 0) {
2052 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2053 			return nents;
2054 		}
2055 		sg_pcopy_to_buffer(areq->src, nents,
2056 				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2057 				   to_hash_later,
2058 				   nbytes - to_hash_later);
2059 	}
2060 	req_ctx->to_hash_later = to_hash_later;
2061 
2062 	/* Allocate extended descriptor */
2063 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2064 	if (IS_ERR(edesc))
2065 		return PTR_ERR(edesc);
2066 
2067 	edesc->desc.hdr = ctx->desc_hdr_template;
2068 
2069 	/* On last one, request SEC to pad; otherwise continue */
2070 	if (req_ctx->last)
2071 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2072 	else
2073 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2074 
2075 	/* request SEC to INIT hash. */
2076 	if (req_ctx->first && !req_ctx->swinit)
2077 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2078 
2079 	/* When the tfm context has a keylen, it's an HMAC.
2080 	 * A first or last (i.e. not middle) descriptor must request HMAC.
2081 	 */
2082 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2083 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2084 
2085 	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2086 }
2087 
2088 static int ahash_update(struct ahash_request *areq)
2089 {
2090 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2091 
2092 	req_ctx->last = 0;
2093 
2094 	return ahash_process_req(areq, areq->nbytes);
2095 }
2096 
2097 static int ahash_final(struct ahash_request *areq)
2098 {
2099 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2100 
2101 	req_ctx->last = 1;
2102 
2103 	return ahash_process_req(areq, 0);
2104 }
2105 
2106 static int ahash_finup(struct ahash_request *areq)
2107 {
2108 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2109 
2110 	req_ctx->last = 1;
2111 
2112 	return ahash_process_req(areq, areq->nbytes);
2113 }
2114 
2115 static int ahash_digest(struct ahash_request *areq)
2116 {
2117 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2118 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2119 
2120 	ahash->init(areq);
2121 	req_ctx->last = 1;
2122 
2123 	return ahash_process_req(areq, areq->nbytes);
2124 }
2125 
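/*
 * Caller-side sketch (illustrative only): a one-shot digest lands in
 * ahash_digest() above, e.g. via "sha256-talitos".  data/out are
 * hypothetical buffers and error handling is elided:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */
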
2126 static int ahash_export(struct ahash_request *areq, void *out)
2127 {
2128 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2129 	struct talitos_export_state *export = out;
2130 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2131 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2132 	struct device *dev = ctx->dev;
2133 	dma_addr_t dma;
2134 
2135 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2136 			     DMA_FROM_DEVICE);
2137 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2138 
2139 	memcpy(export->hw_context, req_ctx->hw_context,
2140 	       req_ctx->hw_context_size);
2141 	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2142 	export->swinit = req_ctx->swinit;
2143 	export->first = req_ctx->first;
2144 	export->last = req_ctx->last;
2145 	export->to_hash_later = req_ctx->to_hash_later;
2146 	export->nbuf = req_ctx->nbuf;
2147 
2148 	return 0;
2149 }
2150 
2151 static int ahash_import(struct ahash_request *areq, const void *in)
2152 {
2153 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2154 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2155 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2156 	struct device *dev = ctx->dev;
2157 	const struct talitos_export_state *export = in;
2158 	unsigned int size;
2159 	dma_addr_t dma;
2160 
2161 	memset(req_ctx, 0, sizeof(*req_ctx));
2162 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2163 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2164 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2165 	req_ctx->hw_context_size = size;
2166 	memcpy(req_ctx->hw_context, export->hw_context, size);
2167 	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2168 	req_ctx->swinit = export->swinit;
2169 	req_ctx->first = export->first;
2170 	req_ctx->last = export->last;
2171 	req_ctx->to_hash_later = export->to_hash_later;
2172 	req_ctx->nbuf = export->nbuf;
2173 
2174 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2175 			     DMA_TO_DEVICE);
2176 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2177 
2178 	return 0;
2179 }
2180 
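/*
 * Schematic save/restore round trip served by the two helpers above
 * (illustrative; asynchronous completion and error handling elided,
 * state is crypto_ahash_statesize() bytes, i.e. sizeof(struct
 * talitos_export_state) here):
 *
 *	u8 state[sizeof(struct talitos_export_state)];
 *
 *	crypto_ahash_update(req);         hash some data
 *	crypto_ahash_export(req, state);  lands in ahash_export()
 *	...
 *	crypto_ahash_import(req, state);  lands in ahash_import()
 *	crypto_ahash_finup(req);          resume and finish
 */
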
2181 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2182 		   u8 *hash)
2183 {
2184 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2185 
2186 	struct scatterlist sg[1];
2187 	struct ahash_request *req;
2188 	struct crypto_wait wait;
2189 	int ret;
2190 
2191 	crypto_init_wait(&wait);
2192 
2193 	req = ahash_request_alloc(tfm, GFP_KERNEL);
2194 	if (!req)
2195 		return -ENOMEM;
2196 
2197 	/* Keep tfm keylen == 0 during hash of the long key */
2198 	ctx->keylen = 0;
2199 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2200 				   crypto_req_done, &wait);
2201 
2202 	sg_init_one(&sg[0], key, keylen);
2203 
2204 	ahash_request_set_crypt(req, sg, hash, keylen);
2205 	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2206 
2207 	ahash_request_free(req);
2208 
2209 	return ret;
2210 }
2211 
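/*
 * Standard HMAC key preparation (RFC 2104): a key longer than the
 * block size is first replaced by its digest, so ctx->key always fits
 * in a single hash block.
 */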
2212 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2213 			unsigned int keylen)
2214 {
2215 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2216 	struct device *dev = ctx->dev;
2217 	unsigned int blocksize =
2218 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2219 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2220 	unsigned int keysize = keylen;
2221 	u8 hash[SHA512_DIGEST_SIZE];
2222 	int ret;
2223 
2224 	if (keylen <= blocksize) {
2225 		memcpy(ctx->key, key, keysize);
2226 	} else {
2227 		/* Must get the hash of the long key */
2228 		ret = keyhash(tfm, key, keylen, hash);
2229 
2230 		if (ret)
2231 			return -EINVAL;
2232 
2233 		keysize = digestsize;
2234 		memcpy(ctx->key, hash, digestsize);
2235 	}
2236 
2237 	if (ctx->keylen)
2238 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2239 
2240 	ctx->keylen = keysize;
2241 	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2242 
2243 	return 0;
2244 }
2245 
2246 
2247 struct talitos_alg_template {
2248 	u32 type;
2249 	u32 priority;
2250 	union {
2251 		struct skcipher_alg skcipher;
2252 		struct ahash_alg hash;
2253 		struct aead_alg aead;
2254 	} alg;
2255 	__be32 desc_hdr_template;
2256 };
2257 
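/*
 * Each entry below is only a template: talitos_alg_alloc() copies it,
 * fills in the common init/setkey/encrypt/decrypt (or hash) ops, and
 * the probe path registers just those algorithms whose descriptor type
 * and execution units pass hw_supports() on the SEC at hand.
 */
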
2258 static struct talitos_alg_template driver_algs[] = {
2259 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2260 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2261 		.alg.aead = {
2262 			.base = {
2263 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2264 				.cra_driver_name = "authenc-hmac-sha1-"
2265 						   "cbc-aes-talitos",
2266 				.cra_blocksize = AES_BLOCK_SIZE,
2267 				.cra_flags = CRYPTO_ALG_ASYNC |
2268 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2269 			},
2270 			.ivsize = AES_BLOCK_SIZE,
2271 			.maxauthsize = SHA1_DIGEST_SIZE,
2272 		},
2273 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2274 				     DESC_HDR_SEL0_AESU |
2275 				     DESC_HDR_MODE0_AESU_CBC |
2276 				     DESC_HDR_SEL1_MDEUA |
2277 				     DESC_HDR_MODE1_MDEU_INIT |
2278 				     DESC_HDR_MODE1_MDEU_PAD |
2279 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2280 	},
2281 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2282 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2283 		.alg.aead = {
2284 			.base = {
2285 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2286 				.cra_driver_name = "authenc-hmac-sha1-"
2287 						   "cbc-aes-talitos-hsna",
2288 				.cra_blocksize = AES_BLOCK_SIZE,
2289 				.cra_flags = CRYPTO_ALG_ASYNC |
2290 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2291 			},
2292 			.ivsize = AES_BLOCK_SIZE,
2293 			.maxauthsize = SHA1_DIGEST_SIZE,
2294 		},
2295 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2296 				     DESC_HDR_SEL0_AESU |
2297 				     DESC_HDR_MODE0_AESU_CBC |
2298 				     DESC_HDR_SEL1_MDEUA |
2299 				     DESC_HDR_MODE1_MDEU_INIT |
2300 				     DESC_HDR_MODE1_MDEU_PAD |
2301 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2302 	},
2303 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2304 		.alg.aead = {
2305 			.base = {
2306 				.cra_name = "authenc(hmac(sha1),"
2307 					    "cbc(des3_ede))",
2308 				.cra_driver_name = "authenc-hmac-sha1-"
2309 						   "cbc-3des-talitos",
2310 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2311 				.cra_flags = CRYPTO_ALG_ASYNC |
2312 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2313 			},
2314 			.ivsize = DES3_EDE_BLOCK_SIZE,
2315 			.maxauthsize = SHA1_DIGEST_SIZE,
2316 			.setkey = aead_des3_setkey,
2317 		},
2318 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2319 				     DESC_HDR_SEL0_DEU |
2320 				     DESC_HDR_MODE0_DEU_CBC |
2321 				     DESC_HDR_MODE0_DEU_3DES |
2322 				     DESC_HDR_SEL1_MDEUA |
2323 				     DESC_HDR_MODE1_MDEU_INIT |
2324 				     DESC_HDR_MODE1_MDEU_PAD |
2325 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2326 	},
2327 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2328 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2329 		.alg.aead = {
2330 			.base = {
2331 				.cra_name = "authenc(hmac(sha1),"
2332 					    "cbc(des3_ede))",
2333 				.cra_driver_name = "authenc-hmac-sha1-"
2334 						   "cbc-3des-talitos-hsna",
2335 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2336 				.cra_flags = CRYPTO_ALG_ASYNC |
2337 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2338 			},
2339 			.ivsize = DES3_EDE_BLOCK_SIZE,
2340 			.maxauthsize = SHA1_DIGEST_SIZE,
2341 			.setkey = aead_des3_setkey,
2342 		},
2343 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2344 				     DESC_HDR_SEL0_DEU |
2345 				     DESC_HDR_MODE0_DEU_CBC |
2346 				     DESC_HDR_MODE0_DEU_3DES |
2347 				     DESC_HDR_SEL1_MDEUA |
2348 				     DESC_HDR_MODE1_MDEU_INIT |
2349 				     DESC_HDR_MODE1_MDEU_PAD |
2350 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2351 	},
2352 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2353 		.alg.aead = {
2354 			.base = {
2355 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2356 				.cra_driver_name = "authenc-hmac-sha224-"
2357 						   "cbc-aes-talitos",
2358 				.cra_blocksize = AES_BLOCK_SIZE,
2359 				.cra_flags = CRYPTO_ALG_ASYNC |
2360 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2361 			},
2362 			.ivsize = AES_BLOCK_SIZE,
2363 			.maxauthsize = SHA224_DIGEST_SIZE,
2364 		},
2365 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2366 				     DESC_HDR_SEL0_AESU |
2367 				     DESC_HDR_MODE0_AESU_CBC |
2368 				     DESC_HDR_SEL1_MDEUA |
2369 				     DESC_HDR_MODE1_MDEU_INIT |
2370 				     DESC_HDR_MODE1_MDEU_PAD |
2371 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2372 	},
2373 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2374 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2375 		.alg.aead = {
2376 			.base = {
2377 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2378 				.cra_driver_name = "authenc-hmac-sha224-"
2379 						   "cbc-aes-talitos-hsna",
2380 				.cra_blocksize = AES_BLOCK_SIZE,
2381 				.cra_flags = CRYPTO_ALG_ASYNC |
2382 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2383 			},
2384 			.ivsize = AES_BLOCK_SIZE,
2385 			.maxauthsize = SHA224_DIGEST_SIZE,
2386 		},
2387 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2388 				     DESC_HDR_SEL0_AESU |
2389 				     DESC_HDR_MODE0_AESU_CBC |
2390 				     DESC_HDR_SEL1_MDEUA |
2391 				     DESC_HDR_MODE1_MDEU_INIT |
2392 				     DESC_HDR_MODE1_MDEU_PAD |
2393 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2394 	},
2395 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2396 		.alg.aead = {
2397 			.base = {
2398 				.cra_name = "authenc(hmac(sha224),"
2399 					    "cbc(des3_ede))",
2400 				.cra_driver_name = "authenc-hmac-sha224-"
2401 						   "cbc-3des-talitos",
2402 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2403 				.cra_flags = CRYPTO_ALG_ASYNC |
2404 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2405 			},
2406 			.ivsize = DES3_EDE_BLOCK_SIZE,
2407 			.maxauthsize = SHA224_DIGEST_SIZE,
2408 			.setkey = aead_des3_setkey,
2409 		},
2410 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2411 				     DESC_HDR_SEL0_DEU |
2412 				     DESC_HDR_MODE0_DEU_CBC |
2413 				     DESC_HDR_MODE0_DEU_3DES |
2414 				     DESC_HDR_SEL1_MDEUA |
2415 				     DESC_HDR_MODE1_MDEU_INIT |
2416 				     DESC_HDR_MODE1_MDEU_PAD |
2417 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2418 	},
2419 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2420 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2421 		.alg.aead = {
2422 			.base = {
2423 				.cra_name = "authenc(hmac(sha224),"
2424 					    "cbc(des3_ede))",
2425 				.cra_driver_name = "authenc-hmac-sha224-"
2426 						   "cbc-3des-talitos-hsna",
2427 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2428 				.cra_flags = CRYPTO_ALG_ASYNC |
2429 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2430 			},
2431 			.ivsize = DES3_EDE_BLOCK_SIZE,
2432 			.maxauthsize = SHA224_DIGEST_SIZE,
2433 			.setkey = aead_des3_setkey,
2434 		},
2435 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2436 				     DESC_HDR_SEL0_DEU |
2437 				     DESC_HDR_MODE0_DEU_CBC |
2438 				     DESC_HDR_MODE0_DEU_3DES |
2439 				     DESC_HDR_SEL1_MDEUA |
2440 				     DESC_HDR_MODE1_MDEU_INIT |
2441 				     DESC_HDR_MODE1_MDEU_PAD |
2442 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2443 	},
2444 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2445 		.alg.aead = {
2446 			.base = {
2447 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2448 				.cra_driver_name = "authenc-hmac-sha256-"
2449 						   "cbc-aes-talitos",
2450 				.cra_blocksize = AES_BLOCK_SIZE,
2451 				.cra_flags = CRYPTO_ALG_ASYNC |
2452 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2453 			},
2454 			.ivsize = AES_BLOCK_SIZE,
2455 			.maxauthsize = SHA256_DIGEST_SIZE,
2456 		},
2457 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2458 				     DESC_HDR_SEL0_AESU |
2459 				     DESC_HDR_MODE0_AESU_CBC |
2460 				     DESC_HDR_SEL1_MDEUA |
2461 				     DESC_HDR_MODE1_MDEU_INIT |
2462 				     DESC_HDR_MODE1_MDEU_PAD |
2463 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2464 	},
2465 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2466 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2467 		.alg.aead = {
2468 			.base = {
2469 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2470 				.cra_driver_name = "authenc-hmac-sha256-"
2471 						   "cbc-aes-talitos-hsna",
2472 				.cra_blocksize = AES_BLOCK_SIZE,
2473 				.cra_flags = CRYPTO_ALG_ASYNC |
2474 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2475 			},
2476 			.ivsize = AES_BLOCK_SIZE,
2477 			.maxauthsize = SHA256_DIGEST_SIZE,
2478 		},
2479 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2480 				     DESC_HDR_SEL0_AESU |
2481 				     DESC_HDR_MODE0_AESU_CBC |
2482 				     DESC_HDR_SEL1_MDEUA |
2483 				     DESC_HDR_MODE1_MDEU_INIT |
2484 				     DESC_HDR_MODE1_MDEU_PAD |
2485 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2486 	},
2487 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2488 		.alg.aead = {
2489 			.base = {
2490 				.cra_name = "authenc(hmac(sha256),"
2491 					    "cbc(des3_ede))",
2492 				.cra_driver_name = "authenc-hmac-sha256-"
2493 						   "cbc-3des-talitos",
2494 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2495 				.cra_flags = CRYPTO_ALG_ASYNC |
2496 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2497 			},
2498 			.ivsize = DES3_EDE_BLOCK_SIZE,
2499 			.maxauthsize = SHA256_DIGEST_SIZE,
2500 			.setkey = aead_des3_setkey,
2501 		},
2502 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2503 				     DESC_HDR_SEL0_DEU |
2504 				     DESC_HDR_MODE0_DEU_CBC |
2505 				     DESC_HDR_MODE0_DEU_3DES |
2506 				     DESC_HDR_SEL1_MDEUA |
2507 				     DESC_HDR_MODE1_MDEU_INIT |
2508 				     DESC_HDR_MODE1_MDEU_PAD |
2509 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2510 	},
2511 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2512 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2513 		.alg.aead = {
2514 			.base = {
2515 				.cra_name = "authenc(hmac(sha256),"
2516 					    "cbc(des3_ede))",
2517 				.cra_driver_name = "authenc-hmac-sha256-"
2518 						   "cbc-3des-talitos-hsna",
2519 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2520 				.cra_flags = CRYPTO_ALG_ASYNC |
2521 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2522 			},
2523 			.ivsize = DES3_EDE_BLOCK_SIZE,
2524 			.maxauthsize = SHA256_DIGEST_SIZE,
2525 			.setkey = aead_des3_setkey,
2526 		},
2527 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2528 				     DESC_HDR_SEL0_DEU |
2529 				     DESC_HDR_MODE0_DEU_CBC |
2530 				     DESC_HDR_MODE0_DEU_3DES |
2531 				     DESC_HDR_SEL1_MDEUA |
2532 				     DESC_HDR_MODE1_MDEU_INIT |
2533 				     DESC_HDR_MODE1_MDEU_PAD |
2534 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2535 	},
2536 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2537 		.alg.aead = {
2538 			.base = {
2539 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2540 				.cra_driver_name = "authenc-hmac-sha384-"
2541 						   "cbc-aes-talitos",
2542 				.cra_blocksize = AES_BLOCK_SIZE,
2543 				.cra_flags = CRYPTO_ALG_ASYNC |
2544 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2545 			},
2546 			.ivsize = AES_BLOCK_SIZE,
2547 			.maxauthsize = SHA384_DIGEST_SIZE,
2548 		},
2549 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2550 				     DESC_HDR_SEL0_AESU |
2551 				     DESC_HDR_MODE0_AESU_CBC |
2552 				     DESC_HDR_SEL1_MDEUB |
2553 				     DESC_HDR_MODE1_MDEU_INIT |
2554 				     DESC_HDR_MODE1_MDEU_PAD |
2555 				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2556 	},
2557 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2558 		.alg.aead = {
2559 			.base = {
2560 				.cra_name = "authenc(hmac(sha384),"
2561 					    "cbc(des3_ede))",
2562 				.cra_driver_name = "authenc-hmac-sha384-"
2563 						   "cbc-3des-talitos",
2564 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2565 				.cra_flags = CRYPTO_ALG_ASYNC |
2566 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2567 			},
2568 			.ivsize = DES3_EDE_BLOCK_SIZE,
2569 			.maxauthsize = SHA384_DIGEST_SIZE,
2570 			.setkey = aead_des3_setkey,
2571 		},
2572 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2573 				     DESC_HDR_SEL0_DEU |
2574 				     DESC_HDR_MODE0_DEU_CBC |
2575 				     DESC_HDR_MODE0_DEU_3DES |
2576 				     DESC_HDR_SEL1_MDEUB |
2577 				     DESC_HDR_MODE1_MDEU_INIT |
2578 				     DESC_HDR_MODE1_MDEU_PAD |
2579 				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2580 	},
2581 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2582 		.alg.aead = {
2583 			.base = {
2584 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2585 				.cra_driver_name = "authenc-hmac-sha512-"
2586 						   "cbc-aes-talitos",
2587 				.cra_blocksize = AES_BLOCK_SIZE,
2588 				.cra_flags = CRYPTO_ALG_ASYNC |
2589 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2590 			},
2591 			.ivsize = AES_BLOCK_SIZE,
2592 			.maxauthsize = SHA512_DIGEST_SIZE,
2593 		},
2594 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2595 				     DESC_HDR_SEL0_AESU |
2596 				     DESC_HDR_MODE0_AESU_CBC |
2597 				     DESC_HDR_SEL1_MDEUB |
2598 				     DESC_HDR_MODE1_MDEU_INIT |
2599 				     DESC_HDR_MODE1_MDEU_PAD |
2600 				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2601 	},
2602 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2603 		.alg.aead = {
2604 			.base = {
2605 				.cra_name = "authenc(hmac(sha512),"
2606 					    "cbc(des3_ede))",
2607 				.cra_driver_name = "authenc-hmac-sha512-"
2608 						   "cbc-3des-talitos",
2609 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2610 				.cra_flags = CRYPTO_ALG_ASYNC |
2611 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2612 			},
2613 			.ivsize = DES3_EDE_BLOCK_SIZE,
2614 			.maxauthsize = SHA512_DIGEST_SIZE,
2615 			.setkey = aead_des3_setkey,
2616 		},
2617 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2618 				     DESC_HDR_SEL0_DEU |
2619 				     DESC_HDR_MODE0_DEU_CBC |
2620 				     DESC_HDR_MODE0_DEU_3DES |
2621 				     DESC_HDR_SEL1_MDEUB |
2622 				     DESC_HDR_MODE1_MDEU_INIT |
2623 				     DESC_HDR_MODE1_MDEU_PAD |
2624 				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2625 	},
2626 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2627 		.alg.aead = {
2628 			.base = {
2629 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2630 				.cra_driver_name = "authenc-hmac-md5-"
2631 						   "cbc-aes-talitos",
2632 				.cra_blocksize = AES_BLOCK_SIZE,
2633 				.cra_flags = CRYPTO_ALG_ASYNC |
2634 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2635 			},
2636 			.ivsize = AES_BLOCK_SIZE,
2637 			.maxauthsize = MD5_DIGEST_SIZE,
2638 		},
2639 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2640 				     DESC_HDR_SEL0_AESU |
2641 				     DESC_HDR_MODE0_AESU_CBC |
2642 				     DESC_HDR_SEL1_MDEUA |
2643 				     DESC_HDR_MODE1_MDEU_INIT |
2644 				     DESC_HDR_MODE1_MDEU_PAD |
2645 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2646 	},
2647 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2648 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2649 		.alg.aead = {
2650 			.base = {
2651 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2652 				.cra_driver_name = "authenc-hmac-md5-"
2653 						   "cbc-aes-talitos-hsna",
2654 				.cra_blocksize = AES_BLOCK_SIZE,
2655 				.cra_flags = CRYPTO_ALG_ASYNC |
2656 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2657 			},
2658 			.ivsize = AES_BLOCK_SIZE,
2659 			.maxauthsize = MD5_DIGEST_SIZE,
2660 		},
2661 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2662 				     DESC_HDR_SEL0_AESU |
2663 				     DESC_HDR_MODE0_AESU_CBC |
2664 				     DESC_HDR_SEL1_MDEUA |
2665 				     DESC_HDR_MODE1_MDEU_INIT |
2666 				     DESC_HDR_MODE1_MDEU_PAD |
2667 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2668 	},
2669 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2670 		.alg.aead = {
2671 			.base = {
2672 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2673 				.cra_driver_name = "authenc-hmac-md5-"
2674 						   "cbc-3des-talitos",
2675 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2676 				.cra_flags = CRYPTO_ALG_ASYNC |
2677 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2678 			},
2679 			.ivsize = DES3_EDE_BLOCK_SIZE,
2680 			.maxauthsize = MD5_DIGEST_SIZE,
2681 			.setkey = aead_des3_setkey,
2682 		},
2683 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2684 				     DESC_HDR_SEL0_DEU |
2685 				     DESC_HDR_MODE0_DEU_CBC |
2686 				     DESC_HDR_MODE0_DEU_3DES |
2687 				     DESC_HDR_SEL1_MDEUA |
2688 				     DESC_HDR_MODE1_MDEU_INIT |
2689 				     DESC_HDR_MODE1_MDEU_PAD |
2690 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2691 	},
2692 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2693 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2694 		.alg.aead = {
2695 			.base = {
2696 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2697 				.cra_driver_name = "authenc-hmac-md5-"
2698 						   "cbc-3des-talitos-hsna",
2699 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2700 				.cra_flags = CRYPTO_ALG_ASYNC |
2701 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2702 			},
2703 			.ivsize = DES3_EDE_BLOCK_SIZE,
2704 			.maxauthsize = MD5_DIGEST_SIZE,
2705 			.setkey = aead_des3_setkey,
2706 		},
2707 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2708 				     DESC_HDR_SEL0_DEU |
2709 				     DESC_HDR_MODE0_DEU_CBC |
2710 				     DESC_HDR_MODE0_DEU_3DES |
2711 				     DESC_HDR_SEL1_MDEUA |
2712 				     DESC_HDR_MODE1_MDEU_INIT |
2713 				     DESC_HDR_MODE1_MDEU_PAD |
2714 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2715 	},
2716 	/* SKCIPHER algorithms. */
2717 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2718 		.alg.skcipher = {
2719 			.base.cra_name = "ecb(aes)",
2720 			.base.cra_driver_name = "ecb-aes-talitos",
2721 			.base.cra_blocksize = AES_BLOCK_SIZE,
2722 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2723 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2724 			.min_keysize = AES_MIN_KEY_SIZE,
2725 			.max_keysize = AES_MAX_KEY_SIZE,
2726 			.setkey = skcipher_aes_setkey,
2727 		},
2728 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2729 				     DESC_HDR_SEL0_AESU,
2730 	},
2731 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2732 		.alg.skcipher = {
2733 			.base.cra_name = "cbc(aes)",
2734 			.base.cra_driver_name = "cbc-aes-talitos",
2735 			.base.cra_blocksize = AES_BLOCK_SIZE,
2736 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2737 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2738 			.min_keysize = AES_MIN_KEY_SIZE,
2739 			.max_keysize = AES_MAX_KEY_SIZE,
2740 			.ivsize = AES_BLOCK_SIZE,
2741 			.setkey = skcipher_aes_setkey,
2742 		},
2743 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2744 				     DESC_HDR_SEL0_AESU |
2745 				     DESC_HDR_MODE0_AESU_CBC,
2746 	},
2747 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2748 		.alg.skcipher = {
2749 			.base.cra_name = "ctr(aes)",
2750 			.base.cra_driver_name = "ctr-aes-talitos",
2751 			.base.cra_blocksize = 1,
2752 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2753 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2754 			.min_keysize = AES_MIN_KEY_SIZE,
2755 			.max_keysize = AES_MAX_KEY_SIZE,
2756 			.ivsize = AES_BLOCK_SIZE,
2757 			.setkey = skcipher_aes_setkey,
2758 		},
2759 		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2760 				     DESC_HDR_SEL0_AESU |
2761 				     DESC_HDR_MODE0_AESU_CTR,
2762 	},
2763 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2764 		.alg.skcipher = {
2765 			.base.cra_name = "ecb(des)",
2766 			.base.cra_driver_name = "ecb-des-talitos",
2767 			.base.cra_blocksize = DES_BLOCK_SIZE,
2768 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2769 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2770 			.min_keysize = DES_KEY_SIZE,
2771 			.max_keysize = DES_KEY_SIZE,
2772 			.setkey = skcipher_des_setkey,
2773 		},
2774 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2775 				     DESC_HDR_SEL0_DEU,
2776 	},
2777 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2778 		.alg.skcipher = {
2779 			.base.cra_name = "cbc(des)",
2780 			.base.cra_driver_name = "cbc-des-talitos",
2781 			.base.cra_blocksize = DES_BLOCK_SIZE,
2782 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2783 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2784 			.min_keysize = DES_KEY_SIZE,
2785 			.max_keysize = DES_KEY_SIZE,
2786 			.ivsize = DES_BLOCK_SIZE,
2787 			.setkey = skcipher_des_setkey,
2788 		},
2789 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2790 				     DESC_HDR_SEL0_DEU |
2791 				     DESC_HDR_MODE0_DEU_CBC,
2792 	},
2793 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2794 		.alg.skcipher = {
2795 			.base.cra_name = "ecb(des3_ede)",
2796 			.base.cra_driver_name = "ecb-3des-talitos",
2797 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2798 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2799 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2800 			.min_keysize = DES3_EDE_KEY_SIZE,
2801 			.max_keysize = DES3_EDE_KEY_SIZE,
2802 			.setkey = skcipher_des3_setkey,
2803 		},
2804 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2805 				     DESC_HDR_SEL0_DEU |
2806 				     DESC_HDR_MODE0_DEU_3DES,
2807 	},
2808 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2809 		.alg.skcipher = {
2810 			.base.cra_name = "cbc(des3_ede)",
2811 			.base.cra_driver_name = "cbc-3des-talitos",
2812 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2813 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2814 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2815 			.min_keysize = DES3_EDE_KEY_SIZE,
2816 			.max_keysize = DES3_EDE_KEY_SIZE,
2817 			.ivsize = DES3_EDE_BLOCK_SIZE,
2818 			.setkey = skcipher_des3_setkey,
2819 		},
2820 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2821 				     DESC_HDR_SEL0_DEU |
2822 				     DESC_HDR_MODE0_DEU_CBC |
2823 				     DESC_HDR_MODE0_DEU_3DES,
2824 	},
2825 	/* AHASH algorithms. */
2826 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2827 		.alg.hash = {
2828 			.halg.digestsize = MD5_DIGEST_SIZE,
2829 			.halg.statesize = sizeof(struct talitos_export_state),
2830 			.halg.base = {
2831 				.cra_name = "md5",
2832 				.cra_driver_name = "md5-talitos",
2833 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2834 				.cra_flags = CRYPTO_ALG_ASYNC |
2835 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2836 			}
2837 		},
2838 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2839 				     DESC_HDR_SEL0_MDEUA |
2840 				     DESC_HDR_MODE0_MDEU_MD5,
2841 	},
2842 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2843 		.alg.hash = {
2844 			.halg.digestsize = SHA1_DIGEST_SIZE,
2845 			.halg.statesize = sizeof(struct talitos_export_state),
2846 			.halg.base = {
2847 				.cra_name = "sha1",
2848 				.cra_driver_name = "sha1-talitos",
2849 				.cra_blocksize = SHA1_BLOCK_SIZE,
2850 				.cra_flags = CRYPTO_ALG_ASYNC |
2851 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2852 			}
2853 		},
2854 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2855 				     DESC_HDR_SEL0_MDEUA |
2856 				     DESC_HDR_MODE0_MDEU_SHA1,
2857 	},
2858 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2859 		.alg.hash = {
2860 			.halg.digestsize = SHA224_DIGEST_SIZE,
2861 			.halg.statesize = sizeof(struct talitos_export_state),
2862 			.halg.base = {
2863 				.cra_name = "sha224",
2864 				.cra_driver_name = "sha224-talitos",
2865 				.cra_blocksize = SHA224_BLOCK_SIZE,
2866 				.cra_flags = CRYPTO_ALG_ASYNC |
2867 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2868 			}
2869 		},
2870 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2871 				     DESC_HDR_SEL0_MDEUA |
2872 				     DESC_HDR_MODE0_MDEU_SHA224,
2873 	},
2874 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2875 		.alg.hash = {
2876 			.halg.digestsize = SHA256_DIGEST_SIZE,
2877 			.halg.statesize = sizeof(struct talitos_export_state),
2878 			.halg.base = {
2879 				.cra_name = "sha256",
2880 				.cra_driver_name = "sha256-talitos",
2881 				.cra_blocksize = SHA256_BLOCK_SIZE,
2882 				.cra_flags = CRYPTO_ALG_ASYNC |
2883 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2884 			}
2885 		},
2886 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2887 				     DESC_HDR_SEL0_MDEUA |
2888 				     DESC_HDR_MODE0_MDEU_SHA256,
2889 	},
2890 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2891 		.alg.hash = {
2892 			.halg.digestsize = SHA384_DIGEST_SIZE,
2893 			.halg.statesize = sizeof(struct talitos_export_state),
2894 			.halg.base = {
2895 				.cra_name = "sha384",
2896 				.cra_driver_name = "sha384-talitos",
2897 				.cra_blocksize = SHA384_BLOCK_SIZE,
2898 				.cra_flags = CRYPTO_ALG_ASYNC |
2899 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2900 			}
2901 		},
2902 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2903 				     DESC_HDR_SEL0_MDEUB |
2904 				     DESC_HDR_MODE0_MDEUB_SHA384,
2905 	},
2906 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2907 		.alg.hash = {
2908 			.halg.digestsize = SHA512_DIGEST_SIZE,
2909 			.halg.statesize = sizeof(struct talitos_export_state),
2910 			.halg.base = {
2911 				.cra_name = "sha512",
2912 				.cra_driver_name = "sha512-talitos",
2913 				.cra_blocksize = SHA512_BLOCK_SIZE,
2914 				.cra_flags = CRYPTO_ALG_ASYNC |
2915 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2916 			}
2917 		},
2918 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2919 				     DESC_HDR_SEL0_MDEUB |
2920 				     DESC_HDR_MODE0_MDEUB_SHA512,
2921 	},
2922 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2923 		.alg.hash = {
2924 			.halg.digestsize = MD5_DIGEST_SIZE,
2925 			.halg.statesize = sizeof(struct talitos_export_state),
2926 			.halg.base = {
2927 				.cra_name = "hmac(md5)",
2928 				.cra_driver_name = "hmac-md5-talitos",
2929 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2930 				.cra_flags = CRYPTO_ALG_ASYNC |
2931 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2932 			}
2933 		},
2934 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2935 				     DESC_HDR_SEL0_MDEUA |
2936 				     DESC_HDR_MODE0_MDEU_MD5,
2937 	},
2938 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2939 		.alg.hash = {
2940 			.halg.digestsize = SHA1_DIGEST_SIZE,
2941 			.halg.statesize = sizeof(struct talitos_export_state),
2942 			.halg.base = {
2943 				.cra_name = "hmac(sha1)",
2944 				.cra_driver_name = "hmac-sha1-talitos",
2945 				.cra_blocksize = SHA1_BLOCK_SIZE,
2946 				.cra_flags = CRYPTO_ALG_ASYNC |
2947 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2948 			}
2949 		},
2950 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2951 				     DESC_HDR_SEL0_MDEUA |
2952 				     DESC_HDR_MODE0_MDEU_SHA1,
2953 	},
2954 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2955 		.alg.hash = {
2956 			.halg.digestsize = SHA224_DIGEST_SIZE,
2957 			.halg.statesize = sizeof(struct talitos_export_state),
2958 			.halg.base = {
2959 				.cra_name = "hmac(sha224)",
2960 				.cra_driver_name = "hmac-sha224-talitos",
2961 				.cra_blocksize = SHA224_BLOCK_SIZE,
2962 				.cra_flags = CRYPTO_ALG_ASYNC |
2963 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2964 			}
2965 		},
2966 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2967 				     DESC_HDR_SEL0_MDEUA |
2968 				     DESC_HDR_MODE0_MDEU_SHA224,
2969 	},
2970 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2971 		.alg.hash = {
2972 			.halg.digestsize = SHA256_DIGEST_SIZE,
2973 			.halg.statesize = sizeof(struct talitos_export_state),
2974 			.halg.base = {
2975 				.cra_name = "hmac(sha256)",
2976 				.cra_driver_name = "hmac-sha256-talitos",
2977 				.cra_blocksize = SHA256_BLOCK_SIZE,
2978 				.cra_flags = CRYPTO_ALG_ASYNC |
2979 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2980 			}
2981 		},
2982 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2983 				     DESC_HDR_SEL0_MDEUA |
2984 				     DESC_HDR_MODE0_MDEU_SHA256,
2985 	},
2986 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2987 		.alg.hash = {
2988 			.halg.digestsize = SHA384_DIGEST_SIZE,
2989 			.halg.statesize = sizeof(struct talitos_export_state),
2990 			.halg.base = {
2991 				.cra_name = "hmac(sha384)",
2992 				.cra_driver_name = "hmac-sha384-talitos",
2993 				.cra_blocksize = SHA384_BLOCK_SIZE,
2994 				.cra_flags = CRYPTO_ALG_ASYNC |
2995 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2996 			}
2997 		},
2998 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2999 				     DESC_HDR_SEL0_MDEUB |
3000 				     DESC_HDR_MODE0_MDEUB_SHA384,
3001 	},
3002 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3003 		.alg.hash = {
3004 			.halg.digestsize = SHA512_DIGEST_SIZE,
3005 			.halg.statesize = sizeof(struct talitos_export_state),
3006 			.halg.base = {
3007 				.cra_name = "hmac(sha512)",
3008 				.cra_driver_name = "hmac-sha512-talitos",
3009 				.cra_blocksize = SHA512_BLOCK_SIZE,
3010 				.cra_flags = CRYPTO_ALG_ASYNC |
3011 					     CRYPTO_ALG_ALLOCATES_MEMORY,
3012 			}
3013 		},
3014 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3015 				     DESC_HDR_SEL0_MDEUB |
3016 				     DESC_HDR_MODE0_MDEUB_SHA512,
3017 	}
3018 };
3019 
3020 struct talitos_crypto_alg {
3021 	struct list_head entry;
3022 	struct device *dev;
3023 	struct talitos_alg_template algt;
3024 };
3025 
3026 static int talitos_init_common(struct talitos_ctx *ctx,
3027 			       struct talitos_crypto_alg *talitos_alg)
3028 {
3029 	struct talitos_private *priv;
3030 
3031 	/* update context with ptr to dev */
3032 	ctx->dev = talitos_alg->dev;
3033 
3034 	/* assign SEC channel to tfm in round-robin fashion */
3035 	priv = dev_get_drvdata(ctx->dev);
3036 	ctx->ch = atomic_inc_return(&priv->last_chan) &
3037 		  (priv->num_channels - 1);
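	/*
	 * The mask is an exact modulo: talitos_probe() rejects device
	 * trees where fsl,num-channels is not a power of two.
	 */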
3038 
3039 	/* copy descriptor header template value */
3040 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3041 
3042 	/* select done notification */
3043 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3044 
3045 	return 0;
3046 }
3047 
3048 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3049 {
3050 	struct aead_alg *alg = crypto_aead_alg(tfm);
3051 	struct talitos_crypto_alg *talitos_alg;
3052 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3053 
3054 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3055 				   algt.alg.aead);
3056 
3057 	return talitos_init_common(ctx, talitos_alg);
3058 }
3059 
3060 static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
3061 {
3062 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3063 	struct talitos_crypto_alg *talitos_alg;
3064 	struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
3065 
3066 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3067 				   algt.alg.skcipher);
3068 
3069 	return talitos_init_common(ctx, talitos_alg);
3070 }
3071 
3072 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3073 {
3074 	struct crypto_alg *alg = tfm->__crt_alg;
3075 	struct talitos_crypto_alg *talitos_alg;
3076 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3077 
3078 	talitos_alg = container_of(__crypto_ahash_alg(alg),
3079 				   struct talitos_crypto_alg,
3080 				   algt.alg.hash);
3081 
3082 	ctx->keylen = 0;
3083 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3084 				 sizeof(struct talitos_ahash_req_ctx));
3085 
3086 	return talitos_init_common(ctx, talitos_alg);
3087 }
3088 
3089 static void talitos_cra_exit(struct crypto_tfm *tfm)
3090 {
3091 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3092 	struct device *dev = ctx->dev;
3093 
3094 	if (ctx->keylen)
3095 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3096 }
3097 
3098 /*
3099  * Given the alg's descriptor header template, determine whether the
3100  * descriptor type and the primary/secondary execution units it requires
3101  * match the h/w capabilities described in the device tree node.
3102  */
3103 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3104 {
3105 	struct talitos_private *priv = dev_get_drvdata(dev);
3106 	int ret;
3107 
3108 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3109 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3110 
3111 	if (SECONDARY_EU(desc_hdr_template))
3112 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3113 			      & priv->exec_units);
3114 
3115 	return ret;
3116 }
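
/*
 * Example: the sha256 ahash template above carries
 * DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU and DESC_HDR_SEL0_MDEUA, so it
 * is registered only when the matching bits are set in the node's
 * fsl,descriptor-types-mask and fsl,exec-units-mask properties; it has
 * no secondary EU, so the second test is skipped.
 */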
3117 
3118 static int talitos_remove(struct platform_device *ofdev)
3119 {
3120 	struct device *dev = &ofdev->dev;
3121 	struct talitos_private *priv = dev_get_drvdata(dev);
3122 	struct talitos_crypto_alg *t_alg, *n;
3123 	int i;
3124 
3125 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3126 		switch (t_alg->algt.type) {
3127 		case CRYPTO_ALG_TYPE_SKCIPHER:
3128 			crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
3129 			break;
3130 		case CRYPTO_ALG_TYPE_AEAD:
3131 			crypto_unregister_aead(&t_alg->algt.alg.aead);
3132 			break;
3133 		case CRYPTO_ALG_TYPE_AHASH:
3134 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3135 			break;
3136 		}
3137 		list_del(&t_alg->entry);
3138 	}
3139 
3140 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3141 		talitos_unregister_rng(dev);
3142 
3143 	for (i = 0; i < 2; i++)
3144 		if (priv->irq[i]) {
3145 			free_irq(priv->irq[i], dev);
3146 			irq_dispose_mapping(priv->irq[i]);
3147 		}
3148 
3149 	tasklet_kill(&priv->done_task[0]);
3150 	if (priv->irq[1])
3151 		tasklet_kill(&priv->done_task[1]);
3152 
3153 	return 0;
3154 }
3155 
3156 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3157 						    struct talitos_alg_template
3158 						           *template)
3159 {
3160 	struct talitos_private *priv = dev_get_drvdata(dev);
3161 	struct talitos_crypto_alg *t_alg;
3162 	struct crypto_alg *alg;
3163 
3164 	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3165 			     GFP_KERNEL);
3166 	if (!t_alg)
3167 		return ERR_PTR(-ENOMEM);
3168 
3169 	t_alg->algt = *template;
3170 
3171 	switch (t_alg->algt.type) {
3172 	case CRYPTO_ALG_TYPE_SKCIPHER:
3173 		alg = &t_alg->algt.alg.skcipher.base;
3174 		alg->cra_exit = talitos_cra_exit;
3175 		t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
3176 		t_alg->algt.alg.skcipher.setkey =
3177 			t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
3178 		t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
3179 		t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
3180 		break;
3181 	case CRYPTO_ALG_TYPE_AEAD:
3182 		alg = &t_alg->algt.alg.aead.base;
3183 		alg->cra_exit = talitos_cra_exit;
3184 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3185 		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3186 					      aead_setkey;
3187 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3188 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3189 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3190 		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3191 			devm_kfree(dev, t_alg);
3192 			return ERR_PTR(-ENOTSUPP);
3193 		}
3194 		break;
3195 	case CRYPTO_ALG_TYPE_AHASH:
3196 		alg = &t_alg->algt.alg.hash.halg.base;
3197 		alg->cra_init = talitos_cra_init_ahash;
3198 		alg->cra_exit = talitos_cra_exit;
3199 		t_alg->algt.alg.hash.init = ahash_init;
3200 		t_alg->algt.alg.hash.update = ahash_update;
3201 		t_alg->algt.alg.hash.final = ahash_final;
3202 		t_alg->algt.alg.hash.finup = ahash_finup;
3203 		t_alg->algt.alg.hash.digest = ahash_digest;
3204 		if (!strncmp(alg->cra_name, "hmac", 4))
3205 			t_alg->algt.alg.hash.setkey = ahash_setkey;
3206 		t_alg->algt.alg.hash.import = ahash_import;
3207 		t_alg->algt.alg.hash.export = ahash_export;
3208 
3209 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3210 		    !strncmp(alg->cra_name, "hmac", 4)) {
3211 			devm_kfree(dev, t_alg);
3212 			return ERR_PTR(-ENOTSUPP);
3213 		}
3214 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3215 		    (!strcmp(alg->cra_name, "sha224") ||
3216 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3217 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3218 			t_alg->algt.desc_hdr_template =
3219 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3220 					DESC_HDR_SEL0_MDEUA |
3221 					DESC_HDR_MODE0_MDEU_SHA256;
3222 		}
3223 		break;
3224 	default:
3225 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3226 		devm_kfree(dev, t_alg);
3227 		return ERR_PTR(-EINVAL);
3228 	}
3229 
3230 	alg->cra_module = THIS_MODULE;
3231 	if (t_alg->algt.priority)
3232 		alg->cra_priority = t_alg->algt.priority;
3233 	else
3234 		alg->cra_priority = TALITOS_CRA_PRIORITY;
3235 	if (has_ftr_sec1(priv))
3236 		alg->cra_alignmask = 3;
3237 	else
3238 		alg->cra_alignmask = 0;
3239 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3240 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3241 
3242 	t_alg->dev = dev;
3243 
3244 	return t_alg;
3245 }
3246 
3247 static int talitos_probe_irq(struct platform_device *ofdev)
3248 {
3249 	struct device *dev = &ofdev->dev;
3250 	struct device_node *np = ofdev->dev.of_node;
3251 	struct talitos_private *priv = dev_get_drvdata(dev);
3252 	int err;
3253 	bool is_sec1 = has_ftr_sec1(priv);
3254 
3255 	priv->irq[0] = irq_of_parse_and_map(np, 0);
3256 	if (!priv->irq[0]) {
3257 		dev_err(dev, "failed to map irq\n");
3258 		return -EINVAL;
3259 	}
3260 	if (is_sec1) {
3261 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3262 				  dev_driver_string(dev), dev);
3263 		goto primary_out;
3264 	}
3265 
3266 	priv->irq[1] = irq_of_parse_and_map(np, 1);
3267 
3268 	/* get the primary irq line */
3269 	if (!priv->irq[1]) {
3270 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3271 				  dev_driver_string(dev), dev);
3272 		goto primary_out;
3273 	}
3274 
3275 	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3276 			  dev_driver_string(dev), dev);
3277 	if (err)
3278 		goto primary_out;
3279 
3280 	/* get the secondary irq line */
3281 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3282 			  dev_driver_string(dev), dev);
3283 	if (err) {
3284 		dev_err(dev, "failed to request secondary irq\n");
3285 		irq_dispose_mapping(priv->irq[1]);
3286 		priv->irq[1] = 0;
3287 	}
3288 
3289 	return err;
3290 
3291 primary_out:
3292 	if (err) {
3293 		dev_err(dev, "failed to request primary irq\n");
3294 		irq_dispose_mapping(priv->irq[0]);
3295 		priv->irq[0] = 0;
3296 	}
3297 
3298 	return err;
3299 }
3300 
3301 static int talitos_probe(struct platform_device *ofdev)
3302 {
3303 	struct device *dev = &ofdev->dev;
3304 	struct device_node *np = ofdev->dev.of_node;
3305 	struct talitos_private *priv;
3306 	int i, err;
3307 	int stride;
3308 	struct resource *res;
3309 
3310 	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3311 	if (!priv)
3312 		return -ENOMEM;
3313 
3314 	INIT_LIST_HEAD(&priv->alg_list);
3315 
3316 	dev_set_drvdata(dev, priv);
3317 
3318 	priv->ofdev = ofdev;
3319 
3320 	spin_lock_init(&priv->reg_lock);
3321 
3322 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3323 	if (!res)
3324 		return -ENXIO;
3325 	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3326 	if (!priv->reg) {
3327 		dev_err(dev, "failed to ioremap\n");
3328 		err = -ENOMEM;
3329 		goto err_out;
3330 	}
3331 
3332 	/* get SEC version capabilities from device tree */
3333 	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3334 	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3335 	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3336 	of_property_read_u32(np, "fsl,descriptor-types-mask",
3337 			     &priv->desc_types);
3338 
3339 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3340 	    !priv->exec_units || !priv->desc_types) {
3341 		dev_err(dev, "invalid property data in device tree node\n");
3342 		err = -EINVAL;
3343 		goto err_out;
3344 	}
3345 
3346 	if (of_device_is_compatible(np, "fsl,sec3.0"))
3347 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3348 
3349 	if (of_device_is_compatible(np, "fsl,sec2.1"))
3350 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3351 				  TALITOS_FTR_SHA224_HWINIT |
3352 				  TALITOS_FTR_HMAC_OK;
3353 
3354 	if (of_device_is_compatible(np, "fsl,sec1.0"))
3355 		priv->features |= TALITOS_FTR_SEC1;
3356 
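	/*
	 * compute the per-execution-unit register pointers; the register
	 * map differs between SEC1.x and SEC2+ parts
	 */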
3357 	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3358 		priv->reg_deu = priv->reg + TALITOS12_DEU;
3359 		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3360 		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3361 		stride = TALITOS1_CH_STRIDE;
3362 	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3363 		priv->reg_deu = priv->reg + TALITOS10_DEU;
3364 		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3365 		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3366 		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3367 		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3368 		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3369 		stride = TALITOS1_CH_STRIDE;
3370 	} else {
3371 		priv->reg_deu = priv->reg + TALITOS2_DEU;
3372 		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3373 		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3374 		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3375 		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3376 		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3377 		priv->reg_keu = priv->reg + TALITOS2_KEU;
3378 		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3379 		stride = TALITOS2_CH_STRIDE;
3380 	}
3381 
3382 	err = talitos_probe_irq(ofdev);
3383 	if (err)
3384 		goto err_out;
3385 
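	/* pick done-processing tasklet(s) matching the irq setup above */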
3386 	if (has_ftr_sec1(priv)) {
3387 		if (priv->num_channels == 1)
3388 			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3389 				     (unsigned long)dev);
3390 		else
3391 			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3392 				     (unsigned long)dev);
3393 	} else {
3394 		if (priv->irq[1]) {
3395 			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3396 				     (unsigned long)dev);
3397 			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3398 				     (unsigned long)dev);
3399 		} else if (priv->num_channels == 1) {
3400 			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3401 				     (unsigned long)dev);
3402 		} else {
3403 			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3404 				     (unsigned long)dev);
3405 		}
3406 	}
3407 
3408 	priv->chan = devm_kcalloc(dev,
3409 				  priv->num_channels,
3410 				  sizeof(struct talitos_channel),
3411 				  GFP_KERNEL);
3412 	if (!priv->chan) {
3413 		dev_err(dev, "failed to allocate channel management space\n");
3414 		err = -ENOMEM;
3415 		goto err_out;
3416 	}
3417 
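	/*
	 * round the fifo depth up to a power of 2 so ring indices can
	 * wrap with a simple (fifo_len - 1) mask
	 */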
3418 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3419 
3420 	for (i = 0; i < priv->num_channels; i++) {
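		/*
		 * channel registers live at stride * (i + 1); on dual-irq
		 * parts only the even channels take the extra base offset,
		 * on single-irq parts all of them do
		 */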
3421 		priv->chan[i].reg = priv->reg + stride * (i + 1);
3422 		if (!priv->irq[1] || !(i & 1))
3423 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3424 
3425 		spin_lock_init(&priv->chan[i].head_lock);
3426 		spin_lock_init(&priv->chan[i].tail_lock);
3427 
3428 		priv->chan[i].fifo = devm_kcalloc(dev,
3429 						priv->fifo_len,
3430 						sizeof(struct talitos_request),
3431 						GFP_KERNEL);
3432 		if (!priv->chan[i].fifo) {
3433 			dev_err(dev, "failed to allocate request fifo %d\n", i);
3434 			err = -ENOMEM;
3435 			goto err_out;
3436 		}
3437 
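		/*
		 * bias the submit counter negative so it reaches zero once
		 * chfifo_len - 1 requests are outstanding, keeping one
		 * fifo slot in reserve
		 */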
3438 		atomic_set(&priv->chan[i].submit_count,
3439 			   -(priv->chfifo_len - 1));
3440 	}
3441 
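	/*
	 * SEC descriptor pointers address up to 36 bits of DMA space
	 * (the upper 4 bits travel in the eptr field on SEC2+)
	 */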
3442 	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err) {
		dev_err(dev, "failed to set DMA mask\n");
		goto err_out;
	}
3443 
3444 	/* reset and initialize the h/w */
3445 	err = init_device(dev);
3446 	if (err) {
3447 		dev_err(dev, "failed to initialize device\n");
3448 		goto err_out;
3449 	}
3450 
3451 	/* register the RNG, if available */
3452 	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3453 		err = talitos_register_rng(dev);
3454 		if (err) {
3455 			dev_err(dev, "failed to register hwrng: %d\n", err);
3456 			goto err_out;
3457 		}
3458 		dev_info(dev, "hwrng\n");
3459 	}
3460 
3461 	/* register crypto algorithms the device supports */
3462 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3463 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3464 			struct talitos_crypto_alg *t_alg;
3465 			struct crypto_alg *alg = NULL;
3466 
3467 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3468 			if (IS_ERR(t_alg)) {
3469 				err = PTR_ERR(t_alg);
3470 				if (err == -ENOTSUPP)
3471 					continue;
3472 				goto err_out;
3473 			}
3474 
3475 			switch (t_alg->algt.type) {
3476 			case CRYPTO_ALG_TYPE_SKCIPHER:
3477 				err = crypto_register_skcipher(
3478 						&t_alg->algt.alg.skcipher);
3479 				alg = &t_alg->algt.alg.skcipher.base;
3480 				break;
3481 
3482 			case CRYPTO_ALG_TYPE_AEAD:
3483 				err = crypto_register_aead(
3484 					&t_alg->algt.alg.aead);
3485 				alg = &t_alg->algt.alg.aead.base;
3486 				break;
3487 
3488 			case CRYPTO_ALG_TYPE_AHASH:
3489 				err = crypto_register_ahash(
3490 						&t_alg->algt.alg.hash);
3491 				alg = &t_alg->algt.alg.hash.halg.base;
3492 				break;
3493 			}
3494 			if (err) {
3495 				dev_err(dev, "%s alg registration failed\n",
3496 					alg->cra_driver_name);
3497 				devm_kfree(dev, t_alg);
3498 			} else
3499 				list_add_tail(&t_alg->entry, &priv->alg_list);
3500 		}
3501 	}
3502 	if (!list_empty(&priv->alg_list))
3503 		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3504 			 (char *)of_get_property(np, "compatible", NULL));
3505 
3506 	return 0;
3507 
3508 err_out:
3509 	talitos_remove(ofdev);
3510 
3511 	return err;
3512 }
3513 
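/*
 * higher-revision device trees (e.g. sec2.1, sec3.0) are expected to also
 * list the base compatible matched here
 */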
3514 static const struct of_device_id talitos_match[] = {
3515 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3516 	{
3517 		.compatible = "fsl,sec1.0",
3518 	},
3519 #endif
3520 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3521 	{
3522 		.compatible = "fsl,sec2.0",
3523 	},
3524 #endif
3525 	{},
3526 };
3527 MODULE_DEVICE_TABLE(of, talitos_match);
3528 
3529 static struct platform_driver talitos_driver = {
3530 	.driver = {
3531 		.name = "talitos",
3532 		.of_match_table = talitos_match,
3533 	},
3534 	.probe = talitos_probe,
3535 	.remove = talitos_remove,
3536 };
3537 
3538 module_platform_driver(talitos_driver);
3539 
3540 MODULE_LICENSE("GPL");
3541 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3542 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
3543