xref: /openbmc/linux/drivers/crypto/talitos.c (revision 36acd5e2)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * talitos - Freescale Integrated Security Engine (SEC) device driver
4  *
5  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6  *
7  * Scatterlist Crypto API glue code copied from files with the following:
8  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9  *
10  * Crypto algorithm registration code copied from hifn driver:
11  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12  * All rights reserved.
13  */
14 
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/crypto.h>
21 #include <linux/hw_random.h>
22 #include <linux/of_address.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_platform.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/io.h>
27 #include <linux/spinlock.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/slab.h>
30 
31 #include <crypto/algapi.h>
32 #include <crypto/aes.h>
33 #include <crypto/internal/des.h>
34 #include <crypto/sha1.h>
35 #include <crypto/sha2.h>
36 #include <crypto/md5.h>
37 #include <crypto/internal/aead.h>
38 #include <crypto/authenc.h>
39 #include <crypto/internal/skcipher.h>
40 #include <crypto/hash.h>
41 #include <crypto/internal/hash.h>
42 #include <crypto/scatterwalk.h>
43 
44 #include "talitos.h"
45 
46 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
47 			   unsigned int len, bool is_sec1)
48 {
49 	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
50 	if (is_sec1) {
51 		ptr->len1 = cpu_to_be16(len);
52 	} else {
53 		ptr->len = cpu_to_be16(len);
54 		ptr->eptr = upper_32_bits(dma_addr);
55 	}
56 }
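/*
 * Note the two pointer layouts: SEC1 packs its 16-bit length into len1
 * and has no extended-address byte, while SEC2+ use len plus eptr,
 * which carries bits 32-35 of the DMA address for the 36-bit
 * addressing enabled in reset_channel() below.
 */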
57 
58 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
59 			     struct talitos_ptr *src_ptr, bool is_sec1)
60 {
61 	dst_ptr->ptr = src_ptr->ptr;
62 	if (is_sec1) {
63 		dst_ptr->len1 = src_ptr->len1;
64 	} else {
65 		dst_ptr->len = src_ptr->len;
66 		dst_ptr->eptr = src_ptr->eptr;
67 	}
68 }
69 
70 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
71 					   bool is_sec1)
72 {
73 	if (is_sec1)
74 		return be16_to_cpu(ptr->len1);
75 	else
76 		return be16_to_cpu(ptr->len);
77 }
78 
79 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
80 				   bool is_sec1)
81 {
82 	if (!is_sec1)
83 		ptr->j_extent = val;
84 }
85 
86 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
87 {
88 	if (!is_sec1)
89 		ptr->j_extent |= val;
90 }
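/*
 * The extent byte (j_extent) only exists on SEC2+: it holds either an
 * extra-data length (elen) or the link-table JUMP/RET tags applied
 * below, which is why both helpers are no-ops on SEC1.
 */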
91 
92 /*
93  * map a virtual single (contiguous) buffer to an h/w descriptor pointer
94  */
95 static void __map_single_talitos_ptr(struct device *dev,
96 				     struct talitos_ptr *ptr,
97 				     unsigned int len, void *data,
98 				     enum dma_data_direction dir,
99 				     unsigned long attrs)
100 {
101 	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
102 	struct talitos_private *priv = dev_get_drvdata(dev);
103 	bool is_sec1 = has_ftr_sec1(priv);
104 
105 	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
106 }
107 
108 static void map_single_talitos_ptr(struct device *dev,
109 				   struct talitos_ptr *ptr,
110 				   unsigned int len, void *data,
111 				   enum dma_data_direction dir)
112 {
113 	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
114 }
115 
116 static void map_single_talitos_ptr_nosync(struct device *dev,
117 					  struct talitos_ptr *ptr,
118 					  unsigned int len, void *data,
119 					  enum dma_data_direction dir)
120 {
121 	__map_single_talitos_ptr(dev, ptr, len, data, dir,
122 				 DMA_ATTR_SKIP_CPU_SYNC);
123 }
124 
125 /*
126  * unmap a single (contiguous) buffer from an h/w descriptor pointer
127  */
128 static void unmap_single_talitos_ptr(struct device *dev,
129 				     struct talitos_ptr *ptr,
130 				     enum dma_data_direction dir)
131 {
132 	struct talitos_private *priv = dev_get_drvdata(dev);
133 	bool is_sec1 = has_ftr_sec1(priv);
134 
135 	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
136 			 from_talitos_ptr_len(ptr, is_sec1), dir);
137 }
138 
139 static int reset_channel(struct device *dev, int ch)
140 {
141 	struct talitos_private *priv = dev_get_drvdata(dev);
142 	unsigned int timeout = TALITOS_TIMEOUT;
143 	bool is_sec1 = has_ftr_sec1(priv);
144 
145 	if (is_sec1) {
146 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
147 			  TALITOS1_CCCR_LO_RESET);
148 
149 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
150 			TALITOS1_CCCR_LO_RESET) && --timeout)
151 			cpu_relax();
152 	} else {
153 		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
154 			  TALITOS2_CCCR_RESET);
155 
156 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
157 			TALITOS2_CCCR_RESET) && --timeout)
158 			cpu_relax();
159 	}
160 
161 	if (timeout == 0) {
162 		dev_err(dev, "failed to reset channel %d\n", ch);
163 		return -EIO;
164 	}
165 
166 	/* set 36-bit addressing, done writeback enable and done IRQ enable */
167 	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
168 		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
169 	/* enable chaining descriptors */
170 	if (is_sec1)
171 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
172 			  TALITOS_CCCR_LO_NE);
173 
174 	/* and ICCR writeback, if available */
175 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
176 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
177 		          TALITOS_CCCR_LO_IWSE);
178 
179 	return 0;
180 }
181 
182 static int reset_device(struct device *dev)
183 {
184 	struct talitos_private *priv = dev_get_drvdata(dev);
185 	unsigned int timeout = TALITOS_TIMEOUT;
186 	bool is_sec1 = has_ftr_sec1(priv);
187 	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
188 
189 	setbits32(priv->reg + TALITOS_MCR, mcr);
190 
191 	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
192 	       && --timeout)
193 		cpu_relax();
194 
195 	if (priv->irq[1]) {
196 		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
197 		setbits32(priv->reg + TALITOS_MCR, mcr);
198 	}
199 
200 	if (timeout == 0) {
201 		dev_err(dev, "failed to reset device\n");
202 		return -EIO;
203 	}
204 
205 	return 0;
206 }
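/*
 * With a second IRQ line wired up, the MCR RCA1/RCA3 bits are set after
 * the software reset; presumably this reassigns channels 1 and 3 to the
 * secondary interrupt, matching the ch0_2/ch1_3 handlers registered
 * further down.
 */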
207 
208 /*
209  * Reset and initialize the device
210  */
211 static int init_device(struct device *dev)
212 {
213 	struct talitos_private *priv = dev_get_drvdata(dev);
214 	int ch, err;
215 	bool is_sec1 = has_ftr_sec1(priv);
216 
217 	/*
218 	 * Master reset
219 	 * errata documentation: warning: certain SEC interrupts
220 	 * are not fully cleared by writing the MCR:SWR bit,
221 	 * set bit twice to completely reset
222 	 */
223 	err = reset_device(dev);
224 	if (err)
225 		return err;
226 
227 	err = reset_device(dev);
228 	if (err)
229 		return err;
230 
231 	/* reset channels */
232 	for (ch = 0; ch < priv->num_channels; ch++) {
233 		err = reset_channel(dev, ch);
234 		if (err)
235 			return err;
236 	}
237 
238 	/* enable channel done and error interrupts */
239 	if (is_sec1) {
240 		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
241 		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
242 		/* disable parity error check in DEU (its test vectors seem erroneous) */
243 		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
244 	} else {
245 		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
246 		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
247 	}
248 
249 	/* disable integrity check error interrupts (use writeback instead) */
250 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
251 		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
252 		          TALITOS_MDEUICR_LO_ICE);
253 
254 	return 0;
255 }
256 
257 /**
258  * talitos_submit - submits a descriptor to the device for processing
259  * @dev:	the SEC device to be used
260  * @ch:		the SEC device channel to be used
261  * @desc:	the descriptor to be processed by the device
262  * @callback:	whom to call when processing is complete
263  * @context:	a handle for use by caller (optional)
264  *
265  * desc must contain valid dma-mapped (bus physical) address pointers.
266  * callback must check err and feedback in descriptor header
267  * for device processing status.
268  */
269 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
270 			  void (*callback)(struct device *dev,
271 					   struct talitos_desc *desc,
272 					   void *context, int error),
273 			  void *context)
274 {
275 	struct talitos_private *priv = dev_get_drvdata(dev);
276 	struct talitos_request *request;
277 	unsigned long flags;
278 	int head;
279 	bool is_sec1 = has_ftr_sec1(priv);
280 
281 	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
282 
283 	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
284 		/* h/w fifo is full */
285 		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
286 		return -EAGAIN;
287 	}
288 
289 	head = priv->chan[ch].head;
290 	request = &priv->chan[ch].fifo[head];
291 
292 	/* map descriptor and save caller data */
293 	if (is_sec1) {
294 		desc->hdr1 = desc->hdr;
295 		request->dma_desc = dma_map_single(dev, &desc->hdr1,
296 						   TALITOS_DESC_SIZE,
297 						   DMA_BIDIRECTIONAL);
298 	} else {
299 		request->dma_desc = dma_map_single(dev, desc,
300 						   TALITOS_DESC_SIZE,
301 						   DMA_BIDIRECTIONAL);
302 	}
303 	request->callback = callback;
304 	request->context = context;
305 
306 	/* increment fifo head */
307 	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
308 
309 	smp_wmb();
310 	request->desc = desc;
311 
312 	/* GO! */
313 	wmb();
314 	out_be32(priv->chan[ch].reg + TALITOS_FF,
315 		 upper_32_bits(request->dma_desc));
316 	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
317 		 lower_32_bits(request->dma_desc));
318 
319 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
320 
321 	return -EINPROGRESS;
322 }
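/*
 * Minimal caller sketch (hypothetical callback name, error handling
 * elided):
 *
 *	err = talitos_submit(dev, ctx->ch, &edesc->desc, my_done, areq);
 *	if (err != -EINPROGRESS)
 *		unmap and free the descriptor; -EAGAIN means fifo full
 *
 * -EINPROGRESS is the success return; the final status is delivered to
 * the callback from flush_channel() once the channel reports done.
 */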
323 
324 static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
325 {
326 	struct talitos_edesc *edesc;
327 
328 	if (!is_sec1)
329 		return request->desc->hdr;
330 
331 	if (!request->desc->next_desc)
332 		return request->desc->hdr1;
333 
334 	edesc = container_of(request->desc, struct talitos_edesc, desc);
335 
336 	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
337 }
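/*
 * On SEC1 a request may chain two descriptors (see next_desc); the
 * header carrying the DONE status then lives in the second descriptor,
 * which is stored past the link table at edesc->buf + edesc->dma_len.
 */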
338 
339 /*
340  * process completed requests, notifying the callback of any error
341  */
342 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
343 {
344 	struct talitos_private *priv = dev_get_drvdata(dev);
345 	struct talitos_request *request, saved_req;
346 	unsigned long flags;
347 	int tail, status;
348 	bool is_sec1 = has_ftr_sec1(priv);
349 
350 	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
351 
352 	tail = priv->chan[ch].tail;
353 	while (priv->chan[ch].fifo[tail].desc) {
354 		__be32 hdr;
355 
356 		request = &priv->chan[ch].fifo[tail];
357 
358 		/* descriptors with their done bits set don't get the error */
359 		rmb();
360 		hdr = get_request_hdr(request, is_sec1);
361 
362 		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
363 			status = 0;
364 		else if (!error)
365 			break;
366 		else
367 			status = error;
369 
370 		dma_unmap_single(dev, request->dma_desc,
371 				 TALITOS_DESC_SIZE,
372 				 DMA_BIDIRECTIONAL);
373 
374 		/* copy entries so we can call callback outside lock */
375 		saved_req.desc = request->desc;
376 		saved_req.callback = request->callback;
377 		saved_req.context = request->context;
378 
379 		/* release request entry in fifo */
380 		smp_wmb();
381 		request->desc = NULL;
382 
383 		/* increment fifo tail */
384 		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
385 
386 		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
387 
388 		atomic_dec(&priv->chan[ch].submit_count);
389 
390 		saved_req.callback(dev, saved_req.desc, saved_req.context,
391 				   status);
392 		/* channel may resume processing in single desc error case */
393 		if (error && !reset_ch && status == error)
394 			return;
395 		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
396 		tail = priv->chan[ch].tail;
397 	}
398 
399 	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
400 }
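/*
 * The per-channel fifo is a power-of-two ring: head (submit side) is
 * guarded by head_lock, tail (completion side) by tail_lock, and
 * request->desc doubles as the occupancy flag.  talitos_submit()
 * publishes it behind a write barrier after filling the entry, and
 * flush_channel() clears it before advancing tail, so the two sides
 * never operate on the same entry concurrently.
 */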
401 
402 /*
403  * process completed requests for channels that have done status
404  */
405 #define DEF_TALITOS1_DONE(name, ch_done_mask)				\
406 static void talitos1_done_##name(unsigned long data)			\
407 {									\
408 	struct device *dev = (struct device *)data;			\
409 	struct talitos_private *priv = dev_get_drvdata(dev);		\
410 	unsigned long flags;						\
411 									\
412 	if (ch_done_mask & 0x10000000)					\
413 		flush_channel(dev, 0, 0, 0);			\
414 	if (ch_done_mask & 0x40000000)					\
415 		flush_channel(dev, 1, 0, 0);			\
416 	if (ch_done_mask & 0x00010000)					\
417 		flush_channel(dev, 2, 0, 0);			\
418 	if (ch_done_mask & 0x00040000)					\
419 		flush_channel(dev, 3, 0, 0);			\
420 									\
421 	/* At this point, all completed channels have been processed */	\
422 	/* Unmask done interrupts for channels completed later on. */	\
423 	spin_lock_irqsave(&priv->reg_lock, flags);			\
424 	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
425 	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
426 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
427 }
428 
429 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
430 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
431 
432 #define DEF_TALITOS2_DONE(name, ch_done_mask)				\
433 static void talitos2_done_##name(unsigned long data)			\
434 {									\
435 	struct device *dev = (struct device *)data;			\
436 	struct talitos_private *priv = dev_get_drvdata(dev);		\
437 	unsigned long flags;						\
438 									\
439 	if (ch_done_mask & 1)						\
440 		flush_channel(dev, 0, 0, 0);				\
441 	if (ch_done_mask & (1 << 2))					\
442 		flush_channel(dev, 1, 0, 0);				\
443 	if (ch_done_mask & (1 << 4))					\
444 		flush_channel(dev, 2, 0, 0);				\
445 	if (ch_done_mask & (1 << 6))					\
446 		flush_channel(dev, 3, 0, 0);				\
447 									\
448 	/* At this point, all completed channels have been processed */	\
449 	/* Unmask done interrupts for channels completed later on. */	\
450 	spin_lock_irqsave(&priv->reg_lock, flags);			\
451 	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
452 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
453 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
454 }
455 
456 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
457 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
458 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
459 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
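/*
 * Note the opposite IMR polarity of the two families: on SEC1 a set IMR
 * bit masks an interrupt, so done interrupts are re-enabled with
 * clrbits32() above, whereas on SEC2+ a set bit enables it, hence the
 * setbits32() in the SEC2 variant.
 */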
460 
461 /*
462  * locate current (offending) descriptor
463  */
464 static __be32 current_desc_hdr(struct device *dev, int ch)
465 {
466 	struct talitos_private *priv = dev_get_drvdata(dev);
467 	int tail, iter;
468 	dma_addr_t cur_desc;
469 
470 	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
471 	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
472 
473 	if (!cur_desc) {
474 		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
475 		return 0;
476 	}
477 
478 	tail = priv->chan[ch].tail;
479 
480 	iter = tail;
481 	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
482 	       priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
483 		iter = (iter + 1) & (priv->fifo_len - 1);
484 		if (iter == tail) {
485 			dev_err(dev, "couldn't locate current descriptor\n");
486 			return 0;
487 		}
488 	}
489 
490 	if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
491 		struct talitos_edesc *edesc;
492 
493 		edesc = container_of(priv->chan[ch].fifo[iter].desc,
494 				     struct talitos_edesc, desc);
495 		return ((struct talitos_desc *)
496 			(edesc->buf + edesc->dma_len))->hdr;
497 	}
498 
499 	return priv->chan[ch].fifo[iter].desc->hdr;
500 }
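/*
 * The scan compares CDPR against each fifo entry's descriptor address,
 * also matching a chained second descriptor via next_desc; in that case
 * the header is fetched from the chained descriptor placed after the
 * link table, as in get_request_hdr().
 */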
501 
502 /*
503  * user diagnostics; report root cause of error based on execution unit status
504  */
505 static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
506 {
507 	struct talitos_private *priv = dev_get_drvdata(dev);
508 	int i;
509 
510 	if (!desc_hdr)
511 		desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));
512 
513 	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
514 	case DESC_HDR_SEL0_AFEU:
515 		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
516 			in_be32(priv->reg_afeu + TALITOS_EUISR),
517 			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
518 		break;
519 	case DESC_HDR_SEL0_DEU:
520 		dev_err(dev, "DEUISR 0x%08x_%08x\n",
521 			in_be32(priv->reg_deu + TALITOS_EUISR),
522 			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
523 		break;
524 	case DESC_HDR_SEL0_MDEUA:
525 	case DESC_HDR_SEL0_MDEUB:
526 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
527 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
528 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
529 		break;
530 	case DESC_HDR_SEL0_RNG:
531 		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
532 			in_be32(priv->reg_rngu + TALITOS_ISR),
533 			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
534 		break;
535 	case DESC_HDR_SEL0_PKEU:
536 		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
537 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
538 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
539 		break;
540 	case DESC_HDR_SEL0_AESU:
541 		dev_err(dev, "AESUISR 0x%08x_%08x\n",
542 			in_be32(priv->reg_aesu + TALITOS_EUISR),
543 			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
544 		break;
545 	case DESC_HDR_SEL0_CRCU:
546 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
547 			in_be32(priv->reg_crcu + TALITOS_EUISR),
548 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
549 		break;
550 	case DESC_HDR_SEL0_KEU:
551 		dev_err(dev, "KEUISR 0x%08x_%08x\n",
552 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
553 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
554 		break;
555 	}
556 
557 	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
558 	case DESC_HDR_SEL1_MDEUA:
559 	case DESC_HDR_SEL1_MDEUB:
560 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
561 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
562 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
563 		break;
564 	case DESC_HDR_SEL1_CRCU:
565 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
566 			in_be32(priv->reg_crcu + TALITOS_EUISR),
567 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
568 		break;
569 	}
570 
571 	for (i = 0; i < 8; i++)
572 		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
573 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
574 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
575 }
576 
577 /*
578  * recover from error interrupts
579  */
580 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
581 {
582 	struct talitos_private *priv = dev_get_drvdata(dev);
583 	unsigned int timeout = TALITOS_TIMEOUT;
584 	int ch, error, reset_dev = 0;
585 	u32 v_lo;
586 	bool is_sec1 = has_ftr_sec1(priv);
587 	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
588 
589 	for (ch = 0; ch < priv->num_channels; ch++) {
590 		/* skip channels without errors */
591 		if (is_sec1) {
592 			/* bits 29, 31, 17, 19 */
593 			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
594 				continue;
595 		} else {
596 			if (!(isr & (1 << (ch * 2 + 1))))
597 				continue;
598 		}
599 
600 		error = -EINVAL;
601 
602 		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
603 
604 		if (v_lo & TALITOS_CCPSR_LO_DOF) {
605 			dev_err(dev, "double fetch fifo overflow error\n");
606 			error = -EAGAIN;
607 			reset_ch = 1;
608 		}
609 		if (v_lo & TALITOS_CCPSR_LO_SOF) {
610 			/* h/w dropped descriptor */
611 			dev_err(dev, "single fetch fifo overflow error\n");
612 			error = -EAGAIN;
613 		}
614 		if (v_lo & TALITOS_CCPSR_LO_MDTE)
615 			dev_err(dev, "master data transfer error\n");
616 		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
617 			dev_err(dev, is_sec1 ? "pointer not complete error\n"
618 					     : "s/g data length zero error\n");
619 		if (v_lo & TALITOS_CCPSR_LO_FPZ)
620 			dev_err(dev, is_sec1 ? "parity error\n"
621 					     : "fetch pointer zero error\n");
622 		if (v_lo & TALITOS_CCPSR_LO_IDH)
623 			dev_err(dev, "illegal descriptor header error\n");
624 		if (v_lo & TALITOS_CCPSR_LO_IEU)
625 			dev_err(dev, is_sec1 ? "static assignment error\n"
626 					     : "invalid exec unit error\n");
627 		if (v_lo & TALITOS_CCPSR_LO_EU)
628 			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
629 		if (!is_sec1) {
630 			if (v_lo & TALITOS_CCPSR_LO_GB)
631 				dev_err(dev, "gather boundary error\n");
632 			if (v_lo & TALITOS_CCPSR_LO_GRL)
633 				dev_err(dev, "gather return/length error\n");
634 			if (v_lo & TALITOS_CCPSR_LO_SB)
635 				dev_err(dev, "scatter boundary error\n");
636 			if (v_lo & TALITOS_CCPSR_LO_SRL)
637 				dev_err(dev, "scatter return/length error\n");
638 		}
639 
640 		flush_channel(dev, ch, error, reset_ch);
641 
642 		if (reset_ch) {
643 			reset_channel(dev, ch);
644 		} else {
645 			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
646 				  TALITOS2_CCCR_CONT);
647 			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
648 			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
649 			       TALITOS2_CCCR_CONT) && --timeout)
650 				cpu_relax();
651 			if (timeout == 0) {
652 				dev_err(dev, "failed to restart channel %d\n",
653 					ch);
654 				reset_dev = 1;
655 			}
656 		}
657 	}
658 	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
659 	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
660 		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
661 			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
662 				isr, isr_lo);
663 		else
664 			dev_err(dev, "done overflow, internal time out, or "
665 				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
666 
667 		/* purge request queues */
668 		for (ch = 0; ch < priv->num_channels; ch++)
669 			flush_channel(dev, ch, -EIO, 1);
670 
671 		/* reset and reinitialize the device */
672 		init_device(dev);
673 	}
674 }
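/*
 * Recovery is two-tiered: per-channel errors either reset the channel
 * (always on SEC1, which lacks continuation support) or set
 * TALITOS2_CCCR_CONT to resume past the offending descriptor; anything
 * beyond channel scope (TEA errors, done overflow, a failed restart)
 * falls through to a flush of every channel and a full init_device().
 */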
675 
676 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
677 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
678 {									       \
679 	struct device *dev = data;					       \
680 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
681 	u32 isr, isr_lo;						       \
682 	unsigned long flags;						       \
683 									       \
684 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
685 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
686 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
687 	/* Acknowledge interrupt */					       \
688 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
689 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
690 									       \
691 	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
692 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
693 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
694 	}								       \
695 	else {								       \
696 		if (likely(isr & ch_done_mask)) {			       \
697 			/* mask further done interrupts. */		       \
698 			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
699 			/* done_task will unmask done interrupts at exit */    \
700 			tasklet_schedule(&priv->done_task[tlet]);	       \
701 		}							       \
702 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
703 	}								       \
704 									       \
705 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
706 								IRQ_NONE;      \
707 }
708 
709 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
710 
711 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
712 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
713 {									       \
714 	struct device *dev = data;					       \
715 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
716 	u32 isr, isr_lo;						       \
717 	unsigned long flags;						       \
718 									       \
719 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
720 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
721 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
722 	/* Acknowledge interrupt */					       \
723 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
724 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
725 									       \
726 	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
727 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
728 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
729 	}								       \
730 	else {								       \
731 		if (likely(isr & ch_done_mask)) {			       \
732 			/* mask further done interrupts. */		       \
733 			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
734 			/* done_task will unmask done interrupts at exit */    \
735 			tasklet_schedule(&priv->done_task[tlet]);	       \
736 		}							       \
737 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
738 	}								       \
739 									       \
740 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
741 								IRQ_NONE;      \
742 }
743 
744 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
745 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
746 		       0)
747 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
748 		       1)
749 
750 /*
751  * hwrng
752  */
753 static int talitos_rng_data_present(struct hwrng *rng, int wait)
754 {
755 	struct device *dev = (struct device *)rng->priv;
756 	struct talitos_private *priv = dev_get_drvdata(dev);
757 	u32 ofl;
758 	int i;
759 
760 	for (i = 0; i < 20; i++) {
761 		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
762 		      TALITOS_RNGUSR_LO_OFL;
763 		if (ofl || !wait)
764 			break;
765 		udelay(10);
766 	}
767 
768 	return !!ofl;
769 }
770 
771 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
772 {
773 	struct device *dev = (struct device *)rng->priv;
774 	struct talitos_private *priv = dev_get_drvdata(dev);
775 
776 	/* rng fifo requires 64-bit accesses */
777 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
778 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
779 
780 	return sizeof(u32);
781 }
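/*
 * The back-to-back reads above are deliberate: the fifo must be read as
 * a 64-bit quantity, so the high word is fetched and then overwritten,
 * and only the low 32 bits are handed back per call.
 */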
782 
783 static int talitos_rng_init(struct hwrng *rng)
784 {
785 	struct device *dev = (struct device *)rng->priv;
786 	struct talitos_private *priv = dev_get_drvdata(dev);
787 	unsigned int timeout = TALITOS_TIMEOUT;
788 
789 	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
790 	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
791 		 & TALITOS_RNGUSR_LO_RD)
792 	       && --timeout)
793 		cpu_relax();
794 	if (timeout == 0) {
795 		dev_err(dev, "failed to reset rng hw\n");
796 		return -ENODEV;
797 	}
798 
799 	/* start generating */
800 	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
801 
802 	return 0;
803 }
804 
805 static int talitos_register_rng(struct device *dev)
806 {
807 	struct talitos_private *priv = dev_get_drvdata(dev);
808 	int err;
809 
810 	priv->rng.name		= dev_driver_string(dev);
811 	priv->rng.init		= talitos_rng_init;
812 	priv->rng.data_present	= talitos_rng_data_present;
813 	priv->rng.data_read	= talitos_rng_data_read;
814 	priv->rng.priv		= (unsigned long)dev;
815 
816 	err = hwrng_register(&priv->rng);
817 	if (!err)
818 		priv->rng_registered = true;
819 
820 	return err;
821 }
822 
823 static void talitos_unregister_rng(struct device *dev)
824 {
825 	struct talitos_private *priv = dev_get_drvdata(dev);
826 
827 	if (!priv->rng_registered)
828 		return;
829 
830 	hwrng_unregister(&priv->rng);
831 	priv->rng_registered = false;
832 }
833 
834 /*
835  * crypto alg
836  */
837 #define TALITOS_CRA_PRIORITY		3000
838 /*
839  * Defines a priority for doing AEAD with descriptors type
840  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
841  */
842 #define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
843 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
844 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
845 #else
846 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
847 #endif
848 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
849 
850 struct talitos_ctx {
851 	struct device *dev;
852 	int ch;
853 	__be32 desc_hdr_template;
854 	u8 key[TALITOS_MAX_KEY_SIZE];
855 	u8 iv[TALITOS_MAX_IV_LENGTH];
856 	dma_addr_t dma_key;
857 	unsigned int keylen;
858 	unsigned int enckeylen;
859 	unsigned int authkeylen;
860 };
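/*
 * For AEAD transforms, key[] holds the authentication key immediately
 * followed by the encryption key (see aead_setkey()); dma_key maps the
 * whole concatenation once, and the descriptor pointers later index
 * into it at offset 0 (hmac key) and authkeylen (cipher key).
 */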
861 
862 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
863 #define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
864 
865 struct talitos_ahash_req_ctx {
866 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
867 	unsigned int hw_context_size;
868 	u8 buf[2][HASH_MAX_BLOCK_SIZE];
869 	int buf_idx;
870 	unsigned int swinit;
871 	unsigned int first;
872 	unsigned int last;
873 	unsigned int to_hash_later;
874 	unsigned int nbuf;
875 	struct scatterlist bufsl[2];
876 	struct scatterlist *psrc;
877 };
878 
879 struct talitos_export_state {
880 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
881 	u8 buf[HASH_MAX_BLOCK_SIZE];
882 	unsigned int swinit;
883 	unsigned int first;
884 	unsigned int last;
885 	unsigned int to_hash_later;
886 	unsigned int nbuf;
887 };
888 
889 static int aead_setkey(struct crypto_aead *authenc,
890 		       const u8 *key, unsigned int keylen)
891 {
892 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
893 	struct device *dev = ctx->dev;
894 	struct crypto_authenc_keys keys;
895 
896 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
897 		goto badkey;
898 
899 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
900 		goto badkey;
901 
902 	if (ctx->keylen)
903 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
904 
905 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
906 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
907 
908 	ctx->keylen = keys.authkeylen + keys.enckeylen;
909 	ctx->enckeylen = keys.enckeylen;
910 	ctx->authkeylen = keys.authkeylen;
911 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
912 				      DMA_TO_DEVICE);
913 
914 	memzero_explicit(&keys, sizeof(keys));
915 	return 0;
916 
917 badkey:
918 	memzero_explicit(&keys, sizeof(keys));
919 	return -EINVAL;
920 }
921 
922 static int aead_des3_setkey(struct crypto_aead *authenc,
923 			    const u8 *key, unsigned int keylen)
924 {
925 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
926 	struct device *dev = ctx->dev;
927 	struct crypto_authenc_keys keys;
928 	int err;
929 
930 	err = crypto_authenc_extractkeys(&keys, key, keylen);
931 	if (unlikely(err))
932 		goto out;
933 
934 	err = -EINVAL;
935 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
936 		goto out;
937 
938 	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
939 	if (err)
940 		goto out;
941 
942 	if (ctx->keylen)
943 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
944 
945 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
946 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
947 
948 	ctx->keylen = keys.authkeylen + keys.enckeylen;
949 	ctx->enckeylen = keys.enckeylen;
950 	ctx->authkeylen = keys.authkeylen;
951 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
952 				      DMA_TO_DEVICE);
953 
954 out:
955 	memzero_explicit(&keys, sizeof(keys));
956 	return err;
957 }
958 
959 static void talitos_sg_unmap(struct device *dev,
960 			     struct talitos_edesc *edesc,
961 			     struct scatterlist *src,
962 			     struct scatterlist *dst,
963 			     unsigned int len, unsigned int offset)
964 {
965 	struct talitos_private *priv = dev_get_drvdata(dev);
966 	bool is_sec1 = has_ftr_sec1(priv);
967 	unsigned int src_nents = edesc->src_nents ? : 1;
968 	unsigned int dst_nents = edesc->dst_nents ? : 1;
969 
970 	if (is_sec1 && dst && dst_nents > 1) {
971 		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
972 					   len, DMA_FROM_DEVICE);
973 		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
974 				     offset);
975 	}
976 	if (src != dst) {
977 		if (src_nents == 1 || !is_sec1)
978 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
979 
980 		if (dst && (dst_nents == 1 || !is_sec1))
981 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
982 	} else if (src_nents == 1 || !is_sec1) {
983 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
984 	}
985 }
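/*
 * On SEC1, multi-segment requests are bounced through the contiguous
 * edesc->buf instead of a link table; the sync and copy-back above are
 * what return the processed data from that bounce buffer into the
 * caller's destination scatterlist.
 */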
986 
987 static void ipsec_esp_unmap(struct device *dev,
988 			    struct talitos_edesc *edesc,
989 			    struct aead_request *areq, bool encrypt)
990 {
991 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
992 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
993 	unsigned int ivsize = crypto_aead_ivsize(aead);
994 	unsigned int authsize = crypto_aead_authsize(aead);
995 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
996 	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
997 	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
998 
999 	if (is_ipsec_esp)
1000 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1001 					 DMA_FROM_DEVICE);
1002 	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1003 
1004 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1005 			 cryptlen + authsize, areq->assoclen);
1006 
1007 	if (edesc->dma_len)
1008 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1009 				 DMA_BIDIRECTIONAL);
1010 
1011 	if (!is_ipsec_esp) {
1012 		unsigned int dst_nents = edesc->dst_nents ? : 1;
1013 
1014 		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1015 				   areq->assoclen + cryptlen - ivsize);
1016 	}
1017 }
1018 
1019 /*
1020  * ipsec_esp descriptor callbacks
1021  */
1022 static void ipsec_esp_encrypt_done(struct device *dev,
1023 				   struct talitos_desc *desc, void *context,
1024 				   int err)
1025 {
1026 	struct aead_request *areq = context;
1027 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1028 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1029 	struct talitos_edesc *edesc;
1030 
1031 	edesc = container_of(desc, struct talitos_edesc, desc);
1032 
1033 	ipsec_esp_unmap(dev, edesc, areq, true);
1034 
1035 	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1036 
1037 	kfree(edesc);
1038 
1039 	aead_request_complete(areq, err);
1040 }
1041 
1042 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1043 					  struct talitos_desc *desc,
1044 					  void *context, int err)
1045 {
1046 	struct aead_request *req = context;
1047 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1048 	unsigned int authsize = crypto_aead_authsize(authenc);
1049 	struct talitos_edesc *edesc;
1050 	char *oicv, *icv;
1051 
1052 	edesc = container_of(desc, struct talitos_edesc, desc);
1053 
1054 	ipsec_esp_unmap(dev, edesc, req, false);
1055 
1056 	if (!err) {
1057 		/* auth check */
1058 		oicv = edesc->buf + edesc->dma_len;
1059 		icv = oicv - authsize;
1060 
1061 		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1062 	}
1063 
1064 	kfree(edesc);
1065 
1066 	aead_request_complete(req, err);
1067 }
1068 
1069 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1070 					  struct talitos_desc *desc,
1071 					  void *context, int err)
1072 {
1073 	struct aead_request *req = context;
1074 	struct talitos_edesc *edesc;
1075 
1076 	edesc = container_of(desc, struct talitos_edesc, desc);
1077 
1078 	ipsec_esp_unmap(dev, edesc, req, false);
1079 
1080 	/* check ICV auth status */
1081 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1082 		     DESC_HDR_LO_ICCR1_PASS))
1083 		err = -EBADMSG;
1084 
1085 	kfree(edesc);
1086 
1087 	aead_request_complete(req, err);
1088 }
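/*
 * Two ICV verification paths exist: with TALITOS_FTR_HW_AUTH_CHECK the
 * engine compares the ICV itself and reports the verdict in the
 * DESC_HDR_LO_ICCR1 bits checked above; otherwise the swauth variant
 * stashes the received ICV and compares it against the computed one in
 * software with crypto_memneq().
 */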
1089 
1090 /*
1091  * convert scatterlist to SEC h/w link table format
1092  * stop at cryptlen bytes
1093  */
1094 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1095 				 unsigned int offset, int datalen, int elen,
1096 				 struct talitos_ptr *link_tbl_ptr)
1097 {
1098 	int n_sg = elen ? sg_count + 1 : sg_count;
1099 	int count = 0;
1100 	int cryptlen = datalen + elen;
1101 
1102 	while (cryptlen && sg && n_sg--) {
1103 		unsigned int len = sg_dma_len(sg);
1104 
1105 		if (offset >= len) {
1106 			offset -= len;
1107 			goto next;
1108 		}
1109 
1110 		len -= offset;
1111 
1112 		if (len > cryptlen)
1113 			len = cryptlen;
1114 
1115 		if (datalen > 0 && len > datalen) {
1116 			to_talitos_ptr(link_tbl_ptr + count,
1117 				       sg_dma_address(sg) + offset, datalen, 0);
1118 			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1119 			count++;
1120 			len -= datalen;
1121 			offset += datalen;
1122 		}
1123 		to_talitos_ptr(link_tbl_ptr + count,
1124 			       sg_dma_address(sg) + offset, len, 0);
1125 		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1126 		count++;
1127 		cryptlen -= len;
1128 		datalen -= len;
1129 		offset = 0;
1130 
1131 next:
1132 		sg = sg_next(sg);
1133 	}
1134 
1135 	/* tag end of link table */
1136 	if (count > 0)
1137 		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1138 				       DESC_PTR_LNKTBL_RET, 0);
1139 
1140 	return count;
1141 }
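/*
 * When the datalen boundary falls inside a scatterlist segment, that
 * segment is emitted as two link-table entries so the elen bytes (the
 * HMAC extent for IPsec) land in their own entry; the final entry is
 * tagged DESC_PTR_LNKTBL_RET to terminate the table.
 */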
1142 
1143 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1144 			      unsigned int len, struct talitos_edesc *edesc,
1145 			      struct talitos_ptr *ptr, int sg_count,
1146 			      unsigned int offset, int tbl_off, int elen,
1147 			      bool force)
1148 {
1149 	struct talitos_private *priv = dev_get_drvdata(dev);
1150 	bool is_sec1 = has_ftr_sec1(priv);
1151 
1152 	if (!src) {
1153 		to_talitos_ptr(ptr, 0, 0, is_sec1);
1154 		return 1;
1155 	}
1156 	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1157 	if (sg_count == 1 && !force) {
1158 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1159 		return sg_count;
1160 	}
1161 	if (is_sec1) {
1162 		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1163 		return sg_count;
1164 	}
1165 	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1166 					 &edesc->link_tbl[tbl_off]);
1167 	if (sg_count == 1 && !force) {
1168 		/* Only one segment now, so no link tbl needed */
1169 		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1170 		return sg_count;
1171 	}
1172 	to_talitos_ptr(ptr, edesc->dma_link_tbl +
1173 			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1174 	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1175 
1176 	return sg_count;
1177 }
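/*
 * Mapping decision tree: a NULL source yields a zero pointer, a single
 * segment (unless forced) is pointed at directly, SEC1 always uses the
 * contiguous bounce buffer, and everything else builds a link table at
 * tbl_off and tags the main pointer with DESC_PTR_LNKTBL_JUMP.
 */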
1178 
1179 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1180 			  unsigned int len, struct talitos_edesc *edesc,
1181 			  struct talitos_ptr *ptr, int sg_count,
1182 			  unsigned int offset, int tbl_off)
1183 {
1184 	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1185 				  tbl_off, 0, false);
1186 }
1187 
1188 /*
1189  * fill in and submit ipsec_esp descriptor
1190  */
1191 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1192 		     bool encrypt,
1193 		     void (*callback)(struct device *dev,
1194 				      struct talitos_desc *desc,
1195 				      void *context, int error))
1196 {
1197 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1198 	unsigned int authsize = crypto_aead_authsize(aead);
1199 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1200 	struct device *dev = ctx->dev;
1201 	struct talitos_desc *desc = &edesc->desc;
1202 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1203 	unsigned int ivsize = crypto_aead_ivsize(aead);
1204 	int tbl_off = 0;
1205 	int sg_count, ret;
1206 	int elen = 0;
1207 	bool sync_needed = false;
1208 	struct talitos_private *priv = dev_get_drvdata(dev);
1209 	bool is_sec1 = has_ftr_sec1(priv);
1210 	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1211 	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1212 	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1213 	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
1214 
1215 	/* hmac key */
1216 	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1217 
1218 	sg_count = edesc->src_nents ?: 1;
1219 	if (is_sec1 && sg_count > 1)
1220 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1221 				  areq->assoclen + cryptlen);
1222 	else
1223 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1224 				      (areq->src == areq->dst) ?
1225 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1226 
1227 	/* hmac data */
1228 	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1229 			     &desc->ptr[1], sg_count, 0, tbl_off);
1230 
1231 	if (ret > 1) {
1232 		tbl_off += ret;
1233 		sync_needed = true;
1234 	}
1235 
1236 	/* cipher iv */
1237 	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1238 
1239 	/* cipher key */
1240 	to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1241 		       ctx->enckeylen, is_sec1);
1242 
1243 	/*
1244 	 * cipher in
1245 	 * map and adjust cipher len to aead request cryptlen.
1246 	 * extent is bytes of HMAC appended to the ciphertext,
1247 	 * typically 12 for ipsec
1248 	 */
1249 	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1250 		elen = authsize;
1251 
1252 	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1253 				 sg_count, areq->assoclen, tbl_off, elen,
1254 				 false);
1255 
1256 	if (ret > 1) {
1257 		tbl_off += ret;
1258 		sync_needed = true;
1259 	}
1260 
1261 	/* cipher out */
1262 	if (areq->src != areq->dst) {
1263 		sg_count = edesc->dst_nents ? : 1;
1264 		if (!is_sec1 || sg_count == 1)
1265 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1266 	}
1267 
1268 	if (is_ipsec_esp && encrypt)
1269 		elen = authsize;
1270 	else
1271 		elen = 0;
1272 	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1273 				 sg_count, areq->assoclen, tbl_off, elen,
1274 				 is_ipsec_esp && !encrypt);
1275 	tbl_off += ret;
1276 
1277 	if (!encrypt && is_ipsec_esp) {
1278 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1279 
1280 		/* Add an entry to the link table for ICV data */
1281 		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1282 		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1283 
1284 		/* icv data follows link tables */
1285 		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1286 		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1287 		sync_needed = true;
1288 	} else if (!encrypt) {
1289 		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1290 		sync_needed = true;
1291 	} else if (!is_ipsec_esp) {
1292 		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1293 			       sg_count, areq->assoclen + cryptlen, tbl_off);
1294 	}
1295 
1296 	/* iv out */
1297 	if (is_ipsec_esp)
1298 		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1299 				       DMA_FROM_DEVICE);
1300 
1301 	if (sync_needed)
1302 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1303 					   edesc->dma_len,
1304 					   DMA_BIDIRECTIONAL);
1305 
1306 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1307 	if (ret != -EINPROGRESS) {
1308 		ipsec_esp_unmap(dev, edesc, areq, encrypt);
1309 		kfree(edesc);
1310 	}
1311 	return ret;
1312 }
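/*
 * Descriptor slot map used above: ptr[0] hmac key, ptr[1] hmac (assoc)
 * data, ptr[2] and ptr[3] cipher iv and key (iv first for IPSEC_ESP
 * types, key first otherwise), ptr[4] cipher in, ptr[5] cipher out,
 * ptr[6] ICV or iv out depending on direction and descriptor type.
 */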
1313 
1314 /*
1315  * allocate and map the extended descriptor
1316  */
1317 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1318 						 struct scatterlist *src,
1319 						 struct scatterlist *dst,
1320 						 u8 *iv,
1321 						 unsigned int assoclen,
1322 						 unsigned int cryptlen,
1323 						 unsigned int authsize,
1324 						 unsigned int ivsize,
1325 						 int icv_stashing,
1326 						 u32 cryptoflags,
1327 						 bool encrypt)
1328 {
1329 	struct talitos_edesc *edesc;
1330 	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1331 	dma_addr_t iv_dma = 0;
1332 	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1333 		      GFP_ATOMIC;
1334 	struct talitos_private *priv = dev_get_drvdata(dev);
1335 	bool is_sec1 = has_ftr_sec1(priv);
1336 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1337 
1338 	if (cryptlen + authsize > max_len) {
1339 		dev_err(dev, "length exceeds h/w max limit\n");
1340 		return ERR_PTR(-EINVAL);
1341 	}
1342 
1343 	if (!dst || dst == src) {
1344 		src_len = assoclen + cryptlen + authsize;
1345 		src_nents = sg_nents_for_len(src, src_len);
1346 		if (src_nents < 0) {
1347 			dev_err(dev, "Invalid number of src SG.\n");
1348 			return ERR_PTR(-EINVAL);
1349 		}
1350 		src_nents = (src_nents == 1) ? 0 : src_nents;
1351 		dst_nents = dst ? src_nents : 0;
1352 		dst_len = 0;
1353 	} else { /* dst && dst != src*/
1354 		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1355 		src_nents = sg_nents_for_len(src, src_len);
1356 		if (src_nents < 0) {
1357 			dev_err(dev, "Invalid number of src SG.\n");
1358 			return ERR_PTR(-EINVAL);
1359 		}
1360 		src_nents = (src_nents == 1) ? 0 : src_nents;
1361 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1362 		dst_nents = sg_nents_for_len(dst, dst_len);
1363 		if (dst_nents < 0) {
1364 			dev_err(dev, "Invalid number of dst SG.\n");
1365 			return ERR_PTR(-EINVAL);
1366 		}
1367 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1368 	}
1369 
1370 	/*
1371 	 * allocate space for base edesc plus the link tables,
1372 	 * allowing for two separate entries for AD and generated ICV (+ 2),
1373 	 * and space for two sets of ICVs (stashed and generated)
1374 	 */
1375 	alloc_len = sizeof(struct talitos_edesc);
1376 	if (src_nents || dst_nents || !encrypt) {
1377 		if (is_sec1)
1378 			dma_len = (src_nents ? src_len : 0) +
1379 				  (dst_nents ? dst_len : 0) + authsize;
1380 		else
1381 			dma_len = (src_nents + dst_nents + 2) *
1382 				  sizeof(struct talitos_ptr) + authsize;
1383 		alloc_len += dma_len;
1384 	} else {
1385 		dma_len = 0;
1386 	}
1387 	alloc_len += icv_stashing ? authsize : 0;
1388 
1389 	/* if it's an ahash, add space for a second desc next to the first one */
1390 	if (is_sec1 && !dst)
1391 		alloc_len += sizeof(struct talitos_desc);
1392 	alloc_len += ivsize;
1393 
1394 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1395 	if (!edesc)
1396 		return ERR_PTR(-ENOMEM);
1397 	if (ivsize) {
1398 		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1399 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1400 	}
1401 	memset(&edesc->desc, 0, sizeof(edesc->desc));
1402 
1403 	edesc->src_nents = src_nents;
1404 	edesc->dst_nents = dst_nents;
1405 	edesc->iv_dma = iv_dma;
1406 	edesc->dma_len = dma_len;
1407 	if (dma_len)
1408 		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1409 						     edesc->dma_len,
1410 						     DMA_BIDIRECTIONAL);
1411 
1412 	return edesc;
1413 }
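/*
 * Resulting allocation layout, front to back: the base edesc, dma_len
 * bytes of link tables (or the SEC1 bounce buffer) whose tail holds the
 * generated ICV, then the stashed ICV when icv_stashing (or the second
 * SEC1 ahash descriptor), and finally the ivsize bytes the IV was
 * copied into before mapping.
 */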
1414 
1415 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1416 					      int icv_stashing, bool encrypt)
1417 {
1418 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1419 	unsigned int authsize = crypto_aead_authsize(authenc);
1420 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1421 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1422 	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1423 
1424 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1425 				   iv, areq->assoclen, cryptlen,
1426 				   authsize, ivsize, icv_stashing,
1427 				   areq->base.flags, encrypt);
1428 }
1429 
1430 static int aead_encrypt(struct aead_request *req)
1431 {
1432 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1433 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1434 	struct talitos_edesc *edesc;
1435 
1436 	/* allocate extended descriptor */
1437 	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1438 	if (IS_ERR(edesc))
1439 		return PTR_ERR(edesc);
1440 
1441 	/* set encrypt */
1442 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1443 
1444 	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1445 }
1446 
1447 static int aead_decrypt(struct aead_request *req)
1448 {
1449 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1450 	unsigned int authsize = crypto_aead_authsize(authenc);
1451 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1452 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1453 	struct talitos_edesc *edesc;
1454 	void *icvdata;
1455 
1456 	/* allocate extended descriptor */
1457 	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1458 	if (IS_ERR(edesc))
1459 		return PTR_ERR(edesc);
1460 
1461 	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1462 	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1463 	    ((!edesc->src_nents && !edesc->dst_nents) ||
1464 	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1465 
1466 		/* decrypt and check the ICV */
1467 		edesc->desc.hdr = ctx->desc_hdr_template |
1468 				  DESC_HDR_DIR_INBOUND |
1469 				  DESC_HDR_MODE1_MDEU_CICV;
1470 
1471 		/* reset integrity check result bits */
1472 
1473 		return ipsec_esp(edesc, req, false,
1474 				 ipsec_esp_decrypt_hwauth_done);
1475 	}
1476 
1477 	/* Have to check the ICV with software */
1478 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1479 
1480 	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1481 	icvdata = edesc->buf + edesc->dma_len;
1482 
1483 	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1484 			   req->assoclen + req->cryptlen - authsize);
1485 
1486 	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1487 }
1488 
1489 static int skcipher_setkey(struct crypto_skcipher *cipher,
1490 			     const u8 *key, unsigned int keylen)
1491 {
1492 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1493 	struct device *dev = ctx->dev;
1494 
1495 	if (ctx->keylen)
1496 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1497 
1498 	memcpy(&ctx->key, key, keylen);
1499 	ctx->keylen = keylen;
1500 
1501 	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1502 
1503 	return 0;
1504 }
1505 
1506 static int skcipher_des_setkey(struct crypto_skcipher *cipher,
1507 				 const u8 *key, unsigned int keylen)
1508 {
1509 	return verify_skcipher_des_key(cipher, key) ?:
1510 	       skcipher_setkey(cipher, key, keylen);
1511 }
1512 
1513 static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
1514 				  const u8 *key, unsigned int keylen)
1515 {
1516 	return verify_skcipher_des3_key(cipher, key) ?:
1517 	       skcipher_setkey(cipher, key, keylen);
1518 }
1519 
1520 static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
1521 				  const u8 *key, unsigned int keylen)
1522 {
1523 	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1524 	    keylen == AES_KEYSIZE_256)
1525 		return skcipher_setkey(cipher, key, keylen);
1526 
1527 	return -EINVAL;
1528 }
1529 
1530 static void common_nonsnoop_unmap(struct device *dev,
1531 				  struct talitos_edesc *edesc,
1532 				  struct skcipher_request *areq)
1533 {
1534 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1535 
1536 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
1537 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1538 
1539 	if (edesc->dma_len)
1540 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1541 				 DMA_BIDIRECTIONAL);
1542 }
1543 
1544 static void skcipher_done(struct device *dev,
1545 			    struct talitos_desc *desc, void *context,
1546 			    int err)
1547 {
1548 	struct skcipher_request *areq = context;
1549 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1550 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1551 	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1552 	struct talitos_edesc *edesc;
1553 
1554 	edesc = container_of(desc, struct talitos_edesc, desc);
1555 
1556 	common_nonsnoop_unmap(dev, edesc, areq);
1557 	memcpy(areq->iv, ctx->iv, ivsize);
1558 
1559 	kfree(edesc);
1560 
1561 	areq->base.complete(&areq->base, err);
1562 }
1563 
1564 static int common_nonsnoop(struct talitos_edesc *edesc,
1565 			   struct skcipher_request *areq,
1566 			   void (*callback) (struct device *dev,
1567 					     struct talitos_desc *desc,
1568 					     void *context, int error))
1569 {
1570 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1571 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1572 	struct device *dev = ctx->dev;
1573 	struct talitos_desc *desc = &edesc->desc;
1574 	unsigned int cryptlen = areq->cryptlen;
1575 	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1576 	int sg_count, ret;
1577 	bool sync_needed = false;
1578 	struct talitos_private *priv = dev_get_drvdata(dev);
1579 	bool is_sec1 = has_ftr_sec1(priv);
1580 
1581 	/* first DWORD empty */
1582 
1583 	/* cipher iv */
1584 	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1585 
1586 	/* cipher key */
1587 	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1588 
1589 	sg_count = edesc->src_nents ?: 1;
1590 	if (is_sec1 && sg_count > 1)
1591 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1592 				  cryptlen);
1593 	else
1594 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1595 				      (areq->src == areq->dst) ?
1596 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1597 	/*
1598 	 * cipher in
1599 	 */
1600 	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1601 				  &desc->ptr[3], sg_count, 0, 0);
1602 	if (sg_count > 1)
1603 		sync_needed = true;
1604 
1605 	/* cipher out */
1606 	if (areq->src != areq->dst) {
1607 		sg_count = edesc->dst_nents ? : 1;
1608 		if (!is_sec1 || sg_count == 1)
1609 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1610 	}
1611 
1612 	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1613 			     sg_count, 0, (edesc->src_nents + 1));
1614 	if (ret > 1)
1615 		sync_needed = true;
1616 
1617 	/* iv out */
1618 	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1619 			       DMA_FROM_DEVICE);
1620 
1621 	/* last DWORD empty */
1622 
1623 	if (sync_needed)
1624 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1625 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1626 
1627 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1628 	if (ret != -EINPROGRESS) {
1629 		common_nonsnoop_unmap(dev, edesc, areq);
1630 		kfree(edesc);
1631 	}
1632 	return ret;
1633 }
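/*
 * Skcipher slot map: ptr[0] and ptr[6] stay empty, ptr[1] iv in,
 * ptr[2] key, ptr[3] cipher in, ptr[4] cipher out, ptr[5] iv out
 * (copied back to areq->iv in skcipher_done() for chaining).
 */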
1634 
1635 static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *areq,
1636 						   bool encrypt)
1637 {
1638 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1639 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1640 	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1641 
1642 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1643 				   areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
1644 				   areq->base.flags, encrypt);
1645 }
1646 
1647 static int skcipher_encrypt(struct skcipher_request *areq)
1648 {
1649 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1650 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1651 	struct talitos_edesc *edesc;
1652 	unsigned int blocksize =
1653 			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1654 
1655 	if (!areq->cryptlen)
1656 		return 0;
1657 
1658 	if (areq->cryptlen % blocksize)
1659 		return -EINVAL;
1660 
1661 	/* allocate extended descriptor */
1662 	edesc = skcipher_edesc_alloc(areq, true);
1663 	if (IS_ERR(edesc))
1664 		return PTR_ERR(edesc);
1665 
1666 	/* set encrypt */
1667 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1668 
1669 	return common_nonsnoop(edesc, areq, skcipher_done);
1670 }
1671 
1672 static int skcipher_decrypt(struct skcipher_request *areq)
1673 {
1674 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1675 	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1676 	struct talitos_edesc *edesc;
1677 	unsigned int blocksize =
1678 			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1679 
1680 	if (!areq->cryptlen)
1681 		return 0;
1682 
1683 	if (areq->cryptlen % blocksize)
1684 		return -EINVAL;
1685 
1686 	/* allocate extended descriptor */
1687 	edesc = skcipher_edesc_alloc(areq, false);
1688 	if (IS_ERR(edesc))
1689 		return PTR_ERR(edesc);
1690 
1691 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1692 
1693 	return common_nonsnoop(edesc, areq, skcipher_done);
1694 }
1695 
1696 static void common_nonsnoop_hash_unmap(struct device *dev,
1697 				       struct talitos_edesc *edesc,
1698 				       struct ahash_request *areq)
1699 {
1700 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1701 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1702 	struct talitos_private *priv = dev_get_drvdata(dev);
1703 	bool is_sec1 = has_ftr_sec1(priv);
1704 	struct talitos_desc *desc = &edesc->desc;
1705 	struct talitos_desc *desc2 = (struct talitos_desc *)
1706 				     (edesc->buf + edesc->dma_len);
1707 
1708 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1709 	if (desc->next_desc &&
1710 	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
1711 		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1712 	if (req_ctx->last)
1713 		memcpy(areq->result, req_ctx->hw_context,
1714 		       crypto_ahash_digestsize(tfm));
1715 
1716 	if (req_ctx->psrc)
1717 		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1718 
1719 	/* When using hashctx-in, must unmap it. */
1720 	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1721 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1722 					 DMA_TO_DEVICE);
1723 	else if (desc->next_desc)
1724 		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1725 					 DMA_TO_DEVICE);
1726 
1727 	if (is_sec1 && req_ctx->nbuf)
1728 		unmap_single_talitos_ptr(dev, &desc->ptr[3],
1729 					 DMA_TO_DEVICE);
1730 
1731 	if (edesc->dma_len)
1732 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1733 				 DMA_BIDIRECTIONAL);
1734 
1735 	if (edesc->desc.next_desc)
1736 		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1737 				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1738 }
1739 
1740 static void ahash_done(struct device *dev,
1741 		       struct talitos_desc *desc, void *context,
1742 		       int err)
1743 {
1744 	struct ahash_request *areq = context;
1745 	struct talitos_edesc *edesc =
1746 		 container_of(desc, struct talitos_edesc, desc);
1747 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1748 
1749 	if (!req_ctx->last && req_ctx->to_hash_later) {
1750 		/* Position any partial block for next update/final/finup */
1751 		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1752 		req_ctx->nbuf = req_ctx->to_hash_later;
1753 	}
1754 	common_nonsnoop_hash_unmap(dev, edesc, areq);
1755 
1756 	kfree(edesc);
1757 
1758 	areq->base.complete(&areq->base, err);
1759 }
1760 
1761 /*
1762  * SEC1 doesn't like hashing 0-sized messages, so we do the padding
1763  * ourselves and submit a padded block
1764  */
1765 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1766 			       struct talitos_edesc *edesc,
1767 			       struct talitos_ptr *ptr)
1768 {
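	/*
	 * Precomputed final padding block for an empty message: the 0x80
	 * end-of-message marker followed by zeroes, including a zero
	 * 64-bit length field.
	 */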
1769 	static u8 padded_hash[64] = {
1770 		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1771 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1772 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1773 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1774 	};
1775 
1776 	pr_err_once("Bug in SEC1, padding ourself\n");
1777 	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1778 	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1779 			       (char *)padded_hash, DMA_TO_DEVICE);
1780 }
1781 
1782 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1783 				struct ahash_request *areq, unsigned int length,
1784 				void (*callback) (struct device *dev,
1785 						  struct talitos_desc *desc,
1786 						  void *context, int error))
1787 {
1788 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1789 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1790 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1791 	struct device *dev = ctx->dev;
1792 	struct talitos_desc *desc = &edesc->desc;
1793 	int ret;
1794 	bool sync_needed = false;
1795 	struct talitos_private *priv = dev_get_drvdata(dev);
1796 	bool is_sec1 = has_ftr_sec1(priv);
1797 	int sg_count;
1798 
1799 	/* first DWORD empty */
1800 
1801 	/* hash context in */
1802 	if (!req_ctx->first || req_ctx->swinit) {
1803 		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1804 					      req_ctx->hw_context_size,
1805 					      req_ctx->hw_context,
1806 					      DMA_TO_DEVICE);
1807 		req_ctx->swinit = 0;
1808 	}
1809 	/* Indicate next op is not the first. */
1810 	req_ctx->first = 0;
1811 
1812 	/* HMAC key */
1813 	if (ctx->keylen)
1814 		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1815 			       is_sec1);
1816 
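	/*
	 * On SEC1, previously buffered bytes are fed to this descriptor
	 * through ptr[3] below; length then counts only the new data,
	 * which a chained second descriptor hashes.
	 */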
1817 	if (is_sec1 && req_ctx->nbuf)
1818 		length -= req_ctx->nbuf;
1819 
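	/* SEC1 cannot walk scatterlists: copy a multi-entry source into the edesc buffer */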
1820 	sg_count = edesc->src_nents ?: 1;
1821 	if (is_sec1 && sg_count > 1)
1822 		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1823 	else if (length)
1824 		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1825 				      DMA_TO_DEVICE);
1826 	/*
1827 	 * data in
1828 	 */
1829 	if (is_sec1 && req_ctx->nbuf) {
1830 		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1831 				       req_ctx->buf[req_ctx->buf_idx],
1832 				       DMA_TO_DEVICE);
1833 	} else {
1834 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1835 					  &desc->ptr[3], sg_count, 0, 0);
1836 		if (sg_count > 1)
1837 			sync_needed = true;
1838 	}
1839 
1840 	/* fifth DWORD empty */
1841 
1842 	/* hash/HMAC out -or- hash context out */
1843 	if (req_ctx->last)
1844 		map_single_talitos_ptr(dev, &desc->ptr[5],
1845 				       crypto_ahash_digestsize(tfm),
1846 				       req_ctx->hw_context, DMA_FROM_DEVICE);
1847 	else
1848 		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1849 					      req_ctx->hw_context_size,
1850 					      req_ctx->hw_context,
1851 					      DMA_FROM_DEVICE);
1852 
1853 	/* last DWORD empty */
1854 
1855 	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1856 		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1857 
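	/*
	 * SEC1 with both buffered and new data: chain a second descriptor.
	 * The first one hashes the buffered bytes without padding or a done
	 * interrupt; the chained desc2 hashes the new data and keeps the
	 * original pad/notify settings.
	 */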
1858 	if (is_sec1 && req_ctx->nbuf && length) {
1859 		struct talitos_desc *desc2 = (struct talitos_desc *)
1860 					     (edesc->buf + edesc->dma_len);
1861 		dma_addr_t next_desc;
1862 
1863 		memset(desc2, 0, sizeof(*desc2));
1864 		desc2->hdr = desc->hdr;
1865 		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1866 		desc2->hdr1 = desc2->hdr;
1867 		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1868 		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1869 		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1870 
1871 		if (desc->ptr[1].ptr)
1872 			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1873 					 is_sec1);
1874 		else
1875 			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1876 						      req_ctx->hw_context_size,
1877 						      req_ctx->hw_context,
1878 						      DMA_TO_DEVICE);
1879 		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1880 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1881 					  &desc2->ptr[3], sg_count, 0, 0);
1882 		if (sg_count > 1)
1883 			sync_needed = true;
1884 		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1885 		if (req_ctx->last)
1886 			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1887 						      req_ctx->hw_context_size,
1888 						      req_ctx->hw_context,
1889 						      DMA_FROM_DEVICE);
1890 
1891 		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1892 					   DMA_BIDIRECTIONAL);
1893 		desc->next_desc = cpu_to_be32(next_desc);
1894 	}
1895 
1896 	if (sync_needed)
1897 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1898 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1899 
1900 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1901 	if (ret != -EINPROGRESS) {
1902 		common_nonsnoop_hash_unmap(dev, edesc, areq);
1903 		kfree(edesc);
1904 	}
1905 	return ret;
1906 }
1907 
1908 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1909 					       unsigned int nbytes)
1910 {
1911 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1912 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1913 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1914 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1915 	bool is_sec1 = has_ftr_sec1(priv);
1916 
1917 	if (is_sec1)
1918 		nbytes -= req_ctx->nbuf;
1919 
1920 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1921 				   nbytes, 0, 0, 0, areq->base.flags, false);
1922 }
1923 
1924 static int ahash_init(struct ahash_request *areq)
1925 {
1926 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1927 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1928 	struct device *dev = ctx->dev;
1929 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1930 	unsigned int size;
1931 	dma_addr_t dma;
1932 
1933 	/* Initialize the context */
1934 	req_ctx->buf_idx = 0;
1935 	req_ctx->nbuf = 0;
1936 	req_ctx->first = 1; /* first indicates h/w must init its context */
1937 	req_ctx->swinit = 0; /* assume h/w init of context */
1938 	size =	(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1939 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1940 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1941 	req_ctx->hw_context_size = size;
1942 
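	/*
	 * The map/unmap round trip below flushes hw_context from the CPU
	 * cache; later descriptors map it with the CPU sync skipped.
	 */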
1943 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1944 			     DMA_TO_DEVICE);
1945 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1946 
1947 	return 0;
1948 }
1949 
1950 /*
1951  * on h/w without explicit sha224 support, we initialize h/w context
1952  * manually with sha224 constants, and tell it to run sha256.
1953  */
1954 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1955 {
1956 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1957 
1958 	req_ctx->hw_context[0] = SHA224_H0;
1959 	req_ctx->hw_context[1] = SHA224_H1;
1960 	req_ctx->hw_context[2] = SHA224_H2;
1961 	req_ctx->hw_context[3] = SHA224_H3;
1962 	req_ctx->hw_context[4] = SHA224_H4;
1963 	req_ctx->hw_context[5] = SHA224_H5;
1964 	req_ctx->hw_context[6] = SHA224_H6;
1965 	req_ctx->hw_context[7] = SHA224_H7;
1966 
1967 	/* init 64-bit count */
1968 	req_ctx->hw_context[8] = 0;
1969 	req_ctx->hw_context[9] = 0;
1970 
1971 	ahash_init(areq);
1972 	req_ctx->swinit = 1; /* prevent h/w initializing context with sha256 values */
1973 
1974 	return 0;
1975 }
1976 
1977 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1978 {
1979 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1980 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1981 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1982 	struct talitos_edesc *edesc;
1983 	unsigned int blocksize =
1984 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1985 	unsigned int nbytes_to_hash;
1986 	unsigned int to_hash_later;
1987 	unsigned int nsg;
1988 	int nents;
1989 	struct device *dev = ctx->dev;
1990 	struct talitos_private *priv = dev_get_drvdata(dev);
1991 	bool is_sec1 = has_ftr_sec1(priv);
1992 	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1993 
1994 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1995 		/* Buffer up to one whole block */
1996 		nents = sg_nents_for_len(areq->src, nbytes);
1997 		if (nents < 0) {
1998 			dev_err(ctx->dev, "Invalid number of src SG.\n");
1999 			return nents;
2000 		}
2001 		sg_copy_to_buffer(areq->src, nents,
2002 				  ctx_buf + req_ctx->nbuf, nbytes);
2003 		req_ctx->nbuf += nbytes;
2004 		return 0;
2005 	}
2006 
2007 	/* At least (blocksize + 1) bytes to hash, or this is the final request */
2008 	nbytes_to_hash = nbytes + req_ctx->nbuf;
2009 	to_hash_later = nbytes_to_hash & (blocksize - 1);
2010 
2011 	if (req_ctx->last)
2012 		to_hash_later = 0;
2013 	else if (to_hash_later)
2014 		/* There is a partial block. Hash the full block(s) now */
2015 		nbytes_to_hash -= to_hash_later;
2016 	else {
2017 		/* Keep one block buffered */
2018 		nbytes_to_hash -= blocksize;
2019 		to_hash_later = blocksize;
2020 	}
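	/*
	 * e.g. blocksize 64, nbuf 10, nbytes 100, not last: nbytes_to_hash
	 * is 110 and to_hash_later is 110 & 63 = 46, so 64 bytes are
	 * hashed now and 46 are buffered for the next request.
	 */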
2021 
2022 	/* Chain in any previously buffered data */
2023 	if (!is_sec1 && req_ctx->nbuf) {
2024 		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2025 		sg_init_table(req_ctx->bufsl, nsg);
2026 		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2027 		if (nsg > 1)
2028 			sg_chain(req_ctx->bufsl, 2, areq->src);
2029 		req_ctx->psrc = req_ctx->bufsl;
2030 	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2031 		int offset;
2032 
2033 		if (nbytes_to_hash > blocksize)
2034 			offset = blocksize - req_ctx->nbuf;
2035 		else
2036 			offset = nbytes_to_hash - req_ctx->nbuf;
2037 		nents = sg_nents_for_len(areq->src, offset);
2038 		if (nents < 0) {
2039 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2040 			return nents;
2041 		}
2042 		sg_copy_to_buffer(areq->src, nents,
2043 				  ctx_buf + req_ctx->nbuf, offset);
2044 		req_ctx->nbuf += offset;
2045 		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2046 						 offset);
2047 	} else
2048 		req_ctx->psrc = areq->src;
2049 
2050 	if (to_hash_later) {
2051 		nents = sg_nents_for_len(areq->src, nbytes);
2052 		if (nents < 0) {
2053 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2054 			return nents;
2055 		}
2056 		sg_pcopy_to_buffer(areq->src, nents,
2057 				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2058 				   to_hash_later,
2059 				   nbytes - to_hash_later);
2060 	}
2061 	req_ctx->to_hash_later = to_hash_later;
2062 
2063 	/* Allocate extended descriptor */
2064 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2065 	if (IS_ERR(edesc))
2066 		return PTR_ERR(edesc);
2067 
2068 	edesc->desc.hdr = ctx->desc_hdr_template;
2069 
2070 	/* On last one, request SEC to pad; otherwise continue */
2071 	if (req_ctx->last)
2072 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2073 	else
2074 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2075 
2076 	/* request SEC to INIT hash. */
2077 	if (req_ctx->first && !req_ctx->swinit)
2078 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2079 
2080 	/* When the tfm context has a keylen, it's an HMAC.
2081 	 * A first or last (i.e. not middle) descriptor must request HMAC.
2082 	 */
2083 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2084 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2085 
2086 	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2087 }
2088 
2089 static int ahash_update(struct ahash_request *areq)
2090 {
2091 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2092 
2093 	req_ctx->last = 0;
2094 
2095 	return ahash_process_req(areq, areq->nbytes);
2096 }
2097 
2098 static int ahash_final(struct ahash_request *areq)
2099 {
2100 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2101 
2102 	req_ctx->last = 1;
2103 
2104 	return ahash_process_req(areq, 0);
2105 }
2106 
2107 static int ahash_finup(struct ahash_request *areq)
2108 {
2109 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2110 
2111 	req_ctx->last = 1;
2112 
2113 	return ahash_process_req(areq, areq->nbytes);
2114 }
2115 
2116 static int ahash_digest(struct ahash_request *areq)
2117 {
2118 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2119 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2120 
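	/* go through the alg's ->init() so the sha224 swinit variant is used when set */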
2121 	ahash->init(areq);
2122 	req_ctx->last = 1;
2123 
2124 	return ahash_process_req(areq, areq->nbytes);
2125 }
2126 
2127 static int ahash_export(struct ahash_request *areq, void *out)
2128 {
2129 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2130 	struct talitos_export_state *export = out;
2131 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2132 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2133 	struct device *dev = ctx->dev;
2134 	dma_addr_t dma;
2135 
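	/*
	 * The map/unmap round trip below makes the device-written context
	 * visible to the CPU before it is copied out.
	 */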
2136 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2137 			     DMA_FROM_DEVICE);
2138 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2139 
2140 	memcpy(export->hw_context, req_ctx->hw_context,
2141 	       req_ctx->hw_context_size);
2142 	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2143 	export->swinit = req_ctx->swinit;
2144 	export->first = req_ctx->first;
2145 	export->last = req_ctx->last;
2146 	export->to_hash_later = req_ctx->to_hash_later;
2147 	export->nbuf = req_ctx->nbuf;
2148 
2149 	return 0;
2150 }
2151 
2152 static int ahash_import(struct ahash_request *areq, const void *in)
2153 {
2154 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2155 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2156 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2157 	struct device *dev = ctx->dev;
2158 	const struct talitos_export_state *export = in;
2159 	unsigned int size;
2160 	dma_addr_t dma;
2161 
2162 	memset(req_ctx, 0, sizeof(*req_ctx));
2163 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2164 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2165 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2166 	req_ctx->hw_context_size = size;
2167 	memcpy(req_ctx->hw_context, export->hw_context, size);
2168 	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2169 	req_ctx->swinit = export->swinit;
2170 	req_ctx->first = export->first;
2171 	req_ctx->last = export->last;
2172 	req_ctx->to_hash_later = export->to_hash_later;
2173 	req_ctx->nbuf = export->nbuf;
2174 
2175 	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2176 			     DMA_TO_DEVICE);
2177 	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2178 
2179 	return 0;
2180 }
2181 
2182 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2183 		   u8 *hash)
2184 {
2185 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2186 
2187 	struct scatterlist sg[1];
2188 	struct ahash_request *req;
2189 	struct crypto_wait wait;
2190 	int ret;
2191 
2192 	crypto_init_wait(&wait);
2193 
2194 	req = ahash_request_alloc(tfm, GFP_KERNEL);
2195 	if (!req)
2196 		return -ENOMEM;
2197 
2198 	/* Keep tfm keylen == 0 during hash of the long key */
2199 	ctx->keylen = 0;
2200 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2201 				   crypto_req_done, &wait);
2202 
2203 	sg_init_one(&sg[0], key, keylen);
2204 
2205 	ahash_request_set_crypt(req, sg, hash, keylen);
2206 	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2207 
2208 	ahash_request_free(req);
2209 
2210 	return ret;
2211 }
2212 
2213 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2214 			unsigned int keylen)
2215 {
2216 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2217 	struct device *dev = ctx->dev;
2218 	unsigned int blocksize =
2219 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2220 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2221 	unsigned int keysize = keylen;
2222 	u8 hash[SHA512_DIGEST_SIZE];
2223 	int ret;
2224 
2225 	if (keylen <= blocksize)
2226 		memcpy(ctx->key, key, keysize);
2227 	else {
2228 		/* Must get the hash of the long key */
2229 		ret = keyhash(tfm, key, keylen, hash);
2230 
2231 		if (ret)
2232 			return -EINVAL;
2233 
2234 		keysize = digestsize;
2235 		memcpy(ctx->key, hash, digestsize);
2236 	}
2237 
2238 	if (ctx->keylen)
2239 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2240 
2241 	ctx->keylen = keysize;
2242 	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2243 
2244 	return 0;
2245 }
2246 
2247 
2248 struct talitos_alg_template {
2249 	u32 type;
2250 	u32 priority;
2251 	union {
2252 		struct skcipher_alg skcipher;
2253 		struct ahash_alg hash;
2254 		struct aead_alg aead;
2255 	} alg;
2256 	__be32 desc_hdr_template;
2257 };
2258 
2259 static struct talitos_alg_template driver_algs[] = {
2260 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2261 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2262 		.alg.aead = {
2263 			.base = {
2264 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2265 				.cra_driver_name = "authenc-hmac-sha1-"
2266 						   "cbc-aes-talitos",
2267 				.cra_blocksize = AES_BLOCK_SIZE,
2268 				.cra_flags = CRYPTO_ALG_ASYNC |
2269 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2270 			},
2271 			.ivsize = AES_BLOCK_SIZE,
2272 			.maxauthsize = SHA1_DIGEST_SIZE,
2273 		},
2274 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2275 			             DESC_HDR_SEL0_AESU |
2276 		                     DESC_HDR_MODE0_AESU_CBC |
2277 		                     DESC_HDR_SEL1_MDEUA |
2278 		                     DESC_HDR_MODE1_MDEU_INIT |
2279 		                     DESC_HDR_MODE1_MDEU_PAD |
2280 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2281 	},
2282 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2283 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2284 		.alg.aead = {
2285 			.base = {
2286 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2287 				.cra_driver_name = "authenc-hmac-sha1-"
2288 						   "cbc-aes-talitos-hsna",
2289 				.cra_blocksize = AES_BLOCK_SIZE,
2290 				.cra_flags = CRYPTO_ALG_ASYNC |
2291 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2292 			},
2293 			.ivsize = AES_BLOCK_SIZE,
2294 			.maxauthsize = SHA1_DIGEST_SIZE,
2295 		},
2296 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2297 				     DESC_HDR_SEL0_AESU |
2298 				     DESC_HDR_MODE0_AESU_CBC |
2299 				     DESC_HDR_SEL1_MDEUA |
2300 				     DESC_HDR_MODE1_MDEU_INIT |
2301 				     DESC_HDR_MODE1_MDEU_PAD |
2302 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2303 	},
2304 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2305 		.alg.aead = {
2306 			.base = {
2307 				.cra_name = "authenc(hmac(sha1),"
2308 					    "cbc(des3_ede))",
2309 				.cra_driver_name = "authenc-hmac-sha1-"
2310 						   "cbc-3des-talitos",
2311 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2312 				.cra_flags = CRYPTO_ALG_ASYNC |
2313 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2314 			},
2315 			.ivsize = DES3_EDE_BLOCK_SIZE,
2316 			.maxauthsize = SHA1_DIGEST_SIZE,
2317 			.setkey = aead_des3_setkey,
2318 		},
2319 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2320 			             DESC_HDR_SEL0_DEU |
2321 		                     DESC_HDR_MODE0_DEU_CBC |
2322 		                     DESC_HDR_MODE0_DEU_3DES |
2323 		                     DESC_HDR_SEL1_MDEUA |
2324 		                     DESC_HDR_MODE1_MDEU_INIT |
2325 		                     DESC_HDR_MODE1_MDEU_PAD |
2326 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2327 	},
2328 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2329 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2330 		.alg.aead = {
2331 			.base = {
2332 				.cra_name = "authenc(hmac(sha1),"
2333 					    "cbc(des3_ede))",
2334 				.cra_driver_name = "authenc-hmac-sha1-"
2335 						   "cbc-3des-talitos-hsna",
2336 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2337 				.cra_flags = CRYPTO_ALG_ASYNC |
2338 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2339 			},
2340 			.ivsize = DES3_EDE_BLOCK_SIZE,
2341 			.maxauthsize = SHA1_DIGEST_SIZE,
2342 			.setkey = aead_des3_setkey,
2343 		},
2344 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2345 				     DESC_HDR_SEL0_DEU |
2346 				     DESC_HDR_MODE0_DEU_CBC |
2347 				     DESC_HDR_MODE0_DEU_3DES |
2348 				     DESC_HDR_SEL1_MDEUA |
2349 				     DESC_HDR_MODE1_MDEU_INIT |
2350 				     DESC_HDR_MODE1_MDEU_PAD |
2351 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2352 	},
2353 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2354 		.alg.aead = {
2355 			.base = {
2356 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2357 				.cra_driver_name = "authenc-hmac-sha224-"
2358 						   "cbc-aes-talitos",
2359 				.cra_blocksize = AES_BLOCK_SIZE,
2360 				.cra_flags = CRYPTO_ALG_ASYNC |
2361 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2362 			},
2363 			.ivsize = AES_BLOCK_SIZE,
2364 			.maxauthsize = SHA224_DIGEST_SIZE,
2365 		},
2366 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2367 				     DESC_HDR_SEL0_AESU |
2368 				     DESC_HDR_MODE0_AESU_CBC |
2369 				     DESC_HDR_SEL1_MDEUA |
2370 				     DESC_HDR_MODE1_MDEU_INIT |
2371 				     DESC_HDR_MODE1_MDEU_PAD |
2372 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2373 	},
2374 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2375 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2376 		.alg.aead = {
2377 			.base = {
2378 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2379 				.cra_driver_name = "authenc-hmac-sha224-"
2380 						   "cbc-aes-talitos-hsna",
2381 				.cra_blocksize = AES_BLOCK_SIZE,
2382 				.cra_flags = CRYPTO_ALG_ASYNC |
2383 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2384 			},
2385 			.ivsize = AES_BLOCK_SIZE,
2386 			.maxauthsize = SHA224_DIGEST_SIZE,
2387 		},
2388 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2389 				     DESC_HDR_SEL0_AESU |
2390 				     DESC_HDR_MODE0_AESU_CBC |
2391 				     DESC_HDR_SEL1_MDEUA |
2392 				     DESC_HDR_MODE1_MDEU_INIT |
2393 				     DESC_HDR_MODE1_MDEU_PAD |
2394 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2395 	},
2396 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2397 		.alg.aead = {
2398 			.base = {
2399 				.cra_name = "authenc(hmac(sha224),"
2400 					    "cbc(des3_ede))",
2401 				.cra_driver_name = "authenc-hmac-sha224-"
2402 						   "cbc-3des-talitos",
2403 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2404 				.cra_flags = CRYPTO_ALG_ASYNC |
2405 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2406 			},
2407 			.ivsize = DES3_EDE_BLOCK_SIZE,
2408 			.maxauthsize = SHA224_DIGEST_SIZE,
2409 			.setkey = aead_des3_setkey,
2410 		},
2411 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2412 			             DESC_HDR_SEL0_DEU |
2413 		                     DESC_HDR_MODE0_DEU_CBC |
2414 		                     DESC_HDR_MODE0_DEU_3DES |
2415 		                     DESC_HDR_SEL1_MDEUA |
2416 		                     DESC_HDR_MODE1_MDEU_INIT |
2417 		                     DESC_HDR_MODE1_MDEU_PAD |
2418 		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2419 	},
2420 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2421 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2422 		.alg.aead = {
2423 			.base = {
2424 				.cra_name = "authenc(hmac(sha224),"
2425 					    "cbc(des3_ede))",
2426 				.cra_driver_name = "authenc-hmac-sha224-"
2427 						   "cbc-3des-talitos-hsna",
2428 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2429 				.cra_flags = CRYPTO_ALG_ASYNC |
2430 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2431 			},
2432 			.ivsize = DES3_EDE_BLOCK_SIZE,
2433 			.maxauthsize = SHA224_DIGEST_SIZE,
2434 			.setkey = aead_des3_setkey,
2435 		},
2436 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2437 				     DESC_HDR_SEL0_DEU |
2438 				     DESC_HDR_MODE0_DEU_CBC |
2439 				     DESC_HDR_MODE0_DEU_3DES |
2440 				     DESC_HDR_SEL1_MDEUA |
2441 				     DESC_HDR_MODE1_MDEU_INIT |
2442 				     DESC_HDR_MODE1_MDEU_PAD |
2443 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2444 	},
2445 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2446 		.alg.aead = {
2447 			.base = {
2448 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2449 				.cra_driver_name = "authenc-hmac-sha256-"
2450 						   "cbc-aes-talitos",
2451 				.cra_blocksize = AES_BLOCK_SIZE,
2452 				.cra_flags = CRYPTO_ALG_ASYNC |
2453 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2454 			},
2455 			.ivsize = AES_BLOCK_SIZE,
2456 			.maxauthsize = SHA256_DIGEST_SIZE,
2457 		},
2458 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2459 			             DESC_HDR_SEL0_AESU |
2460 		                     DESC_HDR_MODE0_AESU_CBC |
2461 		                     DESC_HDR_SEL1_MDEUA |
2462 		                     DESC_HDR_MODE1_MDEU_INIT |
2463 		                     DESC_HDR_MODE1_MDEU_PAD |
2464 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2465 	},
2466 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2467 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2468 		.alg.aead = {
2469 			.base = {
2470 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2471 				.cra_driver_name = "authenc-hmac-sha256-"
2472 						   "cbc-aes-talitos-hsna",
2473 				.cra_blocksize = AES_BLOCK_SIZE,
2474 				.cra_flags = CRYPTO_ALG_ASYNC |
2475 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2476 			},
2477 			.ivsize = AES_BLOCK_SIZE,
2478 			.maxauthsize = SHA256_DIGEST_SIZE,
2479 		},
2480 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2481 				     DESC_HDR_SEL0_AESU |
2482 				     DESC_HDR_MODE0_AESU_CBC |
2483 				     DESC_HDR_SEL1_MDEUA |
2484 				     DESC_HDR_MODE1_MDEU_INIT |
2485 				     DESC_HDR_MODE1_MDEU_PAD |
2486 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2487 	},
2488 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2489 		.alg.aead = {
2490 			.base = {
2491 				.cra_name = "authenc(hmac(sha256),"
2492 					    "cbc(des3_ede))",
2493 				.cra_driver_name = "authenc-hmac-sha256-"
2494 						   "cbc-3des-talitos",
2495 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2496 				.cra_flags = CRYPTO_ALG_ASYNC |
2497 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2498 			},
2499 			.ivsize = DES3_EDE_BLOCK_SIZE,
2500 			.maxauthsize = SHA256_DIGEST_SIZE,
2501 			.setkey = aead_des3_setkey,
2502 		},
2503 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2504 			             DESC_HDR_SEL0_DEU |
2505 		                     DESC_HDR_MODE0_DEU_CBC |
2506 		                     DESC_HDR_MODE0_DEU_3DES |
2507 		                     DESC_HDR_SEL1_MDEUA |
2508 		                     DESC_HDR_MODE1_MDEU_INIT |
2509 		                     DESC_HDR_MODE1_MDEU_PAD |
2510 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2511 	},
2512 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2513 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2514 		.alg.aead = {
2515 			.base = {
2516 				.cra_name = "authenc(hmac(sha256),"
2517 					    "cbc(des3_ede))",
2518 				.cra_driver_name = "authenc-hmac-sha256-"
2519 						   "cbc-3des-talitos-hsna",
2520 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2521 				.cra_flags = CRYPTO_ALG_ASYNC |
2522 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2523 			},
2524 			.ivsize = DES3_EDE_BLOCK_SIZE,
2525 			.maxauthsize = SHA256_DIGEST_SIZE,
2526 			.setkey = aead_des3_setkey,
2527 		},
2528 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2529 				     DESC_HDR_SEL0_DEU |
2530 				     DESC_HDR_MODE0_DEU_CBC |
2531 				     DESC_HDR_MODE0_DEU_3DES |
2532 				     DESC_HDR_SEL1_MDEUA |
2533 				     DESC_HDR_MODE1_MDEU_INIT |
2534 				     DESC_HDR_MODE1_MDEU_PAD |
2535 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2536 	},
2537 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2538 		.alg.aead = {
2539 			.base = {
2540 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2541 				.cra_driver_name = "authenc-hmac-sha384-"
2542 						   "cbc-aes-talitos",
2543 				.cra_blocksize = AES_BLOCK_SIZE,
2544 				.cra_flags = CRYPTO_ALG_ASYNC |
2545 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2546 			},
2547 			.ivsize = AES_BLOCK_SIZE,
2548 			.maxauthsize = SHA384_DIGEST_SIZE,
2549 		},
2550 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2551 			             DESC_HDR_SEL0_AESU |
2552 		                     DESC_HDR_MODE0_AESU_CBC |
2553 		                     DESC_HDR_SEL1_MDEUB |
2554 		                     DESC_HDR_MODE1_MDEU_INIT |
2555 		                     DESC_HDR_MODE1_MDEU_PAD |
2556 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2557 	},
2558 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2559 		.alg.aead = {
2560 			.base = {
2561 				.cra_name = "authenc(hmac(sha384),"
2562 					    "cbc(des3_ede))",
2563 				.cra_driver_name = "authenc-hmac-sha384-"
2564 						   "cbc-3des-talitos",
2565 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2566 				.cra_flags = CRYPTO_ALG_ASYNC |
2567 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2568 			},
2569 			.ivsize = DES3_EDE_BLOCK_SIZE,
2570 			.maxauthsize = SHA384_DIGEST_SIZE,
2571 			.setkey = aead_des3_setkey,
2572 		},
2573 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2574 			             DESC_HDR_SEL0_DEU |
2575 		                     DESC_HDR_MODE0_DEU_CBC |
2576 		                     DESC_HDR_MODE0_DEU_3DES |
2577 		                     DESC_HDR_SEL1_MDEUB |
2578 		                     DESC_HDR_MODE1_MDEU_INIT |
2579 		                     DESC_HDR_MODE1_MDEU_PAD |
2580 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2581 	},
2582 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2583 		.alg.aead = {
2584 			.base = {
2585 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2586 				.cra_driver_name = "authenc-hmac-sha512-"
2587 						   "cbc-aes-talitos",
2588 				.cra_blocksize = AES_BLOCK_SIZE,
2589 				.cra_flags = CRYPTO_ALG_ASYNC |
2590 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2591 			},
2592 			.ivsize = AES_BLOCK_SIZE,
2593 			.maxauthsize = SHA512_DIGEST_SIZE,
2594 		},
2595 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2596 			             DESC_HDR_SEL0_AESU |
2597 		                     DESC_HDR_MODE0_AESU_CBC |
2598 		                     DESC_HDR_SEL1_MDEUB |
2599 		                     DESC_HDR_MODE1_MDEU_INIT |
2600 		                     DESC_HDR_MODE1_MDEU_PAD |
2601 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2602 	},
2603 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2604 		.alg.aead = {
2605 			.base = {
2606 				.cra_name = "authenc(hmac(sha512),"
2607 					    "cbc(des3_ede))",
2608 				.cra_driver_name = "authenc-hmac-sha512-"
2609 						   "cbc-3des-talitos",
2610 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2611 				.cra_flags = CRYPTO_ALG_ASYNC |
2612 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2613 			},
2614 			.ivsize = DES3_EDE_BLOCK_SIZE,
2615 			.maxauthsize = SHA512_DIGEST_SIZE,
2616 			.setkey = aead_des3_setkey,
2617 		},
2618 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2619 			             DESC_HDR_SEL0_DEU |
2620 		                     DESC_HDR_MODE0_DEU_CBC |
2621 		                     DESC_HDR_MODE0_DEU_3DES |
2622 		                     DESC_HDR_SEL1_MDEUB |
2623 		                     DESC_HDR_MODE1_MDEU_INIT |
2624 		                     DESC_HDR_MODE1_MDEU_PAD |
2625 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2626 	},
2627 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2628 		.alg.aead = {
2629 			.base = {
2630 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2631 				.cra_driver_name = "authenc-hmac-md5-"
2632 						   "cbc-aes-talitos",
2633 				.cra_blocksize = AES_BLOCK_SIZE,
2634 				.cra_flags = CRYPTO_ALG_ASYNC |
2635 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2636 			},
2637 			.ivsize = AES_BLOCK_SIZE,
2638 			.maxauthsize = MD5_DIGEST_SIZE,
2639 		},
2640 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2641 			             DESC_HDR_SEL0_AESU |
2642 		                     DESC_HDR_MODE0_AESU_CBC |
2643 		                     DESC_HDR_SEL1_MDEUA |
2644 		                     DESC_HDR_MODE1_MDEU_INIT |
2645 		                     DESC_HDR_MODE1_MDEU_PAD |
2646 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2647 	},
2648 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2649 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2650 		.alg.aead = {
2651 			.base = {
2652 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2653 				.cra_driver_name = "authenc-hmac-md5-"
2654 						   "cbc-aes-talitos-hsna",
2655 				.cra_blocksize = AES_BLOCK_SIZE,
2656 				.cra_flags = CRYPTO_ALG_ASYNC |
2657 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2658 			},
2659 			.ivsize = AES_BLOCK_SIZE,
2660 			.maxauthsize = MD5_DIGEST_SIZE,
2661 		},
2662 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2663 				     DESC_HDR_SEL0_AESU |
2664 				     DESC_HDR_MODE0_AESU_CBC |
2665 				     DESC_HDR_SEL1_MDEUA |
2666 				     DESC_HDR_MODE1_MDEU_INIT |
2667 				     DESC_HDR_MODE1_MDEU_PAD |
2668 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2669 	},
2670 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2671 		.alg.aead = {
2672 			.base = {
2673 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2674 				.cra_driver_name = "authenc-hmac-md5-"
2675 						   "cbc-3des-talitos",
2676 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2677 				.cra_flags = CRYPTO_ALG_ASYNC |
2678 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2679 			},
2680 			.ivsize = DES3_EDE_BLOCK_SIZE,
2681 			.maxauthsize = MD5_DIGEST_SIZE,
2682 			.setkey = aead_des3_setkey,
2683 		},
2684 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2685 			             DESC_HDR_SEL0_DEU |
2686 		                     DESC_HDR_MODE0_DEU_CBC |
2687 		                     DESC_HDR_MODE0_DEU_3DES |
2688 		                     DESC_HDR_SEL1_MDEUA |
2689 		                     DESC_HDR_MODE1_MDEU_INIT |
2690 		                     DESC_HDR_MODE1_MDEU_PAD |
2691 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2692 	},
2693 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2694 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2695 		.alg.aead = {
2696 			.base = {
2697 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2698 				.cra_driver_name = "authenc-hmac-md5-"
2699 						   "cbc-3des-talitos-hsna",
2700 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2701 				.cra_flags = CRYPTO_ALG_ASYNC |
2702 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2703 			},
2704 			.ivsize = DES3_EDE_BLOCK_SIZE,
2705 			.maxauthsize = MD5_DIGEST_SIZE,
2706 			.setkey = aead_des3_setkey,
2707 		},
2708 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2709 				     DESC_HDR_SEL0_DEU |
2710 				     DESC_HDR_MODE0_DEU_CBC |
2711 				     DESC_HDR_MODE0_DEU_3DES |
2712 				     DESC_HDR_SEL1_MDEUA |
2713 				     DESC_HDR_MODE1_MDEU_INIT |
2714 				     DESC_HDR_MODE1_MDEU_PAD |
2715 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2716 	},
2717 	/* SKCIPHER algorithms. */
2718 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2719 		.alg.skcipher = {
2720 			.base.cra_name = "ecb(aes)",
2721 			.base.cra_driver_name = "ecb-aes-talitos",
2722 			.base.cra_blocksize = AES_BLOCK_SIZE,
2723 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2724 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2725 			.min_keysize = AES_MIN_KEY_SIZE,
2726 			.max_keysize = AES_MAX_KEY_SIZE,
2727 			.setkey = skcipher_aes_setkey,
2728 		},
2729 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2730 				     DESC_HDR_SEL0_AESU,
2731 	},
2732 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2733 		.alg.skcipher = {
2734 			.base.cra_name = "cbc(aes)",
2735 			.base.cra_driver_name = "cbc-aes-talitos",
2736 			.base.cra_blocksize = AES_BLOCK_SIZE,
2737 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2738 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2739 			.min_keysize = AES_MIN_KEY_SIZE,
2740 			.max_keysize = AES_MAX_KEY_SIZE,
2741 			.ivsize = AES_BLOCK_SIZE,
2742 			.setkey = skcipher_aes_setkey,
2743 		},
2744 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2745 				     DESC_HDR_SEL0_AESU |
2746 				     DESC_HDR_MODE0_AESU_CBC,
2747 	},
2748 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2749 		.alg.skcipher = {
2750 			.base.cra_name = "ctr(aes)",
2751 			.base.cra_driver_name = "ctr-aes-talitos",
2752 			.base.cra_blocksize = 1,
2753 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2754 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2755 			.min_keysize = AES_MIN_KEY_SIZE,
2756 			.max_keysize = AES_MAX_KEY_SIZE,
2757 			.ivsize = AES_BLOCK_SIZE,
2758 			.setkey = skcipher_aes_setkey,
2759 		},
2760 		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2761 				     DESC_HDR_SEL0_AESU |
2762 				     DESC_HDR_MODE0_AESU_CTR,
2763 	},
2764 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2765 		.alg.skcipher = {
2766 			.base.cra_name = "ecb(des)",
2767 			.base.cra_driver_name = "ecb-des-talitos",
2768 			.base.cra_blocksize = DES_BLOCK_SIZE,
2769 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2770 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2771 			.min_keysize = DES_KEY_SIZE,
2772 			.max_keysize = DES_KEY_SIZE,
2773 			.setkey = skcipher_des_setkey,
2774 		},
2775 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2776 				     DESC_HDR_SEL0_DEU,
2777 	},
2778 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2779 		.alg.skcipher = {
2780 			.base.cra_name = "cbc(des)",
2781 			.base.cra_driver_name = "cbc-des-talitos",
2782 			.base.cra_blocksize = DES_BLOCK_SIZE,
2783 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2784 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2785 			.min_keysize = DES_KEY_SIZE,
2786 			.max_keysize = DES_KEY_SIZE,
2787 			.ivsize = DES_BLOCK_SIZE,
2788 			.setkey = skcipher_des_setkey,
2789 		},
2790 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2791 				     DESC_HDR_SEL0_DEU |
2792 				     DESC_HDR_MODE0_DEU_CBC,
2793 	},
2794 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2795 		.alg.skcipher = {
2796 			.base.cra_name = "ecb(des3_ede)",
2797 			.base.cra_driver_name = "ecb-3des-talitos",
2798 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2799 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2800 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2801 			.min_keysize = DES3_EDE_KEY_SIZE,
2802 			.max_keysize = DES3_EDE_KEY_SIZE,
2803 			.setkey = skcipher_des3_setkey,
2804 		},
2805 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2806 				     DESC_HDR_SEL0_DEU |
2807 				     DESC_HDR_MODE0_DEU_3DES,
2808 	},
2809 	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2810 		.alg.skcipher = {
2811 			.base.cra_name = "cbc(des3_ede)",
2812 			.base.cra_driver_name = "cbc-3des-talitos",
2813 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2814 			.base.cra_flags = CRYPTO_ALG_ASYNC |
2815 					  CRYPTO_ALG_ALLOCATES_MEMORY,
2816 			.min_keysize = DES3_EDE_KEY_SIZE,
2817 			.max_keysize = DES3_EDE_KEY_SIZE,
2818 			.ivsize = DES3_EDE_BLOCK_SIZE,
2819 			.setkey = skcipher_des3_setkey,
2820 		},
2821 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2822 			             DESC_HDR_SEL0_DEU |
2823 		                     DESC_HDR_MODE0_DEU_CBC |
2824 		                     DESC_HDR_MODE0_DEU_3DES,
2825 	},
2826 	/* AHASH algorithms. */
2827 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2828 		.alg.hash = {
2829 			.halg.digestsize = MD5_DIGEST_SIZE,
2830 			.halg.statesize = sizeof(struct talitos_export_state),
2831 			.halg.base = {
2832 				.cra_name = "md5",
2833 				.cra_driver_name = "md5-talitos",
2834 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2835 				.cra_flags = CRYPTO_ALG_ASYNC |
2836 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2837 			}
2838 		},
2839 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2840 				     DESC_HDR_SEL0_MDEUA |
2841 				     DESC_HDR_MODE0_MDEU_MD5,
2842 	},
2843 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2844 		.alg.hash = {
2845 			.halg.digestsize = SHA1_DIGEST_SIZE,
2846 			.halg.statesize = sizeof(struct talitos_export_state),
2847 			.halg.base = {
2848 				.cra_name = "sha1",
2849 				.cra_driver_name = "sha1-talitos",
2850 				.cra_blocksize = SHA1_BLOCK_SIZE,
2851 				.cra_flags = CRYPTO_ALG_ASYNC |
2852 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2853 			}
2854 		},
2855 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2856 				     DESC_HDR_SEL0_MDEUA |
2857 				     DESC_HDR_MODE0_MDEU_SHA1,
2858 	},
2859 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2860 		.alg.hash = {
2861 			.halg.digestsize = SHA224_DIGEST_SIZE,
2862 			.halg.statesize = sizeof(struct talitos_export_state),
2863 			.halg.base = {
2864 				.cra_name = "sha224",
2865 				.cra_driver_name = "sha224-talitos",
2866 				.cra_blocksize = SHA224_BLOCK_SIZE,
2867 				.cra_flags = CRYPTO_ALG_ASYNC |
2868 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2869 			}
2870 		},
2871 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2872 				     DESC_HDR_SEL0_MDEUA |
2873 				     DESC_HDR_MODE0_MDEU_SHA224,
2874 	},
2875 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2876 		.alg.hash = {
2877 			.halg.digestsize = SHA256_DIGEST_SIZE,
2878 			.halg.statesize = sizeof(struct talitos_export_state),
2879 			.halg.base = {
2880 				.cra_name = "sha256",
2881 				.cra_driver_name = "sha256-talitos",
2882 				.cra_blocksize = SHA256_BLOCK_SIZE,
2883 				.cra_flags = CRYPTO_ALG_ASYNC |
2884 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2885 			}
2886 		},
2887 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2888 				     DESC_HDR_SEL0_MDEUA |
2889 				     DESC_HDR_MODE0_MDEU_SHA256,
2890 	},
2891 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2892 		.alg.hash = {
2893 			.halg.digestsize = SHA384_DIGEST_SIZE,
2894 			.halg.statesize = sizeof(struct talitos_export_state),
2895 			.halg.base = {
2896 				.cra_name = "sha384",
2897 				.cra_driver_name = "sha384-talitos",
2898 				.cra_blocksize = SHA384_BLOCK_SIZE,
2899 				.cra_flags = CRYPTO_ALG_ASYNC |
2900 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2901 			}
2902 		},
2903 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2904 				     DESC_HDR_SEL0_MDEUB |
2905 				     DESC_HDR_MODE0_MDEUB_SHA384,
2906 	},
2907 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2908 		.alg.hash = {
2909 			.halg.digestsize = SHA512_DIGEST_SIZE,
2910 			.halg.statesize = sizeof(struct talitos_export_state),
2911 			.halg.base = {
2912 				.cra_name = "sha512",
2913 				.cra_driver_name = "sha512-talitos",
2914 				.cra_blocksize = SHA512_BLOCK_SIZE,
2915 				.cra_flags = CRYPTO_ALG_ASYNC |
2916 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2917 			}
2918 		},
2919 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2920 				     DESC_HDR_SEL0_MDEUB |
2921 				     DESC_HDR_MODE0_MDEUB_SHA512,
2922 	},
2923 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2924 		.alg.hash = {
2925 			.halg.digestsize = MD5_DIGEST_SIZE,
2926 			.halg.statesize = sizeof(struct talitos_export_state),
2927 			.halg.base = {
2928 				.cra_name = "hmac(md5)",
2929 				.cra_driver_name = "hmac-md5-talitos",
2930 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2931 				.cra_flags = CRYPTO_ALG_ASYNC |
2932 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2933 			}
2934 		},
2935 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2936 				     DESC_HDR_SEL0_MDEUA |
2937 				     DESC_HDR_MODE0_MDEU_MD5,
2938 	},
2939 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2940 		.alg.hash = {
2941 			.halg.digestsize = SHA1_DIGEST_SIZE,
2942 			.halg.statesize = sizeof(struct talitos_export_state),
2943 			.halg.base = {
2944 				.cra_name = "hmac(sha1)",
2945 				.cra_driver_name = "hmac-sha1-talitos",
2946 				.cra_blocksize = SHA1_BLOCK_SIZE,
2947 				.cra_flags = CRYPTO_ALG_ASYNC |
2948 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2949 			}
2950 		},
2951 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2952 				     DESC_HDR_SEL0_MDEUA |
2953 				     DESC_HDR_MODE0_MDEU_SHA1,
2954 	},
2955 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2956 		.alg.hash = {
2957 			.halg.digestsize = SHA224_DIGEST_SIZE,
2958 			.halg.statesize = sizeof(struct talitos_export_state),
2959 			.halg.base = {
2960 				.cra_name = "hmac(sha224)",
2961 				.cra_driver_name = "hmac-sha224-talitos",
2962 				.cra_blocksize = SHA224_BLOCK_SIZE,
2963 				.cra_flags = CRYPTO_ALG_ASYNC |
2964 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2965 			}
2966 		},
2967 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2968 				     DESC_HDR_SEL0_MDEUA |
2969 				     DESC_HDR_MODE0_MDEU_SHA224,
2970 	},
2971 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2972 		.alg.hash = {
2973 			.halg.digestsize = SHA256_DIGEST_SIZE,
2974 			.halg.statesize = sizeof(struct talitos_export_state),
2975 			.halg.base = {
2976 				.cra_name = "hmac(sha256)",
2977 				.cra_driver_name = "hmac-sha256-talitos",
2978 				.cra_blocksize = SHA256_BLOCK_SIZE,
2979 				.cra_flags = CRYPTO_ALG_ASYNC |
2980 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2981 			}
2982 		},
2983 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2984 				     DESC_HDR_SEL0_MDEUA |
2985 				     DESC_HDR_MODE0_MDEU_SHA256,
2986 	},
2987 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2988 		.alg.hash = {
2989 			.halg.digestsize = SHA384_DIGEST_SIZE,
2990 			.halg.statesize = sizeof(struct talitos_export_state),
2991 			.halg.base = {
2992 				.cra_name = "hmac(sha384)",
2993 				.cra_driver_name = "hmac-sha384-talitos",
2994 				.cra_blocksize = SHA384_BLOCK_SIZE,
2995 				.cra_flags = CRYPTO_ALG_ASYNC |
2996 					     CRYPTO_ALG_ALLOCATES_MEMORY,
2997 			}
2998 		},
2999 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3000 				     DESC_HDR_SEL0_MDEUB |
3001 				     DESC_HDR_MODE0_MDEUB_SHA384,
3002 	},
3003 	{	.type = CRYPTO_ALG_TYPE_AHASH,
3004 		.alg.hash = {
3005 			.halg.digestsize = SHA512_DIGEST_SIZE,
3006 			.halg.statesize = sizeof(struct talitos_export_state),
3007 			.halg.base = {
3008 				.cra_name = "hmac(sha512)",
3009 				.cra_driver_name = "hmac-sha512-talitos",
3010 				.cra_blocksize = SHA512_BLOCK_SIZE,
3011 				.cra_flags = CRYPTO_ALG_ASYNC |
3012 					     CRYPTO_ALG_ALLOCATES_MEMORY,
3013 			}
3014 		},
3015 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3016 				     DESC_HDR_SEL0_MDEUB |
3017 				     DESC_HDR_MODE0_MDEUB_SHA512,
3018 	}
3019 };
3020 
3021 struct talitos_crypto_alg {
3022 	struct list_head entry;
3023 	struct device *dev;
3024 	struct talitos_alg_template algt;
3025 };
3026 
3027 static int talitos_init_common(struct talitos_ctx *ctx,
3028 			       struct talitos_crypto_alg *talitos_alg)
3029 {
3030 	struct talitos_private *priv;
3031 
3032 	/* update context with ptr to dev */
3033 	ctx->dev = talitos_alg->dev;
3034 
3035 	/* assign SEC channel to tfm in round-robin fashion */
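	/* num_channels is a power of 2 (validated at probe), so the AND is a modulo */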
3036 	priv = dev_get_drvdata(ctx->dev);
3037 	ctx->ch = atomic_inc_return(&priv->last_chan) &
3038 		  (priv->num_channels - 1);
3039 
3040 	/* copy descriptor header template value */
3041 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3042 
3043 	/* select done notification */
3044 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3045 
3046 	return 0;
3047 }
3048 
3049 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3050 {
3051 	struct aead_alg *alg = crypto_aead_alg(tfm);
3052 	struct talitos_crypto_alg *talitos_alg;
3053 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3054 
3055 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3056 				   algt.alg.aead);
3057 
3058 	return talitos_init_common(ctx, talitos_alg);
3059 }
3060 
3061 static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
3062 {
3063 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3064 	struct talitos_crypto_alg *talitos_alg;
3065 	struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
3066 
3067 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3068 				   algt.alg.skcipher);
3069 
3070 	return talitos_init_common(ctx, talitos_alg);
3071 }
3072 
3073 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3074 {
3075 	struct crypto_alg *alg = tfm->__crt_alg;
3076 	struct talitos_crypto_alg *talitos_alg;
3077 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3078 
3079 	talitos_alg = container_of(__crypto_ahash_alg(alg),
3080 				   struct talitos_crypto_alg,
3081 				   algt.alg.hash);
3082 
3083 	ctx->keylen = 0;
3084 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3085 				 sizeof(struct talitos_ahash_req_ctx));
3086 
3087 	return talitos_init_common(ctx, talitos_alg);
3088 }
3089 
3090 static void talitos_cra_exit(struct crypto_tfm *tfm)
3091 {
3092 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3093 	struct device *dev = ctx->dev;
3094 
3095 	if (ctx->keylen)
3096 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3097 }
3098 
3099 /*
3100  * given the alg's descriptor header template, determine whether descriptor
3101  * type and primary/secondary execution units required match the hw
3102  * capabilities description provided in the device tree node.
3103  */
3104 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3105 {
3106 	struct talitos_private *priv = dev_get_drvdata(dev);
3107 	int ret;
3108 
3109 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3110 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3111 
3112 	if (SECONDARY_EU(desc_hdr_template))
3113 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3114 		              & priv->exec_units);
3115 
3116 	return ret;
3117 }
3118 
3119 static int talitos_remove(struct platform_device *ofdev)
3120 {
3121 	struct device *dev = &ofdev->dev;
3122 	struct talitos_private *priv = dev_get_drvdata(dev);
3123 	struct talitos_crypto_alg *t_alg, *n;
3124 	int i;
3125 
3126 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3127 		switch (t_alg->algt.type) {
3128 		case CRYPTO_ALG_TYPE_SKCIPHER:
3129 			crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
3130 			break;
3131 		case CRYPTO_ALG_TYPE_AEAD:
3132 			crypto_unregister_aead(&t_alg->algt.alg.aead);
3133 			break;
3134 		case CRYPTO_ALG_TYPE_AHASH:
3135 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3136 			break;
3137 		}
3138 		list_del(&t_alg->entry);
3139 	}
3140 
3141 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3142 		talitos_unregister_rng(dev);
3143 
3144 	for (i = 0; i < 2; i++)
3145 		if (priv->irq[i]) {
3146 			free_irq(priv->irq[i], dev);
3147 			irq_dispose_mapping(priv->irq[i]);
3148 		}
3149 
3150 	tasklet_kill(&priv->done_task[0]);
3151 	if (priv->irq[1])
3152 		tasklet_kill(&priv->done_task[1]);
3153 
3154 	return 0;
3155 }
3156 
3157 static struct talitos_crypto_alg *
3158 talitos_alg_alloc(struct device *dev,
3159 		  struct talitos_alg_template *template)
3160 {
3161 	struct talitos_private *priv = dev_get_drvdata(dev);
3162 	struct talitos_crypto_alg *t_alg;
3163 	struct crypto_alg *alg;
3164 
3165 	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3166 			     GFP_KERNEL);
3167 	if (!t_alg)
3168 		return ERR_PTR(-ENOMEM);
3169 
3170 	t_alg->algt = *template;
3171 
3172 	switch (t_alg->algt.type) {
3173 	case CRYPTO_ALG_TYPE_SKCIPHER:
3174 		alg = &t_alg->algt.alg.skcipher.base;
3175 		alg->cra_exit = talitos_cra_exit;
3176 		t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
3177 		t_alg->algt.alg.skcipher.setkey =
3178 			t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
3179 		t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
3180 		t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
3181 		break;
3182 	case CRYPTO_ALG_TYPE_AEAD:
3183 		alg = &t_alg->algt.alg.aead.base;
3184 		alg->cra_exit = talitos_cra_exit;
3185 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3186 		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3187 					      aead_setkey;
3188 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3189 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3190 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3191 		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3192 			devm_kfree(dev, t_alg);
3193 			return ERR_PTR(-ENOTSUPP);
3194 		}
3195 		break;
3196 	case CRYPTO_ALG_TYPE_AHASH:
3197 		alg = &t_alg->algt.alg.hash.halg.base;
3198 		alg->cra_init = talitos_cra_init_ahash;
3199 		alg->cra_exit = talitos_cra_exit;
3200 		t_alg->algt.alg.hash.init = ahash_init;
3201 		t_alg->algt.alg.hash.update = ahash_update;
3202 		t_alg->algt.alg.hash.final = ahash_final;
3203 		t_alg->algt.alg.hash.finup = ahash_finup;
3204 		t_alg->algt.alg.hash.digest = ahash_digest;
3205 		if (!strncmp(alg->cra_name, "hmac", 4))
3206 			t_alg->algt.alg.hash.setkey = ahash_setkey;
3207 		t_alg->algt.alg.hash.import = ahash_import;
3208 		t_alg->algt.alg.hash.export = ahash_export;
3209 
3210 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3211 		    !strncmp(alg->cra_name, "hmac", 4)) {
3212 			devm_kfree(dev, t_alg);
3213 			return ERR_PTR(-ENOTSUPP);
3214 		}
3215 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3216 		    (!strcmp(alg->cra_name, "sha224") ||
3217 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3218 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3219 			t_alg->algt.desc_hdr_template =
3220 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3221 					DESC_HDR_SEL0_MDEUA |
3222 					DESC_HDR_MODE0_MDEU_SHA256;
3223 		}
3224 		break;
3225 	default:
3226 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3227 		devm_kfree(dev, t_alg);
3228 		return ERR_PTR(-EINVAL);
3229 	}
3230 
3231 	alg->cra_module = THIS_MODULE;
3232 	if (t_alg->algt.priority)
3233 		alg->cra_priority = t_alg->algt.priority;
3234 	else
3235 		alg->cra_priority = TALITOS_CRA_PRIORITY;
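	/* SEC1 requires 32-bit aligned buffers */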
3236 	if (has_ftr_sec1(priv))
3237 		alg->cra_alignmask = 3;
3238 	else
3239 		alg->cra_alignmask = 0;
3240 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3241 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3242 
3243 	t_alg->dev = dev;
3244 
3245 	return t_alg;
3246 }
3247 
3248 static int talitos_probe_irq(struct platform_device *ofdev)
3249 {
3250 	struct device *dev = &ofdev->dev;
3251 	struct device_node *np = ofdev->dev.of_node;
3252 	struct talitos_private *priv = dev_get_drvdata(dev);
3253 	int err;
3254 	bool is_sec1 = has_ftr_sec1(priv);
3255 
3256 	priv->irq[0] = irq_of_parse_and_map(np, 0);
3257 	if (!priv->irq[0]) {
3258 		dev_err(dev, "failed to map irq\n");
3259 		return -EINVAL;
3260 	}
3261 	if (is_sec1) {
3262 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3263 				  dev_driver_string(dev), dev);
3264 		goto primary_out;
3265 	}
3266 
3267 	priv->irq[1] = irq_of_parse_and_map(np, 1);
3268 
3269 	/* get the primary irq line */
3270 	if (!priv->irq[1]) {
3271 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3272 				  dev_driver_string(dev), dev);
3273 		goto primary_out;
3274 	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	int i, err;
	int stride;
	struct resource *res;

	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;
	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
	if (!priv->reg) {
		dev_err(dev, "failed to ioremap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",
			     &priv->desc_types);

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}
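
	/*
	 * A conforming node supplies all four properties above.  For
	 * illustration only (exact values vary by SoC), an SEC2-style node
	 * might look like:
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.0";
	 *		reg = <0x30000 0x10000>;
	 *		interrupts = <45 2>;
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0xfe>;
	 *		fsl,descriptor-types-mask = <0x12b0ebf>;
	 *	};
	 */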

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

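	/*
	 * Execution-unit register offsets and the per-channel register
	 * stride differ between SEC versions, so map them per compatible.
	 */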
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

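	/*
	 * Pick done-processing tasklets to match the IRQ layout: two
	 * tasklets when channels are split across two IRQ lines, otherwise
	 * a single tasklet for one or four channels.
	 */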
	if (has_ftr_sec1(priv)) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}

	priv->chan = devm_kcalloc(dev,
				  priv->num_channels,
				  sizeof(struct talitos_channel),
				  GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

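	/* a power-of-2 s/w fifo lets head/tail indices wrap by masking */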
	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev,
						priv->fifo_len,
						sizeof(struct talitos_request),
						GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

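		/*
		 * Seed the counter so it reaches zero with one h/w fifo slot
		 * still free: submission increments it and treats a positive
		 * result as channel-full.
		 */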
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

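	/* SEC descriptor pointers can address up to 36 bits (ptr + eptr) */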
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		}
		dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

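			/*
			 * -ENOTSUPP from talitos_alg_alloc() means the h/w
			 * cannot run this algorithm (e.g. HMAC without
			 * TALITOS_FTR_HMAC_OK); skip it rather than fail.
			 */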
			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_SKCIPHER:
				err = crypto_register_skcipher(
						&t_alg->algt.alg.skcipher);
				alg = &t_alg->algt.alg.skcipher.base;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else {
				list_add_tail(&t_alg->entry, &priv->alg_list);
			}
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");