xref: /openbmc/linux/drivers/crypto/talitos.c (revision e3b9f1e8)
1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43 
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55 
56 #include "talitos.h"
57 
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 			   unsigned int len, bool is_sec1)
60 {
61 	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62 	if (is_sec1) {
63 		ptr->len1 = cpu_to_be16(len);
64 	} else {
65 		ptr->len = cpu_to_be16(len);
66 		ptr->eptr = upper_32_bits(dma_addr);
67 	}
68 }
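/*
 * Worked example for the layout above: on SEC2+, a 36-bit bus address such
 * as 0x9_2345_6780 is stored as ptr = cpu_to_be32(0x23456780) with
 * eptr = 0x9 (upper_32_bits() keeps address bits 35:32).  SEC1 is limited
 * to 32-bit addressing, so only ptr and the separate 16-bit len1 field
 * are written.
 */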
69 
70 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
71 			     struct talitos_ptr *src_ptr, bool is_sec1)
72 {
73 	dst_ptr->ptr = src_ptr->ptr;
74 	if (is_sec1) {
75 		dst_ptr->len1 = src_ptr->len1;
76 	} else {
77 		dst_ptr->len = src_ptr->len;
78 		dst_ptr->eptr = src_ptr->eptr;
79 	}
80 }
81 
82 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
83 					   bool is_sec1)
84 {
85 	if (is_sec1)
86 		return be16_to_cpu(ptr->len1);
87 	else
88 		return be16_to_cpu(ptr->len);
89 }
90 
91 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
92 				   bool is_sec1)
93 {
94 	if (!is_sec1)
95 		ptr->j_extent = val;
96 }
97 
98 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
99 {
100 	if (!is_sec1)
101 		ptr->j_extent |= val;
102 }
103 
104 /*
105  * map virtual single (contiguous) pointer to h/w descriptor pointer
106  */
107 static void map_single_talitos_ptr(struct device *dev,
108 				   struct talitos_ptr *ptr,
109 				   unsigned int len, void *data,
110 				   enum dma_data_direction dir)
111 {
112 	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
113 	struct talitos_private *priv = dev_get_drvdata(dev);
114 	bool is_sec1 = has_ftr_sec1(priv);
115 
116 	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
117 }
118 
119 /*
120  * unmap bus single (contiguous) h/w descriptor pointer
121  */
122 static void unmap_single_talitos_ptr(struct device *dev,
123 				     struct talitos_ptr *ptr,
124 				     enum dma_data_direction dir)
125 {
126 	struct talitos_private *priv = dev_get_drvdata(dev);
127 	bool is_sec1 = has_ftr_sec1(priv);
128 
129 	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
130 			 from_talitos_ptr_len(ptr, is_sec1), dir);
131 }
132 
133 static int reset_channel(struct device *dev, int ch)
134 {
135 	struct talitos_private *priv = dev_get_drvdata(dev);
136 	unsigned int timeout = TALITOS_TIMEOUT;
137 	bool is_sec1 = has_ftr_sec1(priv);
138 
139 	if (is_sec1) {
140 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
141 			  TALITOS1_CCCR_LO_RESET);
142 
143 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
144 			TALITOS1_CCCR_LO_RESET) && --timeout)
145 			cpu_relax();
146 	} else {
147 		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
148 			  TALITOS2_CCCR_RESET);
149 
150 		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
151 			TALITOS2_CCCR_RESET) && --timeout)
152 			cpu_relax();
153 	}
154 
155 	if (timeout == 0) {
156 		dev_err(dev, "failed to reset channel %d\n", ch);
157 		return -EIO;
158 	}
159 
160 	/* set 36-bit addressing, done writeback enable and done IRQ enable */
161 	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
162 		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
163 	/* enable chaining descriptors */
164 	if (is_sec1)
165 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
166 			  TALITOS_CCCR_LO_NE);
167 
168 	/* and ICCR writeback, if available */
169 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
170 		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
171 		          TALITOS_CCCR_LO_IWSE);
172 
173 	return 0;
174 }
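/*
 * Both reset branches above use the same poll-until-clear idiom.  As a
 * minimal sketch, the idiom could be factored into a helper like the
 * hypothetical one below (illustrative only, not part of this driver):
 */
#if 0	/* illustrative sketch */
static int talitos_wait_clear(void __iomem *reg, u32 mask)
{
	unsigned int timeout = TALITOS_TIMEOUT;

	/* busy-wait, bounded by TALITOS_TIMEOUT iterations */
	while ((in_be32(reg) & mask) && --timeout)
		cpu_relax();

	return timeout ? 0 : -ETIMEDOUT;
}
#endif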
175 
176 static int reset_device(struct device *dev)
177 {
178 	struct talitos_private *priv = dev_get_drvdata(dev);
179 	unsigned int timeout = TALITOS_TIMEOUT;
180 	bool is_sec1 = has_ftr_sec1(priv);
181 	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
182 
183 	setbits32(priv->reg + TALITOS_MCR, mcr);
184 
185 	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
186 	       && --timeout)
187 		cpu_relax();
188 
189 	if (priv->irq[1]) {
190 		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
191 		setbits32(priv->reg + TALITOS_MCR, mcr);
192 	}
193 
194 	if (timeout == 0) {
195 		dev_err(dev, "failed to reset device\n");
196 		return -EIO;
197 	}
198 
199 	return 0;
200 }
201 
202 /*
203  * Reset and initialize the device
204  */
205 static int init_device(struct device *dev)
206 {
207 	struct talitos_private *priv = dev_get_drvdata(dev);
208 	int ch, err;
209 	bool is_sec1 = has_ftr_sec1(priv);
210 
211 	/*
212 	 * Master reset
213 	 * errata documentation: warning: certain SEC interrupts
214 	 * are not fully cleared by writing the MCR:SWR bit,
215 	 * set bit twice to completely reset
216 	 */
217 	err = reset_device(dev);
218 	if (err)
219 		return err;
220 
221 	err = reset_device(dev);
222 	if (err)
223 		return err;
224 
225 	/* reset channels */
226 	for (ch = 0; ch < priv->num_channels; ch++) {
227 		err = reset_channel(dev, ch);
228 		if (err)
229 			return err;
230 	}
231 
232 	/* enable channel done and error interrupts */
233 	if (is_sec1) {
234 		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
235 		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
236 		/* disable key parity error check in DEU (test vectors have bad parity) */
237 		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
238 	} else {
239 		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
240 		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
241 	}
242 
243 	/* disable integrity check error interrupts (use writeback instead) */
244 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
245 		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
246 		          TALITOS_MDEUICR_LO_ICE);
247 
248 	return 0;
249 }
250 
251 /**
252  * talitos_submit - submits a descriptor to the device for processing
253  * @dev:	the SEC device to be used
254  * @ch:		the SEC device channel to be used
255  * @desc:	the descriptor to be processed by the device
256  * @callback:	whom to call when processing is complete
257  * @context:	a handle for use by caller (optional)
258  *
259  * desc must contain valid dma-mapped (bus physical) address pointers.
260  * callback must check err and feedback in descriptor header
261  * for device processing status.
262  */
263 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
264 		   void (*callback)(struct device *dev,
265 				    struct talitos_desc *desc,
266 				    void *context, int error),
267 		   void *context)
268 {
269 	struct talitos_private *priv = dev_get_drvdata(dev);
270 	struct talitos_request *request;
271 	unsigned long flags;
272 	int head;
273 	bool is_sec1 = has_ftr_sec1(priv);
274 
275 	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
276 
277 	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
278 		/* h/w fifo is full */
279 		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
280 		return -EAGAIN;
281 	}
282 
283 	head = priv->chan[ch].head;
284 	request = &priv->chan[ch].fifo[head];
285 
286 	/* map descriptor and save caller data */
287 	if (is_sec1) {
288 		desc->hdr1 = desc->hdr;
289 		request->dma_desc = dma_map_single(dev, &desc->hdr1,
290 						   TALITOS_DESC_SIZE,
291 						   DMA_BIDIRECTIONAL);
292 	} else {
293 		request->dma_desc = dma_map_single(dev, desc,
294 						   TALITOS_DESC_SIZE,
295 						   DMA_BIDIRECTIONAL);
296 	}
297 	request->callback = callback;
298 	request->context = context;
299 
300 	/* increment fifo head */
301 	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
302 
303 	smp_wmb();
304 	request->desc = desc;
305 
306 	/* GO! */
307 	wmb();
308 	out_be32(priv->chan[ch].reg + TALITOS_FF,
309 		 upper_32_bits(request->dma_desc));
310 	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
311 		 lower_32_bits(request->dma_desc));
312 
313 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
314 
315 	return -EINPROGRESS;
316 }
317 EXPORT_SYMBOL(talitos_submit);
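/*
 * Minimal (hypothetical) synchronous caller sketch for talitos_submit(),
 * assuming <linux/completion.h> and a fully built, dma-mapped descriptor;
 * the real users of this API are the request-based paths further below:
 */
#if 0	/* illustrative sketch */
static void example_done(struct device *dev, struct talitos_desc *desc,
			 void *context, int error)
{
	complete(context);	/* error/feedback also live in desc->hdr */
}

static int example_submit_sync(struct device *dev, int ch,
			       struct talitos_desc *desc)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret = talitos_submit(dev, ch, desc, example_done, &done);

	if (ret != -EINPROGRESS)
		return ret;	/* -EAGAIN means the channel fifo was full */
	wait_for_completion(&done);
	return 0;
}
#endif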
318 
319 /*
320  * process what was done, notify callback of error if not
321  */
322 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
323 {
324 	struct talitos_private *priv = dev_get_drvdata(dev);
325 	struct talitos_request *request, saved_req;
326 	unsigned long flags;
327 	int tail, status;
328 	bool is_sec1 = has_ftr_sec1(priv);
329 
330 	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
331 
332 	tail = priv->chan[ch].tail;
333 	while (priv->chan[ch].fifo[tail].desc) {
334 		__be32 hdr;
335 
336 		request = &priv->chan[ch].fifo[tail];
337 
338 		/* descriptors with their done bits set don't get the error */
339 		rmb();
340 		if (!is_sec1)
341 			hdr = request->desc->hdr;
342 		else if (request->desc->next_desc)
343 			hdr = (request->desc + 1)->hdr1;
344 		else
345 			hdr = request->desc->hdr1;
346 
347 		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
348 			status = 0;
349 		else
350 			if (!error)
351 				break;
352 			else
353 				status = error;
354 
355 		dma_unmap_single(dev, request->dma_desc,
356 				 TALITOS_DESC_SIZE,
357 				 DMA_BIDIRECTIONAL);
358 
359 		/* copy entries so we can call callback outside lock */
360 		saved_req.desc = request->desc;
361 		saved_req.callback = request->callback;
362 		saved_req.context = request->context;
363 
364 		/* release request entry in fifo */
365 		smp_wmb();
366 		request->desc = NULL;
367 
368 		/* increment fifo tail */
369 		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
370 
371 		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
372 
373 		atomic_dec(&priv->chan[ch].submit_count);
374 
375 		saved_req.callback(dev, saved_req.desc, saved_req.context,
376 				   status);
377 		/* channel may resume processing in single desc error case */
378 		if (error && !reset_ch && status == error)
379 			return;
380 		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
381 		tail = priv->chan[ch].tail;
382 	}
383 
384 	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
385 }
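/*
 * The head/tail updates here and in talitos_submit() rely on fifo_len
 * being a power of two, so "(idx + 1) & (fifo_len - 1)" wraps for free:
 * with fifo_len = 8, index 7 advances to (7 + 1) & 7 = 0.  Producer
 * (head) and consumer (tail) take separate locks, and request->desc
 * doubles as the slot-occupied flag, published with smp_wmb().
 */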
386 
387 /*
388  * process completed requests for channels that have done status
389  */
390 #define DEF_TALITOS1_DONE(name, ch_done_mask)				\
391 static void talitos1_done_##name(unsigned long data)			\
392 {									\
393 	struct device *dev = (struct device *)data;			\
394 	struct talitos_private *priv = dev_get_drvdata(dev);		\
395 	unsigned long flags;						\
396 									\
397 	if (ch_done_mask & 0x10000000)					\
398 		flush_channel(dev, 0, 0, 0);			\
399 	if (ch_done_mask & 0x40000000)					\
400 		flush_channel(dev, 1, 0, 0);			\
401 	if (ch_done_mask & 0x00010000)					\
402 		flush_channel(dev, 2, 0, 0);			\
403 	if (ch_done_mask & 0x00040000)					\
404 		flush_channel(dev, 3, 0, 0);			\
405 									\
406 	/* At this point, all completed channels have been processed */	\
407 	/* Unmask done interrupts for channels completed later on. */	\
408 	spin_lock_irqsave(&priv->reg_lock, flags);			\
409 	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
410 	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
411 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
412 }
413 
414 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
415 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
416 
417 #define DEF_TALITOS2_DONE(name, ch_done_mask)				\
418 static void talitos2_done_##name(unsigned long data)			\
419 {									\
420 	struct device *dev = (struct device *)data;			\
421 	struct talitos_private *priv = dev_get_drvdata(dev);		\
422 	unsigned long flags;						\
423 									\
424 	if (ch_done_mask & 1)						\
425 		flush_channel(dev, 0, 0, 0);				\
426 	if (ch_done_mask & (1 << 2))					\
427 		flush_channel(dev, 1, 0, 0);				\
428 	if (ch_done_mask & (1 << 4))					\
429 		flush_channel(dev, 2, 0, 0);				\
430 	if (ch_done_mask & (1 << 6))					\
431 		flush_channel(dev, 3, 0, 0);				\
432 									\
433 	/* At this point, all completed channels have been processed */	\
434 	/* Unmask done interrupts for channels completed later on. */	\
435 	spin_lock_irqsave(&priv->reg_lock, flags);			\
436 	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
437 	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
438 	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
439 }
440 
441 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
442 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
443 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
444 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
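/*
 * Each DEF_TALITOS*_DONE() instantiation expands to a tasklet body named
 * talitos<1|2>_done_<name>.  On SEC2+ the ISR encodes channel n "done"
 * status at bit 2n and "error" at bit 2n + 1, hence the 1, 1 << 2,
 * 1 << 4, 1 << 6 tests above; SEC1 uses the scattered bit layout tested
 * in the SEC1 macro (0x10000000, 0x40000000, 0x00010000, 0x00040000).
 */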
445 
446 /*
447  * locate current (offending) descriptor
448  */
449 static u32 current_desc_hdr(struct device *dev, int ch)
450 {
451 	struct talitos_private *priv = dev_get_drvdata(dev);
452 	int tail, iter;
453 	dma_addr_t cur_desc;
454 
455 	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
456 	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
457 
458 	if (!cur_desc) {
459 		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
460 		return 0;
461 	}
462 
463 	tail = priv->chan[ch].tail;
464 
465 	iter = tail;
466 	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
467 	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
468 		iter = (iter + 1) & (priv->fifo_len - 1);
469 		if (iter == tail) {
470 			dev_err(dev, "couldn't locate current descriptor\n");
471 			return 0;
472 		}
473 	}
474 
475 	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
476 		return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
477 
478 	return priv->chan[ch].fifo[iter].desc->hdr;
479 }
480 
481 /*
482  * user diagnostics; report root cause of error based on execution unit status
483  */
484 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
485 {
486 	struct talitos_private *priv = dev_get_drvdata(dev);
487 	int i;
488 
489 	if (!desc_hdr)
490 		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
491 
492 	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
493 	case DESC_HDR_SEL0_AFEU:
494 		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
495 			in_be32(priv->reg_afeu + TALITOS_EUISR),
496 			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
497 		break;
498 	case DESC_HDR_SEL0_DEU:
499 		dev_err(dev, "DEUISR 0x%08x_%08x\n",
500 			in_be32(priv->reg_deu + TALITOS_EUISR),
501 			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
502 		break;
503 	case DESC_HDR_SEL0_MDEUA:
504 	case DESC_HDR_SEL0_MDEUB:
505 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
506 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
507 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
508 		break;
509 	case DESC_HDR_SEL0_RNG:
510 		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
511 			in_be32(priv->reg_rngu + TALITOS_ISR),
512 			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
513 		break;
514 	case DESC_HDR_SEL0_PKEU:
515 		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
516 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
517 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
518 		break;
519 	case DESC_HDR_SEL0_AESU:
520 		dev_err(dev, "AESUISR 0x%08x_%08x\n",
521 			in_be32(priv->reg_aesu + TALITOS_EUISR),
522 			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
523 		break;
524 	case DESC_HDR_SEL0_CRCU:
525 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
526 			in_be32(priv->reg_crcu + TALITOS_EUISR),
527 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
528 		break;
529 	case DESC_HDR_SEL0_KEU:
530 		dev_err(dev, "KEUISR 0x%08x_%08x\n",
531 			in_be32(priv->reg_pkeu + TALITOS_EUISR),
532 			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
533 		break;
534 	}
535 
536 	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
537 	case DESC_HDR_SEL1_MDEUA:
538 	case DESC_HDR_SEL1_MDEUB:
539 		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
540 			in_be32(priv->reg_mdeu + TALITOS_EUISR),
541 			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
542 		break;
543 	case DESC_HDR_SEL1_CRCU:
544 		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
545 			in_be32(priv->reg_crcu + TALITOS_EUISR),
546 			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
547 		break;
548 	}
549 
550 	for (i = 0; i < 8; i++)
551 		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
552 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
553 			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
554 }
555 
556 /*
557  * recover from error interrupts
558  */
559 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
560 {
561 	struct talitos_private *priv = dev_get_drvdata(dev);
562 	unsigned int timeout = TALITOS_TIMEOUT;
563 	int ch, error, reset_dev = 0;
564 	u32 v_lo;
565 	bool is_sec1 = has_ftr_sec1(priv);
566 	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
567 
568 	for (ch = 0; ch < priv->num_channels; ch++) {
569 		/* skip channels without errors */
570 		if (is_sec1) {
571 			/* bits 29, 31, 17, 19 */
572 			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
573 				continue;
574 		} else {
575 			if (!(isr & (1 << (ch * 2 + 1))))
576 				continue;
577 		}
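		/*
		 * The SEC1 expression above selects error bits 29, 31, 17,
		 * 19 for ch = 0..3: 29 + (ch & 1) * 2 - (ch & 2) * 6 gives
		 * 29 + 0 - 0, 29 + 2 - 0, 29 + 0 - 12 and 29 + 2 - 12.
		 * SEC2+ keeps it simple: channel n errors at bit 2n + 1.
		 */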
578 
579 		error = -EINVAL;
580 
581 		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
582 
583 		if (v_lo & TALITOS_CCPSR_LO_DOF) {
584 			dev_err(dev, "double fetch fifo overflow error\n");
585 			error = -EAGAIN;
586 			reset_ch = 1;
587 		}
588 		if (v_lo & TALITOS_CCPSR_LO_SOF) {
589 			/* h/w dropped descriptor */
590 			dev_err(dev, "single fetch fifo overflow error\n");
591 			error = -EAGAIN;
592 		}
593 		if (v_lo & TALITOS_CCPSR_LO_MDTE)
594 			dev_err(dev, "master data transfer error\n");
595 		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
596 			dev_err(dev, is_sec1 ? "pointer not complete error\n"
597 					     : "s/g data length zero error\n");
598 		if (v_lo & TALITOS_CCPSR_LO_FPZ)
599 			dev_err(dev, is_sec1 ? "parity error\n"
600 					     : "fetch pointer zero error\n");
601 		if (v_lo & TALITOS_CCPSR_LO_IDH)
602 			dev_err(dev, "illegal descriptor header error\n");
603 		if (v_lo & TALITOS_CCPSR_LO_IEU)
604 			dev_err(dev, is_sec1 ? "static assignment error\n"
605 					     : "invalid exec unit error\n");
606 		if (v_lo & TALITOS_CCPSR_LO_EU)
607 			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
608 		if (!is_sec1) {
609 			if (v_lo & TALITOS_CCPSR_LO_GB)
610 				dev_err(dev, "gather boundary error\n");
611 			if (v_lo & TALITOS_CCPSR_LO_GRL)
612 				dev_err(dev, "gather return/length error\n");
613 			if (v_lo & TALITOS_CCPSR_LO_SB)
614 				dev_err(dev, "scatter boundary error\n");
615 			if (v_lo & TALITOS_CCPSR_LO_SRL)
616 				dev_err(dev, "scatter return/length error\n");
617 		}
618 
619 		flush_channel(dev, ch, error, reset_ch);
620 
621 		if (reset_ch) {
622 			reset_channel(dev, ch);
623 		} else {
624 			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
625 				  TALITOS2_CCCR_CONT);
626 			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
627 			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
628 			       TALITOS2_CCCR_CONT) && --timeout)
629 				cpu_relax();
630 			if (timeout == 0) {
631 				dev_err(dev, "failed to restart channel %d\n",
632 					ch);
633 				reset_dev = 1;
634 			}
635 		}
636 	}
637 	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
638 	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
639 		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
640 			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
641 				isr, isr_lo);
642 		else
643 			dev_err(dev, "done overflow, internal timeout, or "
644 				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
645 
646 		/* purge request queues */
647 		for (ch = 0; ch < priv->num_channels; ch++)
648 			flush_channel(dev, ch, -EIO, 1);
649 
650 		/* reset and reinitialize the device */
651 		init_device(dev);
652 	}
653 }
654 
655 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
656 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
657 {									       \
658 	struct device *dev = data;					       \
659 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
660 	u32 isr, isr_lo;						       \
661 	unsigned long flags;						       \
662 									       \
663 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
664 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
665 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
666 	/* Acknowledge interrupt */					       \
667 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
668 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
669 									       \
670 	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
671 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
672 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
673 	}								       \
674 	else {								       \
675 		if (likely(isr & ch_done_mask)) {			       \
676 			/* mask further done interrupts. */		       \
677 			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
678 			/* done_task will unmask done interrupts at exit */    \
679 			tasklet_schedule(&priv->done_task[tlet]);	       \
680 		}							       \
681 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
682 	}								       \
683 									       \
684 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
685 								IRQ_NONE;      \
686 }
687 
688 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
689 
690 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
691 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
692 {									       \
693 	struct device *dev = data;					       \
694 	struct talitos_private *priv = dev_get_drvdata(dev);		       \
695 	u32 isr, isr_lo;						       \
696 	unsigned long flags;						       \
697 									       \
698 	spin_lock_irqsave(&priv->reg_lock, flags);			       \
699 	isr = in_be32(priv->reg + TALITOS_ISR);				       \
700 	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
701 	/* Acknowledge interrupt */					       \
702 	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
703 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
704 									       \
705 	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
706 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
707 		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
708 	}								       \
709 	else {								       \
710 		if (likely(isr & ch_done_mask)) {			       \
711 			/* mask further done interrupts. */		       \
712 			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
713 			/* done_task will unmask done interrupts at exit */    \
714 			tasklet_schedule(&priv->done_task[tlet]);	       \
715 		}							       \
716 		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
717 	}								       \
718 									       \
719 	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
720 								IRQ_NONE;      \
721 }
722 
723 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
724 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
725 		       0)
726 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
727 		       1)
728 
729 /*
730  * hwrng
731  */
732 static int talitos_rng_data_present(struct hwrng *rng, int wait)
733 {
734 	struct device *dev = (struct device *)rng->priv;
735 	struct talitos_private *priv = dev_get_drvdata(dev);
736 	u32 ofl;
737 	int i;
738 
739 	for (i = 0; i < 20; i++) {
740 		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
741 		      TALITOS_RNGUSR_LO_OFL;
742 		if (ofl || !wait)
743 			break;
744 		udelay(10);
745 	}
746 
747 	return !!ofl;
748 }
749 
750 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
751 {
752 	struct device *dev = (struct device *)rng->priv;
753 	struct talitos_private *priv = dev_get_drvdata(dev);
754 
755 	/* rng fifo requires 64-bit accesses; the high word is read and discarded */
756 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
757 	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
758 
759 	return sizeof(u32);
760 }
761 
762 static int talitos_rng_init(struct hwrng *rng)
763 {
764 	struct device *dev = (struct device *)rng->priv;
765 	struct talitos_private *priv = dev_get_drvdata(dev);
766 	unsigned int timeout = TALITOS_TIMEOUT;
767 
768 	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
769 	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
770 		 & TALITOS_RNGUSR_LO_RD)
771 	       && --timeout)
772 		cpu_relax();
773 	if (timeout == 0) {
774 		dev_err(dev, "failed to reset rng hw\n");
775 		return -ENODEV;
776 	}
777 
778 	/* start generating */
779 	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
780 
781 	return 0;
782 }
783 
784 static int talitos_register_rng(struct device *dev)
785 {
786 	struct talitos_private *priv = dev_get_drvdata(dev);
787 	int err;
788 
789 	priv->rng.name		= dev_driver_string(dev);
790 	priv->rng.init		= talitos_rng_init;
791 	priv->rng.data_present	= talitos_rng_data_present;
792 	priv->rng.data_read	= talitos_rng_data_read;
793 	priv->rng.priv		= (unsigned long)dev;
794 
795 	err = hwrng_register(&priv->rng);
796 	if (!err)
797 		priv->rng_registered = true;
798 
799 	return err;
800 }
801 
802 static void talitos_unregister_rng(struct device *dev)
803 {
804 	struct talitos_private *priv = dev_get_drvdata(dev);
805 
806 	if (!priv->rng_registered)
807 		return;
808 
809 	hwrng_unregister(&priv->rng);
810 	priv->rng_registered = false;
811 }
812 
813 /*
814  * crypto alg
815  */
816 #define TALITOS_CRA_PRIORITY		3000
817 /*
818  * Defines a priority for doing AEAD with descriptors type
819  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
820  */
821 #define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
822 #define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
823 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
824 
825 struct talitos_ctx {
826 	struct device *dev;
827 	int ch;
828 	__be32 desc_hdr_template;
829 	u8 key[TALITOS_MAX_KEY_SIZE];
830 	u8 iv[TALITOS_MAX_IV_LENGTH];
831 	dma_addr_t dma_key;
832 	unsigned int keylen;
833 	unsigned int enckeylen;
834 	unsigned int authkeylen;
835 	dma_addr_t dma_buf;
836 	dma_addr_t dma_hw_context;
837 };
838 
839 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
840 #define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
841 
842 struct talitos_ahash_req_ctx {
843 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
844 	unsigned int hw_context_size;
845 	u8 buf[2][HASH_MAX_BLOCK_SIZE];
846 	int buf_idx;
847 	unsigned int swinit;
848 	unsigned int first;
849 	unsigned int last;
850 	unsigned int to_hash_later;
851 	unsigned int nbuf;
852 	struct scatterlist bufsl[2];
853 	struct scatterlist *psrc;
854 };
855 
856 struct talitos_export_state {
857 	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
858 	u8 buf[HASH_MAX_BLOCK_SIZE];
859 	unsigned int swinit;
860 	unsigned int first;
861 	unsigned int last;
862 	unsigned int to_hash_later;
863 	unsigned int nbuf;
864 };
865 
866 static int aead_setkey(struct crypto_aead *authenc,
867 		       const u8 *key, unsigned int keylen)
868 {
869 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
870 	struct device *dev = ctx->dev;
871 	struct crypto_authenc_keys keys;
872 
873 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
874 		goto badkey;
875 
876 	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
877 		goto badkey;
878 
879 	if (ctx->keylen)
880 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
881 
882 	memcpy(ctx->key, keys.authkey, keys.authkeylen);
883 	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
884 
885 	ctx->keylen = keys.authkeylen + keys.enckeylen;
886 	ctx->enckeylen = keys.enckeylen;
887 	ctx->authkeylen = keys.authkeylen;
888 	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
889 				      DMA_TO_DEVICE);
890 
891 	return 0;
892 
893 badkey:
894 	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
895 	return -EINVAL;
896 }
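/*
 * Resulting key layout in ctx->key after aead_setkey() (the usual
 * authenc convention):
 *
 *	ctx->key: [ auth (HMAC) key, authkeylen bytes | enc key, enckeylen ]
 *
 * ipsec_esp() below points descriptor ptr[0] at the start of this buffer
 * and the cipher key pointer at ctx->dma_key + authkeylen.
 */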
897 
898 /*
899  * talitos_edesc - s/w-extended descriptor
900  * @src_nents: number of segments in input scatterlist
901  * @dst_nents: number of segments in output scatterlist
902  * @icv_ool: whether ICV is out-of-line
903  * @iv_dma: dma address of iv for checking continuity and link table
904  * @dma_len: length of dma mapped link_tbl space
905  * @dma_link_tbl: bus physical address of link_tbl/buf
906  * @desc: h/w descriptor
907  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
908  * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
909  *
910  * if decrypting (with authcheck), or either one of src_nents or dst_nents
911  * is greater than 1, an integrity check value is concatenated to the end
912  * of link_tbl data
913  */
914 struct talitos_edesc {
915 	int src_nents;
916 	int dst_nents;
917 	bool icv_ool;
918 	dma_addr_t iv_dma;
919 	int dma_len;
920 	dma_addr_t dma_link_tbl;
921 	struct talitos_desc desc;
922 	union {
923 		struct talitos_ptr link_tbl[0];
924 		u8 buf[0];
925 	};
926 };
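/*
 * Resulting allocation layout (see talitos_edesc_alloc() below): the
 * fixed fields and the h/w descriptor are followed by dma_len bytes that
 * serve either as link tables on SEC2+ (src entries + dst entries + 2
 * spare for AD/ICV, plus room for two ICVs) or as a contiguous bounce
 * buffer on SEC1, which cannot scatter/gather.
 */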
927 
928 static void talitos_sg_unmap(struct device *dev,
929 			     struct talitos_edesc *edesc,
930 			     struct scatterlist *src,
931 			     struct scatterlist *dst,
932 			     unsigned int len, unsigned int offset)
933 {
934 	struct talitos_private *priv = dev_get_drvdata(dev);
935 	bool is_sec1 = has_ftr_sec1(priv);
936 	unsigned int src_nents = edesc->src_nents ? : 1;
937 	unsigned int dst_nents = edesc->dst_nents ? : 1;
938 
939 	if (is_sec1 && dst && dst_nents > 1) {
940 		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
941 					   len, DMA_FROM_DEVICE);
942 		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
943 				     offset);
944 	}
945 	if (src != dst) {
946 		if (src_nents == 1 || !is_sec1)
947 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
948 
949 		if (dst && (dst_nents == 1 || !is_sec1))
950 			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
951 	} else if (src_nents == 1 || !is_sec1) {
952 		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
953 	}
954 }
955 
956 static void ipsec_esp_unmap(struct device *dev,
957 			    struct talitos_edesc *edesc,
958 			    struct aead_request *areq)
959 {
960 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
961 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
962 	unsigned int ivsize = crypto_aead_ivsize(aead);
963 	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
964 	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
965 
966 	if (is_ipsec_esp)
967 		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
968 					 DMA_FROM_DEVICE);
969 	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
970 
971 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
972 			 areq->assoclen);
973 
974 	if (edesc->dma_len)
975 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
976 				 DMA_BIDIRECTIONAL);
977 
978 	if (!is_ipsec_esp) {
979 		unsigned int dst_nents = edesc->dst_nents ? : 1;
980 
981 		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
982 				   areq->assoclen + areq->cryptlen - ivsize);
983 	}
984 }
985 
986 /*
987  * ipsec_esp descriptor callbacks
988  */
989 static void ipsec_esp_encrypt_done(struct device *dev,
990 				   struct talitos_desc *desc, void *context,
991 				   int err)
992 {
993 	struct talitos_private *priv = dev_get_drvdata(dev);
994 	bool is_sec1 = has_ftr_sec1(priv);
995 	struct aead_request *areq = context;
996 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
997 	unsigned int authsize = crypto_aead_authsize(authenc);
998 	unsigned int ivsize = crypto_aead_ivsize(authenc);
999 	struct talitos_edesc *edesc;
1000 	struct scatterlist *sg;
1001 	void *icvdata;
1002 
1003 	edesc = container_of(desc, struct talitos_edesc, desc);
1004 
1005 	ipsec_esp_unmap(dev, edesc, areq);
1006 
1007 	/* copy the generated ICV to dst */
1008 	if (edesc->icv_ool) {
1009 		if (is_sec1)
1010 			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
1011 		else
1012 			icvdata = &edesc->link_tbl[edesc->src_nents +
1013 						   edesc->dst_nents + 2];
1014 		sg = sg_last(areq->dst, edesc->dst_nents);
1015 		memcpy((char *)sg_virt(sg) + sg->length - authsize,
1016 		       icvdata, authsize);
1017 	}
1018 
1019 	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1020 
1021 	kfree(edesc);
1022 
1023 	aead_request_complete(areq, err);
1024 }
1025 
1026 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1027 					  struct talitos_desc *desc,
1028 					  void *context, int err)
1029 {
1030 	struct aead_request *req = context;
1031 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1032 	unsigned int authsize = crypto_aead_authsize(authenc);
1033 	struct talitos_edesc *edesc;
1034 	struct scatterlist *sg;
1035 	char *oicv, *icv;
1036 	struct talitos_private *priv = dev_get_drvdata(dev);
1037 	bool is_sec1 = has_ftr_sec1(priv);
1038 
1039 	edesc = container_of(desc, struct talitos_edesc, desc);
1040 
1041 	ipsec_esp_unmap(dev, edesc, req);
1042 
1043 	if (!err) {
1044 		/* auth check */
1045 		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
1046 		icv = (char *)sg_virt(sg) + sg->length - authsize;
1047 
1048 		if (edesc->dma_len) {
1049 			if (is_sec1)
1050 				oicv = (char *)&edesc->dma_link_tbl +
1051 					       req->assoclen + req->cryptlen;
1052 			else
1053 				oicv = (char *)
1054 				       &edesc->link_tbl[edesc->src_nents +
1055 							edesc->dst_nents + 2];
1056 			if (edesc->icv_ool)
1057 				icv = oicv + authsize;
1058 		} else
1059 			oicv = (char *)&edesc->link_tbl[0];
1060 
1061 		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1062 	}
1063 
1064 	kfree(edesc);
1065 
1066 	aead_request_complete(req, err);
1067 }
1068 
1069 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1070 					  struct talitos_desc *desc,
1071 					  void *context, int err)
1072 {
1073 	struct aead_request *req = context;
1074 	struct talitos_edesc *edesc;
1075 
1076 	edesc = container_of(desc, struct talitos_edesc, desc);
1077 
1078 	ipsec_esp_unmap(dev, edesc, req);
1079 
1080 	/* check ICV auth status */
1081 	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1082 		     DESC_HDR_LO_ICCR1_PASS))
1083 		err = -EBADMSG;
1084 
1085 	kfree(edesc);
1086 
1087 	aead_request_complete(req, err);
1088 }
1089 
1090 /*
1091  * convert scatterlist to SEC h/w link table format
1092  * stop at cryptlen bytes
1093  */
1094 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1095 				 unsigned int offset, int cryptlen,
1096 				 struct talitos_ptr *link_tbl_ptr)
1097 {
1098 	int n_sg = sg_count;
1099 	int count = 0;
1100 
1101 	while (cryptlen && sg && n_sg--) {
1102 		unsigned int len = sg_dma_len(sg);
1103 
1104 		if (offset >= len) {
1105 			offset -= len;
1106 			goto next;
1107 		}
1108 
1109 		len -= offset;
1110 
1111 		if (len > cryptlen)
1112 			len = cryptlen;
1113 
1114 		to_talitos_ptr(link_tbl_ptr + count,
1115 			       sg_dma_address(sg) + offset, len, 0);
1116 		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1117 		count++;
1118 		cryptlen -= len;
1119 		offset = 0;
1120 
1121 next:
1122 		sg = sg_next(sg);
1123 	}
1124 
1125 	/* tag end of link table */
1126 	if (count > 0)
1127 		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1128 				       DESC_PTR_LNKTBL_RETURN, 0);
1129 
1130 	return count;
1131 }
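/*
 * Example: three DMA segments covering cryptlen bytes become
 *
 *	link_tbl[0] = { addr0, len0 }
 *	link_tbl[1] = { addr1, len1 }
 *	link_tbl[2] = { addr2, len2, j_extent = DESC_PTR_LNKTBL_RETURN }
 *
 * The RETURN tag on the last entry stops the SEC's table walk;
 * talitos_sg_map() below sets DESC_PTR_LNKTBL_JUMP on the descriptor
 * pointer itself so the engine treats it as a table rather than data.
 */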
1132 
1133 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1134 		   unsigned int len, struct talitos_edesc *edesc,
1135 		   struct talitos_ptr *ptr,
1136 		   int sg_count, unsigned int offset, int tbl_off)
1137 {
1138 	struct talitos_private *priv = dev_get_drvdata(dev);
1139 	bool is_sec1 = has_ftr_sec1(priv);
1140 
1141 	if (!src) {
1142 		to_talitos_ptr(ptr, 0, 0, is_sec1);
1143 		return 1;
1144 	}
1145 	if (sg_count == 1) {
1146 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1147 		return sg_count;
1148 	}
1149 	if (is_sec1) {
1150 		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1151 		return sg_count;
1152 	}
1153 	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
1154 					 &edesc->link_tbl[tbl_off]);
1155 	if (sg_count == 1) {
1156 		/* Only one segment now, so no link tbl needed */
1157 		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1158 		return sg_count;
1159 	}
1160 	to_talitos_ptr(ptr, edesc->dma_link_tbl +
1161 			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1162 	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1163 
1164 	return sg_count;
1165 }
1166 
1167 /*
1168  * fill in and submit ipsec_esp descriptor
1169  */
1170 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1171 		     void (*callback)(struct device *dev,
1172 				      struct talitos_desc *desc,
1173 				      void *context, int error))
1174 {
1175 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1176 	unsigned int authsize = crypto_aead_authsize(aead);
1177 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1178 	struct device *dev = ctx->dev;
1179 	struct talitos_desc *desc = &edesc->desc;
1180 	unsigned int cryptlen = areq->cryptlen;
1181 	unsigned int ivsize = crypto_aead_ivsize(aead);
1182 	int tbl_off = 0;
1183 	int sg_count, ret;
1184 	int sg_link_tbl_len;
1185 	bool sync_needed = false;
1186 	struct talitos_private *priv = dev_get_drvdata(dev);
1187 	bool is_sec1 = has_ftr_sec1(priv);
1188 	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1189 	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1190 	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1191 
1192 	/* hmac key */
1193 	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1194 
1195 	sg_count = edesc->src_nents ?: 1;
1196 	if (is_sec1 && sg_count > 1)
1197 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1198 				  areq->assoclen + cryptlen);
1199 	else
1200 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1201 				      (areq->src == areq->dst) ?
1202 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1203 
1204 	/* hmac data */
1205 	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1206 			     &desc->ptr[1], sg_count, 0, tbl_off);
1207 
1208 	if (ret > 1) {
1209 		tbl_off += ret;
1210 		sync_needed = true;
1211 	}
1212 
1213 	/* cipher iv */
1214 	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1215 
1216 	/* cipher key */
1217 	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1218 		       ctx->enckeylen, is_sec1);
1219 
1220 	/*
1221 	 * cipher in
1222 	 * map and adjust cipher len to aead request cryptlen.
1223 	 * extent is bytes of HMAC postpended to ciphertext,
1224 	 * typically 12 for ipsec
1225 	 */
1226 	sg_link_tbl_len = cryptlen;
1227 
1228 	if (is_ipsec_esp) {
1229 		to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
1230 
1231 		if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)
1232 			sg_link_tbl_len += authsize;
1233 	}
1234 
1235 	ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
1236 			     &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
1237 
1238 	if (ret > 1) {
1239 		tbl_off += ret;
1240 		sync_needed = true;
1241 	}
1242 
1243 	/* cipher out */
1244 	if (areq->src != areq->dst) {
1245 		sg_count = edesc->dst_nents ? : 1;
1246 		if (!is_sec1 || sg_count == 1)
1247 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1248 	}
1249 
1250 	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1251 			     sg_count, areq->assoclen, tbl_off);
1252 
1253 	if (is_ipsec_esp)
1254 		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1255 
1256 	/* ICV data */
1257 	if (ret > 1) {
1258 		tbl_off += ret;
1259 		edesc->icv_ool = true;
1260 		sync_needed = true;
1261 
1262 		if (is_ipsec_esp) {
1263 			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1264 			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1265 				     sizeof(struct talitos_ptr) + authsize;
1266 
1267 			/* Add an entry to the link table for ICV data */
1268 			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1269 			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1270 					       is_sec1);
1271 
1272 			/* icv data follows link tables */
1273 			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1274 				       authsize, is_sec1);
1275 		} else {
1276 			dma_addr_t addr = edesc->dma_link_tbl;
1277 
1278 			if (is_sec1)
1279 				addr += areq->assoclen + cryptlen;
1280 			else
1281 				addr += sizeof(struct talitos_ptr) * tbl_off;
1282 
1283 			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
1284 		}
1285 	} else if (!is_ipsec_esp) {
1286 		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1287 				     &desc->ptr[6], sg_count, areq->assoclen +
1288 							      cryptlen,
1289 				     tbl_off);
1290 		if (ret > 1) {
1291 			tbl_off += ret;
1292 			edesc->icv_ool = true;
1293 			sync_needed = true;
1294 		} else {
1295 			edesc->icv_ool = false;
1296 		}
1297 	} else {
1298 		edesc->icv_ool = false;
1299 	}
1300 
1301 	/* iv out */
1302 	if (is_ipsec_esp)
1303 		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1304 				       DMA_FROM_DEVICE);
1305 
1306 	if (sync_needed)
1307 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1308 					   edesc->dma_len,
1309 					   DMA_BIDIRECTIONAL);
1310 
1311 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1312 	if (ret != -EINPROGRESS) {
1313 		ipsec_esp_unmap(dev, edesc, areq);
1314 		kfree(edesc);
1315 	}
1316 	return ret;
1317 }
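/*
 * Summary of the seven pointers filled in above for the IPSEC_ESP
 * descriptor type (the HMAC_SNOOP_NO_AFEU type swaps ptr[2]/ptr[3] and
 * uses ptr[6] for the ICV instead of the iv out):
 *
 *	ptr[0] hmac key		ptr[4] cipher in (+ ICV extent for CICV)
 *	ptr[1] hmac data (AD)	ptr[5] cipher out
 *	ptr[2] cipher iv	ptr[6] ICV / iv out
 *	ptr[3] cipher key
 */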
1318 
1319 /*
1320  * allocate and map the extended descriptor
1321  */
1322 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1323 						 struct scatterlist *src,
1324 						 struct scatterlist *dst,
1325 						 u8 *iv,
1326 						 unsigned int assoclen,
1327 						 unsigned int cryptlen,
1328 						 unsigned int authsize,
1329 						 unsigned int ivsize,
1330 						 int icv_stashing,
1331 						 u32 cryptoflags,
1332 						 bool encrypt)
1333 {
1334 	struct talitos_edesc *edesc;
1335 	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1336 	dma_addr_t iv_dma = 0;
1337 	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1338 		      GFP_ATOMIC;
1339 	struct talitos_private *priv = dev_get_drvdata(dev);
1340 	bool is_sec1 = has_ftr_sec1(priv);
1341 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1342 	void *err;
1343 
1344 	if (cryptlen + authsize > max_len) {
1345 		dev_err(dev, "length exceeds h/w max limit\n");
1346 		return ERR_PTR(-EINVAL);
1347 	}
1348 
1349 	if (ivsize)
1350 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1351 
1352 	if (!dst || dst == src) {
1353 		src_len = assoclen + cryptlen + authsize;
1354 		src_nents = sg_nents_for_len(src, src_len);
1355 		if (src_nents < 0) {
1356 			dev_err(dev, "Invalid number of src SG.\n");
1357 			err = ERR_PTR(-EINVAL);
1358 			goto error_sg;
1359 		}
1360 		src_nents = (src_nents == 1) ? 0 : src_nents;
1361 		dst_nents = dst ? src_nents : 0;
1362 		dst_len = 0;
1363 	} else { /* dst && dst != src */
1364 		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1365 		src_nents = sg_nents_for_len(src, src_len);
1366 		if (src_nents < 0) {
1367 			dev_err(dev, "Invalid number of src SG.\n");
1368 			err = ERR_PTR(-EINVAL);
1369 			goto error_sg;
1370 		}
1371 		src_nents = (src_nents == 1) ? 0 : src_nents;
1372 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1373 		dst_nents = sg_nents_for_len(dst, dst_len);
1374 		if (dst_nents < 0) {
1375 			dev_err(dev, "Invalid number of dst SG.\n");
1376 			err = ERR_PTR(-EINVAL);
1377 			goto error_sg;
1378 		}
1379 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1380 	}
1381 
1382 	/*
1383 	 * allocate space for base edesc plus the link tables,
1384 	 * allowing for two separate entries for AD and generated ICV (+ 2),
1385 	 * and space for two sets of ICVs (stashed and generated)
1386 	 */
1387 	alloc_len = sizeof(struct talitos_edesc);
1388 	if (src_nents || dst_nents) {
1389 		if (is_sec1)
1390 			dma_len = (src_nents ? src_len : 0) +
1391 				  (dst_nents ? dst_len : 0);
1392 		else
1393 			dma_len = (src_nents + dst_nents + 2) *
1394 				  sizeof(struct talitos_ptr) + authsize * 2;
1395 		alloc_len += dma_len;
1396 	} else {
1397 		dma_len = 0;
1398 		alloc_len += icv_stashing ? authsize : 0;
1399 	}
1400 
1401 	/* if it's an ahash, add space for a second desc next to the first one */
1402 	if (is_sec1 && !dst)
1403 		alloc_len += sizeof(struct talitos_desc);
1404 
1405 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1406 	if (!edesc) {
1407 		dev_err(dev, "could not allocate edescriptor\n");
1408 		err = ERR_PTR(-ENOMEM);
1409 		goto error_sg;
1410 	}
1411 	memset(&edesc->desc, 0, sizeof(edesc->desc));
1412 
1413 	edesc->src_nents = src_nents;
1414 	edesc->dst_nents = dst_nents;
1415 	edesc->iv_dma = iv_dma;
1416 	edesc->dma_len = dma_len;
1417 	if (dma_len) {
1418 		void *addr = &edesc->link_tbl[0];
1419 
1420 		if (is_sec1 && !dst)
1421 			addr += sizeof(struct talitos_desc);
1422 		edesc->dma_link_tbl = dma_map_single(dev, addr,
1423 						     edesc->dma_len,
1424 						     DMA_BIDIRECTIONAL);
1425 	}
1426 	return edesc;
1427 error_sg:
1428 	if (iv_dma)
1429 		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1430 	return err;
1431 }
1432 
1433 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1434 					      int icv_stashing, bool encrypt)
1435 {
1436 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1437 	unsigned int authsize = crypto_aead_authsize(authenc);
1438 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1439 	unsigned int ivsize = crypto_aead_ivsize(authenc);
1440 
1441 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1442 				   iv, areq->assoclen, areq->cryptlen,
1443 				   authsize, ivsize, icv_stashing,
1444 				   areq->base.flags, encrypt);
1445 }
1446 
1447 static int aead_encrypt(struct aead_request *req)
1448 {
1449 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1450 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1451 	struct talitos_edesc *edesc;
1452 
1453 	/* allocate extended descriptor */
1454 	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1455 	if (IS_ERR(edesc))
1456 		return PTR_ERR(edesc);
1457 
1458 	/* set encrypt */
1459 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1460 
1461 	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1462 }
1463 
1464 static int aead_decrypt(struct aead_request *req)
1465 {
1466 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1467 	unsigned int authsize = crypto_aead_authsize(authenc);
1468 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1469 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1470 	struct talitos_edesc *edesc;
1471 	struct scatterlist *sg;
1472 	void *icvdata;
1473 
1474 	req->cryptlen -= authsize;
1475 
1476 	/* allocate extended descriptor */
1477 	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1478 	if (IS_ERR(edesc))
1479 		return PTR_ERR(edesc);
1480 
1481 	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1482 	    ((!edesc->src_nents && !edesc->dst_nents) ||
1483 	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1484 
1485 		/* decrypt and check the ICV */
1486 		edesc->desc.hdr = ctx->desc_hdr_template |
1487 				  DESC_HDR_DIR_INBOUND |
1488 				  DESC_HDR_MODE1_MDEU_CICV;
1489 
1490 		/* reset integrity check result bits */
1491 
1492 		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1493 	}
1494 
1495 	/* Have to check the ICV with software */
1496 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1497 
1498 	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1499 	if (edesc->dma_len)
1500 		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1501 						   edesc->dst_nents + 2];
1502 	else
1503 		icvdata = &edesc->link_tbl[0];
1504 
1505 	sg = sg_last(req->src, edesc->src_nents ? : 1);
1506 
1507 	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1508 
1509 	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1510 }
1511 
1512 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1513 			     const u8 *key, unsigned int keylen)
1514 {
1515 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1516 	struct device *dev = ctx->dev;
1517 	u32 tmp[DES_EXPKEY_WORDS];
1518 
1519 	if (keylen > TALITOS_MAX_KEY_SIZE) {
1520 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1521 		return -EINVAL;
1522 	}
1523 
1524 	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1525 		     CRYPTO_TFM_REQ_WEAK_KEY) &&
1526 	    !des_ekey(tmp, key)) {
1527 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1528 		return -EINVAL;
1529 	}
1530 
1531 	if (ctx->keylen)
1532 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1533 
1534 	memcpy(&ctx->key, key, keylen);
1535 	ctx->keylen = keylen;
1536 
1537 	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1538 
1539 	return 0;
1540 }
1541 
1542 static void common_nonsnoop_unmap(struct device *dev,
1543 				  struct talitos_edesc *edesc,
1544 				  struct ablkcipher_request *areq)
1545 {
1546 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1547 
1548 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1549 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1550 
1551 	if (edesc->dma_len)
1552 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1553 				 DMA_BIDIRECTIONAL);
1554 }
1555 
1556 static void ablkcipher_done(struct device *dev,
1557 			    struct talitos_desc *desc, void *context,
1558 			    int err)
1559 {
1560 	struct ablkcipher_request *areq = context;
1561 	struct talitos_edesc *edesc;
1562 
1563 	edesc = container_of(desc, struct talitos_edesc, desc);
1564 
1565 	common_nonsnoop_unmap(dev, edesc, areq);
1566 
1567 	kfree(edesc);
1568 
1569 	areq->base.complete(&areq->base, err);
1570 }
1571 
1572 static int common_nonsnoop(struct talitos_edesc *edesc,
1573 			   struct ablkcipher_request *areq,
1574 			   void (*callback) (struct device *dev,
1575 					     struct talitos_desc *desc,
1576 					     void *context, int error))
1577 {
1578 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1579 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1580 	struct device *dev = ctx->dev;
1581 	struct talitos_desc *desc = &edesc->desc;
1582 	unsigned int cryptlen = areq->nbytes;
1583 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1584 	int sg_count, ret;
1585 	bool sync_needed = false;
1586 	struct talitos_private *priv = dev_get_drvdata(dev);
1587 	bool is_sec1 = has_ftr_sec1(priv);
1588 
1589 	/* first DWORD empty */
1590 
1591 	/* cipher iv */
1592 	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1593 
1594 	/* cipher key */
1595 	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1596 
1597 	sg_count = edesc->src_nents ?: 1;
1598 	if (is_sec1 && sg_count > 1)
1599 		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1600 				  cryptlen);
1601 	else
1602 		sg_count = dma_map_sg(dev, areq->src, sg_count,
1603 				      (areq->src == areq->dst) ?
1604 				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1605 	/*
1606 	 * cipher in
1607 	 */
1608 	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1609 				  &desc->ptr[3], sg_count, 0, 0);
1610 	if (sg_count > 1)
1611 		sync_needed = true;
1612 
1613 	/* cipher out */
1614 	if (areq->src != areq->dst) {
1615 		sg_count = edesc->dst_nents ? : 1;
1616 		if (!is_sec1 || sg_count == 1)
1617 			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1618 	}
1619 
1620 	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1621 			     sg_count, 0, (edesc->src_nents + 1));
1622 	if (ret > 1)
1623 		sync_needed = true;
1624 
1625 	/* iv out */
1626 	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1627 			       DMA_FROM_DEVICE);
1628 
1629 	/* last DWORD empty */
1630 
1631 	if (sync_needed)
1632 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1633 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1634 
1635 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1636 	if (ret != -EINPROGRESS) {
1637 		common_nonsnoop_unmap(dev, edesc, areq);
1638 		kfree(edesc);
1639 	}
1640 	return ret;
1641 }
1642 
1643 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1644 						    areq, bool encrypt)
1645 {
1646 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1647 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1648 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1649 
1650 	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1651 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1652 				   areq->base.flags, encrypt);
1653 }
1654 
1655 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1656 {
1657 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1658 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1659 	struct talitos_edesc *edesc;
1660 
1661 	/* allocate extended descriptor */
1662 	edesc = ablkcipher_edesc_alloc(areq, true);
1663 	if (IS_ERR(edesc))
1664 		return PTR_ERR(edesc);
1665 
1666 	/* set encrypt */
1667 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1668 
1669 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1670 }
1671 
1672 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1673 {
1674 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1675 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1676 	struct talitos_edesc *edesc;
1677 
1678 	/* allocate extended descriptor */
1679 	edesc = ablkcipher_edesc_alloc(areq, false);
1680 	if (IS_ERR(edesc))
1681 		return PTR_ERR(edesc);
1682 
1683 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1684 
1685 	return common_nonsnoop(edesc, areq, ablkcipher_done);
1686 }
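/*
 * Note that ablkcipher_encrypt() and ablkcipher_decrypt() differ only in
 * the header template bits (DESC_HDR_MODE0_ENCRYPT vs
 * DESC_HDR_DIR_INBOUND); the data path through common_nonsnoop() is
 * identical.
 */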
1687 
1688 static void common_nonsnoop_hash_unmap(struct device *dev,
1689 				       struct talitos_edesc *edesc,
1690 				       struct ahash_request *areq)
1691 {
1692 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1693 
1694 	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1695 
1696 	if (edesc->dma_len)
1697 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1698 				 DMA_BIDIRECTIONAL);
1699 
1700 	if (edesc->desc.next_desc)
1701 		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1702 				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1703 }
1704 
1705 static void ahash_done(struct device *dev,
1706 		       struct talitos_desc *desc, void *context,
1707 		       int err)
1708 {
1709 	struct ahash_request *areq = context;
1710 	struct talitos_edesc *edesc =
1711 		 container_of(desc, struct talitos_edesc, desc);
1712 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1713 
1714 	if (!req_ctx->last && req_ctx->to_hash_later) {
1715 		/* Position any partial block for next update/final/finup */
1716 		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1717 		req_ctx->nbuf = req_ctx->to_hash_later;
1718 	}
1719 	common_nonsnoop_hash_unmap(dev, edesc, areq);
1720 
1721 	kfree(edesc);
1722 
1723 	areq->base.complete(&areq->base, err);
1724 }
1725 
1726 /*
1727  * SEC1 doesn't handle hashing of zero-sized messages, so we do the
1728  * padding ourselves and submit a pre-padded block
1729  */
1730 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1731 			       struct talitos_edesc *edesc,
1732 			       struct talitos_ptr *ptr)
1733 {
1734 	static u8 padded_hash[64] = {
1735 		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1736 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1737 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1738 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1739 	};
1740 
1741 	pr_err_once("Bug in SEC1, padding ourselves\n");
1742 	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1743 	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1744 			       (char *)padded_hash, DMA_TO_DEVICE);
1745 }
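/*
 * Note: padded_hash above is exactly the Merkle-Damgard padding of an
 * empty message for the 64-byte-block digests (MD5/SHA-1/SHA-256): the
 * 0x80 end-of-message marker followed by zeros, with the trailing
 * 64-bit length field left at zero.  Clearing DESC_HDR_MODE0_MDEU_PAD
 * makes the MDEU hash the block as-is, which yields the empty-message
 * digest without the hardware ever seeing a zero-length request.
 */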
1746 
1747 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1748 				struct ahash_request *areq, unsigned int length,
1749 				unsigned int offset,
1750 				void (*callback) (struct device *dev,
1751 						  struct talitos_desc *desc,
1752 						  void *context, int error))
1753 {
1754 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1755 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1756 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1757 	struct device *dev = ctx->dev;
1758 	struct talitos_desc *desc = &edesc->desc;
1759 	int ret;
1760 	bool sync_needed = false;
1761 	struct talitos_private *priv = dev_get_drvdata(dev);
1762 	bool is_sec1 = has_ftr_sec1(priv);
1763 	int sg_count;
1764 
1765 	/* first DWORD empty */
1766 
1767 	/* hash context in */
1768 	if (!req_ctx->first || req_ctx->swinit) {
1769 		to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context,
1770 			       req_ctx->hw_context_size, is_sec1);
1771 		req_ctx->swinit = 0;
1772 	}
1773 	/* Indicate next op is not the first. */
1774 	req_ctx->first = 0;
1775 
1776 	/* HMAC key */
1777 	if (ctx->keylen)
1778 		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1779 			       is_sec1);
1780 
1781 	if (is_sec1 && req_ctx->nbuf)
1782 		length -= req_ctx->nbuf;
1783 
1784 	sg_count = edesc->src_nents ?: 1;
1785 	if (is_sec1 && sg_count > 1)
1786 		sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1787 				   edesc->buf + sizeof(struct talitos_desc),
1788 				   length, req_ctx->nbuf);
1789 	else if (length)
1790 		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1791 				      DMA_TO_DEVICE);
1792 	/* data in */
1795 	if (is_sec1 && req_ctx->nbuf) {
1796 		dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
1797 						    HASH_MAX_BLOCK_SIZE;
1798 
1799 		to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1);
1800 	} else {
1801 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1802 					  &desc->ptr[3], sg_count, offset, 0);
1803 		if (sg_count > 1)
1804 			sync_needed = true;
1805 	}
1806 
1807 	/* fifth DWORD empty */
1808 
1809 	/* hash/HMAC out -or- hash context out */
1810 	if (req_ctx->last)
1811 		map_single_talitos_ptr(dev, &desc->ptr[5],
1812 				       crypto_ahash_digestsize(tfm),
1813 				       areq->result, DMA_FROM_DEVICE);
1814 	else
1815 		to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
1816 			       req_ctx->hw_context_size, is_sec1);
1817 
1818 	/* last DWORD empty */
1819 
1820 	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1821 		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1822 
1823 	if (is_sec1 && req_ctx->nbuf && length) {
1824 		struct talitos_desc *desc2 = desc + 1;
1825 		dma_addr_t next_desc;
1826 
1827 		memset(desc2, 0, sizeof(*desc2));
1828 		desc2->hdr = desc->hdr;
1829 		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1830 		desc2->hdr1 = desc2->hdr;
1831 		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1832 		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1833 		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1834 
1835 		to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context,
1836 			       req_ctx->hw_context_size, is_sec1);
1837 
1838 		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1839 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1840 					  &desc2->ptr[3], sg_count, offset, 0);
1841 		if (sg_count > 1)
1842 			sync_needed = true;
1843 		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1844 		if (req_ctx->last)
1845 			to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
1846 				       req_ctx->hw_context_size, is_sec1);
1847 
1848 		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1849 					   DMA_BIDIRECTIONAL);
1850 		desc->next_desc = cpu_to_be32(next_desc);
1851 	}
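	/*
	 * In the two-descriptor case above, desc hashes the previously
	 * buffered partial block as a continuation (PAD cleared, DONE
	 * notification suppressed) and chains via next_desc into desc2,
	 * which hashes the remaining scatterlist data and keeps the
	 * original pad/last semantics and completion notification.
	 */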
1852 
1853 	if (sync_needed)
1854 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1855 					   edesc->dma_len, DMA_BIDIRECTIONAL);
1856 
1857 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1858 	if (ret != -EINPROGRESS) {
1859 		common_nonsnoop_hash_unmap(dev, edesc, areq);
1860 		kfree(edesc);
1861 	}
1862 	return ret;
1863 }
1864 
1865 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1866 					       unsigned int nbytes)
1867 {
1868 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1869 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1870 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1871 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1872 	bool is_sec1 = has_ftr_sec1(priv);
1873 
1874 	if (is_sec1)
1875 		nbytes -= req_ctx->nbuf;
1876 
1877 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1878 				   nbytes, 0, 0, 0, areq->base.flags, false);
1879 }
1880 
1881 static int ahash_init(struct ahash_request *areq)
1882 {
1883 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1884 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1885 	struct device *dev = ctx->dev;
1886 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1887 	unsigned int size;
1888 	struct talitos_private *priv = dev_get_drvdata(dev);
1889 	bool is_sec1 = has_ftr_sec1(priv);
1890 
1891 	/* Initialize the context */
1892 	req_ctx->buf_idx = 0;
1893 	req_ctx->nbuf = 0;
1894 	req_ctx->first = 1; /* first indicates h/w must init its context */
1895 	req_ctx->swinit = 0; /* assume h/w init of context */
1896 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1897 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1898 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1899 	req_ctx->hw_context_size = size;
1900 
1901 	if (ctx->dma_hw_context)
1902 		dma_unmap_single(dev, ctx->dma_hw_context, size,
1903 				 DMA_BIDIRECTIONAL);
1904 	ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
1905 					     DMA_BIDIRECTIONAL);
1906 	if (ctx->dma_buf)
1907 		dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
1908 				 DMA_TO_DEVICE);
1909 	if (is_sec1)
1910 		ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
1911 					      sizeof(req_ctx->buf),
1912 					      DMA_TO_DEVICE);
1913 	return 0;
1914 }
1915 
1916 /*
1917  * on h/w without explicit sha224 support, we initialize h/w context
1918  * manually with sha224 constants, and tell it to run sha256.
1919  */
1920 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1921 {
1922 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1923 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1924 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1925 	struct device *dev = ctx->dev;
1926 
1927 	ahash_init(areq);
1928 	req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */
1929 
1930 	req_ctx->hw_context[0] = SHA224_H0;
1931 	req_ctx->hw_context[1] = SHA224_H1;
1932 	req_ctx->hw_context[2] = SHA224_H2;
1933 	req_ctx->hw_context[3] = SHA224_H3;
1934 	req_ctx->hw_context[4] = SHA224_H4;
1935 	req_ctx->hw_context[5] = SHA224_H5;
1936 	req_ctx->hw_context[6] = SHA224_H6;
1937 	req_ctx->hw_context[7] = SHA224_H7;
1938 
1939 	/* init 64-bit count */
1940 	req_ctx->hw_context[8] = 0;
1941 	req_ctx->hw_context[9] = 0;
1942 
1943 	dma_sync_single_for_device(dev, ctx->dma_hw_context,
1944 				   req_ctx->hw_context_size, DMA_TO_DEVICE);
1945 
1946 	return 0;
1947 }
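/*
 * SHA-224 is SHA-256 with a different initial state, truncated to 28
 * bytes on output; crypto_ahash_digestsize() bounds the copy-out in
 * common_nonsnoop_hash(), so only the initial hw_context values need
 * to differ here.
 */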
1948 
1949 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1950 {
1951 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1952 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1953 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1954 	struct talitos_edesc *edesc;
1955 	unsigned int blocksize =
1956 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1957 	unsigned int nbytes_to_hash;
1958 	unsigned int to_hash_later;
1959 	unsigned int nsg;
1960 	int nents;
1961 	struct device *dev = ctx->dev;
1962 	struct talitos_private *priv = dev_get_drvdata(dev);
1963 	bool is_sec1 = has_ftr_sec1(priv);
1964 	int offset = 0;
1965 	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1966 
1967 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1968 		/* Buffer up to one whole block */
1969 		nents = sg_nents_for_len(areq->src, nbytes);
1970 		if (nents < 0) {
1971 			dev_err(ctx->dev, "Invalid number of src SG.\n");
1972 			return nents;
1973 		}
1974 		sg_copy_to_buffer(areq->src, nents,
1975 				  ctx_buf + req_ctx->nbuf, nbytes);
1976 		req_ctx->nbuf += nbytes;
1977 		return 0;
1978 	}
1979 
1980 	/* At least (blocksize + 1) bytes are available to hash */
1981 	nbytes_to_hash = nbytes + req_ctx->nbuf;
1982 	to_hash_later = nbytes_to_hash & (blocksize - 1);
1983 
1984 	if (req_ctx->last) {
1985 		to_hash_later = 0;
1986 	} else if (to_hash_later) {
1987 		/* There is a partial block. Hash the full block(s) now */
1988 		nbytes_to_hash -= to_hash_later;
1989 	} else {
1990 		/* Keep one block buffered */
1991 		nbytes_to_hash -= blocksize;
1992 		to_hash_later = blocksize;
1993 	}
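	/*
	 * Worked example, 64-byte block size: 10 bytes already buffered
	 * plus a 120-byte update gives nbytes_to_hash = 130 and
	 * to_hash_later = 2, so 128 bytes are hashed now and 2 are
	 * re-buffered.  With a 118-byte update (exactly 128 in total) a
	 * full block is held back instead, so that a subsequent
	 * final/finup always has data left to pad and terminate.
	 */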
1994 
1995 	/* Chain in any previously buffered data */
1996 	if (!is_sec1 && req_ctx->nbuf) {
1997 		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1998 		sg_init_table(req_ctx->bufsl, nsg);
1999 		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2000 		if (nsg > 1)
2001 			sg_chain(req_ctx->bufsl, 2, areq->src);
2002 		req_ctx->psrc = req_ctx->bufsl;
2003 	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2004 		if (nbytes_to_hash > blocksize)
2005 			offset = blocksize - req_ctx->nbuf;
2006 		else
2007 			offset = nbytes_to_hash - req_ctx->nbuf;
2008 		nents = sg_nents_for_len(areq->src, offset);
2009 		if (nents < 0) {
2010 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2011 			return nents;
2012 		}
2013 		sg_copy_to_buffer(areq->src, nents,
2014 				  ctx_buf + req_ctx->nbuf, offset);
2015 		req_ctx->nbuf += offset;
2016 		req_ctx->psrc = areq->src;
2017 	} else {
2018 		req_ctx->psrc = areq->src;
	}
2019 
2020 	if (to_hash_later) {
2021 		nents = sg_nents_for_len(areq->src, nbytes);
2022 		if (nents < 0) {
2023 			dev_err(ctx->dev, "Invalid number of src SG.\n");
2024 			return nents;
2025 		}
2026 		sg_pcopy_to_buffer(areq->src, nents,
2027 				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2028 				   to_hash_later,
2029 				   nbytes - to_hash_later);
2030 	}
2031 	req_ctx->to_hash_later = to_hash_later;
2032 
2033 	/* Allocate extended descriptor */
2034 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2035 	if (IS_ERR(edesc))
2036 		return PTR_ERR(edesc);
2037 
2038 	edesc->desc.hdr = ctx->desc_hdr_template;
2039 
2040 	/* On last one, request SEC to pad; otherwise continue */
2041 	if (req_ctx->last)
2042 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2043 	else
2044 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2045 
2046 	/* request SEC to INIT hash. */
2047 	if (req_ctx->first && !req_ctx->swinit)
2048 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2049 	if (is_sec1) {
2050 		dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
2051 						    HASH_MAX_BLOCK_SIZE;
2052 
2053 		dma_sync_single_for_device(dev, dma_buf,
2054 					   req_ctx->nbuf, DMA_TO_DEVICE);
2055 	}
2056 
2057 	/* When the tfm context has a keylen, it's an HMAC.
2058 	 * A first or last (i.e. not middle) descriptor must request HMAC.
2059 	 */
2060 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2061 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2062 
2063 	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
2064 				    ahash_done);
2065 }
2066 
2067 static int ahash_update(struct ahash_request *areq)
2068 {
2069 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2070 
2071 	req_ctx->last = 0;
2072 
2073 	return ahash_process_req(areq, areq->nbytes);
2074 }
2075 
2076 static int ahash_final(struct ahash_request *areq)
2077 {
2078 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2079 
2080 	req_ctx->last = 1;
2081 
2082 	return ahash_process_req(areq, 0);
2083 }
2084 
2085 static int ahash_finup(struct ahash_request *areq)
2086 {
2087 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2088 
2089 	req_ctx->last = 1;
2090 
2091 	return ahash_process_req(areq, areq->nbytes);
2092 }
2093 
2094 static int ahash_digest(struct ahash_request *areq)
2095 {
2096 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2097 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2098 
2099 	ahash->init(areq);
2100 	req_ctx->last = 1;
2101 
2102 	return ahash_process_req(areq, areq->nbytes);
2103 }
2104 
2105 static int ahash_export(struct ahash_request *areq, void *out)
2106 {
2107 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2108 	struct talitos_export_state *export = out;
2109 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2110 	struct talitos_ctx *ctx = crypto_ahash_ctx(ahash);
2111 	struct device *dev = ctx->dev;
2112 
2113 	dma_sync_single_for_cpu(dev, ctx->dma_hw_context,
2114 				req_ctx->hw_context_size, DMA_FROM_DEVICE);
2115 	memcpy(export->hw_context, req_ctx->hw_context,
2116 	       req_ctx->hw_context_size);
2117 	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2118 	export->swinit = req_ctx->swinit;
2119 	export->first = req_ctx->first;
2120 	export->last = req_ctx->last;
2121 	export->to_hash_later = req_ctx->to_hash_later;
2122 	export->nbuf = req_ctx->nbuf;
2123 
2124 	return 0;
2125 }
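/*
 * ahash_export()/ahash_import() snapshot everything needed to resume a
 * partially hashed request: the hardware context (intermediate digest
 * state plus the 64-bit message length) and any not-yet-hashed partial
 * block, sized via .statesize = sizeof(struct talitos_export_state) in
 * the templates below.
 */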
2126 
2127 static int ahash_import(struct ahash_request *areq, const void *in)
2128 {
2129 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2130 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2131 	const struct talitos_export_state *export = in;
2132 	unsigned int size;
2133 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2134 	struct device *dev = ctx->dev;
2135 	struct talitos_private *priv = dev_get_drvdata(dev);
2136 	bool is_sec1 = has_ftr_sec1(priv);
2137 
2138 	memset(req_ctx, 0, sizeof(*req_ctx));
2139 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2140 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2141 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2142 	req_ctx->hw_context_size = size;
2143 	if (ctx->dma_hw_context)
2144 		dma_unmap_single(dev, ctx->dma_hw_context, size,
2145 				 DMA_BIDIRECTIONAL);
2146 
2147 	memcpy(req_ctx->hw_context, export->hw_context, size);
2148 	ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
2149 					     DMA_BIDIRECTIONAL);
2150 	if (ctx->dma_buf)
2151 		dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
2152 				 DMA_TO_DEVICE);
2153 	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2154 	if (is_sec1)
2155 		ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
2156 					      sizeof(req_ctx->buf),
2157 					      DMA_TO_DEVICE);
2158 	req_ctx->swinit = export->swinit;
2159 	req_ctx->first = export->first;
2160 	req_ctx->last = export->last;
2161 	req_ctx->to_hash_later = export->to_hash_later;
2162 	req_ctx->nbuf = export->nbuf;
2163 
2164 	return 0;
2165 }
2166 
2167 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2168 		   u8 *hash)
2169 {
2170 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2172 	struct scatterlist sg[1];
2173 	struct ahash_request *req;
2174 	struct crypto_wait wait;
2175 	int ret;
2176 
2177 	crypto_init_wait(&wait);
2178 
2179 	req = ahash_request_alloc(tfm, GFP_KERNEL);
2180 	if (!req)
2181 		return -ENOMEM;
2182 
2183 	/* Keep tfm keylen == 0 during hash of the long key */
2184 	ctx->keylen = 0;
2185 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2186 				   crypto_req_done, &wait);
2187 
2188 	sg_init_one(&sg[0], key, keylen);
2189 
2190 	ahash_request_set_crypt(req, sg, hash, keylen);
2191 	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2192 
2193 	ahash_request_free(req);
2194 
2195 	return ret;
2196 }
2197 
2198 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2199 			unsigned int keylen)
2200 {
2201 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2202 	struct device *dev = ctx->dev;
2203 	unsigned int blocksize =
2204 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2205 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2206 	unsigned int keysize = keylen;
2207 	u8 hash[SHA512_DIGEST_SIZE];
2208 	int ret;
2209 
2210 	if (keylen <= blocksize) {
2211 		memcpy(ctx->key, key, keysize);
2212 	} else {
2213 		/* Must get the hash of the long key */
2214 		ret = keyhash(tfm, key, keylen, hash);
2215 
2216 		if (ret) {
2217 			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2218 			return -EINVAL;
2219 		}
2220 
2221 		keysize = digestsize;
2222 		memcpy(ctx->key, hash, digestsize);
2223 	}
2224 
2225 	if (ctx->keylen)
2226 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2227 
2228 	ctx->keylen = keysize;
2229 	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2230 
2231 	return 0;
2232 }
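/*
 * Per the HMAC definition (RFC 2104), keys longer than the block size
 * are first replaced by their digest: e.g. a 100-byte key set on
 * hmac(sha256) (64-byte blocks) is hashed down to 32 bytes above
 * before being DMA-mapped for the descriptor's key pointer.
 */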
2233 
2234 
2235 struct talitos_alg_template {
2236 	u32 type;
2237 	u32 priority;
2238 	union {
2239 		struct crypto_alg crypto;
2240 		struct ahash_alg hash;
2241 		struct aead_alg aead;
2242 	} alg;
2243 	__be32 desc_hdr_template;
2244 };
2245 
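/*
 * Template-driven registration: each driver_algs[] entry pairs a crypto
 * API algorithm definition with the precomputed descriptor header that
 * selects the execution unit(s) and mode(s) implementing it.  At probe
 * time talitos_alg_alloc() copies a template and fills in the per-type
 * callbacks; a template is only registered when hw_supports() confirms
 * the device provides the needed descriptor type and execution units.
 */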
2246 static struct talitos_alg_template driver_algs[] = {
2247 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2248 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2249 		.alg.aead = {
2250 			.base = {
2251 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2252 				.cra_driver_name = "authenc-hmac-sha1-"
2253 						   "cbc-aes-talitos",
2254 				.cra_blocksize = AES_BLOCK_SIZE,
2255 				.cra_flags = CRYPTO_ALG_ASYNC,
2256 			},
2257 			.ivsize = AES_BLOCK_SIZE,
2258 			.maxauthsize = SHA1_DIGEST_SIZE,
2259 		},
2260 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2261 			             DESC_HDR_SEL0_AESU |
2262 		                     DESC_HDR_MODE0_AESU_CBC |
2263 		                     DESC_HDR_SEL1_MDEUA |
2264 		                     DESC_HDR_MODE1_MDEU_INIT |
2265 		                     DESC_HDR_MODE1_MDEU_PAD |
2266 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2267 	},
2268 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2269 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2270 		.alg.aead = {
2271 			.base = {
2272 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2273 				.cra_driver_name = "authenc-hmac-sha1-"
2274 						   "cbc-aes-talitos",
2275 				.cra_blocksize = AES_BLOCK_SIZE,
2276 				.cra_flags = CRYPTO_ALG_ASYNC,
2277 			},
2278 			.ivsize = AES_BLOCK_SIZE,
2279 			.maxauthsize = SHA1_DIGEST_SIZE,
2280 		},
2281 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2282 				     DESC_HDR_SEL0_AESU |
2283 				     DESC_HDR_MODE0_AESU_CBC |
2284 				     DESC_HDR_SEL1_MDEUA |
2285 				     DESC_HDR_MODE1_MDEU_INIT |
2286 				     DESC_HDR_MODE1_MDEU_PAD |
2287 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2288 	},
2289 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2290 		.alg.aead = {
2291 			.base = {
2292 				.cra_name = "authenc(hmac(sha1),"
2293 					    "cbc(des3_ede))",
2294 				.cra_driver_name = "authenc-hmac-sha1-"
2295 						   "cbc-3des-talitos",
2296 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2297 				.cra_flags = CRYPTO_ALG_ASYNC,
2298 			},
2299 			.ivsize = DES3_EDE_BLOCK_SIZE,
2300 			.maxauthsize = SHA1_DIGEST_SIZE,
2301 		},
2302 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2303 			             DESC_HDR_SEL0_DEU |
2304 		                     DESC_HDR_MODE0_DEU_CBC |
2305 		                     DESC_HDR_MODE0_DEU_3DES |
2306 		                     DESC_HDR_SEL1_MDEUA |
2307 		                     DESC_HDR_MODE1_MDEU_INIT |
2308 		                     DESC_HDR_MODE1_MDEU_PAD |
2309 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2310 	},
2311 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2312 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2313 		.alg.aead = {
2314 			.base = {
2315 				.cra_name = "authenc(hmac(sha1),"
2316 					    "cbc(des3_ede))",
2317 				.cra_driver_name = "authenc-hmac-sha1-"
2318 						   "cbc-3des-talitos",
2319 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2320 				.cra_flags = CRYPTO_ALG_ASYNC,
2321 			},
2322 			.ivsize = DES3_EDE_BLOCK_SIZE,
2323 			.maxauthsize = SHA1_DIGEST_SIZE,
2324 		},
2325 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2326 				     DESC_HDR_SEL0_DEU |
2327 				     DESC_HDR_MODE0_DEU_CBC |
2328 				     DESC_HDR_MODE0_DEU_3DES |
2329 				     DESC_HDR_SEL1_MDEUA |
2330 				     DESC_HDR_MODE1_MDEU_INIT |
2331 				     DESC_HDR_MODE1_MDEU_PAD |
2332 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2333 	},
2334 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2335 		.alg.aead = {
2336 			.base = {
2337 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2338 				.cra_driver_name = "authenc-hmac-sha224-"
2339 						   "cbc-aes-talitos",
2340 				.cra_blocksize = AES_BLOCK_SIZE,
2341 				.cra_flags = CRYPTO_ALG_ASYNC,
2342 			},
2343 			.ivsize = AES_BLOCK_SIZE,
2344 			.maxauthsize = SHA224_DIGEST_SIZE,
2345 		},
2346 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2347 				     DESC_HDR_SEL0_AESU |
2348 				     DESC_HDR_MODE0_AESU_CBC |
2349 				     DESC_HDR_SEL1_MDEUA |
2350 				     DESC_HDR_MODE1_MDEU_INIT |
2351 				     DESC_HDR_MODE1_MDEU_PAD |
2352 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2353 	},
2354 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2355 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2356 		.alg.aead = {
2357 			.base = {
2358 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2359 				.cra_driver_name = "authenc-hmac-sha224-"
2360 						   "cbc-aes-talitos",
2361 				.cra_blocksize = AES_BLOCK_SIZE,
2362 				.cra_flags = CRYPTO_ALG_ASYNC,
2363 			},
2364 			.ivsize = AES_BLOCK_SIZE,
2365 			.maxauthsize = SHA224_DIGEST_SIZE,
2366 		},
2367 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2368 				     DESC_HDR_SEL0_AESU |
2369 				     DESC_HDR_MODE0_AESU_CBC |
2370 				     DESC_HDR_SEL1_MDEUA |
2371 				     DESC_HDR_MODE1_MDEU_INIT |
2372 				     DESC_HDR_MODE1_MDEU_PAD |
2373 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2374 	},
2375 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2376 		.alg.aead = {
2377 			.base = {
2378 				.cra_name = "authenc(hmac(sha224),"
2379 					    "cbc(des3_ede))",
2380 				.cra_driver_name = "authenc-hmac-sha224-"
2381 						   "cbc-3des-talitos",
2382 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2383 				.cra_flags = CRYPTO_ALG_ASYNC,
2384 			},
2385 			.ivsize = DES3_EDE_BLOCK_SIZE,
2386 			.maxauthsize = SHA224_DIGEST_SIZE,
2387 		},
2388 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2389 			             DESC_HDR_SEL0_DEU |
2390 		                     DESC_HDR_MODE0_DEU_CBC |
2391 		                     DESC_HDR_MODE0_DEU_3DES |
2392 		                     DESC_HDR_SEL1_MDEUA |
2393 		                     DESC_HDR_MODE1_MDEU_INIT |
2394 		                     DESC_HDR_MODE1_MDEU_PAD |
2395 		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2396 	},
2397 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2398 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2399 		.alg.aead = {
2400 			.base = {
2401 				.cra_name = "authenc(hmac(sha224),"
2402 					    "cbc(des3_ede))",
2403 				.cra_driver_name = "authenc-hmac-sha224-"
2404 						   "cbc-3des-talitos",
2405 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2406 				.cra_flags = CRYPTO_ALG_ASYNC,
2407 			},
2408 			.ivsize = DES3_EDE_BLOCK_SIZE,
2409 			.maxauthsize = SHA224_DIGEST_SIZE,
2410 		},
2411 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2412 				     DESC_HDR_SEL0_DEU |
2413 				     DESC_HDR_MODE0_DEU_CBC |
2414 				     DESC_HDR_MODE0_DEU_3DES |
2415 				     DESC_HDR_SEL1_MDEUA |
2416 				     DESC_HDR_MODE1_MDEU_INIT |
2417 				     DESC_HDR_MODE1_MDEU_PAD |
2418 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2419 	},
2420 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2421 		.alg.aead = {
2422 			.base = {
2423 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2424 				.cra_driver_name = "authenc-hmac-sha256-"
2425 						   "cbc-aes-talitos",
2426 				.cra_blocksize = AES_BLOCK_SIZE,
2427 				.cra_flags = CRYPTO_ALG_ASYNC,
2428 			},
2429 			.ivsize = AES_BLOCK_SIZE,
2430 			.maxauthsize = SHA256_DIGEST_SIZE,
2431 		},
2432 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2433 			             DESC_HDR_SEL0_AESU |
2434 		                     DESC_HDR_MODE0_AESU_CBC |
2435 		                     DESC_HDR_SEL1_MDEUA |
2436 		                     DESC_HDR_MODE1_MDEU_INIT |
2437 		                     DESC_HDR_MODE1_MDEU_PAD |
2438 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2439 	},
2440 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2441 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2442 		.alg.aead = {
2443 			.base = {
2444 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2445 				.cra_driver_name = "authenc-hmac-sha256-"
2446 						   "cbc-aes-talitos",
2447 				.cra_blocksize = AES_BLOCK_SIZE,
2448 				.cra_flags = CRYPTO_ALG_ASYNC,
2449 			},
2450 			.ivsize = AES_BLOCK_SIZE,
2451 			.maxauthsize = SHA256_DIGEST_SIZE,
2452 		},
2453 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2454 				     DESC_HDR_SEL0_AESU |
2455 				     DESC_HDR_MODE0_AESU_CBC |
2456 				     DESC_HDR_SEL1_MDEUA |
2457 				     DESC_HDR_MODE1_MDEU_INIT |
2458 				     DESC_HDR_MODE1_MDEU_PAD |
2459 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2460 	},
2461 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2462 		.alg.aead = {
2463 			.base = {
2464 				.cra_name = "authenc(hmac(sha256),"
2465 					    "cbc(des3_ede))",
2466 				.cra_driver_name = "authenc-hmac-sha256-"
2467 						   "cbc-3des-talitos",
2468 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2469 				.cra_flags = CRYPTO_ALG_ASYNC,
2470 			},
2471 			.ivsize = DES3_EDE_BLOCK_SIZE,
2472 			.maxauthsize = SHA256_DIGEST_SIZE,
2473 		},
2474 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2475 			             DESC_HDR_SEL0_DEU |
2476 		                     DESC_HDR_MODE0_DEU_CBC |
2477 		                     DESC_HDR_MODE0_DEU_3DES |
2478 		                     DESC_HDR_SEL1_MDEUA |
2479 		                     DESC_HDR_MODE1_MDEU_INIT |
2480 		                     DESC_HDR_MODE1_MDEU_PAD |
2481 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2482 	},
2483 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2484 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2485 		.alg.aead = {
2486 			.base = {
2487 				.cra_name = "authenc(hmac(sha256),"
2488 					    "cbc(des3_ede))",
2489 				.cra_driver_name = "authenc-hmac-sha256-"
2490 						   "cbc-3des-talitos",
2491 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2492 				.cra_flags = CRYPTO_ALG_ASYNC,
2493 			},
2494 			.ivsize = DES3_EDE_BLOCK_SIZE,
2495 			.maxauthsize = SHA256_DIGEST_SIZE,
2496 		},
2497 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2498 				     DESC_HDR_SEL0_DEU |
2499 				     DESC_HDR_MODE0_DEU_CBC |
2500 				     DESC_HDR_MODE0_DEU_3DES |
2501 				     DESC_HDR_SEL1_MDEUA |
2502 				     DESC_HDR_MODE1_MDEU_INIT |
2503 				     DESC_HDR_MODE1_MDEU_PAD |
2504 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2505 	},
2506 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2507 		.alg.aead = {
2508 			.base = {
2509 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2510 				.cra_driver_name = "authenc-hmac-sha384-"
2511 						   "cbc-aes-talitos",
2512 				.cra_blocksize = AES_BLOCK_SIZE,
2513 				.cra_flags = CRYPTO_ALG_ASYNC,
2514 			},
2515 			.ivsize = AES_BLOCK_SIZE,
2516 			.maxauthsize = SHA384_DIGEST_SIZE,
2517 		},
2518 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2519 			             DESC_HDR_SEL0_AESU |
2520 		                     DESC_HDR_MODE0_AESU_CBC |
2521 		                     DESC_HDR_SEL1_MDEUB |
2522 		                     DESC_HDR_MODE1_MDEU_INIT |
2523 		                     DESC_HDR_MODE1_MDEU_PAD |
2524 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2525 	},
2526 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2527 		.alg.aead = {
2528 			.base = {
2529 				.cra_name = "authenc(hmac(sha384),"
2530 					    "cbc(des3_ede))",
2531 				.cra_driver_name = "authenc-hmac-sha384-"
2532 						   "cbc-3des-talitos",
2533 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2534 				.cra_flags = CRYPTO_ALG_ASYNC,
2535 			},
2536 			.ivsize = DES3_EDE_BLOCK_SIZE,
2537 			.maxauthsize = SHA384_DIGEST_SIZE,
2538 		},
2539 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2540 			             DESC_HDR_SEL0_DEU |
2541 		                     DESC_HDR_MODE0_DEU_CBC |
2542 		                     DESC_HDR_MODE0_DEU_3DES |
2543 		                     DESC_HDR_SEL1_MDEUB |
2544 		                     DESC_HDR_MODE1_MDEU_INIT |
2545 		                     DESC_HDR_MODE1_MDEU_PAD |
2546 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2547 	},
2548 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2549 		.alg.aead = {
2550 			.base = {
2551 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2552 				.cra_driver_name = "authenc-hmac-sha512-"
2553 						   "cbc-aes-talitos",
2554 				.cra_blocksize = AES_BLOCK_SIZE,
2555 				.cra_flags = CRYPTO_ALG_ASYNC,
2556 			},
2557 			.ivsize = AES_BLOCK_SIZE,
2558 			.maxauthsize = SHA512_DIGEST_SIZE,
2559 		},
2560 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2561 			             DESC_HDR_SEL0_AESU |
2562 		                     DESC_HDR_MODE0_AESU_CBC |
2563 		                     DESC_HDR_SEL1_MDEUB |
2564 		                     DESC_HDR_MODE1_MDEU_INIT |
2565 		                     DESC_HDR_MODE1_MDEU_PAD |
2566 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2567 	},
2568 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2569 		.alg.aead = {
2570 			.base = {
2571 				.cra_name = "authenc(hmac(sha512),"
2572 					    "cbc(des3_ede))",
2573 				.cra_driver_name = "authenc-hmac-sha512-"
2574 						   "cbc-3des-talitos",
2575 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2576 				.cra_flags = CRYPTO_ALG_ASYNC,
2577 			},
2578 			.ivsize = DES3_EDE_BLOCK_SIZE,
2579 			.maxauthsize = SHA512_DIGEST_SIZE,
2580 		},
2581 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2582 			             DESC_HDR_SEL0_DEU |
2583 		                     DESC_HDR_MODE0_DEU_CBC |
2584 		                     DESC_HDR_MODE0_DEU_3DES |
2585 		                     DESC_HDR_SEL1_MDEUB |
2586 		                     DESC_HDR_MODE1_MDEU_INIT |
2587 		                     DESC_HDR_MODE1_MDEU_PAD |
2588 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2589 	},
2590 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2591 		.alg.aead = {
2592 			.base = {
2593 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2594 				.cra_driver_name = "authenc-hmac-md5-"
2595 						   "cbc-aes-talitos",
2596 				.cra_blocksize = AES_BLOCK_SIZE,
2597 				.cra_flags = CRYPTO_ALG_ASYNC,
2598 			},
2599 			.ivsize = AES_BLOCK_SIZE,
2600 			.maxauthsize = MD5_DIGEST_SIZE,
2601 		},
2602 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2603 			             DESC_HDR_SEL0_AESU |
2604 		                     DESC_HDR_MODE0_AESU_CBC |
2605 		                     DESC_HDR_SEL1_MDEUA |
2606 		                     DESC_HDR_MODE1_MDEU_INIT |
2607 		                     DESC_HDR_MODE1_MDEU_PAD |
2608 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2609 	},
2610 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2611 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2612 		.alg.aead = {
2613 			.base = {
2614 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2615 				.cra_driver_name = "authenc-hmac-md5-"
2616 						   "cbc-aes-talitos",
2617 				.cra_blocksize = AES_BLOCK_SIZE,
2618 				.cra_flags = CRYPTO_ALG_ASYNC,
2619 			},
2620 			.ivsize = AES_BLOCK_SIZE,
2621 			.maxauthsize = MD5_DIGEST_SIZE,
2622 		},
2623 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2624 				     DESC_HDR_SEL0_AESU |
2625 				     DESC_HDR_MODE0_AESU_CBC |
2626 				     DESC_HDR_SEL1_MDEUA |
2627 				     DESC_HDR_MODE1_MDEU_INIT |
2628 				     DESC_HDR_MODE1_MDEU_PAD |
2629 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2630 	},
2631 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2632 		.alg.aead = {
2633 			.base = {
2634 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2635 				.cra_driver_name = "authenc-hmac-md5-"
2636 						   "cbc-3des-talitos",
2637 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2638 				.cra_flags = CRYPTO_ALG_ASYNC,
2639 			},
2640 			.ivsize = DES3_EDE_BLOCK_SIZE,
2641 			.maxauthsize = MD5_DIGEST_SIZE,
2642 		},
2643 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2644 			             DESC_HDR_SEL0_DEU |
2645 		                     DESC_HDR_MODE0_DEU_CBC |
2646 		                     DESC_HDR_MODE0_DEU_3DES |
2647 		                     DESC_HDR_SEL1_MDEUA |
2648 		                     DESC_HDR_MODE1_MDEU_INIT |
2649 		                     DESC_HDR_MODE1_MDEU_PAD |
2650 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2651 	},
2652 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2653 		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2654 		.alg.aead = {
2655 			.base = {
2656 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2657 				.cra_driver_name = "authenc-hmac-md5-"
2658 						   "cbc-3des-talitos",
2659 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2660 				.cra_flags = CRYPTO_ALG_ASYNC,
2661 			},
2662 			.ivsize = DES3_EDE_BLOCK_SIZE,
2663 			.maxauthsize = MD5_DIGEST_SIZE,
2664 		},
2665 		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2666 				     DESC_HDR_SEL0_DEU |
2667 				     DESC_HDR_MODE0_DEU_CBC |
2668 				     DESC_HDR_MODE0_DEU_3DES |
2669 				     DESC_HDR_SEL1_MDEUA |
2670 				     DESC_HDR_MODE1_MDEU_INIT |
2671 				     DESC_HDR_MODE1_MDEU_PAD |
2672 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2673 	},
2674 	/* ABLKCIPHER algorithms. */
2675 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2676 		.alg.crypto = {
2677 			.cra_name = "ecb(aes)",
2678 			.cra_driver_name = "ecb-aes-talitos",
2679 			.cra_blocksize = AES_BLOCK_SIZE,
2680 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2681 				     CRYPTO_ALG_ASYNC,
2682 			.cra_ablkcipher = {
2683 				.min_keysize = AES_MIN_KEY_SIZE,
2684 				.max_keysize = AES_MAX_KEY_SIZE,
2685 				.ivsize = AES_BLOCK_SIZE,
2686 			}
2687 		},
2688 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2689 				     DESC_HDR_SEL0_AESU,
2690 	},
2691 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2692 		.alg.crypto = {
2693 			.cra_name = "cbc(aes)",
2694 			.cra_driver_name = "cbc-aes-talitos",
2695 			.cra_blocksize = AES_BLOCK_SIZE,
2696 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2697                                      CRYPTO_ALG_ASYNC,
2698 			.cra_ablkcipher = {
2699 				.min_keysize = AES_MIN_KEY_SIZE,
2700 				.max_keysize = AES_MAX_KEY_SIZE,
2701 				.ivsize = AES_BLOCK_SIZE,
2702 			}
2703 		},
2704 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2705 				     DESC_HDR_SEL0_AESU |
2706 				     DESC_HDR_MODE0_AESU_CBC,
2707 	},
2708 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2709 		.alg.crypto = {
2710 			.cra_name = "ctr(aes)",
2711 			.cra_driver_name = "ctr-aes-talitos",
2712 			.cra_blocksize = AES_BLOCK_SIZE,
2713 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2714 				     CRYPTO_ALG_ASYNC,
2715 			.cra_ablkcipher = {
2716 				.min_keysize = AES_MIN_KEY_SIZE,
2717 				.max_keysize = AES_MAX_KEY_SIZE,
2718 				.ivsize = AES_BLOCK_SIZE,
2719 			}
2720 		},
2721 		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2722 				     DESC_HDR_SEL0_AESU |
2723 				     DESC_HDR_MODE0_AESU_CTR,
2724 	},
2725 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2726 		.alg.crypto = {
2727 			.cra_name = "ecb(des)",
2728 			.cra_driver_name = "ecb-des-talitos",
2729 			.cra_blocksize = DES_BLOCK_SIZE,
2730 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2731 				     CRYPTO_ALG_ASYNC,
2732 			.cra_ablkcipher = {
2733 				.min_keysize = DES_KEY_SIZE,
2734 				.max_keysize = DES_KEY_SIZE,
2735 				.ivsize = DES_BLOCK_SIZE,
2736 			}
2737 		},
2738 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2739 				     DESC_HDR_SEL0_DEU,
2740 	},
2741 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2742 		.alg.crypto = {
2743 			.cra_name = "cbc(des)",
2744 			.cra_driver_name = "cbc-des-talitos",
2745 			.cra_blocksize = DES_BLOCK_SIZE,
2746 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2747 				     CRYPTO_ALG_ASYNC,
2748 			.cra_ablkcipher = {
2749 				.min_keysize = DES_KEY_SIZE,
2750 				.max_keysize = DES_KEY_SIZE,
2751 				.ivsize = DES_BLOCK_SIZE,
2752 			}
2753 		},
2754 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2755 				     DESC_HDR_SEL0_DEU |
2756 				     DESC_HDR_MODE0_DEU_CBC,
2757 	},
2758 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2759 		.alg.crypto = {
2760 			.cra_name = "ecb(des3_ede)",
2761 			.cra_driver_name = "ecb-3des-talitos",
2762 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2763 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2764 				     CRYPTO_ALG_ASYNC,
2765 			.cra_ablkcipher = {
2766 				.min_keysize = DES3_EDE_KEY_SIZE,
2767 				.max_keysize = DES3_EDE_KEY_SIZE,
2768 				.ivsize = DES3_EDE_BLOCK_SIZE,
2769 			}
2770 		},
2771 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2772 				     DESC_HDR_SEL0_DEU |
2773 				     DESC_HDR_MODE0_DEU_3DES,
2774 	},
2775 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2776 		.alg.crypto = {
2777 			.cra_name = "cbc(des3_ede)",
2778 			.cra_driver_name = "cbc-3des-talitos",
2779 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2780 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2781                                      CRYPTO_ALG_ASYNC,
2782 			.cra_ablkcipher = {
2783 				.min_keysize = DES3_EDE_KEY_SIZE,
2784 				.max_keysize = DES3_EDE_KEY_SIZE,
2785 				.ivsize = DES3_EDE_BLOCK_SIZE,
2786 			}
2787 		},
2788 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2789 			             DESC_HDR_SEL0_DEU |
2790 		                     DESC_HDR_MODE0_DEU_CBC |
2791 		                     DESC_HDR_MODE0_DEU_3DES,
2792 	},
2793 	/* AHASH algorithms. */
2794 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2795 		.alg.hash = {
2796 			.halg.digestsize = MD5_DIGEST_SIZE,
2797 			.halg.statesize = sizeof(struct talitos_export_state),
2798 			.halg.base = {
2799 				.cra_name = "md5",
2800 				.cra_driver_name = "md5-talitos",
2801 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2802 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2803 					     CRYPTO_ALG_ASYNC,
2804 			}
2805 		},
2806 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2807 				     DESC_HDR_SEL0_MDEUA |
2808 				     DESC_HDR_MODE0_MDEU_MD5,
2809 	},
2810 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2811 		.alg.hash = {
2812 			.halg.digestsize = SHA1_DIGEST_SIZE,
2813 			.halg.statesize = sizeof(struct talitos_export_state),
2814 			.halg.base = {
2815 				.cra_name = "sha1",
2816 				.cra_driver_name = "sha1-talitos",
2817 				.cra_blocksize = SHA1_BLOCK_SIZE,
2818 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2819 					     CRYPTO_ALG_ASYNC,
2820 			}
2821 		},
2822 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2823 				     DESC_HDR_SEL0_MDEUA |
2824 				     DESC_HDR_MODE0_MDEU_SHA1,
2825 	},
2826 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2827 		.alg.hash = {
2828 			.halg.digestsize = SHA224_DIGEST_SIZE,
2829 			.halg.statesize = sizeof(struct talitos_export_state),
2830 			.halg.base = {
2831 				.cra_name = "sha224",
2832 				.cra_driver_name = "sha224-talitos",
2833 				.cra_blocksize = SHA224_BLOCK_SIZE,
2834 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2835 					     CRYPTO_ALG_ASYNC,
2836 			}
2837 		},
2838 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2839 				     DESC_HDR_SEL0_MDEUA |
2840 				     DESC_HDR_MODE0_MDEU_SHA224,
2841 	},
2842 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2843 		.alg.hash = {
2844 			.halg.digestsize = SHA256_DIGEST_SIZE,
2845 			.halg.statesize = sizeof(struct talitos_export_state),
2846 			.halg.base = {
2847 				.cra_name = "sha256",
2848 				.cra_driver_name = "sha256-talitos",
2849 				.cra_blocksize = SHA256_BLOCK_SIZE,
2850 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2851 					     CRYPTO_ALG_ASYNC,
2852 			}
2853 		},
2854 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2855 				     DESC_HDR_SEL0_MDEUA |
2856 				     DESC_HDR_MODE0_MDEU_SHA256,
2857 	},
2858 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2859 		.alg.hash = {
2860 			.halg.digestsize = SHA384_DIGEST_SIZE,
2861 			.halg.statesize = sizeof(struct talitos_export_state),
2862 			.halg.base = {
2863 				.cra_name = "sha384",
2864 				.cra_driver_name = "sha384-talitos",
2865 				.cra_blocksize = SHA384_BLOCK_SIZE,
2866 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2867 					     CRYPTO_ALG_ASYNC,
2868 			}
2869 		},
2870 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2871 				     DESC_HDR_SEL0_MDEUB |
2872 				     DESC_HDR_MODE0_MDEUB_SHA384,
2873 	},
2874 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2875 		.alg.hash = {
2876 			.halg.digestsize = SHA512_DIGEST_SIZE,
2877 			.halg.statesize = sizeof(struct talitos_export_state),
2878 			.halg.base = {
2879 				.cra_name = "sha512",
2880 				.cra_driver_name = "sha512-talitos",
2881 				.cra_blocksize = SHA512_BLOCK_SIZE,
2882 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2883 					     CRYPTO_ALG_ASYNC,
2884 			}
2885 		},
2886 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2887 				     DESC_HDR_SEL0_MDEUB |
2888 				     DESC_HDR_MODE0_MDEUB_SHA512,
2889 	},
2890 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2891 		.alg.hash = {
2892 			.halg.digestsize = MD5_DIGEST_SIZE,
2893 			.halg.statesize = sizeof(struct talitos_export_state),
2894 			.halg.base = {
2895 				.cra_name = "hmac(md5)",
2896 				.cra_driver_name = "hmac-md5-talitos",
2897 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2898 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2899 					     CRYPTO_ALG_ASYNC,
2900 			}
2901 		},
2902 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2903 				     DESC_HDR_SEL0_MDEUA |
2904 				     DESC_HDR_MODE0_MDEU_MD5,
2905 	},
2906 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2907 		.alg.hash = {
2908 			.halg.digestsize = SHA1_DIGEST_SIZE,
2909 			.halg.statesize = sizeof(struct talitos_export_state),
2910 			.halg.base = {
2911 				.cra_name = "hmac(sha1)",
2912 				.cra_driver_name = "hmac-sha1-talitos",
2913 				.cra_blocksize = SHA1_BLOCK_SIZE,
2914 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2915 					     CRYPTO_ALG_ASYNC,
2916 			}
2917 		},
2918 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2919 				     DESC_HDR_SEL0_MDEUA |
2920 				     DESC_HDR_MODE0_MDEU_SHA1,
2921 	},
2922 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2923 		.alg.hash = {
2924 			.halg.digestsize = SHA224_DIGEST_SIZE,
2925 			.halg.statesize = sizeof(struct talitos_export_state),
2926 			.halg.base = {
2927 				.cra_name = "hmac(sha224)",
2928 				.cra_driver_name = "hmac-sha224-talitos",
2929 				.cra_blocksize = SHA224_BLOCK_SIZE,
2930 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2931 					     CRYPTO_ALG_ASYNC,
2932 			}
2933 		},
2934 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2935 				     DESC_HDR_SEL0_MDEUA |
2936 				     DESC_HDR_MODE0_MDEU_SHA224,
2937 	},
2938 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2939 		.alg.hash = {
2940 			.halg.digestsize = SHA256_DIGEST_SIZE,
2941 			.halg.statesize = sizeof(struct talitos_export_state),
2942 			.halg.base = {
2943 				.cra_name = "hmac(sha256)",
2944 				.cra_driver_name = "hmac-sha256-talitos",
2945 				.cra_blocksize = SHA256_BLOCK_SIZE,
2946 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2947 					     CRYPTO_ALG_ASYNC,
2948 			}
2949 		},
2950 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2951 				     DESC_HDR_SEL0_MDEUA |
2952 				     DESC_HDR_MODE0_MDEU_SHA256,
2953 	},
2954 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2955 		.alg.hash = {
2956 			.halg.digestsize = SHA384_DIGEST_SIZE,
2957 			.halg.statesize = sizeof(struct talitos_export_state),
2958 			.halg.base = {
2959 				.cra_name = "hmac(sha384)",
2960 				.cra_driver_name = "hmac-sha384-talitos",
2961 				.cra_blocksize = SHA384_BLOCK_SIZE,
2962 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2963 					     CRYPTO_ALG_ASYNC,
2964 			}
2965 		},
2966 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2967 				     DESC_HDR_SEL0_MDEUB |
2968 				     DESC_HDR_MODE0_MDEUB_SHA384,
2969 	},
2970 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2971 		.alg.hash = {
2972 			.halg.digestsize = SHA512_DIGEST_SIZE,
2973 			.halg.statesize = sizeof(struct talitos_export_state),
2974 			.halg.base = {
2975 				.cra_name = "hmac(sha512)",
2976 				.cra_driver_name = "hmac-sha512-talitos",
2977 				.cra_blocksize = SHA512_BLOCK_SIZE,
2978 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2979 					     CRYPTO_ALG_ASYNC,
2980 			}
2981 		},
2982 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2983 				     DESC_HDR_SEL0_MDEUB |
2984 				     DESC_HDR_MODE0_MDEUB_SHA512,
2985 	}
2986 };
2987 
2988 struct talitos_crypto_alg {
2989 	struct list_head entry;
2990 	struct device *dev;
2991 	struct talitos_alg_template algt;
2992 };
2993 
2994 static int talitos_init_common(struct talitos_ctx *ctx,
2995 			       struct talitos_crypto_alg *talitos_alg)
2996 {
2997 	struct talitos_private *priv;
2998 
2999 	/* update context with ptr to dev */
3000 	ctx->dev = talitos_alg->dev;
3001 
3002 	/* assign SEC channel to tfm in round-robin fashion */
3003 	priv = dev_get_drvdata(ctx->dev);
3004 	ctx->ch = atomic_inc_return(&priv->last_chan) &
3005 		  (priv->num_channels - 1);
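	/*
	 * num_channels is validated as a power of 2 at probe time, so the
	 * mask walks all channels round-robin: with 4 channels, successive
	 * tfms get channels 1, 2, 3, 0, 1, ...
	 */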
3006 
3007 	/* copy descriptor header template value */
3008 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3009 
3010 	/* select done notification */
3011 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3012 
3013 	return 0;
3014 }
3015 
3016 static int talitos_cra_init(struct crypto_tfm *tfm)
3017 {
3018 	struct crypto_alg *alg = tfm->__crt_alg;
3019 	struct talitos_crypto_alg *talitos_alg;
3020 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3021 
3022 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3023 		talitos_alg = container_of(__crypto_ahash_alg(alg),
3024 					   struct talitos_crypto_alg,
3025 					   algt.alg.hash);
3026 	else
3027 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
3028 					   algt.alg.crypto);
3029 
3030 	return talitos_init_common(ctx, talitos_alg);
3031 }
3032 
3033 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3034 {
3035 	struct aead_alg *alg = crypto_aead_alg(tfm);
3036 	struct talitos_crypto_alg *talitos_alg;
3037 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3038 
3039 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3040 				   algt.alg.aead);
3041 
3042 	return talitos_init_common(ctx, talitos_alg);
3043 }
3044 
3045 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3046 {
3047 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3048 
3049 	talitos_cra_init(tfm);
3050 
3051 	ctx->keylen = 0;
3052 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3053 				 sizeof(struct talitos_ahash_req_ctx));
3054 
3055 	return 0;
3056 }
3057 
3058 static void talitos_cra_exit(struct crypto_tfm *tfm)
3059 {
3060 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3061 	struct device *dev = ctx->dev;
3062 
3063 	if (ctx->keylen)
3064 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3065 }
3066 
3067 static void talitos_cra_exit_ahash(struct crypto_tfm *tfm)
3068 {
3069 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3070 	struct device *dev = ctx->dev;
3071 	unsigned int size;
3072 
3073 	talitos_cra_exit(tfm);
3074 
3075 	size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <=
3076 		SHA256_DIGEST_SIZE)
3077 	       ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
3078 	       : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
3079 
3080 	if (ctx->dma_hw_context)
3081 		dma_unmap_single(dev, ctx->dma_hw_context, size,
3082 				 DMA_BIDIRECTIONAL);
3083 	if (ctx->dma_buf)
3084 		dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2,
3085 				 DMA_TO_DEVICE);
3086 }
3087 
3088 /*
3089  * given the alg's descriptor header template, determine whether the
3090  * descriptor type and the required primary/secondary execution units
3091  * match the h/w capabilities described in the device tree node.
3092  */
3093 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3094 {
3095 	struct talitos_private *priv = dev_get_drvdata(dev);
3096 	int ret;
3097 
3098 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3099 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3100 
3101 	if (SECONDARY_EU(desc_hdr_template))
3102 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3103 		              & priv->exec_units);
3104 
3105 	return ret;
3106 }
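/*
 * Example: an IPSEC_ESP aes-cbc/hmac-sha1 template decodes to one
 * descriptor-type bit and two execution-unit bits (AESU primary, MDEU
 * secondary), each of which must be set in the masks read from the
 * "fsl,descriptor-types-mask" and "fsl,exec-units-mask" device tree
 * properties.
 */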
3107 
3108 static int talitos_remove(struct platform_device *ofdev)
3109 {
3110 	struct device *dev = &ofdev->dev;
3111 	struct talitos_private *priv = dev_get_drvdata(dev);
3112 	struct talitos_crypto_alg *t_alg, *n;
3113 	int i;
3114 
3115 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3116 		switch (t_alg->algt.type) {
3117 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
3118 			break;
3119 		case CRYPTO_ALG_TYPE_AEAD:
3120 			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
3121 		case CRYPTO_ALG_TYPE_AHASH:
3122 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3123 			break;
3124 		}
3125 		list_del(&t_alg->entry);
3126 	}
3127 
3128 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3129 		talitos_unregister_rng(dev);
3130 
3131 	for (i = 0; i < 2; i++)
3132 		if (priv->irq[i]) {
3133 			free_irq(priv->irq[i], dev);
3134 			irq_dispose_mapping(priv->irq[i]);
3135 		}
3136 
3137 	tasklet_kill(&priv->done_task[0]);
3138 	if (priv->irq[1])
3139 		tasklet_kill(&priv->done_task[1]);
3140 
3141 	return 0;
3142 }
3143 
3144 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3145 						    struct talitos_alg_template
3146 						           *template)
3147 {
3148 	struct talitos_private *priv = dev_get_drvdata(dev);
3149 	struct talitos_crypto_alg *t_alg;
3150 	struct crypto_alg *alg;
3151 
3152 	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3153 			     GFP_KERNEL);
3154 	if (!t_alg)
3155 		return ERR_PTR(-ENOMEM);
3156 
3157 	t_alg->algt = *template;
3158 
3159 	switch (t_alg->algt.type) {
3160 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
3161 		alg = &t_alg->algt.alg.crypto;
3162 		alg->cra_init = talitos_cra_init;
3163 		alg->cra_exit = talitos_cra_exit;
3164 		alg->cra_type = &crypto_ablkcipher_type;
3165 		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3166 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3167 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3168 		alg->cra_ablkcipher.geniv = "eseqiv";
3169 		break;
3170 	case CRYPTO_ALG_TYPE_AEAD:
3171 		alg = &t_alg->algt.alg.aead.base;
3172 		alg->cra_exit = talitos_cra_exit;
3173 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3174 		t_alg->algt.alg.aead.setkey = aead_setkey;
3175 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3176 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3177 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3178 		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3179 			devm_kfree(dev, t_alg);
3180 			return ERR_PTR(-ENOTSUPP);
3181 		}
3182 		break;
3183 	case CRYPTO_ALG_TYPE_AHASH:
3184 		alg = &t_alg->algt.alg.hash.halg.base;
3185 		alg->cra_init = talitos_cra_init_ahash;
3186 		alg->cra_exit = talitos_cra_exit_ahash;
3187 		alg->cra_type = &crypto_ahash_type;
3188 		t_alg->algt.alg.hash.init = ahash_init;
3189 		t_alg->algt.alg.hash.update = ahash_update;
3190 		t_alg->algt.alg.hash.final = ahash_final;
3191 		t_alg->algt.alg.hash.finup = ahash_finup;
3192 		t_alg->algt.alg.hash.digest = ahash_digest;
3193 		if (!strncmp(alg->cra_name, "hmac", 4))
3194 			t_alg->algt.alg.hash.setkey = ahash_setkey;
3195 		t_alg->algt.alg.hash.import = ahash_import;
3196 		t_alg->algt.alg.hash.export = ahash_export;
3197 
3198 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3199 		    !strncmp(alg->cra_name, "hmac", 4)) {
3200 			devm_kfree(dev, t_alg);
3201 			return ERR_PTR(-ENOTSUPP);
3202 		}
3203 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3204 		    (!strcmp(alg->cra_name, "sha224") ||
3205 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3206 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3207 			t_alg->algt.desc_hdr_template =
3208 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3209 					DESC_HDR_SEL0_MDEUA |
3210 					DESC_HDR_MODE0_MDEU_SHA256;
3211 		}
3212 		break;
3213 	default:
3214 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3215 		devm_kfree(dev, t_alg);
3216 		return ERR_PTR(-EINVAL);
3217 	}
3218 
3219 	alg->cra_module = THIS_MODULE;
3220 	if (t_alg->algt.priority)
3221 		alg->cra_priority = t_alg->algt.priority;
3222 	else
3223 		alg->cra_priority = TALITOS_CRA_PRIORITY;
3224 	alg->cra_alignmask = 0;
3225 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3226 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3227 
3228 	t_alg->dev = dev;
3229 
3230 	return t_alg;
3231 }
3232 
3233 static int talitos_probe_irq(struct platform_device *ofdev)
3234 {
3235 	struct device *dev = &ofdev->dev;
3236 	struct device_node *np = ofdev->dev.of_node;
3237 	struct talitos_private *priv = dev_get_drvdata(dev);
3238 	int err;
3239 	bool is_sec1 = has_ftr_sec1(priv);
3240 
3241 	priv->irq[0] = irq_of_parse_and_map(np, 0);
3242 	if (!priv->irq[0]) {
3243 		dev_err(dev, "failed to map irq\n");
3244 		return -EINVAL;
3245 	}
3246 	if (is_sec1) {
3247 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3248 				  dev_driver_string(dev), dev);
3249 		goto primary_out;
3250 	}
3251 
3252 	priv->irq[1] = irq_of_parse_and_map(np, 1);
3253 
3254 	/* get the primary irq line */
3255 	if (!priv->irq[1]) {
3256 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3257 				  dev_driver_string(dev), dev);
3258 		goto primary_out;
3259 	}
3260 
3261 	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3262 			  dev_driver_string(dev), dev);
3263 	if (err)
3264 		goto primary_out;
3265 
3266 	/* get the secondary irq line */
3267 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3268 			  dev_driver_string(dev), dev);
3269 	if (err) {
3270 		dev_err(dev, "failed to request secondary irq\n");
3271 		irq_dispose_mapping(priv->irq[1]);
3272 		priv->irq[1] = 0;
3273 	}
3274 
3275 	return err;
3276 
3277 primary_out:
3278 	if (err) {
3279 		dev_err(dev, "failed to request primary irq\n");
3280 		irq_dispose_mapping(priv->irq[0]);
3281 		priv->irq[0] = 0;
3282 	}
3283 
3284 	return err;
3285 }
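/*
 * IRQ topology recap: SEC1 uses a single interrupt for all channels;
 * SEC2+ uses either one combined line (talitos2_interrupt_4ch) or two
 * lines with channels 0/2 on the primary and 1/3 on the secondary,
 * mirroring the done_task wiring in talitos_probe() below.
 */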
3286 
3287 static int talitos_probe(struct platform_device *ofdev)
3288 {
3289 	struct device *dev = &ofdev->dev;
3290 	struct device_node *np = ofdev->dev.of_node;
3291 	struct talitos_private *priv;
3292 	int i, err;
3293 	int stride;
3294 	struct resource *res;
3295 
3296 	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3297 	if (!priv)
3298 		return -ENOMEM;
3299 
3300 	INIT_LIST_HEAD(&priv->alg_list);
3301 
3302 	dev_set_drvdata(dev, priv);
3303 
3304 	priv->ofdev = ofdev;
3305 
3306 	spin_lock_init(&priv->reg_lock);
3307 
3308 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3309 	if (!res)
3310 		return -ENXIO;
3311 	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3312 	if (!priv->reg) {
3313 		dev_err(dev, "failed to ioremap\n");
3314 		err = -ENOMEM;
3315 		goto err_out;
3316 	}
3317 
3318 	/* get SEC version capabilities from device tree */
3319 	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3320 	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3321 	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3322 	of_property_read_u32(np, "fsl,descriptor-types-mask",
3323 			     &priv->desc_types);
3324 
3325 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3326 	    !priv->exec_units || !priv->desc_types) {
3327 		dev_err(dev, "invalid property data in device tree node\n");
3328 		err = -EINVAL;
3329 		goto err_out;
3330 	}
3331 
	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

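	/* per-execution-unit register offsets differ between SEC1 and SEC2+ */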
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

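	/*
	 * Pick completion tasklets to mirror the IRQ wiring set up in
	 * talitos_probe_irq(): one tasklet per interrupt line, each
	 * draining only the channels that line signals for.
	 */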
	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}

	priv->chan = devm_kcalloc(dev, priv->num_channels,
				  sizeof(struct talitos_channel), GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

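	/*
	 * Per-channel setup: map the channel's register block (blocks
	 * follow the global registers at stride-sized intervals), then
	 * allocate its software request fifo.  submit_count starts
	 * negative so the channel reads as full once chfifo_len - 1
	 * requests are in flight.
	 */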
	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev, priv->fifo_len,
						  sizeof(struct talitos_request),
						  GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

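	/*
	 * The SEC addresses up to 36 bits of physical memory (the eptr
	 * field carries bits 32-35 on SEC2+), hence the 36-bit DMA mask.
	 */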
	err = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (err)
		goto err_out;

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		}
		dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else {
				list_add_tail(&t_alg->entry, &priv->alg_list);
			}
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

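/*
 * Later parts typically chain back to one of these two strings in their
 * device tree "compatible" lists (e.g. "fsl,sec3.1", ..., "fsl,sec2.0"),
 * so matching on the base strings covers the whole family.
 */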
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");