/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}
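
/*
 * Illustrative sketch (not part of the driver): filling a talitos_ptr
 * for one DMA-mapped buffer composes the helpers above. On SEC2+ the
 * 36-bit bus address is split across eptr/ptr; SEC1 uses only the low
 * 32 bits and the len1 field:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	to_talitos_ptr_len(ptr, len, is_sec1);     // len or len1
 *	to_talitos_ptr(ptr, addr, is_sec1);        // ptr (+ eptr on SEC2+)
 *	to_talitos_ptr_extent_clear(ptr, is_sec1); // j_extent = 0 on SEC2+
 */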

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
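
/*
 * Illustrative sketch of a talitos_submit() caller (the my_done callback
 * and my_ctx handle are hypothetical, not part of this driver). The
 * descriptor must already contain DMA-mapped pointers; -EINPROGRESS means
 * the channel accepted the descriptor and my_done will be invoked later:
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_ctx *c = context;
 *		// check error and the DESC_HDR_DONE feedback in desc->hdr
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, c);
 *	if (err != -EINPROGRESS)
 *		// -EAGAIN: channel fifo full, caller may retry later
 */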

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);			\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);			\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);			\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);			\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
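
/*
 * Layout of the concatenated key built by aead_setkey() above, as later
 * consumed by the descriptor pointers in ipsec_esp(): ptr[0] takes
 * authkeylen bytes from the start, ptr[3] takes enckeylen bytes starting
 * at key + authkeylen:
 *
 *	ctx->key: [ auth (HMAC) key | encryption key ]
 *	           <-- authkeylen --><-- enckeylen -->
 */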

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or if either src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
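
/*
 * Resulting layout of a SEC2 edesc allocation, as computed in
 * talitos_edesc_alloc() below (SEC1 instead uses the buf[] union member,
 * sized to hold bounce copies of the data):
 *
 *	+-----------------------------+
 *	| struct talitos_edesc        |
 *	+-----------------------------+ <- link_tbl / dma_link_tbl
 *	| (src_nents + dst_nents + 2) |
 *	| talitos_ptr entries         |
 *	+-----------------------------+
 *	| 2 * authsize ICV bytes      |
 *	| (stashed and generated)     |
 *	+-----------------------------+
 */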

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
		}
	} else
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}

static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}
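
/*
 * Illustrative example: converting a 3-segment scatterlist whose cryptlen
 * spans all three segments yields a link table of three entries, the last
 * one tagged so the h/w knows where the table ends:
 *
 *	link_tbl[0] = { seg0 dma addr, seg0 len,      j_extent = 0 }
 *	link_tbl[1] = { seg1 dma addr, seg1 len,      j_extent = 0 }
 *	link_tbl[2] = { seg2 dma addr, remaining len,
 *			j_extent = DESC_PTR_LNKTBL_RETURN }
 */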

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		tbl_off += ret;

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is the number of HMAC bytes appended to the ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
					 sg_link_tbl_len,
					 &edesc->link_tbl[tbl_off])) > 1) {
		tbl_off += ret;
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
	} else
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);

	edesc->icv_ool = false;

	if (sg_count > 1 &&
	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
					      areq->assoclen, cryptlen,
					      &edesc->link_tbl[tbl_off])) >
	    1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
					(edesc->src_nents + edesc->dst_nents +
					 2) * sizeof(struct talitos_ptr) +
					authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		edesc->icv_ool = true;
	} else
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_nents = sg_nents_for_len(src,
					     assoclen + cryptlen + authsize);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src */
		src_nents = sg_nents_for_len(src, assoclen + cryptlen +
						 (encrypt ? 0 : authsize));
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
						 (encrypt ? authsize : 0));
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
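
/*
 * Worked example (illustrative numbers only): an AEAD request with
 * src_nents = 3, dst_nents = 2 and authsize = 12 on SEC2 gets
 *
 *	dma_len   = (3 + 2 + 2) * sizeof(struct talitos_ptr) + 2 * 12
 *	alloc_len = sizeof(struct talitos_edesc) + dma_len
 *
 * i.e. room for separate in/out link tables plus the stashed and
 * generated ICVs, all reachable through edesc->dma_link_tbl.
 */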

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed */
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}
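
/*
 * Note the two strategies above: SEC1 has no scatter/gather support, so
 * multi-segment sources are flattened into the edesc bounce buffer with
 * sg_copy_to_buffer(); SEC2+ instead builds a link table and sets
 * DESC_PTR_LNKTBL_JUMP so the h/w gathers the segments itself.
 */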

void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
					    (edesc->src_nents + 1) *
					     sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					  &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		 container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 cannot hash a zero-sized message, so we do the padding
 * ourselves and submit a pre-padded block
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};
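
	/*
	 * This is the MD5/SHA-style padding of an empty message: the 0x80
	 * end-of-message marker followed by zeros (including a zero length
	 * field), so the unit digests one well-formed final block. The
	 * h/w padding is disabled below so it is not padded twice.
	 */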

	pr_err_once("Bug in SEC1, padding ourselves\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
1695 
1696 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1697 				struct ahash_request *areq, unsigned int length,
1698 				void (*callback) (struct device *dev,
1699 						  struct talitos_desc *desc,
1700 						  void *context, int error))
1701 {
1702 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1703 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1704 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1705 	struct device *dev = ctx->dev;
1706 	struct talitos_desc *desc = &edesc->desc;
1707 	int ret;
1708 	struct talitos_private *priv = dev_get_drvdata(dev);
1709 	bool is_sec1 = has_ftr_sec1(priv);
1710 
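	/*
	 * Descriptor pointer usage for hashing, as filled in below:
	 *   ptr[0] unused, ptr[1] hash context in, ptr[2] HMAC key,
	 *   ptr[3] data in, ptr[4] unused, ptr[5] digest or hash
	 *   context out, ptr[6] unused.
	 */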
1711 	/* first DWORD empty */
1712 	desc->ptr[0] = zero_entry;
1713 
1714 	/* hash context in */
1715 	if (!req_ctx->first || req_ctx->swinit) {
1716 		map_single_talitos_ptr(dev, &desc->ptr[1],
1717 				       req_ctx->hw_context_size,
1718 				       (char *)req_ctx->hw_context,
1719 				       DMA_TO_DEVICE);
1720 		req_ctx->swinit = 0;
1721 	} else {
1722 		desc->ptr[1] = zero_entry;
1723 		/* Indicate next op is not the first. */
1724 		req_ctx->first = 0;
1725 	}
1726 
1727 	/* HMAC key */
1728 	if (ctx->keylen)
1729 		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1730 				       (char *)&ctx->key, DMA_TO_DEVICE);
1731 	else
1732 		desc->ptr[2] = zero_entry;
1733 
1734 	/* data in */
1737 	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1738 			      DMA_TO_DEVICE, &desc->ptr[3]);
1739 
1740 	/* fifth DWORD empty */
1741 	desc->ptr[4] = zero_entry;
1742 
1743 	/* hash/HMAC out -or- hash context out */
1744 	if (req_ctx->last)
1745 		map_single_talitos_ptr(dev, &desc->ptr[5],
1746 				       crypto_ahash_digestsize(tfm),
1747 				       areq->result, DMA_FROM_DEVICE);
1748 	else
1749 		map_single_talitos_ptr(dev, &desc->ptr[5],
1750 				       req_ctx->hw_context_size,
1751 				       req_ctx->hw_context, DMA_FROM_DEVICE);
1752 
1753 	/* last DWORD empty */
1754 	desc->ptr[6] = zero_entry;
1755 
1756 	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1757 		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1758 
1759 	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1760 	if (ret != -EINPROGRESS) {
1761 		common_nonsnoop_hash_unmap(dev, edesc, areq);
1762 		kfree(edesc);
1763 	}
1764 	return ret;
1765 }
1766 
1767 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1768 					       unsigned int nbytes)
1769 {
1770 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1771 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1772 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1773 
1774 	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1775 				   nbytes, 0, 0, 0, areq->base.flags, false);
1776 }
1777 
1778 static int ahash_init(struct ahash_request *areq)
1779 {
1780 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1781 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1782 
1783 	/* Initialize the context */
1784 	req_ctx->nbuf = 0;
1785 	req_ctx->first = 1; /* first indicates h/w must init its context */
1786 	req_ctx->swinit = 0; /* assume h/w init of context */
1787 	req_ctx->hw_context_size =
1788 		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1789 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1790 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1791 
1792 	return 0;
1793 }
1794 
1795 /*
1796  * on h/w without explicit sha224 support, we initialize h/w context
1797  * manually with sha224 constants, and tell it to run sha256.
1798  */
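/*
 * SHA-224 is SHA-256 with a different initial value, truncated to 224
 * bits.  Only crypto_ahash_digestsize() bytes are copied out of the
 * final descriptor, so loading the SHA-224 IV here and running the
 * MDEU in SHA-256 mode yields the correct digest.
 */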
1799 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1800 {
1801 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1802 
1803 	ahash_init(areq);
1804 	req_ctx->swinit = 1; /* prevent h/w init with sha256 values */
1805 
1806 	req_ctx->hw_context[0] = SHA224_H0;
1807 	req_ctx->hw_context[1] = SHA224_H1;
1808 	req_ctx->hw_context[2] = SHA224_H2;
1809 	req_ctx->hw_context[3] = SHA224_H3;
1810 	req_ctx->hw_context[4] = SHA224_H4;
1811 	req_ctx->hw_context[5] = SHA224_H5;
1812 	req_ctx->hw_context[6] = SHA224_H6;
1813 	req_ctx->hw_context[7] = SHA224_H7;
1814 
1815 	/* init 64-bit count */
1816 	req_ctx->hw_context[8] = 0;
1817 	req_ctx->hw_context[9] = 0;
1818 
1819 	return 0;
1820 }
1821 
1822 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1823 {
1824 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1825 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1826 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1827 	struct talitos_edesc *edesc;
1828 	unsigned int blocksize =
1829 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1830 	unsigned int nbytes_to_hash;
1831 	unsigned int to_hash_later;
1832 	unsigned int nsg;
1833 
1834 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1835 		/* Buffer up to one whole block */
1836 		sg_copy_to_buffer(areq->src,
1837 				  sg_nents_for_len(areq->src, nbytes),
1838 				  req_ctx->buf + req_ctx->nbuf, nbytes);
1839 		req_ctx->nbuf += nbytes;
1840 		return 0;
1841 	}
1842 
1843 	/* At least (blocksize + 1) bytes are available to hash */
1844 	nbytes_to_hash = nbytes + req_ctx->nbuf;
1845 	to_hash_later = nbytes_to_hash & (blocksize - 1);
1846 
1847 	if (req_ctx->last) {
1848 		to_hash_later = 0;
1849 	} else if (to_hash_later) {
1850 		/* There is a partial block. Hash the full block(s) now */
1851 		nbytes_to_hash -= to_hash_later;
1852 	} else {
1853 		/* Keep one block buffered */
1854 		nbytes_to_hash -= blocksize;
1855 		to_hash_later = blocksize;
1856 	}
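	/*
	 * Example: with blocksize = 64, nbuf = 10 buffered bytes and a
	 * 150-byte update, nbytes_to_hash = 160 and to_hash_later = 32:
	 * 128 bytes are hashed now and 32 are carried over.  On an exact
	 * multiple of the block size, one full block stays buffered so a
	 * later final/finup always has data to pad.
	 */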
1857 
1858 	/* Chain in any previously buffered data */
1859 	if (req_ctx->nbuf) {
1860 		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1861 		sg_init_table(req_ctx->bufsl, nsg);
1862 		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1863 		if (nsg > 1)
1864 			sg_chain(req_ctx->bufsl, 2, areq->src);
1865 		req_ctx->psrc = req_ctx->bufsl;
1866 	} else {
1867 		req_ctx->psrc = areq->src;
	}
1868 
1869 	if (to_hash_later) {
1870 		int nents = sg_nents_for_len(areq->src, nbytes);
1871 		sg_pcopy_to_buffer(areq->src, nents,
1872 				   req_ctx->bufnext,
1873 				   to_hash_later,
1874 				   nbytes - to_hash_later);
1875 	}
1876 	req_ctx->to_hash_later = to_hash_later;
1877 
1878 	/* Allocate extended descriptor */
1879 	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1880 	if (IS_ERR(edesc))
1881 		return PTR_ERR(edesc);
1882 
1883 	edesc->desc.hdr = ctx->desc_hdr_template;
1884 
1885 	/* On last one, request SEC to pad; otherwise continue */
1886 	if (req_ctx->last)
1887 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1888 	else
1889 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1890 
1891 	/* request SEC to INIT hash. */
1892 	if (req_ctx->first && !req_ctx->swinit)
1893 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1894 
1895 	/* When the tfm context has a keylen, it's an HMAC.
1896 	 * A first or last (i.e. not middle) descriptor must request HMAC.
1897 	 */
1898 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
1899 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1900 
1901 	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1902 				    ahash_done);
1903 }
1904 
1905 static int ahash_update(struct ahash_request *areq)
1906 {
1907 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1908 
1909 	req_ctx->last = 0;
1910 
1911 	return ahash_process_req(areq, areq->nbytes);
1912 }
1913 
1914 static int ahash_final(struct ahash_request *areq)
1915 {
1916 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1917 
1918 	req_ctx->last = 1;
1919 
1920 	return ahash_process_req(areq, 0);
1921 }
1922 
1923 static int ahash_finup(struct ahash_request *areq)
1924 {
1925 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1926 
1927 	req_ctx->last = 1;
1928 
1929 	return ahash_process_req(areq, areq->nbytes);
1930 }
1931 
1932 static int ahash_digest(struct ahash_request *areq)
1933 {
1934 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1935 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1936 
1937 	ahash->init(areq);
1938 	req_ctx->last = 1;
1939 
1940 	return ahash_process_req(areq, areq->nbytes);
1941 }
1942 
1943 struct keyhash_result {
1944 	struct completion completion;
1945 	int err;
1946 };
1947 
1948 static void keyhash_complete(struct crypto_async_request *req, int err)
1949 {
1950 	struct keyhash_result *res = req->data;
1951 
1952 	if (err == -EINPROGRESS)
1953 		return;
1954 
1955 	res->err = err;
1956 	complete(&res->completion);
1957 }
1958 
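/*
 * Hash a too-long HMAC key down to digest size by running an ahash
 * digest on this same tfm and waiting synchronously for its completion;
 * ctx->keylen stays zero meanwhile so the operation runs as a plain
 * hash rather than an HMAC.
 */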
1959 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1960 		   u8 *hash)
1961 {
1962 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1963 
1964 	struct scatterlist sg[1];
1965 	struct ahash_request *req;
1966 	struct keyhash_result hresult;
1967 	int ret;
1968 
1969 	init_completion(&hresult.completion);
1970 
1971 	req = ahash_request_alloc(tfm, GFP_KERNEL);
1972 	if (!req)
1973 		return -ENOMEM;
1974 
1975 	/* Keep tfm keylen == 0 during hash of the long key */
1976 	ctx->keylen = 0;
1977 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1978 				   keyhash_complete, &hresult);
1979 
1980 	sg_init_one(&sg[0], key, keylen);
1981 
1982 	ahash_request_set_crypt(req, sg, hash, keylen);
1983 	ret = crypto_ahash_digest(req);
1984 	switch (ret) {
1985 	case 0:
1986 		break;
1987 	case -EINPROGRESS:
1988 	case -EBUSY:
1989 		/* hresult is on our stack; waiting interruptibly would risk
1990 		 * a completion on a stale pointer after a signal */
1991 		wait_for_completion(&hresult.completion);
1992 		ret = hresult.err;
1993 		break;
1994 	default:
1995 		break;
1996 	}
1997 	ahash_request_free(req);
1998 
1999 	return ret;
2000 }
2001 
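/*
 * Per RFC 2104, an HMAC key longer than the hash block size is first
 * hashed down to digestsize bytes; keys of up to one block are used
 * as-is.
 */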
2002 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2003 			unsigned int keylen)
2004 {
2005 	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2006 	unsigned int blocksize =
2007 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2008 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2009 	unsigned int keysize = keylen;
2010 	u8 hash[SHA512_DIGEST_SIZE];
2011 	int ret;
2012 
2013 	if (keylen <= blocksize) {
2014 		memcpy(ctx->key, key, keysize);
2015 	} else {
2016 		/* Must get the hash of the long key */
2017 		ret = keyhash(tfm, key, keylen, hash);
2018 
2019 		if (ret) {
2020 			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2021 			return -EINVAL;
2022 		}
2023 
2024 		keysize = digestsize;
2025 		memcpy(ctx->key, hash, digestsize);
2026 	}
2027 
2028 	ctx->keylen = keysize;
2029 
2030 	return 0;
2031 }
2032 
2034 struct talitos_alg_template {
2035 	u32 type;
2036 	union {
2037 		struct crypto_alg crypto;
2038 		struct ahash_alg hash;
2039 		struct aead_alg aead;
2040 	} alg;
2041 	__be32 desc_hdr_template;
2042 };
2043 
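/*
 * Each template below pairs a crypto API algorithm definition with the
 * descriptor header that selects the execution unit(s) and mode bits
 * needed to run it; at probe time, hw_supports() filters this table
 * against the capabilities advertised in the device tree.
 */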
2044 static struct talitos_alg_template driver_algs[] = {
2045 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2046 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2047 		.alg.aead = {
2048 			.base = {
2049 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2050 				.cra_driver_name = "authenc-hmac-sha1-"
2051 						   "cbc-aes-talitos",
2052 				.cra_blocksize = AES_BLOCK_SIZE,
2053 				.cra_flags = CRYPTO_ALG_ASYNC,
2054 			},
2055 			.ivsize = AES_BLOCK_SIZE,
2056 			.maxauthsize = SHA1_DIGEST_SIZE,
2057 		},
2058 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2059 			             DESC_HDR_SEL0_AESU |
2060 		                     DESC_HDR_MODE0_AESU_CBC |
2061 		                     DESC_HDR_SEL1_MDEUA |
2062 		                     DESC_HDR_MODE1_MDEU_INIT |
2063 		                     DESC_HDR_MODE1_MDEU_PAD |
2064 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2065 	},
2066 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2067 		.alg.aead = {
2068 			.base = {
2069 				.cra_name = "authenc(hmac(sha1),"
2070 					    "cbc(des3_ede))",
2071 				.cra_driver_name = "authenc-hmac-sha1-"
2072 						   "cbc-3des-talitos",
2073 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2074 				.cra_flags = CRYPTO_ALG_ASYNC,
2075 			},
2076 			.ivsize = DES3_EDE_BLOCK_SIZE,
2077 			.maxauthsize = SHA1_DIGEST_SIZE,
2078 		},
2079 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2080 			             DESC_HDR_SEL0_DEU |
2081 		                     DESC_HDR_MODE0_DEU_CBC |
2082 		                     DESC_HDR_MODE0_DEU_3DES |
2083 		                     DESC_HDR_SEL1_MDEUA |
2084 		                     DESC_HDR_MODE1_MDEU_INIT |
2085 		                     DESC_HDR_MODE1_MDEU_PAD |
2086 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2087 	},
2088 	{       .type = CRYPTO_ALG_TYPE_AEAD,
2089 		.alg.aead = {
2090 			.base = {
2091 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2092 				.cra_driver_name = "authenc-hmac-sha224-"
2093 						   "cbc-aes-talitos",
2094 				.cra_blocksize = AES_BLOCK_SIZE,
2095 				.cra_flags = CRYPTO_ALG_ASYNC,
2096 			},
2097 			.ivsize = AES_BLOCK_SIZE,
2098 			.maxauthsize = SHA224_DIGEST_SIZE,
2099 		},
2100 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2101 				     DESC_HDR_SEL0_AESU |
2102 				     DESC_HDR_MODE0_AESU_CBC |
2103 				     DESC_HDR_SEL1_MDEUA |
2104 				     DESC_HDR_MODE1_MDEU_INIT |
2105 				     DESC_HDR_MODE1_MDEU_PAD |
2106 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2107 	},
2108 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2109 		.alg.aead = {
2110 			.base = {
2111 				.cra_name = "authenc(hmac(sha224),"
2112 					    "cbc(des3_ede))",
2113 				.cra_driver_name = "authenc-hmac-sha224-"
2114 						   "cbc-3des-talitos",
2115 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2116 				.cra_flags = CRYPTO_ALG_ASYNC,
2117 			},
2118 			.ivsize = DES3_EDE_BLOCK_SIZE,
2119 			.maxauthsize = SHA224_DIGEST_SIZE,
2120 		},
2121 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2122 			             DESC_HDR_SEL0_DEU |
2123 		                     DESC_HDR_MODE0_DEU_CBC |
2124 		                     DESC_HDR_MODE0_DEU_3DES |
2125 		                     DESC_HDR_SEL1_MDEUA |
2126 		                     DESC_HDR_MODE1_MDEU_INIT |
2127 		                     DESC_HDR_MODE1_MDEU_PAD |
2128 		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2129 	},
2130 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2131 		.alg.aead = {
2132 			.base = {
2133 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2134 				.cra_driver_name = "authenc-hmac-sha256-"
2135 						   "cbc-aes-talitos",
2136 				.cra_blocksize = AES_BLOCK_SIZE,
2137 				.cra_flags = CRYPTO_ALG_ASYNC,
2138 			},
2139 			.ivsize = AES_BLOCK_SIZE,
2140 			.maxauthsize = SHA256_DIGEST_SIZE,
2141 		},
2142 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2143 			             DESC_HDR_SEL0_AESU |
2144 		                     DESC_HDR_MODE0_AESU_CBC |
2145 		                     DESC_HDR_SEL1_MDEUA |
2146 		                     DESC_HDR_MODE1_MDEU_INIT |
2147 		                     DESC_HDR_MODE1_MDEU_PAD |
2148 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2149 	},
2150 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2151 		.alg.aead = {
2152 			.base = {
2153 				.cra_name = "authenc(hmac(sha256),"
2154 					    "cbc(des3_ede))",
2155 				.cra_driver_name = "authenc-hmac-sha256-"
2156 						   "cbc-3des-talitos",
2157 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2158 				.cra_flags = CRYPTO_ALG_ASYNC,
2159 			},
2160 			.ivsize = DES3_EDE_BLOCK_SIZE,
2161 			.maxauthsize = SHA256_DIGEST_SIZE,
2162 		},
2163 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2164 			             DESC_HDR_SEL0_DEU |
2165 		                     DESC_HDR_MODE0_DEU_CBC |
2166 		                     DESC_HDR_MODE0_DEU_3DES |
2167 		                     DESC_HDR_SEL1_MDEUA |
2168 		                     DESC_HDR_MODE1_MDEU_INIT |
2169 		                     DESC_HDR_MODE1_MDEU_PAD |
2170 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2171 	},
2172 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2173 		.alg.aead = {
2174 			.base = {
2175 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2176 				.cra_driver_name = "authenc-hmac-sha384-"
2177 						   "cbc-aes-talitos",
2178 				.cra_blocksize = AES_BLOCK_SIZE,
2179 				.cra_flags = CRYPTO_ALG_ASYNC,
2180 			},
2181 			.ivsize = AES_BLOCK_SIZE,
2182 			.maxauthsize = SHA384_DIGEST_SIZE,
2183 		},
2184 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2185 			             DESC_HDR_SEL0_AESU |
2186 		                     DESC_HDR_MODE0_AESU_CBC |
2187 		                     DESC_HDR_SEL1_MDEUB |
2188 		                     DESC_HDR_MODE1_MDEU_INIT |
2189 		                     DESC_HDR_MODE1_MDEU_PAD |
2190 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2191 	},
2192 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2193 		.alg.aead = {
2194 			.base = {
2195 				.cra_name = "authenc(hmac(sha384),"
2196 					    "cbc(des3_ede))",
2197 				.cra_driver_name = "authenc-hmac-sha384-"
2198 						   "cbc-3des-talitos",
2199 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2200 				.cra_flags = CRYPTO_ALG_ASYNC,
2201 			},
2202 			.ivsize = DES3_EDE_BLOCK_SIZE,
2203 			.maxauthsize = SHA384_DIGEST_SIZE,
2204 		},
2205 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2206 			             DESC_HDR_SEL0_DEU |
2207 		                     DESC_HDR_MODE0_DEU_CBC |
2208 		                     DESC_HDR_MODE0_DEU_3DES |
2209 		                     DESC_HDR_SEL1_MDEUB |
2210 		                     DESC_HDR_MODE1_MDEU_INIT |
2211 		                     DESC_HDR_MODE1_MDEU_PAD |
2212 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2213 	},
2214 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2215 		.alg.aead = {
2216 			.base = {
2217 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2218 				.cra_driver_name = "authenc-hmac-sha512-"
2219 						   "cbc-aes-talitos",
2220 				.cra_blocksize = AES_BLOCK_SIZE,
2221 				.cra_flags = CRYPTO_ALG_ASYNC,
2222 			},
2223 			.ivsize = AES_BLOCK_SIZE,
2224 			.maxauthsize = SHA512_DIGEST_SIZE,
2225 		},
2226 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2227 			             DESC_HDR_SEL0_AESU |
2228 		                     DESC_HDR_MODE0_AESU_CBC |
2229 		                     DESC_HDR_SEL1_MDEUB |
2230 		                     DESC_HDR_MODE1_MDEU_INIT |
2231 		                     DESC_HDR_MODE1_MDEU_PAD |
2232 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2233 	},
2234 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2235 		.alg.aead = {
2236 			.base = {
2237 				.cra_name = "authenc(hmac(sha512),"
2238 					    "cbc(des3_ede))",
2239 				.cra_driver_name = "authenc-hmac-sha512-"
2240 						   "cbc-3des-talitos",
2241 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2242 				.cra_flags = CRYPTO_ALG_ASYNC,
2243 			},
2244 			.ivsize = DES3_EDE_BLOCK_SIZE,
2245 			.maxauthsize = SHA512_DIGEST_SIZE,
2246 		},
2247 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2248 			             DESC_HDR_SEL0_DEU |
2249 		                     DESC_HDR_MODE0_DEU_CBC |
2250 		                     DESC_HDR_MODE0_DEU_3DES |
2251 		                     DESC_HDR_SEL1_MDEUB |
2252 		                     DESC_HDR_MODE1_MDEU_INIT |
2253 		                     DESC_HDR_MODE1_MDEU_PAD |
2254 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2255 	},
2256 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2257 		.alg.aead = {
2258 			.base = {
2259 				.cra_name = "authenc(hmac(md5),cbc(aes))",
2260 				.cra_driver_name = "authenc-hmac-md5-"
2261 						   "cbc-aes-talitos",
2262 				.cra_blocksize = AES_BLOCK_SIZE,
2263 				.cra_flags = CRYPTO_ALG_ASYNC,
2264 			},
2265 			.ivsize = AES_BLOCK_SIZE,
2266 			.maxauthsize = MD5_DIGEST_SIZE,
2267 		},
2268 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2269 			             DESC_HDR_SEL0_AESU |
2270 		                     DESC_HDR_MODE0_AESU_CBC |
2271 		                     DESC_HDR_SEL1_MDEUA |
2272 		                     DESC_HDR_MODE1_MDEU_INIT |
2273 		                     DESC_HDR_MODE1_MDEU_PAD |
2274 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2275 	},
2276 	{	.type = CRYPTO_ALG_TYPE_AEAD,
2277 		.alg.aead = {
2278 			.base = {
2279 				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2280 				.cra_driver_name = "authenc-hmac-md5-"
2281 						   "cbc-3des-talitos",
2282 				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2283 				.cra_flags = CRYPTO_ALG_ASYNC,
2284 			},
2285 			.ivsize = DES3_EDE_BLOCK_SIZE,
2286 			.maxauthsize = MD5_DIGEST_SIZE,
2287 		},
2288 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2289 			             DESC_HDR_SEL0_DEU |
2290 		                     DESC_HDR_MODE0_DEU_CBC |
2291 		                     DESC_HDR_MODE0_DEU_3DES |
2292 		                     DESC_HDR_SEL1_MDEUA |
2293 		                     DESC_HDR_MODE1_MDEU_INIT |
2294 		                     DESC_HDR_MODE1_MDEU_PAD |
2295 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2296 	},
2297 	/* ABLKCIPHER algorithms. */
2298 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2299 		.alg.crypto = {
2300 			.cra_name = "cbc(aes)",
2301 			.cra_driver_name = "cbc-aes-talitos",
2302 			.cra_blocksize = AES_BLOCK_SIZE,
2303 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2304                                      CRYPTO_ALG_ASYNC,
2305 			.cra_ablkcipher = {
2306 				.min_keysize = AES_MIN_KEY_SIZE,
2307 				.max_keysize = AES_MAX_KEY_SIZE,
2308 				.ivsize = AES_BLOCK_SIZE,
2309 			}
2310 		},
2311 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2312 				     DESC_HDR_SEL0_AESU |
2313 				     DESC_HDR_MODE0_AESU_CBC,
2314 	},
2315 	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2316 		.alg.crypto = {
2317 			.cra_name = "cbc(des3_ede)",
2318 			.cra_driver_name = "cbc-3des-talitos",
2319 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2320 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2321                                      CRYPTO_ALG_ASYNC,
2322 			.cra_ablkcipher = {
2323 				.min_keysize = DES3_EDE_KEY_SIZE,
2324 				.max_keysize = DES3_EDE_KEY_SIZE,
2325 				.ivsize = DES3_EDE_BLOCK_SIZE,
2326 			}
2327 		},
2328 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2329 			             DESC_HDR_SEL0_DEU |
2330 		                     DESC_HDR_MODE0_DEU_CBC |
2331 		                     DESC_HDR_MODE0_DEU_3DES,
2332 	},
2333 	/* AHASH algorithms. */
2334 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2335 		.alg.hash = {
2336 			.halg.digestsize = MD5_DIGEST_SIZE,
2337 			.halg.base = {
2338 				.cra_name = "md5",
2339 				.cra_driver_name = "md5-talitos",
2340 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2341 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2342 					     CRYPTO_ALG_ASYNC,
2343 			}
2344 		},
2345 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2346 				     DESC_HDR_SEL0_MDEUA |
2347 				     DESC_HDR_MODE0_MDEU_MD5,
2348 	},
2349 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2350 		.alg.hash = {
2351 			.halg.digestsize = SHA1_DIGEST_SIZE,
2352 			.halg.base = {
2353 				.cra_name = "sha1",
2354 				.cra_driver_name = "sha1-talitos",
2355 				.cra_blocksize = SHA1_BLOCK_SIZE,
2356 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2357 					     CRYPTO_ALG_ASYNC,
2358 			}
2359 		},
2360 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2361 				     DESC_HDR_SEL0_MDEUA |
2362 				     DESC_HDR_MODE0_MDEU_SHA1,
2363 	},
2364 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2365 		.alg.hash = {
2366 			.halg.digestsize = SHA224_DIGEST_SIZE,
2367 			.halg.base = {
2368 				.cra_name = "sha224",
2369 				.cra_driver_name = "sha224-talitos",
2370 				.cra_blocksize = SHA224_BLOCK_SIZE,
2371 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2372 					     CRYPTO_ALG_ASYNC,
2373 			}
2374 		},
2375 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2376 				     DESC_HDR_SEL0_MDEUA |
2377 				     DESC_HDR_MODE0_MDEU_SHA224,
2378 	},
2379 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2380 		.alg.hash = {
2381 			.halg.digestsize = SHA256_DIGEST_SIZE,
2382 			.halg.base = {
2383 				.cra_name = "sha256",
2384 				.cra_driver_name = "sha256-talitos",
2385 				.cra_blocksize = SHA256_BLOCK_SIZE,
2386 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2387 					     CRYPTO_ALG_ASYNC,
2388 			}
2389 		},
2390 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2391 				     DESC_HDR_SEL0_MDEUA |
2392 				     DESC_HDR_MODE0_MDEU_SHA256,
2393 	},
2394 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2395 		.alg.hash = {
2396 			.halg.digestsize = SHA384_DIGEST_SIZE,
2397 			.halg.base = {
2398 				.cra_name = "sha384",
2399 				.cra_driver_name = "sha384-talitos",
2400 				.cra_blocksize = SHA384_BLOCK_SIZE,
2401 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2402 					     CRYPTO_ALG_ASYNC,
2403 			}
2404 		},
2405 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2406 				     DESC_HDR_SEL0_MDEUB |
2407 				     DESC_HDR_MODE0_MDEUB_SHA384,
2408 	},
2409 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2410 		.alg.hash = {
2411 			.halg.digestsize = SHA512_DIGEST_SIZE,
2412 			.halg.base = {
2413 				.cra_name = "sha512",
2414 				.cra_driver_name = "sha512-talitos",
2415 				.cra_blocksize = SHA512_BLOCK_SIZE,
2416 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2417 					     CRYPTO_ALG_ASYNC,
2418 			}
2419 		},
2420 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2421 				     DESC_HDR_SEL0_MDEUB |
2422 				     DESC_HDR_MODE0_MDEUB_SHA512,
2423 	},
2424 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2425 		.alg.hash = {
2426 			.halg.digestsize = MD5_DIGEST_SIZE,
2427 			.halg.base = {
2428 				.cra_name = "hmac(md5)",
2429 				.cra_driver_name = "hmac-md5-talitos",
2430 				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2431 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2432 					     CRYPTO_ALG_ASYNC,
2433 			}
2434 		},
2435 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2436 				     DESC_HDR_SEL0_MDEUA |
2437 				     DESC_HDR_MODE0_MDEU_MD5,
2438 	},
2439 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2440 		.alg.hash = {
2441 			.halg.digestsize = SHA1_DIGEST_SIZE,
2442 			.halg.base = {
2443 				.cra_name = "hmac(sha1)",
2444 				.cra_driver_name = "hmac-sha1-talitos",
2445 				.cra_blocksize = SHA1_BLOCK_SIZE,
2446 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2447 					     CRYPTO_ALG_ASYNC,
2448 			}
2449 		},
2450 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2451 				     DESC_HDR_SEL0_MDEUA |
2452 				     DESC_HDR_MODE0_MDEU_SHA1,
2453 	},
2454 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2455 		.alg.hash = {
2456 			.halg.digestsize = SHA224_DIGEST_SIZE,
2457 			.halg.base = {
2458 				.cra_name = "hmac(sha224)",
2459 				.cra_driver_name = "hmac-sha224-talitos",
2460 				.cra_blocksize = SHA224_BLOCK_SIZE,
2461 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2462 					     CRYPTO_ALG_ASYNC,
2463 			}
2464 		},
2465 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2466 				     DESC_HDR_SEL0_MDEUA |
2467 				     DESC_HDR_MODE0_MDEU_SHA224,
2468 	},
2469 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2470 		.alg.hash = {
2471 			.halg.digestsize = SHA256_DIGEST_SIZE,
2472 			.halg.base = {
2473 				.cra_name = "hmac(sha256)",
2474 				.cra_driver_name = "hmac-sha256-talitos",
2475 				.cra_blocksize = SHA256_BLOCK_SIZE,
2476 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2477 					     CRYPTO_ALG_ASYNC,
2478 			}
2479 		},
2480 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2481 				     DESC_HDR_SEL0_MDEUA |
2482 				     DESC_HDR_MODE0_MDEU_SHA256,
2483 	},
2484 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2485 		.alg.hash = {
2486 			.halg.digestsize = SHA384_DIGEST_SIZE,
2487 			.halg.base = {
2488 				.cra_name = "hmac(sha384)",
2489 				.cra_driver_name = "hmac-sha384-talitos",
2490 				.cra_blocksize = SHA384_BLOCK_SIZE,
2491 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2492 					     CRYPTO_ALG_ASYNC,
2493 			}
2494 		},
2495 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2496 				     DESC_HDR_SEL0_MDEUB |
2497 				     DESC_HDR_MODE0_MDEUB_SHA384,
2498 	},
2499 	{	.type = CRYPTO_ALG_TYPE_AHASH,
2500 		.alg.hash = {
2501 			.halg.digestsize = SHA512_DIGEST_SIZE,
2502 			.halg.base = {
2503 				.cra_name = "hmac(sha512)",
2504 				.cra_driver_name = "hmac-sha512-talitos",
2505 				.cra_blocksize = SHA512_BLOCK_SIZE,
2506 				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2507 					     CRYPTO_ALG_ASYNC,
2508 			}
2509 		},
2510 		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2511 				     DESC_HDR_SEL0_MDEUB |
2512 				     DESC_HDR_MODE0_MDEUB_SHA512,
2513 	}
2514 };
2515 
2516 struct talitos_crypto_alg {
2517 	struct list_head entry;
2518 	struct device *dev;
2519 	struct talitos_alg_template algt;
2520 };
2521 
2522 static int talitos_cra_init(struct crypto_tfm *tfm)
2523 {
2524 	struct crypto_alg *alg = tfm->__crt_alg;
2525 	struct talitos_crypto_alg *talitos_alg;
2526 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2527 	struct talitos_private *priv;
2528 
2529 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2530 		talitos_alg = container_of(__crypto_ahash_alg(alg),
2531 					   struct talitos_crypto_alg,
2532 					   algt.alg.hash);
2533 	else
2534 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
2535 					   algt.alg.crypto);
2536 
2537 	/* update context with ptr to dev */
2538 	ctx->dev = talitos_alg->dev;
2539 
2540 	/* assign SEC channel to tfm in round-robin fashion */
2541 	priv = dev_get_drvdata(ctx->dev);
2542 	ctx->ch = atomic_inc_return(&priv->last_chan) &
2543 		  (priv->num_channels - 1);
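	/*
	 * num_channels is validated as a power of two at probe time, so
	 * the mask is a cheap modulo: with e.g. four channels, successive
	 * tfms get channels 1, 2, 3, 0, 1, ...
	 */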
2544 
2545 	/* copy descriptor header template value */
2546 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2547 
2548 	/* select done notification */
2549 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2550 
2551 	return 0;
2552 }
2553 
2554 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2555 {
2556 	talitos_cra_init(crypto_aead_tfm(tfm));
2557 	return 0;
2558 }
2559 
2560 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2561 {
2562 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2563 
2564 	talitos_cra_init(tfm);
2565 
2566 	ctx->keylen = 0;
2567 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2568 				 sizeof(struct talitos_ahash_req_ctx));
2569 
2570 	return 0;
2571 }
2572 
2573 /*
2574  * given the alg's descriptor header template, determine whether the
2575  * descriptor type and the required primary/secondary execution units
2576  * match the h/w capabilities described in the device tree node.
2577  */
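/*
 * For example, an IPSEC_ESP descriptor using the AESU as primary and
 * MDEU-A as secondary execution unit is supported only if the
 * corresponding bits are set in both fsl,descriptor-types-mask and
 * fsl,exec-units-mask.
 */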
2578 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2579 {
2580 	struct talitos_private *priv = dev_get_drvdata(dev);
2581 	int ret;
2582 
2583 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2584 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2585 
2586 	if (SECONDARY_EU(desc_hdr_template))
2587 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2588 		              & priv->exec_units);
2589 
2590 	return ret;
2591 }
2592 
2593 static int talitos_remove(struct platform_device *ofdev)
2594 {
2595 	struct device *dev = &ofdev->dev;
2596 	struct talitos_private *priv = dev_get_drvdata(dev);
2597 	struct talitos_crypto_alg *t_alg, *n;
2598 	int i;
2599 
2600 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2601 		switch (t_alg->algt.type) {
2602 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
2603 			break;
2604 		case CRYPTO_ALG_TYPE_AEAD:
2605 			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
2606 		case CRYPTO_ALG_TYPE_AHASH:
2607 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
2608 			break;
2609 		}
2610 		list_del(&t_alg->entry);
2611 		kfree(t_alg);
2612 	}
2613 
2614 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2615 		talitos_unregister_rng(dev);
2616 
2617 	for (i = 0; priv->chan && i < priv->num_channels; i++)
2618 		kfree(priv->chan[i].fifo);
2619 
2620 	kfree(priv->chan);
2621 
2622 	for (i = 0; i < 2; i++)
2623 		if (priv->irq[i]) {
2624 			free_irq(priv->irq[i], dev);
2625 			irq_dispose_mapping(priv->irq[i]);
2626 		}
2627 
2628 	tasklet_kill(&priv->done_task[0]);
2629 	if (priv->irq[1])
2630 		tasklet_kill(&priv->done_task[1]);
2631 
2632 	iounmap(priv->reg);
2633 
2634 	kfree(priv);
2635 
2636 	return 0;
2637 }
2638 
2639 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2640 						    struct talitos_alg_template
2641 						           *template)
2642 {
2643 	struct talitos_private *priv = dev_get_drvdata(dev);
2644 	struct talitos_crypto_alg *t_alg;
2645 	struct crypto_alg *alg;
2646 
2647 	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2648 	if (!t_alg)
2649 		return ERR_PTR(-ENOMEM);
2650 
2651 	t_alg->algt = *template;
2652 
2653 	switch (t_alg->algt.type) {
2654 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2655 		alg = &t_alg->algt.alg.crypto;
2656 		alg->cra_init = talitos_cra_init;
2657 		alg->cra_type = &crypto_ablkcipher_type;
2658 		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2659 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2660 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2661 		alg->cra_ablkcipher.geniv = "eseqiv";
2662 		break;
2663 	case CRYPTO_ALG_TYPE_AEAD:
2664 		alg = &t_alg->algt.alg.aead.base;
2665 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
2666 		t_alg->algt.alg.aead.setkey = aead_setkey;
2667 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
2668 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
2669 		break;
2670 	case CRYPTO_ALG_TYPE_AHASH:
2671 		alg = &t_alg->algt.alg.hash.halg.base;
2672 		alg->cra_init = talitos_cra_init_ahash;
2673 		alg->cra_type = &crypto_ahash_type;
2674 		t_alg->algt.alg.hash.init = ahash_init;
2675 		t_alg->algt.alg.hash.update = ahash_update;
2676 		t_alg->algt.alg.hash.final = ahash_final;
2677 		t_alg->algt.alg.hash.finup = ahash_finup;
2678 		t_alg->algt.alg.hash.digest = ahash_digest;
2679 		t_alg->algt.alg.hash.setkey = ahash_setkey;
2680 
2681 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2682 		    !strncmp(alg->cra_name, "hmac", 4)) {
2683 			kfree(t_alg);
2684 			return ERR_PTR(-ENOTSUPP);
2685 		}
2686 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2687 		    (!strcmp(alg->cra_name, "sha224") ||
2688 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
2689 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2690 			t_alg->algt.desc_hdr_template =
2691 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2692 					DESC_HDR_SEL0_MDEUA |
2693 					DESC_HDR_MODE0_MDEU_SHA256;
2694 		}
2695 		break;
2696 	default:
2697 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2698 		kfree(t_alg);
2699 		return ERR_PTR(-EINVAL);
2700 	}
2701 
2702 	alg->cra_module = THIS_MODULE;
2703 	alg->cra_priority = TALITOS_CRA_PRIORITY;
2704 	alg->cra_alignmask = 0;
2705 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
2706 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2707 
2708 	t_alg->dev = dev;
2709 
2710 	return t_alg;
2711 }
2712 
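/*
 * SEC1 has a single interrupt serving all four channels.  SEC2+ may
 * expose two lines: when both map, channels 0/2 are handled by the
 * primary irq and channels 1/3 by the secondary one; otherwise a
 * single handler covers all four channels.
 */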
2713 static int talitos_probe_irq(struct platform_device *ofdev)
2714 {
2715 	struct device *dev = &ofdev->dev;
2716 	struct device_node *np = ofdev->dev.of_node;
2717 	struct talitos_private *priv = dev_get_drvdata(dev);
2718 	int err;
2719 	bool is_sec1 = has_ftr_sec1(priv);
2720 
2721 	priv->irq[0] = irq_of_parse_and_map(np, 0);
2722 	if (!priv->irq[0]) {
2723 		dev_err(dev, "failed to map irq\n");
2724 		return -EINVAL;
2725 	}
2726 	if (is_sec1) {
2727 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2728 				  dev_driver_string(dev), dev);
2729 		goto primary_out;
2730 	}
2731 
2732 	priv->irq[1] = irq_of_parse_and_map(np, 1);
2733 
2734 	/* get the primary irq line */
2735 	if (!priv->irq[1]) {
2736 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
2737 				  dev_driver_string(dev), dev);
2738 		goto primary_out;
2739 	}
2740 
2741 	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
2742 			  dev_driver_string(dev), dev);
2743 	if (err)
2744 		goto primary_out;
2745 
2746 	/* get the secondary irq line */
2747 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
2748 			  dev_driver_string(dev), dev);
2749 	if (err) {
2750 		dev_err(dev, "failed to request secondary irq\n");
2751 		irq_dispose_mapping(priv->irq[1]);
2752 		priv->irq[1] = 0;
2753 	}
2754 
2755 	return err;
2756 
2757 primary_out:
2758 	if (err) {
2759 		dev_err(dev, "failed to request primary irq\n");
2760 		irq_dispose_mapping(priv->irq[0]);
2761 		priv->irq[0] = 0;
2762 	}
2763 
2764 	return err;
2765 }
2766 
2767 static int talitos_probe(struct platform_device *ofdev)
2768 {
2769 	struct device *dev = &ofdev->dev;
2770 	struct device_node *np = ofdev->dev.of_node;
2771 	struct talitos_private *priv;
2772 	const unsigned int *prop;
2773 	int i, err;
2774 	int stride;
2775 
2776 	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2777 	if (!priv)
2778 		return -ENOMEM;
2779 
2780 	INIT_LIST_HEAD(&priv->alg_list);
2781 
2782 	dev_set_drvdata(dev, priv);
2783 
2784 	priv->ofdev = ofdev;
2785 
2786 	spin_lock_init(&priv->reg_lock);
2787 
2788 	priv->reg = of_iomap(np, 0);
2789 	if (!priv->reg) {
2790 		dev_err(dev, "failed to of_iomap\n");
2791 		err = -ENOMEM;
2792 		goto err_out;
2793 	}
2794 
2795 	/* get SEC version capabilities from device tree */
2796 	prop = of_get_property(np, "fsl,num-channels", NULL);
2797 	if (prop)
2798 		priv->num_channels = *prop;
2799 
2800 	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2801 	if (prop)
2802 		priv->chfifo_len = *prop;
2803 
2804 	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2805 	if (prop)
2806 		priv->exec_units = *prop;
2807 
2808 	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2809 	if (prop)
2810 		priv->desc_types = *prop;
2811 
2812 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2813 	    !priv->exec_units || !priv->desc_types) {
2814 		dev_err(dev, "invalid property data in device tree node\n");
2815 		err = -EINVAL;
2816 		goto err_out;
2817 	}
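	/*
	 * A matching node, using the example property values from the
	 * fsl,sec2.0 device tree binding, looks like:
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.0";
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0xfe>;
	 *		fsl,descriptor-types-mask = <0x12b0ebf>;
	 *	};
	 */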
2818 
2819 	if (of_device_is_compatible(np, "fsl,sec3.0"))
2820 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2821 
2822 	if (of_device_is_compatible(np, "fsl,sec2.1"))
2823 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2824 				  TALITOS_FTR_SHA224_HWINIT |
2825 				  TALITOS_FTR_HMAC_OK;
2826 
2827 	if (of_device_is_compatible(np, "fsl,sec1.0"))
2828 		priv->features |= TALITOS_FTR_SEC1;
2829 
2830 	if (of_device_is_compatible(np, "fsl,sec1.2")) {
2831 		priv->reg_deu = priv->reg + TALITOS12_DEU;
2832 		priv->reg_aesu = priv->reg + TALITOS12_AESU;
2833 		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
2834 		stride = TALITOS1_CH_STRIDE;
2835 	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
2836 		priv->reg_deu = priv->reg + TALITOS10_DEU;
2837 		priv->reg_aesu = priv->reg + TALITOS10_AESU;
2838 		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
2839 		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
2840 		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
2841 		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
2842 		stride = TALITOS1_CH_STRIDE;
2843 	} else {
2844 		priv->reg_deu = priv->reg + TALITOS2_DEU;
2845 		priv->reg_aesu = priv->reg + TALITOS2_AESU;
2846 		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
2847 		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
2848 		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
2849 		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
2850 		priv->reg_keu = priv->reg + TALITOS2_KEU;
2851 		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
2852 		stride = TALITOS2_CH_STRIDE;
2853 	}
2854 
2855 	err = talitos_probe_irq(ofdev);
2856 	if (err)
2857 		goto err_out;
2858 
2859 	if (of_device_is_compatible(np, "fsl,sec1.0")) {
2860 		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
2861 			     (unsigned long)dev);
2862 	} else {
2863 		if (!priv->irq[1]) {
2864 			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
2865 				     (unsigned long)dev);
2866 		} else {
2867 			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
2868 				     (unsigned long)dev);
2869 			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
2870 				     (unsigned long)dev);
2871 		}
2872 	}
2873 
2874 	priv->chan = kcalloc(priv->num_channels,
2875 			     sizeof(struct talitos_channel), GFP_KERNEL);
2876 	if (!priv->chan) {
2877 		dev_err(dev, "failed to allocate channel management space\n");
2878 		err = -ENOMEM;
2879 		goto err_out;
2880 	}
2881 
2882 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2883 
2884 	for (i = 0; i < priv->num_channels; i++) {
2885 		priv->chan[i].reg = priv->reg + stride * (i + 1);
2886 		if (!priv->irq[1] || !(i & 1))
2887 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
2888 
2889 		spin_lock_init(&priv->chan[i].head_lock);
2890 		spin_lock_init(&priv->chan[i].tail_lock);
2891 
2892 		priv->chan[i].fifo = kcalloc(priv->fifo_len,
2893 					     sizeof(struct talitos_request),
					     GFP_KERNEL);
2894 		if (!priv->chan[i].fifo) {
2895 			dev_err(dev, "failed to allocate request fifo %d\n", i);
2896 			err = -ENOMEM;
2897 			goto err_out;
2898 		}
2899 
2900 		atomic_set(&priv->chan[i].submit_count,
2901 			   -(priv->chfifo_len - 1));
2902 	}
2903 
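	/*
	 * the SEC takes 36-bit DMA addresses: the low 32 bits go in a
	 * descriptor's ptr field and the upper bits in its eptr field
	 */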
2904 	dma_set_mask(dev, DMA_BIT_MASK(36));
2905 
2906 	/* reset and initialize the h/w */
2907 	err = init_device(dev);
2908 	if (err) {
2909 		dev_err(dev, "failed to initialize device\n");
2910 		goto err_out;
2911 	}
2912 
2913 	/* register the RNG, if available */
2914 	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
2915 		err = talitos_register_rng(dev);
2916 		if (err) {
2917 			dev_err(dev, "failed to register hwrng: %d\n", err);
2918 			goto err_out;
2919 		}
2920 		dev_info(dev, "hwrng\n");
2921 	}
2922 
2923 	/* register crypto algorithms the device supports */
2924 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2925 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2926 			struct talitos_crypto_alg *t_alg;
2927 			struct crypto_alg *alg = NULL;
2928 
2929 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2930 			if (IS_ERR(t_alg)) {
2931 				err = PTR_ERR(t_alg);
2932 				if (err == -ENOTSUPP)
2933 					continue;
2934 				goto err_out;
2935 			}
2936 
2937 			switch (t_alg->algt.type) {
2938 			case CRYPTO_ALG_TYPE_ABLKCIPHER:
2939 				err = crypto_register_alg(
2940 						&t_alg->algt.alg.crypto);
2941 				alg = &t_alg->algt.alg.crypto;
2942 				break;
2943 
2944 			case CRYPTO_ALG_TYPE_AEAD:
2945 				err = crypto_register_aead(
2946 					&t_alg->algt.alg.aead);
2947 				alg = &t_alg->algt.alg.aead.base;
2948 				break;
2949 
2950 			case CRYPTO_ALG_TYPE_AHASH:
2951 				err = crypto_register_ahash(
2952 						&t_alg->algt.alg.hash);
2953 				alg = &t_alg->algt.alg.hash.halg.base;
2954 				break;
2955 			}
2956 			if (err) {
2957 				dev_err(dev, "%s alg registration failed\n",
2958 					alg->cra_driver_name);
2959 				kfree(t_alg);
2960 			} else {
2961 				list_add_tail(&t_alg->entry, &priv->alg_list);
			}
2962 		}
2963 	}
2964 	if (!list_empty(&priv->alg_list))
2965 		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
2966 			 (char *)of_get_property(np, "compatible", NULL));
2967 
2968 	return 0;
2969 
2970 err_out:
2971 	talitos_remove(ofdev);
2972 
2973 	return err;
2974 }
2975 
2976 static const struct of_device_id talitos_match[] = {
2977 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
2978 	{
2979 		.compatible = "fsl,sec1.0",
2980 	},
2981 #endif
2982 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
2983 	{
2984 		.compatible = "fsl,sec2.0",
2985 	},
2986 #endif
2987 	{},
2988 };
2989 MODULE_DEVICE_TABLE(of, talitos_match);
2990 
2991 static struct platform_driver talitos_driver = {
2992 	.driver = {
2993 		.name = "talitos",
2994 		.of_match_table = talitos_match,
2995 	},
2996 	.probe = talitos_probe,
2997 	.remove = talitos_remove,
2998 };
2999 
3000 module_platform_driver(talitos_driver);
3001 
3002 MODULE_LICENSE("GPL");
3003 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3004 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
3005