xref: /openbmc/linux/drivers/crypto/atmel-tdes.c (revision 0a04480d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cryptographic API.
4  *
5  * Support for ATMEL DES/TDES HW acceleration.
6  *
7  * Copyright (c) 2012 Eukréa Electromatique - ATMEL
8  * Author: Nicolas Royer <nicolas@eukrea.com>
9  *
10  * Some ideas are from the omap-aes.c driver.
11  */
12 
13 
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/err.h>
18 #include <linux/clk.h>
19 #include <linux/io.h>
20 #include <linux/hw_random.h>
21 #include <linux/platform_device.h>
22 
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28 #include <linux/scatterlist.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/of_device.h>
31 #include <linux/delay.h>
32 #include <linux/crypto.h>
33 #include <linux/cryptohash.h>
34 #include <crypto/scatterwalk.h>
35 #include <crypto/algapi.h>
36 #include <crypto/internal/des.h>
37 #include <crypto/hash.h>
38 #include <crypto/internal/hash.h>
39 #include <linux/platform_data/crypto-atmel.h>
40 #include "atmel-tdes-regs.h"
41 
42 /* TDES flags  */
43 #define TDES_FLAGS_MODE_MASK		0x00ff
44 #define TDES_FLAGS_ENCRYPT	BIT(0)
45 #define TDES_FLAGS_CBC		BIT(1)
46 #define TDES_FLAGS_CFB		BIT(2)
47 #define TDES_FLAGS_CFB8		BIT(3)
48 #define TDES_FLAGS_CFB16	BIT(4)
49 #define TDES_FLAGS_CFB32	BIT(5)
50 #define TDES_FLAGS_CFB64	BIT(6)
51 #define TDES_FLAGS_OFB		BIT(7)
52 
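/* Driver state flags; these bits sit above TDES_FLAGS_MODE_MASK. */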
53 #define TDES_FLAGS_INIT		BIT(16)
54 #define TDES_FLAGS_FAST		BIT(17)
55 #define TDES_FLAGS_BUSY		BIT(18)
56 #define TDES_FLAGS_DMA		BIT(19)
57 
58 #define ATMEL_TDES_QUEUE_LENGTH	50
59 
60 #define CFB8_BLOCK_SIZE		1
61 #define CFB16_BLOCK_SIZE	2
62 #define CFB32_BLOCK_SIZE	4
63 
64 struct atmel_tdes_caps {
65 	bool	has_dma;
66 	u32		has_cfb_3keys;
67 };
68 
69 struct atmel_tdes_dev;
70 
71 struct atmel_tdes_ctx {
72 	struct atmel_tdes_dev *dd;
73 
74 	int		keylen;
75 	u32		key[3*DES_KEY_SIZE / sizeof(u32)];
76 	unsigned long	flags;
77 
78 	u16		block_size;
79 };
80 
81 struct atmel_tdes_reqctx {
82 	unsigned long mode;
83 };
84 
85 struct atmel_tdes_dma {
86 	struct dma_chan			*chan;
87 	struct dma_slave_config dma_conf;
88 };
89 
90 struct atmel_tdes_dev {
91 	struct list_head	list;
92 	unsigned long		phys_base;
93 	void __iomem		*io_base;
94 
95 	struct atmel_tdes_ctx	*ctx;
96 	struct device		*dev;
97 	struct clk			*iclk;
98 	int					irq;
99 
100 	unsigned long		flags;
101 	int			err;
102 
103 	spinlock_t		lock;
104 	struct crypto_queue	queue;
105 
106 	struct tasklet_struct	done_task;
107 	struct tasklet_struct	queue_task;
108 
109 	struct ablkcipher_request	*req;
110 	size_t				total;
111 
112 	struct scatterlist	*in_sg;
113 	unsigned int		nb_in_sg;
114 	size_t				in_offset;
115 	struct scatterlist	*out_sg;
116 	unsigned int		nb_out_sg;
117 	size_t				out_offset;
118 
119 	size_t	buflen;
120 	size_t	dma_size;
121 
122 	void	*buf_in;
123 	int		dma_in;
124 	dma_addr_t	dma_addr_in;
125 	struct atmel_tdes_dma	dma_lch_in;
126 
127 	void	*buf_out;
128 	int		dma_out;
129 	dma_addr_t	dma_addr_out;
130 	struct atmel_tdes_dma	dma_lch_out;
131 
132 	struct atmel_tdes_caps	caps;
133 
134 	u32	hw_version;
135 };
136 
137 struct atmel_tdes_drv {
138 	struct list_head	dev_list;
139 	spinlock_t		lock;
140 };
141 
142 static struct atmel_tdes_drv atmel_tdes = {
143 	.dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
144 	.lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
145 };
146 
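/*
 * Copy up to buflen bytes (bounded by total) between the scatterlist and a
 * linear buffer, advancing *sg and *offset as data is consumed.  @out selects
 * the direction: 0 copies from the scatterlist into buf, 1 copies buf back
 * into the scatterlist.  Returns the number of bytes copied.
 */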
147 static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
148 			void *buf, size_t buflen, size_t total, int out)
149 {
150 	size_t count, off = 0;
151 
152 	while (buflen && total) {
153 		count = min((*sg)->length - *offset, total);
154 		count = min(count, buflen);
155 
156 		if (!count)
157 			return off;
158 
159 		scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
160 
161 		off += count;
162 		buflen -= count;
163 		*offset += count;
164 		total -= count;
165 
166 		if (*offset == (*sg)->length) {
167 			*sg = sg_next(*sg);
168 			if (*sg)
169 				*offset = 0;
170 			else
171 				total = 0;
172 		}
173 	}
174 
175 	return off;
176 }
177 
178 static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
179 {
180 	return readl_relaxed(dd->io_base + offset);
181 }
182 
183 static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
184 					u32 offset, u32 value)
185 {
186 	writel_relaxed(value, dd->io_base + offset);
187 }
188 
189 static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
190 					u32 *value, int count)
191 {
192 	for (; count--; value++, offset += 4)
193 		atmel_tdes_write(dd, offset, *value);
194 }
195 
196 static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
197 {
198 	struct atmel_tdes_dev *tdes_dd = NULL;
199 	struct atmel_tdes_dev *tmp;
200 
201 	spin_lock_bh(&atmel_tdes.lock);
202 	if (!ctx->dd) {
203 		list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
204 			tdes_dd = tmp;
205 			break;
206 		}
207 		ctx->dd = tdes_dd;
208 	} else {
209 		tdes_dd = ctx->dd;
210 	}
211 	spin_unlock_bh(&atmel_tdes.lock);
212 
213 	return tdes_dd;
214 }
215 
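/*
 * Enable the peripheral clock and, on first use, soft-reset the TDES block so
 * it starts from a known state.
 */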
216 static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
217 {
218 	int err;
219 
220 	err = clk_prepare_enable(dd->iclk);
221 	if (err)
222 		return err;
223 
224 	if (!(dd->flags & TDES_FLAGS_INIT)) {
225 		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
226 		dd->flags |= TDES_FLAGS_INIT;
227 		dd->err = 0;
228 	}
229 
230 	return 0;
231 }
232 
233 static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
234 {
235 	return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
236 }
237 
238 static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
239 {
240 	atmel_tdes_hw_init(dd);
241 
242 	dd->hw_version = atmel_tdes_get_version(dd);
243 
244 	dev_info(dd->dev,
245 			"version: 0x%x\n", dd->hw_version);
246 
247 	clk_disable_unprepare(dd->iclk);
248 }
249 
250 static void atmel_tdes_dma_callback(void *data)
251 {
252 	struct atmel_tdes_dev *dd = data;
253 
254 	/* dma_lch_out - completed */
255 	tasklet_schedule(&dd->done_task);
256 }
257 
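/*
 * Program the TDES control, mode, key and IV registers for the current
 * request: the key mode (DES / 2-key / 3-key TDES) is derived from the key
 * length, the operation mode (ECB/CBC/CFB/OFB and CFB data size) from
 * dd->flags, and the IV is loaded for the modes that need one.
 */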
258 static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
259 {
260 	int err;
261 	u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
262 
263 	err = atmel_tdes_hw_init(dd);
264 
265 	if (err)
266 		return err;
267 
268 	if (!dd->caps.has_dma)
269 		atmel_tdes_write(dd, TDES_PTCR,
270 			TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
271 
272 	/* MR register must be set before IV registers */
273 	if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
274 		valmr |= TDES_MR_KEYMOD_3KEY;
275 		valmr |= TDES_MR_TDESMOD_TDES;
276 	} else if (dd->ctx->keylen > DES_KEY_SIZE) {
277 		valmr |= TDES_MR_KEYMOD_2KEY;
278 		valmr |= TDES_MR_TDESMOD_TDES;
279 	} else {
280 		valmr |= TDES_MR_TDESMOD_DES;
281 	}
282 
283 	if (dd->flags & TDES_FLAGS_CBC) {
284 		valmr |= TDES_MR_OPMOD_CBC;
285 	} else if (dd->flags & TDES_FLAGS_CFB) {
286 		valmr |= TDES_MR_OPMOD_CFB;
287 
288 		if (dd->flags & TDES_FLAGS_CFB8)
289 			valmr |= TDES_MR_CFBS_8b;
290 		else if (dd->flags & TDES_FLAGS_CFB16)
291 			valmr |= TDES_MR_CFBS_16b;
292 		else if (dd->flags & TDES_FLAGS_CFB32)
293 			valmr |= TDES_MR_CFBS_32b;
294 		else if (dd->flags & TDES_FLAGS_CFB64)
295 			valmr |= TDES_MR_CFBS_64b;
296 	} else if (dd->flags & TDES_FLAGS_OFB) {
297 		valmr |= TDES_MR_OPMOD_OFB;
298 	}
299 
300 	if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
301 		valmr |= TDES_MR_CYPHER_ENC;
302 
303 	atmel_tdes_write(dd, TDES_CR, valcr);
304 	atmel_tdes_write(dd, TDES_MR, valmr);
305 
306 	atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
307 						dd->ctx->keylen >> 2);
308 
309 	if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
310 		(dd->flags & TDES_FLAGS_OFB)) && dd->req->info) {
311 		atmel_tdes_write_n(dd, TDES_IV1R, dd->req->info, 2);
312 	}
313 
314 	return 0;
315 }
316 
317 static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
318 {
319 	int err = 0;
320 	size_t count;
321 
322 	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
323 
324 	if (dd->flags & TDES_FLAGS_FAST) {
325 		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
326 		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
327 	} else {
328 		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
329 					   dd->dma_size, DMA_FROM_DEVICE);
330 
331 		/* copy data */
332 		count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
333 				dd->buf_out, dd->buflen, dd->dma_size, 1);
334 		if (count != dd->dma_size) {
335 			err = -EINVAL;
336 			pr_err("not all data converted: %zu\n", count);
337 		}
338 	}
339 
340 	return err;
341 }
342 
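/*
 * Allocate one page each for the input and output bounce buffers and map them
 * for DMA.  These buffers back the "slow" path used when the request
 * scatterlists cannot be DMA-mapped directly.
 */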
343 static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
344 {
345 	int err = -ENOMEM;
346 
347 	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
348 	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
349 	dd->buflen = PAGE_SIZE;
350 	dd->buflen &= ~(DES_BLOCK_SIZE - 1);
351 
352 	if (!dd->buf_in || !dd->buf_out) {
353 		dev_err(dd->dev, "unable to alloc pages.\n");
354 		goto err_alloc;
355 	}
356 
357 	/* MAP here */
358 	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
359 					dd->buflen, DMA_TO_DEVICE);
360 	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
361 		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
362 		err = -EINVAL;
363 		goto err_map_in;
364 	}
365 
366 	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
367 					dd->buflen, DMA_FROM_DEVICE);
368 	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
369 		dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
370 		err = -EINVAL;
371 		goto err_map_out;
372 	}
373 
374 	return 0;
375 
376 err_map_out:
377 	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
378 		DMA_TO_DEVICE);
379 err_map_in:
380 err_alloc:
381 	free_page((unsigned long)dd->buf_out);
382 	free_page((unsigned long)dd->buf_in);
383 	if (err)
384 		pr_err("error: %d\n", err);
385 	return err;
386 }
387 
388 static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
389 {
390 	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
391 			 DMA_FROM_DEVICE);
392 	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
393 		DMA_TO_DEVICE);
394 	free_page((unsigned long)dd->buf_out);
395 	free_page((unsigned long)dd->buf_in);
396 }
397 
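/*
 * Start a transfer through the PDC (Peripheral DMA Controller): program the
 * transmit/receive pointer and counter registers, enable the end-of-receive
 * interrupt and kick both channels.  The counters are expressed in transfer
 * units, whose size depends on the CFB data size.
 */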
398 static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
399 			       dma_addr_t dma_addr_out, int length)
400 {
401 	struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
402 	struct atmel_tdes_dev *dd = ctx->dd;
403 	int len32;
404 
405 	dd->dma_size = length;
406 
407 	if (!(dd->flags & TDES_FLAGS_FAST)) {
408 		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
409 					   DMA_TO_DEVICE);
410 	}
411 
412 	if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
413 		len32 = DIV_ROUND_UP(length, sizeof(u8));
414 	else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
415 		len32 = DIV_ROUND_UP(length, sizeof(u16));
416 	else
417 		len32 = DIV_ROUND_UP(length, sizeof(u32));
418 
419 	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
420 	atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
421 	atmel_tdes_write(dd, TDES_TCR, len32);
422 	atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
423 	atmel_tdes_write(dd, TDES_RCR, len32);
424 
425 	/* Enable Interrupt */
426 	atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);
427 
428 	/* Start DMA transfer */
429 	atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);
430 
431 	return 0;
432 }
433 
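/*
 * Start a transfer through the DMA engine API: pick the slave bus width from
 * the CFB data size, build a one-entry scatterlist per direction and submit
 * both descriptors.  Completion of the output descriptor schedules the done
 * tasklet via atmel_tdes_dma_callback().
 */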
434 static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
435 			       dma_addr_t dma_addr_out, int length)
436 {
437 	struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
438 	struct atmel_tdes_dev *dd = ctx->dd;
439 	struct scatterlist sg[2];
440 	struct dma_async_tx_descriptor	*in_desc, *out_desc;
441 
442 	dd->dma_size = length;
443 
444 	if (!(dd->flags & TDES_FLAGS_FAST)) {
445 		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
446 					   DMA_TO_DEVICE);
447 	}
448 
449 	if (dd->flags & TDES_FLAGS_CFB8) {
450 		dd->dma_lch_in.dma_conf.dst_addr_width =
451 			DMA_SLAVE_BUSWIDTH_1_BYTE;
452 		dd->dma_lch_out.dma_conf.src_addr_width =
453 			DMA_SLAVE_BUSWIDTH_1_BYTE;
454 	} else if (dd->flags & TDES_FLAGS_CFB16) {
455 		dd->dma_lch_in.dma_conf.dst_addr_width =
456 			DMA_SLAVE_BUSWIDTH_2_BYTES;
457 		dd->dma_lch_out.dma_conf.src_addr_width =
458 			DMA_SLAVE_BUSWIDTH_2_BYTES;
459 	} else {
460 		dd->dma_lch_in.dma_conf.dst_addr_width =
461 			DMA_SLAVE_BUSWIDTH_4_BYTES;
462 		dd->dma_lch_out.dma_conf.src_addr_width =
463 			DMA_SLAVE_BUSWIDTH_4_BYTES;
464 	}
465 
466 	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
467 	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
468 
469 	dd->flags |= TDES_FLAGS_DMA;
470 
471 	sg_init_table(&sg[0], 1);
472 	sg_dma_address(&sg[0]) = dma_addr_in;
473 	sg_dma_len(&sg[0]) = length;
474 
475 	sg_init_table(&sg[1], 1);
476 	sg_dma_address(&sg[1]) = dma_addr_out;
477 	sg_dma_len(&sg[1]) = length;
478 
479 	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
480 				1, DMA_MEM_TO_DEV,
481 				DMA_PREP_INTERRUPT  |  DMA_CTRL_ACK);
482 	if (!in_desc)
483 		return -EINVAL;
484 
485 	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
486 				1, DMA_DEV_TO_MEM,
487 				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
488 	if (!out_desc)
489 		return -EINVAL;
490 
491 	out_desc->callback = atmel_tdes_dma_callback;
492 	out_desc->callback_param = dd;
493 
494 	dmaengine_submit(out_desc);
495 	dma_async_issue_pending(dd->dma_lch_out.chan);
496 
497 	dmaengine_submit(in_desc);
498 	dma_async_issue_pending(dd->dma_lch_in.chan);
499 
500 	return 0;
501 }
502 
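/*
 * Start processing the next chunk of the current request.  The "fast" path
 * DMA-maps the caller's scatterlists directly when both are 32-bit aligned
 * and cover a whole number of blocks; otherwise the data is staged through
 * the bounce buffers set up in atmel_tdes_buff_init().
 */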
503 static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
504 {
505 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
506 					crypto_ablkcipher_reqtfm(dd->req));
507 	int err, fast = 0, in, out;
508 	size_t count;
509 	dma_addr_t addr_in, addr_out;
510 
511 	if ((!dd->in_offset) && (!dd->out_offset)) {
512 		/* check for alignment */
513 		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
514 			IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
515 		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
516 			IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
517 		fast = in && out;
518 
519 		if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
520 			fast = 0;
521 	}
522 
523 
524 	if (fast)  {
525 		count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
526 		count = min_t(size_t, count, sg_dma_len(dd->out_sg));
527 
528 		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
529 		if (!err) {
530 			dev_err(dd->dev, "dma_map_sg() error\n");
531 			return -EINVAL;
532 		}
533 
534 		err = dma_map_sg(dd->dev, dd->out_sg, 1,
535 				DMA_FROM_DEVICE);
536 		if (!err) {
537 			dev_err(dd->dev, "dma_map_sg() error\n");
538 			dma_unmap_sg(dd->dev, dd->in_sg, 1,
539 				DMA_TO_DEVICE);
540 			return -EINVAL;
541 		}
542 
543 		addr_in = sg_dma_address(dd->in_sg);
544 		addr_out = sg_dma_address(dd->out_sg);
545 
546 		dd->flags |= TDES_FLAGS_FAST;
547 
548 	} else {
549 		/* use cache buffers */
550 		count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
551 				dd->buf_in, dd->buflen, dd->total, 0);
552 
553 		addr_in = dd->dma_addr_in;
554 		addr_out = dd->dma_addr_out;
555 
556 		dd->flags &= ~TDES_FLAGS_FAST;
557 	}
558 
559 	dd->total -= count;
560 
561 	if (dd->caps.has_dma)
562 		err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
563 	else
564 		err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
565 
566 	if (err && (dd->flags & TDES_FLAGS_FAST)) {
567 		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
568 		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
569 	}
570 
571 	return err;
572 }
573 
574 static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
575 {
576 	struct ablkcipher_request *req = dd->req;
577 
578 	clk_disable_unprepare(dd->iclk);
579 
580 	dd->flags &= ~TDES_FLAGS_BUSY;
581 
582 	req->base.complete(&req->base, err);
583 }
584 
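/*
 * Enqueue a new request (if any) and, when the hardware is idle, dequeue the
 * next one, program the hardware and start the transfer.  Called from both
 * atmel_tdes_crypt() and the queue tasklet.
 */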
585 static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
586 			       struct ablkcipher_request *req)
587 {
588 	struct crypto_async_request *async_req, *backlog;
589 	struct atmel_tdes_ctx *ctx;
590 	struct atmel_tdes_reqctx *rctx;
591 	unsigned long flags;
592 	int err, ret = 0;
593 
594 	spin_lock_irqsave(&dd->lock, flags);
595 	if (req)
596 		ret = ablkcipher_enqueue_request(&dd->queue, req);
597 	if (dd->flags & TDES_FLAGS_BUSY) {
598 		spin_unlock_irqrestore(&dd->lock, flags);
599 		return ret;
600 	}
601 	backlog = crypto_get_backlog(&dd->queue);
602 	async_req = crypto_dequeue_request(&dd->queue);
603 	if (async_req)
604 		dd->flags |= TDES_FLAGS_BUSY;
605 	spin_unlock_irqrestore(&dd->lock, flags);
606 
607 	if (!async_req)
608 		return ret;
609 
610 	if (backlog)
611 		backlog->complete(backlog, -EINPROGRESS);
612 
613 	req = ablkcipher_request_cast(async_req);
614 
615 	/* assign new request to device */
616 	dd->req = req;
617 	dd->total = req->nbytes;
618 	dd->in_offset = 0;
619 	dd->in_sg = req->src;
620 	dd->out_offset = 0;
621 	dd->out_sg = req->dst;
622 
623 	rctx = ablkcipher_request_ctx(req);
624 	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
625 	rctx->mode &= TDES_FLAGS_MODE_MASK;
626 	dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
627 	dd->ctx = ctx;
628 	ctx->dd = dd;
629 
630 	err = atmel_tdes_write_ctrl(dd);
631 	if (!err)
632 		err = atmel_tdes_crypt_start(dd);
633 	if (err) {
634 		/* done_task will not finish it, so do it here */
635 		atmel_tdes_finish_req(dd, err);
636 		tasklet_schedule(&dd->queue_task);
637 	}
638 
639 	return ret;
640 }
641 
642 static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
643 {
644 	int err = -EINVAL;
645 	size_t count;
646 
647 	if (dd->flags & TDES_FLAGS_DMA) {
648 		err = 0;
649 		if  (dd->flags & TDES_FLAGS_FAST) {
650 			dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
651 			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
652 		} else {
653 			dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
654 				dd->dma_size, DMA_FROM_DEVICE);
655 
656 			/* copy data */
657 			count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
658 				dd->buf_out, dd->buflen, dd->dma_size, 1);
659 			if (count != dd->dma_size) {
660 				err = -EINVAL;
661 				pr_err("not all data converted: %zu\n", count);
662 			}
663 		}
664 	}
665 	return err;
666 }
667 
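/*
 * Common entry point for all encrypt/decrypt operations: check that the
 * request length is a whole number of blocks for the selected mode, record
 * the mode in the request context and hand the request to the queue.
 */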
668 static int atmel_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
669 {
670 	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(
671 			crypto_ablkcipher_reqtfm(req));
672 	struct atmel_tdes_reqctx *rctx = ablkcipher_request_ctx(req);
673 
674 	if (mode & TDES_FLAGS_CFB8) {
675 		if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
676 			pr_err("request size is not a multiple of the CFB8 block size\n");
677 			return -EINVAL;
678 		}
679 		ctx->block_size = CFB8_BLOCK_SIZE;
680 	} else if (mode & TDES_FLAGS_CFB16) {
681 		if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
682 			pr_err("request size is not a multiple of the CFB16 block size\n");
683 			return -EINVAL;
684 		}
685 		ctx->block_size = CFB16_BLOCK_SIZE;
686 	} else if (mode & TDES_FLAGS_CFB32) {
687 		if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
688 			pr_err("request size is not a multiple of the CFB32 block size\n");
689 			return -EINVAL;
690 		}
691 		ctx->block_size = CFB32_BLOCK_SIZE;
692 	} else {
693 		if (!IS_ALIGNED(req->nbytes, DES_BLOCK_SIZE)) {
694 			pr_err("request size is not a multiple of the DES block size\n");
695 			return -EINVAL;
696 		}
697 		ctx->block_size = DES_BLOCK_SIZE;
698 	}
699 
700 	rctx->mode = mode;
701 
702 	return atmel_tdes_handle_queue(ctx->dd, req);
703 }
704 
705 static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
706 {
707 	struct at_dma_slave	*sl = slave;
708 
709 	if (sl && sl->dma_dev == chan->device->dev) {
710 		chan->private = sl;
711 		return true;
712 	} else {
713 		return false;
714 	}
715 }
716 
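/*
 * Request the two DMA slave channels (memory-to-device for input,
 * device-to-memory for output) and preconfigure them to target the TDES
 * input/output data registers.
 */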
717 static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
718 			struct crypto_platform_data *pdata)
719 {
720 	dma_cap_mask_t mask;
721 
722 	dma_cap_zero(mask);
723 	dma_cap_set(DMA_SLAVE, mask);
724 
725 	/* Try to grab 2 DMA channels */
726 	dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
727 			atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
728 	if (!dd->dma_lch_in.chan)
729 		goto err_dma_in;
730 
731 	dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
732 	dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
733 		TDES_IDATA1R;
734 	dd->dma_lch_in.dma_conf.src_maxburst = 1;
735 	dd->dma_lch_in.dma_conf.src_addr_width =
736 		DMA_SLAVE_BUSWIDTH_4_BYTES;
737 	dd->dma_lch_in.dma_conf.dst_maxburst = 1;
738 	dd->dma_lch_in.dma_conf.dst_addr_width =
739 		DMA_SLAVE_BUSWIDTH_4_BYTES;
740 	dd->dma_lch_in.dma_conf.device_fc = false;
741 
742 	dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
743 			atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
744 	if (!dd->dma_lch_out.chan)
745 		goto err_dma_out;
746 
747 	dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
748 	dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
749 		TDES_ODATA1R;
750 	dd->dma_lch_out.dma_conf.src_maxburst = 1;
751 	dd->dma_lch_out.dma_conf.src_addr_width =
752 		DMA_SLAVE_BUSWIDTH_4_BYTES;
753 	dd->dma_lch_out.dma_conf.dst_maxburst = 1;
754 	dd->dma_lch_out.dma_conf.dst_addr_width =
755 		DMA_SLAVE_BUSWIDTH_4_BYTES;
756 	dd->dma_lch_out.dma_conf.device_fc = false;
757 
758 	return 0;
759 
760 err_dma_out:
761 	dma_release_channel(dd->dma_lch_in.chan);
762 err_dma_in:
763 	dev_warn(dd->dev, "no DMA channel available\n");
764 	return -ENODEV;
765 }
766 
767 static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
768 {
769 	dma_release_channel(dd->dma_lch_in.chan);
770 	dma_release_channel(dd->dma_lch_out.chan);
771 }
772 
773 static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
774 			   unsigned int keylen)
775 {
776 	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
777 	int err;
778 
779 	err = verify_ablkcipher_des_key(tfm, key);
780 	if (err)
781 		return err;
782 
783 	memcpy(ctx->key, key, keylen);
784 	ctx->keylen = keylen;
785 
786 	return 0;
787 }
788 
789 static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
790 			   unsigned int keylen)
791 {
792 	struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
793 	int err;
794 
795 	err = verify_ablkcipher_des3_key(tfm, key);
796 	if (err)
797 		return err;
798 
799 	memcpy(ctx->key, key, keylen);
800 	ctx->keylen = keylen;
801 
802 	return 0;
803 }
804 
805 static int atmel_tdes_ecb_encrypt(struct ablkcipher_request *req)
806 {
807 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
808 }
809 
810 static int atmel_tdes_ecb_decrypt(struct ablkcipher_request *req)
811 {
812 	return atmel_tdes_crypt(req, 0);
813 }
814 
815 static int atmel_tdes_cbc_encrypt(struct ablkcipher_request *req)
816 {
817 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
818 }
819 
820 static int atmel_tdes_cbc_decrypt(struct ablkcipher_request *req)
821 {
822 	return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
823 }
824 static int atmel_tdes_cfb_encrypt(struct ablkcipher_request *req)
825 {
826 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
827 }
828 
829 static int atmel_tdes_cfb_decrypt(struct ablkcipher_request *req)
830 {
831 	return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
832 }
833 
834 static int atmel_tdes_cfb8_encrypt(struct ablkcipher_request *req)
835 {
836 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
837 						TDES_FLAGS_CFB8);
838 }
839 
840 static int atmel_tdes_cfb8_decrypt(struct ablkcipher_request *req)
841 {
842 	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
843 }
844 
845 static int atmel_tdes_cfb16_encrypt(struct ablkcipher_request *req)
846 {
847 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
848 						TDES_FLAGS_CFB16);
849 }
850 
851 static int atmel_tdes_cfb16_decrypt(struct ablkcipher_request *req)
852 {
853 	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
854 }
855 
856 static int atmel_tdes_cfb32_encrypt(struct ablkcipher_request *req)
857 {
858 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
859 						TDES_FLAGS_CFB32);
860 }
861 
862 static int atmel_tdes_cfb32_decrypt(struct ablkcipher_request *req)
863 {
864 	return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
865 }
866 
867 static int atmel_tdes_ofb_encrypt(struct ablkcipher_request *req)
868 {
869 	return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
870 }
871 
872 static int atmel_tdes_ofb_decrypt(struct ablkcipher_request *req)
873 {
874 	return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
875 }
876 
877 static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
878 {
879 	struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
880 	struct atmel_tdes_dev *dd;
881 
882 	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_tdes_reqctx);
883 
884 	dd = atmel_tdes_find_dev(ctx);
885 	if (!dd)
886 		return -ENODEV;
887 
888 	return 0;
889 }
890 
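/*
 * ablkcipher algorithms exposed by this driver.  The single-DES entries use
 * atmel_des_setkey(), the triple-DES entries use atmel_tdes_setkey().
 */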
891 static struct crypto_alg tdes_algs[] = {
892 {
893 	.cra_name		= "ecb(des)",
894 	.cra_driver_name	= "atmel-ecb-des",
895 	.cra_priority		= 100,
896 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
897 	.cra_blocksize		= DES_BLOCK_SIZE,
898 	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
899 	.cra_alignmask		= 0x7,
900 	.cra_type		= &crypto_ablkcipher_type,
901 	.cra_module		= THIS_MODULE,
902 	.cra_init		= atmel_tdes_cra_init,
903 	.cra_u.ablkcipher = {
904 		.min_keysize	= DES_KEY_SIZE,
905 		.max_keysize	= DES_KEY_SIZE,
906 		.setkey		= atmel_des_setkey,
907 		.encrypt	= atmel_tdes_ecb_encrypt,
908 		.decrypt	= atmel_tdes_ecb_decrypt,
909 	}
910 },
911 {
912 	.cra_name		= "cbc(des)",
913 	.cra_driver_name	= "atmel-cbc-des",
914 	.cra_priority		= 100,
915 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
916 	.cra_blocksize		= DES_BLOCK_SIZE,
917 	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
918 	.cra_alignmask		= 0x7,
919 	.cra_type		= &crypto_ablkcipher_type,
920 	.cra_module		= THIS_MODULE,
921 	.cra_init		= atmel_tdes_cra_init,
922 	.cra_u.ablkcipher = {
923 		.min_keysize	= DES_KEY_SIZE,
924 		.max_keysize	= DES_KEY_SIZE,
925 		.ivsize		= DES_BLOCK_SIZE,
926 		.setkey		= atmel_des_setkey,
927 		.encrypt	= atmel_tdes_cbc_encrypt,
928 		.decrypt	= atmel_tdes_cbc_decrypt,
929 	}
930 },
931 {
932 	.cra_name		= "cfb(des)",
933 	.cra_driver_name	= "atmel-cfb-des",
934 	.cra_priority		= 100,
935 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
936 	.cra_blocksize		= DES_BLOCK_SIZE,
937 	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
938 	.cra_alignmask		= 0x7,
939 	.cra_type		= &crypto_ablkcipher_type,
940 	.cra_module		= THIS_MODULE,
941 	.cra_init		= atmel_tdes_cra_init,
942 	.cra_u.ablkcipher = {
943 		.min_keysize	= DES_KEY_SIZE,
944 		.max_keysize	= DES_KEY_SIZE,
945 		.ivsize		= DES_BLOCK_SIZE,
946 		.setkey		= atmel_des_setkey,
947 		.encrypt	= atmel_tdes_cfb_encrypt,
948 		.decrypt	= atmel_tdes_cfb_decrypt,
949 	}
950 },
951 {
952 	.cra_name		= "cfb8(des)",
953 	.cra_driver_name	= "atmel-cfb8-des",
954 	.cra_priority		= 100,
955 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
956 	.cra_blocksize		= CFB8_BLOCK_SIZE,
957 	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
958 	.cra_alignmask		= 0,
959 	.cra_type		= &crypto_ablkcipher_type,
960 	.cra_module		= THIS_MODULE,
961 	.cra_init		= atmel_tdes_cra_init,
962 	.cra_u.ablkcipher = {
963 		.min_keysize	= DES_KEY_SIZE,
964 		.max_keysize	= DES_KEY_SIZE,
965 		.ivsize		= DES_BLOCK_SIZE,
966 		.setkey		= atmel_des_setkey,
967 		.encrypt	= atmel_tdes_cfb8_encrypt,
968 		.decrypt	= atmel_tdes_cfb8_decrypt,
969 	}
970 },
971 {
972 	.cra_name		= "cfb16(des)",
973 	.cra_driver_name	= "atmel-cfb16-des",
974 	.cra_priority		= 100,
975 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
976 	.cra_blocksize		= CFB16_BLOCK_SIZE,
977 	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
978 	.cra_alignmask		= 0x1,
979 	.cra_type		= &crypto_ablkcipher_type,
980 	.cra_module		= THIS_MODULE,
981 	.cra_init		= atmel_tdes_cra_init,
982 	.cra_u.ablkcipher = {
983 		.min_keysize	= DES_KEY_SIZE,
984 		.max_keysize	= DES_KEY_SIZE,
985 		.ivsize		= DES_BLOCK_SIZE,
986 		.setkey		= atmel_des_setkey,
987 		.encrypt	= atmel_tdes_cfb16_encrypt,
988 		.decrypt	= atmel_tdes_cfb16_decrypt,
989 	}
990 },
991 {
992 	.cra_name		= "cfb32(des)",
993 	.cra_driver_name	= "atmel-cfb32-des",
994 	.cra_priority		= 100,
995 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
996 	.cra_blocksize		= CFB32_BLOCK_SIZE,
997 	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
998 	.cra_alignmask		= 0x3,
999 	.cra_type		= &crypto_ablkcipher_type,
1000 	.cra_module		= THIS_MODULE,
1001 	.cra_init		= atmel_tdes_cra_init,
1002 	.cra_u.ablkcipher = {
1003 		.min_keysize	= DES_KEY_SIZE,
1004 		.max_keysize	= DES_KEY_SIZE,
1005 		.ivsize		= DES_BLOCK_SIZE,
1006 		.setkey		= atmel_des_setkey,
1007 		.encrypt	= atmel_tdes_cfb32_encrypt,
1008 		.decrypt	= atmel_tdes_cfb32_decrypt,
1009 	}
1010 },
1011 {
1012 	.cra_name		= "ofb(des)",
1013 	.cra_driver_name	= "atmel-ofb-des",
1014 	.cra_priority		= 100,
1015 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1016 	.cra_blocksize		= DES_BLOCK_SIZE,
1017 	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
1018 	.cra_alignmask		= 0x7,
1019 	.cra_type		= &crypto_ablkcipher_type,
1020 	.cra_module		= THIS_MODULE,
1021 	.cra_init		= atmel_tdes_cra_init,
1022 	.cra_u.ablkcipher = {
1023 		.min_keysize	= DES_KEY_SIZE,
1024 		.max_keysize	= DES_KEY_SIZE,
1025 		.ivsize		= DES_BLOCK_SIZE,
1026 		.setkey		= atmel_des_setkey,
1027 		.encrypt	= atmel_tdes_ofb_encrypt,
1028 		.decrypt	= atmel_tdes_ofb_decrypt,
1029 	}
1030 },
1031 {
1032 	.cra_name		= "ecb(des3_ede)",
1033 	.cra_driver_name	= "atmel-ecb-tdes",
1034 	.cra_priority		= 100,
1035 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1036 	.cra_blocksize		= DES_BLOCK_SIZE,
1037 	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
1038 	.cra_alignmask		= 0x7,
1039 	.cra_type		= &crypto_ablkcipher_type,
1040 	.cra_module		= THIS_MODULE,
1041 	.cra_init		= atmel_tdes_cra_init,
1042 	.cra_u.ablkcipher = {
1043 		.min_keysize	= 3 * DES_KEY_SIZE,
1044 		.max_keysize	= 3 * DES_KEY_SIZE,
1045 		.setkey		= atmel_tdes_setkey,
1046 		.encrypt	= atmel_tdes_ecb_encrypt,
1047 		.decrypt	= atmel_tdes_ecb_decrypt,
1048 	}
1049 },
1050 {
1051 	.cra_name		= "cbc(des3_ede)",
1052 	.cra_driver_name	= "atmel-cbc-tdes",
1053 	.cra_priority		= 100,
1054 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1055 	.cra_blocksize		= DES_BLOCK_SIZE,
1056 	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
1057 	.cra_alignmask		= 0x7,
1058 	.cra_type		= &crypto_ablkcipher_type,
1059 	.cra_module		= THIS_MODULE,
1060 	.cra_init		= atmel_tdes_cra_init,
1061 	.cra_u.ablkcipher = {
1062 		.min_keysize	= 3*DES_KEY_SIZE,
1063 		.max_keysize	= 3*DES_KEY_SIZE,
1064 		.ivsize		= DES_BLOCK_SIZE,
1065 		.setkey		= atmel_tdes_setkey,
1066 		.encrypt	= atmel_tdes_cbc_encrypt,
1067 		.decrypt	= atmel_tdes_cbc_decrypt,
1068 	}
1069 },
1070 {
1071 	.cra_name		= "ofb(des3_ede)",
1072 	.cra_driver_name	= "atmel-ofb-tdes",
1073 	.cra_priority		= 100,
1074 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1075 	.cra_blocksize		= DES_BLOCK_SIZE,
1076 	.cra_ctxsize		= sizeof(struct atmel_tdes_ctx),
1077 	.cra_alignmask		= 0x7,
1078 	.cra_type		= &crypto_ablkcipher_type,
1079 	.cra_module		= THIS_MODULE,
1080 	.cra_init		= atmel_tdes_cra_init,
1081 	.cra_u.ablkcipher = {
1082 		.min_keysize	= 3*DES_KEY_SIZE,
1083 		.max_keysize	= 3*DES_KEY_SIZE,
1084 		.ivsize		= DES_BLOCK_SIZE,
1085 		.setkey		= atmel_tdes_setkey,
1086 		.encrypt	= atmel_tdes_ofb_encrypt,
1087 		.decrypt	= atmel_tdes_ofb_decrypt,
1088 	}
1089 },
1090 };
1091 
1092 static void atmel_tdes_queue_task(unsigned long data)
1093 {
1094 	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
1095 
1096 	atmel_tdes_handle_queue(dd, NULL);
1097 }
1098 
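/*
 * Tasklet run when a transfer completes: unmap or copy back the finished
 * chunk, then either start the next chunk if data remains or complete the
 * request and process the next one in the queue.
 */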
1099 static void atmel_tdes_done_task(unsigned long data)
1100 {
1101 	struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
1102 	int err;
1103 
1104 	if (!(dd->flags & TDES_FLAGS_DMA))
1105 		err = atmel_tdes_crypt_pdc_stop(dd);
1106 	else
1107 		err = atmel_tdes_crypt_dma_stop(dd);
1108 
1109 	err = dd->err ? : err;
1110 
1111 	if (dd->total && !err) {
1112 		if (dd->flags & TDES_FLAGS_FAST) {
1113 			dd->in_sg = sg_next(dd->in_sg);
1114 			dd->out_sg = sg_next(dd->out_sg);
1115 			if (!dd->in_sg || !dd->out_sg)
1116 				err = -EINVAL;
1117 		}
1118 		if (!err)
1119 			err = atmel_tdes_crypt_start(dd);
1120 		if (!err)
1121 			return; /* DMA started. Not finishing. */
1122 	}
1123 
1124 	atmel_tdes_finish_req(dd, err);
1125 	atmel_tdes_handle_queue(dd, NULL);
1126 }
1127 
1128 static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
1129 {
1130 	struct atmel_tdes_dev *tdes_dd = dev_id;
1131 	u32 reg;
1132 
1133 	reg = atmel_tdes_read(tdes_dd, TDES_ISR);
1134 	if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
1135 		atmel_tdes_write(tdes_dd, TDES_IDR, reg);
1136 		if (TDES_FLAGS_BUSY & tdes_dd->flags)
1137 			tasklet_schedule(&tdes_dd->done_task);
1138 		else
1139 			dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
1140 		return IRQ_HANDLED;
1141 	}
1142 
1143 	return IRQ_NONE;
1144 }
1145 
1146 static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
1147 {
1148 	int i;
1149 
1150 	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
1151 		crypto_unregister_alg(&tdes_algs[i]);
1152 }
1153 
1154 static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
1155 {
1156 	int err, i, j;
1157 
1158 	for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
1159 		err = crypto_register_alg(&tdes_algs[i]);
1160 		if (err)
1161 			goto err_tdes_algs;
1162 	}
1163 
1164 	return 0;
1165 
1166 err_tdes_algs:
1167 	for (j = 0; j < i; j++)
1168 		crypto_unregister_alg(&tdes_algs[j]);
1169 
1170 	return err;
1171 }
1172 
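/*
 * Derive the hardware capabilities from the IP major version: 0x7xx cores
 * have a DMA engine interface and support CFB with three keys; older cores
 * fall back to the PDC and minimum capabilities.
 */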
1173 static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
1174 {
1175 
1176 	dd->caps.has_dma = 0;
1177 	dd->caps.has_cfb_3keys = 0;
1178 
1179 	/* keep only major version number */
1180 	switch (dd->hw_version & 0xf00) {
1181 	case 0x700:
1182 		dd->caps.has_dma = 1;
1183 		dd->caps.has_cfb_3keys = 1;
1184 		break;
1185 	case 0x600:
1186 		break;
1187 	default:
1188 		dev_warn(dd->dev,
1189 				"Unsupported tdes version, using minimum capabilities\n");
1190 		break;
1191 	}
1192 }
1193 
1194 #if defined(CONFIG_OF)
1195 static const struct of_device_id atmel_tdes_dt_ids[] = {
1196 	{ .compatible = "atmel,at91sam9g46-tdes" },
1197 	{ /* sentinel */ }
1198 };
1199 MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
1200 
1201 static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
1202 {
1203 	struct device_node *np = pdev->dev.of_node;
1204 	struct crypto_platform_data *pdata;
1205 
1206 	if (!np) {
1207 		dev_err(&pdev->dev, "device node not found\n");
1208 		return ERR_PTR(-EINVAL);
1209 	}
1210 
1211 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1212 	if (!pdata)
1213 		return ERR_PTR(-ENOMEM);
1214 
1215 	pdata->dma_slave = devm_kzalloc(&pdev->dev,
1216 					sizeof(*(pdata->dma_slave)),
1217 					GFP_KERNEL);
1218 	if (!pdata->dma_slave)
1219 		return ERR_PTR(-ENOMEM);
1220 
1221 	return pdata;
1222 }
1223 #else /* CONFIG_OF */
1224 static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
1225 {
1226 	return ERR_PTR(-EINVAL);
1227 }
1228 #endif
1229 
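/*
 * Probe: map the registers, grab the IRQ and clock, read the hardware
 * version, set up bounce buffers and (when available) DMA channels, then
 * register the algorithms with the crypto API.
 */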
1230 static int atmel_tdes_probe(struct platform_device *pdev)
1231 {
1232 	struct atmel_tdes_dev *tdes_dd;
1233 	struct crypto_platform_data	*pdata;
1234 	struct device *dev = &pdev->dev;
1235 	struct resource *tdes_res;
1236 	int err;
1237 
1238 	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
1239 	if (tdes_dd == NULL) {
1240 		err = -ENOMEM;
1241 		goto tdes_dd_err;
1242 	}
1243 
1244 	tdes_dd->dev = dev;
1245 
1246 	platform_set_drvdata(pdev, tdes_dd);
1247 
1248 	INIT_LIST_HEAD(&tdes_dd->list);
1249 	spin_lock_init(&tdes_dd->lock);
1250 
1251 	tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
1252 					(unsigned long)tdes_dd);
1253 	tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
1254 					(unsigned long)tdes_dd);
1255 
1256 	crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
1257 
1258 	/* Get the base address */
1259 	tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1260 	if (!tdes_res) {
1261 		dev_err(dev, "no MEM resource info\n");
1262 		err = -ENODEV;
1263 		goto res_err;
1264 	}
1265 	tdes_dd->phys_base = tdes_res->start;
1266 
1267 	/* Get the IRQ */
1268 	tdes_dd->irq = platform_get_irq(pdev,  0);
1269 	if (tdes_dd->irq < 0) {
1270 		err = tdes_dd->irq;
1271 		goto res_err;
1272 	}
1273 
1274 	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
1275 			       IRQF_SHARED, "atmel-tdes", tdes_dd);
1276 	if (err) {
1277 		dev_err(dev, "unable to request tdes irq.\n");
1278 		goto res_err;
1279 	}
1280 
1281 	/* Initializing the clock */
1282 	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
1283 	if (IS_ERR(tdes_dd->iclk)) {
1284 		dev_err(dev, "clock initialization failed.\n");
1285 		err = PTR_ERR(tdes_dd->iclk);
1286 		goto res_err;
1287 	}
1288 
1289 	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
1290 	if (IS_ERR(tdes_dd->io_base)) {
1291 		dev_err(dev, "can't ioremap\n");
1292 		err = PTR_ERR(tdes_dd->io_base);
1293 		goto res_err;
1294 	}
1295 
1296 	atmel_tdes_hw_version_init(tdes_dd);
1297 
1298 	atmel_tdes_get_cap(tdes_dd);
1299 
1300 	err = atmel_tdes_buff_init(tdes_dd);
1301 	if (err)
1302 		goto err_tdes_buff;
1303 
1304 	if (tdes_dd->caps.has_dma) {
1305 		pdata = pdev->dev.platform_data;
1306 		if (!pdata) {
1307 			pdata = atmel_tdes_of_init(pdev);
1308 			if (IS_ERR(pdata)) {
1309 				dev_err(&pdev->dev, "platform data not available\n");
1310 				err = PTR_ERR(pdata);
1311 				goto err_pdata;
1312 			}
1313 		}
1314 		if (!pdata->dma_slave) {
1315 			err = -ENXIO;
1316 			goto err_pdata;
1317 		}
1318 		err = atmel_tdes_dma_init(tdes_dd, pdata);
1319 		if (err)
1320 			goto err_tdes_dma;
1321 
1322 		dev_info(dev, "using %s, %s for DMA transfers\n",
1323 				dma_chan_name(tdes_dd->dma_lch_in.chan),
1324 				dma_chan_name(tdes_dd->dma_lch_out.chan));
1325 	}
1326 
1327 	spin_lock(&atmel_tdes.lock);
1328 	list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
1329 	spin_unlock(&atmel_tdes.lock);
1330 
1331 	err = atmel_tdes_register_algs(tdes_dd);
1332 	if (err)
1333 		goto err_algs;
1334 
1335 	dev_info(dev, "Atmel DES/TDES\n");
1336 
1337 	return 0;
1338 
1339 err_algs:
1340 	spin_lock(&atmel_tdes.lock);
1341 	list_del(&tdes_dd->list);
1342 	spin_unlock(&atmel_tdes.lock);
1343 	if (tdes_dd->caps.has_dma)
1344 		atmel_tdes_dma_cleanup(tdes_dd);
1345 err_tdes_dma:
1346 err_pdata:
1347 	atmel_tdes_buff_cleanup(tdes_dd);
1348 err_tdes_buff:
1349 res_err:
1350 	tasklet_kill(&tdes_dd->done_task);
1351 	tasklet_kill(&tdes_dd->queue_task);
1352 tdes_dd_err:
1353 	dev_err(dev, "initialization failed.\n");
1354 
1355 	return err;
1356 }
1357 
1358 static int atmel_tdes_remove(struct platform_device *pdev)
1359 {
1360 	struct atmel_tdes_dev *tdes_dd;
1361 
1362 	tdes_dd = platform_get_drvdata(pdev);
1363 	if (!tdes_dd)
1364 		return -ENODEV;
1365 	spin_lock(&atmel_tdes.lock);
1366 	list_del(&tdes_dd->list);
1367 	spin_unlock(&atmel_tdes.lock);
1368 
1369 	atmel_tdes_unregister_algs(tdes_dd);
1370 
1371 	tasklet_kill(&tdes_dd->done_task);
1372 	tasklet_kill(&tdes_dd->queue_task);
1373 
1374 	if (tdes_dd->caps.has_dma)
1375 		atmel_tdes_dma_cleanup(tdes_dd);
1376 
1377 	atmel_tdes_buff_cleanup(tdes_dd);
1378 
1379 	return 0;
1380 }
1381 
1382 static struct platform_driver atmel_tdes_driver = {
1383 	.probe		= atmel_tdes_probe,
1384 	.remove		= atmel_tdes_remove,
1385 	.driver		= {
1386 		.name	= "atmel_tdes",
1387 		.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
1388 	},
1389 };
1390 
1391 module_platform_driver(atmel_tdes_driver);
1392 
1393 MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
1394 MODULE_LICENSE("GPL v2");
1395 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
1396