/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"

#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS  16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST     (1 << 0)
#define IMX_DMA_ERR_REQUEST   (1 << 1)
#define IMX_DMA_ERR_TRANSFER  (1 << 2)
#define IMX_DMA_ERR_BUFFER    (1 << 3)
#define IMX_DMA_ERR_TIMEOUT   (1 << 4)

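/*
 * Register map: the global control/status registers sit at the bottom of
 * the map, followed by the two shared 2D size register sets (A and B).
 * Each of the 16 channels then has its own register window at a 64-byte
 * stride, hence the (x) << 6 in the per-channel macros below.
 */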
#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt Status Register */
#define DMA_DIMR    0x08		/* Interrupt Mask Register */
#define DMA_DBTOSR  0x0c		/* Burst Timeout Status Register */
#define DMA_DRTOSR  0x10		/* Request Timeout Status Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer Overflow Status Register */
#define DMA_DBTOCR  0x1c		/* Burst Timeout Control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request Source Select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst Length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request Timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel Counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
	unsigned int			irq;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};

struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			request;
};

static const struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma",
		.data = &imx_dma_devtype[IMX1_DMA],
	}, {
		.compatible = "fsl,imx21-dma",
		.data = &imx_dma_devtype[IMX21_DMA],
	}, {
		.compatible = "fsl,imx27-dma",
		.data = &imx_dma_devtype[IMX27_DMA],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

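/*
 * Hardware chaining (auto-repeat via CCR_RPT/CCR_ACRPT) exists only on
 * i.MX27, so this helper gates all chained paths on the devtype. Note
 * that nothing in this driver ever sets hw_chaining non-zero (see
 * imxdma_config()), so the chained paths are currently dormant.
 */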
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev,
		"%s channel: %d dst 0x%08x, src 0x%08x, size 0x%08x\n",
		__func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}

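/*
 * Kick a channel: ack and unmask its interrupt, then set CCR_CEN (plus
 * CCR_ACRPT). If hardware chaining is usable and further scatterlist
 * entries remain, the next chunk is pre-programmed and CCR_RPT is set so
 * the controller continues without software intervention.
 */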
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

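/*
 * Quiesce a channel: mask its interrupt, clear CCR_CEN and ack any
 * pending status bit. The watchdog only runs for hardware-chained
 * transfers, so it is stopped first in that case.
 */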
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

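/*
 * Watchdog for hardware-chained transfers: if no completion interrupt
 * arrives in time, the channel is stopped and cleanup is deferred to the
 * tasklet.
 */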
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

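/*
 * Error interrupt handler. Four status registers report per-channel
 * error conditions (burst timeout, request timeout, transfer error,
 * buffer overflow); each pending bit is acked by writing it back, and
 * the affected channel's tasklet is scheduled for cleanup.
 */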
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

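/*
 * Per-channel completion interrupt. For scatter-gather emulation the
 * next entry is programmed here (re-arming the watchdog in the chained
 * case); once the list is exhausted, the channel is stopped and the
 * tasklet finishes the descriptor.
 */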
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

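/*
 * Program one descriptor into the hardware. Interleaved (2D) transfers
 * need one of only two W/X/Y size register sets (slot A or B) that are
 * shared by all channels; a busy slot can be reused concurrently only if
 * its geometry already matches, otherwise -EBUSY tells the caller to
 * retry later.
 */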
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

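/*
 * Deferred completion. Cyclic descriptors stay on ld_active and never
 * have their cookie completed; they only get their callback invoked once
 * per period. Anything else completes its cookie, releases its 2D slot
 * if needed, moves back to ld_free, and the next queued descriptor is
 * started.
 */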
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in non-cyclic cases is it marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}

static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}

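/*
 * Cache the slave configuration and precompute both CCR values. The
 * IMX_DMA_MEMSIZE_*/IMX_DMA_TYPE_* encodings are in source-field
 * position; shifting them left by 2 moves them into the matching
 * destination fields. The device end is a FIFO of the configured word
 * size, the memory end is always linear 32-bit, and the burst length is
 * maxburst words (watermark_level * word_size bytes).
 */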
static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

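/*
 * tx_submit only moves the descriptor (assumed to be the first entry of
 * ld_free, as handed out by the prep functions) onto ld_queue and
 * assigns its cookie; nothing touches the hardware until issue_pending.
 */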
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

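/*
 * Cyclic (e.g. audio) support is built on the slave_sg machinery: the
 * buffer is split into one scatterlist entry per period, with an extra
 * entry used to chain the list back onto itself, while
 * desc->len = IMX_DMA_LENGTH_LOOP keeps imxdma_sg_next() from ever
 * decrementing the remaining length. A 64 KiB buffer with a 16 KiB
 * period, for instance, yields four entries and one completion interrupt
 * per period.
 */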
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

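/*
 * Interleaved support is limited to what the 2D hardware can express: a
 * single frame (frame_size == 1) repeated numf times, memory to memory.
 * In 2D terms X = sgl[0].size is the line length, Y = numf the number of
 * lines, and W = X + sgl[0].icg the stride; e.g. copying 16 lines of 320
 * bytes out of a 512-byte-wide buffer uses X = 320, Y = 16, W = 512.
 */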
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long)xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

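/*
 * Device tree binding: #dma-cells = <1>, the single cell being the DMA
 * request line, e.g. (illustrative only):
 *
 *	dmas = <&dma 8>;
 *	dma-names = "rx";
 *
 * imxdma_xlate() hands the request number to imxdma_filter_fn(), which
 * binds it to a free channel of this controller. A client then uses the
 * channel through the usual dmaengine calls (dma_request_slave_channel(),
 * dmaengine_slave_config(), dmaengine_prep_slave_sg(),
 * dma_async_issue_pending()).
 */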
static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
					imxdma_filter_fn, &fdata);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret, i;
	int irq, irq_err;

	of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev,
					 "Can't register IRQ %d for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
				imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}

static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}

static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.id_table	= imx_dma_devtype,
	.remove		= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");