xref: /openbmc/linux/drivers/dma/mxs-dma.c (revision 8c920136)
1a580b8c5SShawn Guo /*
2a580b8c5SShawn Guo  * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
3a580b8c5SShawn Guo  *
4a580b8c5SShawn Guo  * Refer to drivers/dma/imx-sdma.c
5a580b8c5SShawn Guo  *
6a580b8c5SShawn Guo  * This program is free software; you can redistribute it and/or modify
7a580b8c5SShawn Guo  * it under the terms of the GNU General Public License version 2 as
8a580b8c5SShawn Guo  * published by the Free Software Foundation.
9a580b8c5SShawn Guo  */
10a580b8c5SShawn Guo 
11a580b8c5SShawn Guo #include <linux/init.h>
12a580b8c5SShawn Guo #include <linux/types.h>
13a580b8c5SShawn Guo #include <linux/mm.h>
14a580b8c5SShawn Guo #include <linux/interrupt.h>
15a580b8c5SShawn Guo #include <linux/clk.h>
16a580b8c5SShawn Guo #include <linux/wait.h>
17a580b8c5SShawn Guo #include <linux/sched.h>
18a580b8c5SShawn Guo #include <linux/semaphore.h>
19a580b8c5SShawn Guo #include <linux/device.h>
20a580b8c5SShawn Guo #include <linux/dma-mapping.h>
21a580b8c5SShawn Guo #include <linux/slab.h>
22a580b8c5SShawn Guo #include <linux/platform_device.h>
23a580b8c5SShawn Guo #include <linux/dmaengine.h>
24a580b8c5SShawn Guo #include <linux/delay.h>
2539468604SHuang Shijie #include <linux/fsl/mxs-dma.h>
26f5b7efccSDong Aisheng #include <linux/stmp_device.h>
27a580b8c5SShawn Guo 
28a580b8c5SShawn Guo #include <asm/irq.h>
29a580b8c5SShawn Guo #include <mach/mxs.h>
30a580b8c5SShawn Guo 
31d2ebfb33SRussell King - ARM Linux #include "dmaengine.h"
32d2ebfb33SRussell King - ARM Linux 
33a580b8c5SShawn Guo /*
34a580b8c5SShawn Guo  * NOTE: The term "PIO" throughout the mxs-dma implementation means
35a580b8c5SShawn Guo  * PIO mode of mxs apbh-dma and apbx-dma.  With this working mode,
36a580b8c5SShawn Guo  * dma can program the controller registers of peripheral devices.
37a580b8c5SShawn Guo  */
38a580b8c5SShawn Guo 
/* Predicates selecting the register layout: block type and silicon vintage. */
#define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
#define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)

/* Register offsets common to the apbh and apbx blocks. */
#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16
/*
 * The offset of NXTCMDAR register is different per both dma type and version,
 * while stride for each channel is all the same 0x70.
 */
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
58a580b8c5SShawn Guo 
59a580b8c5SShawn Guo /*
60a580b8c5SShawn Guo  * ccw bits definitions
61a580b8c5SShawn Guo  *
62a580b8c5SShawn Guo  * COMMAND:		0..1	(2)
63a580b8c5SShawn Guo  * CHAIN:		2	(1)
64a580b8c5SShawn Guo  * IRQ:			3	(1)
65a580b8c5SShawn Guo  * NAND_LOCK:		4	(1) - not implemented
66a580b8c5SShawn Guo  * NAND_WAIT4READY:	5	(1) - not implemented
67a580b8c5SShawn Guo  * DEC_SEM:		6	(1)
68a580b8c5SShawn Guo  * WAIT4END:		7	(1)
69a580b8c5SShawn Guo  * HALT_ON_TERMINATE:	8	(1)
70a580b8c5SShawn Guo  * TERMINATE_FLUSH:	9	(1)
71a580b8c5SShawn Guo  * RESERVED:		10..11	(2)
72a580b8c5SShawn Guo  * PIO_NUM:		12..15	(4)
73a580b8c5SShawn Guo  */
/* Bit positions/masks within mxs_dma_ccw.bits — layout documented above. */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

/* Place @value into @field of a ccw bits word, masked to the field width. */
#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)

/* Values for the COMMAND field of a ccw. */
#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */
91a580b8c5SShawn Guo 
/*
 * Hardware command/control word.  The DMA engine fetches chains of these
 * from coherent memory, so the field order and sizes match the hardware
 * layout exactly and must not be changed.
 */
struct mxs_dma_ccw {
	u32		next;		/* bus address of the next ccw in the chain */
	u16		bits;		/* CCW_* / BF_CCW() control bits */
	u16		xfer_bytes;	/* transfer length for this command */
#define MAX_XFER_BYTES	0xff00
	u32		bufaddr;	/* bus address of the data buffer */
#define MXS_PIO_WORDS	16
	u32		pio_words[MXS_PIO_WORDS];	/* values the engine writes to peripheral registers in PIO mode */
};

/* One PAGE_SIZE coherent allocation per channel holds this many ccws. */
#define NUM_CCW	(int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
103a580b8c5SShawn Guo 
/* Per-channel state wrapping the generic struct dma_chan. */
struct mxs_dma_chan {
	struct mxs_dma_engine		*mxs_dma;	/* owning engine */
	struct dma_chan			chan;		/* dmaengine channel */
	struct dma_async_tx_descriptor	desc;		/* single reusable descriptor */
	struct tasklet_struct		tasklet;	/* runs the client callback */
	int				chan_irq;
	struct mxs_dma_ccw		*ccw;		/* CPU view of the ccw page */
	dma_addr_t			ccw_phys;	/* bus address of the ccw page */
	int				desc_count;	/* ccws currently in the chain */
	enum dma_status			status;
	unsigned int			flags;
#define MXS_DMA_SG_LOOP			(1 << 0)	/* cyclic transfer active */
};

#define MXS_DMA_CHANNELS		16
#define MXS_DMA_CHANNELS_MASK		0xffff
120a580b8c5SShawn Guo 
/* DMA block type: apbh and apbx differ in register layout and features. */
enum mxs_dma_devtype {
	MXS_DMA_APBH,
	MXS_DMA_APBX,
};

/* Silicon generation; i.MX23 uses the "old" apbh register layout. */
enum mxs_dma_id {
	IMX23_DMA,
	IMX28_DMA,
};

/* One instance per apbh/apbx block, covering all of its channels. */
struct mxs_dma_engine {
	enum mxs_dma_id			dev_id;
	enum mxs_dma_devtype		type;
	void __iomem			*base;
	struct clk			*clk;
	struct dma_device		dma_device;
	struct device_dma_parameters	dma_parms;
	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
};

/* (id, type) pair carried in platform_device_id.driver_data. */
struct mxs_dma_type {
	enum mxs_dma_id id;
	enum mxs_dma_devtype type;
};
1458c920136SShawn Guo 
/* All supported (soc, block) combinations; indexed by mxs_dma_ids below. */
static struct mxs_dma_type mxs_dma_types[] = {
	{
		.id = IMX23_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX23_DMA,
		.type = MXS_DMA_APBX,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBX,
	}
};
1618c920136SShawn Guo 
/* Platform-bus match table; driver_data points into mxs_dma_types[]. */
static struct platform_device_id mxs_dma_ids[] = {
	{
		.name = "imx23-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
	}, {
		.name = "imx23-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
	}, {
		.name = "imx28-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
	}, {
		.name = "imx28-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
	}, {
		/* end of list */
	}
};
1798c920136SShawn Guo 
/* Convert a generic dma_chan back to the embedding mxs_dma_chan. */
static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}
1848c920136SShawn Guo 
1858c920136SShawn Guo int mxs_dma_is_apbh(struct dma_chan *chan)
1868c920136SShawn Guo {
1878c920136SShawn Guo 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
1888c920136SShawn Guo 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
1898c920136SShawn Guo 
1908c920136SShawn Guo 	return dma_is_apbh(mxs_dma);
1918c920136SShawn Guo }
1928c920136SShawn Guo 
1938c920136SShawn Guo int mxs_dma_is_apbx(struct dma_chan *chan)
1948c920136SShawn Guo {
1958c920136SShawn Guo 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
1968c920136SShawn Guo 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
1978c920136SShawn Guo 
1988c920136SShawn Guo 	return !dma_is_apbh(mxs_dma);
1998c920136SShawn Guo }
2008c920136SShawn Guo 
/*
 * Hard-reset one channel.  The reset bits live in CTRL0 on the old
 * (i.MX23) apbh block and in CHANNEL_CTRL everywhere else; writing via
 * the STMP SET alias register asserts just this channel's bit.
 */
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
}
213a580b8c5SShawn Guo 
/*
 * Start (or restart) one channel: point the hardware at the first ccw
 * and increment the channel semaphore so it begins fetching commands.
 */
static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

	/* write 1 to SEMA to kick off the channel */
	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
}
226a580b8c5SShawn Guo 
/*
 * Mark the channel idle.  No register write is needed here: the engine
 * stops by itself once the ccw chain's semaphore count is consumed.
 */
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
	mxs_chan->status = DMA_SUCCESS;
}
231a580b8c5SShawn Guo 
/*
 * Freeze one channel in place.  As with reset, the freeze bits sit in
 * CTRL0 on old apbh and in CHANNEL_CTRL otherwise; SET alias asserts.
 */
static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	mxs_chan->status = DMA_PAUSED;
}
247a580b8c5SShawn Guo 
/*
 * Undo mxs_dma_pause_chan(): clear the freeze bit via the STMP CLR
 * alias and mark the transfer as running again.
 */
static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

	mxs_chan->status = DMA_IN_PROGRESS;
}
263a580b8c5SShawn Guo 
/* ->tx_submit hook: just hand out the next cookie; hardware starts later. */
static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}
268a580b8c5SShawn Guo 
269a580b8c5SShawn Guo static void mxs_dma_tasklet(unsigned long data)
270a580b8c5SShawn Guo {
271a580b8c5SShawn Guo 	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
272a580b8c5SShawn Guo 
273a580b8c5SShawn Guo 	if (mxs_chan->desc.callback)
274a580b8c5SShawn Guo 		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
275a580b8c5SShawn Guo }
276a580b8c5SShawn Guo 
/*
 * Shared interrupt handler for all channels of one apbh/apbx block.
 * CTRL1 carries per-channel completion bits; CTRL2 carries termination
 * bits in its low 16 bits and bus-error bits in its high 16 bits.
 */
static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	u32 stat1, stat2;

	/* completion status */
	stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
	stat1 &= MXS_DMA_CHANNELS_MASK;
	writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);

	/* error status */
	stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
	writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);

	/*
	 * When both completion and error of termination bits set at the
	 * same time, we do not take it as an error.  IOW, it only becomes
	 * an error we need to handle here in case of either it's (1) a bus
	 * error or (2) a termination error with no completion.
	 */
	stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
		(~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */

	/* combine error and completion status for checking */
	stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
	while (stat1) {
		/* bits >= MXS_DMA_CHANNELS encode errors for channel-16..31 */
		int channel = fls(stat1) - 1;
		struct mxs_dma_chan *mxs_chan =
			&mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];

		if (channel >= MXS_DMA_CHANNELS) {
			dev_dbg(mxs_dma->dma_device.dev,
				"%s: error in channel %d\n", __func__,
				channel - MXS_DMA_CHANNELS);
			mxs_chan->status = DMA_ERROR;
			mxs_dma_reset_chan(mxs_chan);
		} else {
			/* cyclic transfers stay in progress across IRQs */
			if (mxs_chan->flags & MXS_DMA_SG_LOOP)
				mxs_chan->status = DMA_IN_PROGRESS;
			else
				mxs_chan->status = DMA_SUCCESS;
		}

		stat1 &= ~(1 << channel);

		if (mxs_chan->status == DMA_SUCCESS)
			dma_cookie_complete(&mxs_chan->desc);

		/* schedule tasklet on this channel */
		tasklet_schedule(&mxs_chan->tasklet);
	}

	return IRQ_HANDLED;
}
331a580b8c5SShawn Guo 
/*
 * dmaengine ->device_alloc_chan_resources hook.
 *
 * Requires chan->private to carry a struct mxs_dma_data with the
 * channel's interrupt number.  Allocates one page of coherent memory
 * for the ccw chain, requests the IRQ, enables the clock, resets the
 * channel, and initializes the single reusable descriptor.
 *
 * Returns 0 or a negative errno; unwinds via the goto chain on error.
 */
static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_data *data = chan->private;
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	if (!data)
		return -EINVAL;

	mxs_chan->chan_irq = data->chan_irq;

	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
				&mxs_chan->ccw_phys, GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	memset(mxs_chan->ccw, 0, PAGE_SIZE);

	if (mxs_chan->chan_irq != NO_IRQ) {
		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
					0, "mxs-dma", mxs_dma);
		if (ret)
			goto err_irq;
	}

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	mxs_dma_reset_chan(mxs_chan);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}
382a580b8c5SShawn Guo 
/*
 * dmaengine ->device_free_chan_resources hook: release everything
 * acquired in mxs_dma_alloc_chan_resources(), in reverse order.
 */
static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(mxs_chan);

	free_irq(mxs_chan->chan_irq, mxs_dma);

	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);

	clk_disable_unprepare(mxs_dma->clk);
}
397a580b8c5SShawn Guo 
398921de864SHuang Shijie /*
399921de864SHuang Shijie  * How to use the flags for ->device_prep_slave_sg() :
400921de864SHuang Shijie  *    [1] If there is only one DMA command in the DMA chain, the code should be:
401921de864SHuang Shijie  *            ......
402921de864SHuang Shijie  *            ->device_prep_slave_sg(DMA_CTRL_ACK);
403921de864SHuang Shijie  *            ......
404921de864SHuang Shijie  *    [2] If there are two DMA commands in the DMA chain, the code should be
405921de864SHuang Shijie  *            ......
406921de864SHuang Shijie  *            ->device_prep_slave_sg(0);
407921de864SHuang Shijie  *            ......
408921de864SHuang Shijie  *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
409921de864SHuang Shijie  *            ......
410921de864SHuang Shijie  *    [3] If there are more than two DMA commands in the DMA chain, the code
411921de864SHuang Shijie  *        should be:
412921de864SHuang Shijie  *            ......
413921de864SHuang Shijie  *            ->device_prep_slave_sg(0);                                // First
414921de864SHuang Shijie  *            ......
415921de864SHuang Shijie  *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
416921de864SHuang Shijie  *            ......
417921de864SHuang Shijie  *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
418921de864SHuang Shijie  *            ......
419921de864SHuang Shijie  */
/*
 * dmaengine ->device_prep_slave_sg hook.  See the flag-usage comment
 * above: DMA_PREP_INTERRUPT appends to the existing chain instead of
 * starting a new one, and DMA_CTRL_ACK marks the last command of the
 * chain (CCW_WAIT4END).  With direction == DMA_TRANS_NONE, @sgl is
 * reinterpreted as an array of @sg_len PIO words to write to the
 * peripheral's registers rather than a scatterlist.
 *
 * Returns the channel's reusable descriptor, or NULL on error (chain
 * overflow, oversized sg entry, or a non-append prep while busy).
 */
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	int i, j;
	u32 *pio;
	bool append = flags & DMA_PREP_INTERRUPT;
	int idx = append ? mxs_chan->desc_count : 0;

	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
		return NULL;

	if (sg_len + (append ? idx : 0) > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the sg is prepared with append flag set, the sg
	 * will be appended to the last prepared sg.
	 */
	if (append) {
		BUG_ON(idx < 1);
		/* re-open the previous tail: chain it onward, drop IRQ/DEC_SEM */
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
	} else {
		idx = 0;
	}

	if (direction == DMA_TRANS_NONE) {
		/* PIO mode: sgl holds register values, not a scatterlist */
		ccw = &mxs_chan->ccw[idx++];
		pio = (u32 *) sgl;

		for (j = 0; j < sg_len;)
			ccw->pio_words[j++] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		if (flags & DMA_CTRL_ACK)
			ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg->length > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
						sg->length, MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg->length;

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			/* last entry terminates the chain and raises the IRQ */
			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				if (flags & DMA_CTRL_ACK)
					ccw->bits |= CCW_WAIT4END;
			}
		}
	}
	mxs_chan->desc_count = idx;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}
517a580b8c5SShawn Guo 
518a580b8c5SShawn Guo static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
519a580b8c5SShawn Guo 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
520185ecb5fSAlexandre Bounine 		size_t period_len, enum dma_transfer_direction direction,
521185ecb5fSAlexandre Bounine 		void *context)
522a580b8c5SShawn Guo {
523a580b8c5SShawn Guo 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
524a580b8c5SShawn Guo 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
525a580b8c5SShawn Guo 	int num_periods = buf_len / period_len;
526a580b8c5SShawn Guo 	int i = 0, buf = 0;
527a580b8c5SShawn Guo 
528a580b8c5SShawn Guo 	if (mxs_chan->status == DMA_IN_PROGRESS)
529a580b8c5SShawn Guo 		return NULL;
530a580b8c5SShawn Guo 
531a580b8c5SShawn Guo 	mxs_chan->status = DMA_IN_PROGRESS;
532a580b8c5SShawn Guo 	mxs_chan->flags |= MXS_DMA_SG_LOOP;
533a580b8c5SShawn Guo 
534a580b8c5SShawn Guo 	if (num_periods > NUM_CCW) {
535a580b8c5SShawn Guo 		dev_err(mxs_dma->dma_device.dev,
536a580b8c5SShawn Guo 				"maximum number of sg exceeded: %d > %d\n",
537a580b8c5SShawn Guo 				num_periods, NUM_CCW);
538a580b8c5SShawn Guo 		goto err_out;
539a580b8c5SShawn Guo 	}
540a580b8c5SShawn Guo 
541a580b8c5SShawn Guo 	if (period_len > MAX_XFER_BYTES) {
542a580b8c5SShawn Guo 		dev_err(mxs_dma->dma_device.dev,
543a580b8c5SShawn Guo 				"maximum period size exceeded: %d > %d\n",
544a580b8c5SShawn Guo 				period_len, MAX_XFER_BYTES);
545a580b8c5SShawn Guo 		goto err_out;
546a580b8c5SShawn Guo 	}
547a580b8c5SShawn Guo 
548a580b8c5SShawn Guo 	while (buf < buf_len) {
549a580b8c5SShawn Guo 		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];
550a580b8c5SShawn Guo 
551a580b8c5SShawn Guo 		if (i + 1 == num_periods)
552a580b8c5SShawn Guo 			ccw->next = mxs_chan->ccw_phys;
553a580b8c5SShawn Guo 		else
554a580b8c5SShawn Guo 			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);
555a580b8c5SShawn Guo 
556a580b8c5SShawn Guo 		ccw->bufaddr = dma_addr;
557a580b8c5SShawn Guo 		ccw->xfer_bytes = period_len;
558a580b8c5SShawn Guo 
559a580b8c5SShawn Guo 		ccw->bits = 0;
560a580b8c5SShawn Guo 		ccw->bits |= CCW_CHAIN;
561a580b8c5SShawn Guo 		ccw->bits |= CCW_IRQ;
562a580b8c5SShawn Guo 		ccw->bits |= CCW_HALT_ON_TERM;
563a580b8c5SShawn Guo 		ccw->bits |= CCW_TERM_FLUSH;
564db8196dfSVinod Koul 		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
565a580b8c5SShawn Guo 				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
566a580b8c5SShawn Guo 
567a580b8c5SShawn Guo 		dma_addr += period_len;
568a580b8c5SShawn Guo 		buf += period_len;
569a580b8c5SShawn Guo 
570a580b8c5SShawn Guo 		i++;
571a580b8c5SShawn Guo 	}
5726d23ea4bSLothar Waßmann 	mxs_chan->desc_count = i;
573a580b8c5SShawn Guo 
574a580b8c5SShawn Guo 	return &mxs_chan->desc;
575a580b8c5SShawn Guo 
576a580b8c5SShawn Guo err_out:
577a580b8c5SShawn Guo 	mxs_chan->status = DMA_ERROR;
578a580b8c5SShawn Guo 	return NULL;
579a580b8c5SShawn Guo }
580a580b8c5SShawn Guo 
/*
 * dmaengine ->device_control hook: dispatch terminate/pause/resume
 * requests to the per-channel helpers.  @arg is unused by this driver.
 * Returns 0 on success or -ENOSYS for unsupported commands.
 */
static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	int ret = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		mxs_dma_reset_chan(mxs_chan);
		mxs_dma_disable_chan(mxs_chan);
		break;
	case DMA_PAUSE:
		mxs_dma_pause_chan(mxs_chan);
		break;
	case DMA_RESUME:
		mxs_dma_resume_chan(mxs_chan);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
604a580b8c5SShawn Guo 
605a580b8c5SShawn Guo static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
606a580b8c5SShawn Guo 			dma_cookie_t cookie, struct dma_tx_state *txstate)
607a580b8c5SShawn Guo {
608a580b8c5SShawn Guo 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
609a580b8c5SShawn Guo 	dma_cookie_t last_used;
610a580b8c5SShawn Guo 
611a580b8c5SShawn Guo 	last_used = chan->cookie;
6124d4e58deSRussell King - ARM Linux 	dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
613a580b8c5SShawn Guo 
614a580b8c5SShawn Guo 	return mxs_chan->status;
615a580b8c5SShawn Guo }
616a580b8c5SShawn Guo 
/*
 * dmaengine ->device_issue_pending hook: actually start the hardware
 * on the chain prepared earlier (program NXTCMDAR, bump the semaphore).
 */
static void mxs_dma_issue_pending(struct dma_chan *chan)
{
	mxs_dma_enable_chan(to_mxs_dma_chan(chan));
}
623a580b8c5SShawn Guo 
/*
 * One-time hardware bring-up for the whole block: soft-reset it,
 * enable apbh bursts where applicable, and unmask all channel IRQs.
 *
 * Note: the success path deliberately falls through to err_out — the
 * clock is only needed while touching the registers here; each channel
 * re-enables it for the duration of its use in alloc_chan_resources().
 */
static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		return ret;

	ret = stmp_reset_block(mxs_dma->base);
	if (ret)
		goto err_out;

	/* enable apbh burst */
	if (dma_is_apbh(mxs_dma)) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	}

	/* enable irq for all the channels */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

err_out:
	clk_disable_unprepare(mxs_dma->clk);
	return ret;
}
652a580b8c5SShawn Guo 
653a580b8c5SShawn Guo static int __init mxs_dma_probe(struct platform_device *pdev)
654a580b8c5SShawn Guo {
655a580b8c5SShawn Guo 	const struct platform_device_id *id_entry =
656a580b8c5SShawn Guo 				platform_get_device_id(pdev);
6578c920136SShawn Guo 	const struct mxs_dma_type *dma_type =
6588c920136SShawn Guo 			(struct mxs_dma_type *)id_entry->driver_data;
659a580b8c5SShawn Guo 	struct mxs_dma_engine *mxs_dma;
660a580b8c5SShawn Guo 	struct resource *iores;
661a580b8c5SShawn Guo 	int ret, i;
662a580b8c5SShawn Guo 
663a580b8c5SShawn Guo 	mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL);
664a580b8c5SShawn Guo 	if (!mxs_dma)
665a580b8c5SShawn Guo 		return -ENOMEM;
666a580b8c5SShawn Guo 
6678c920136SShawn Guo 	mxs_dma->dev_id = dma_type->id;
6688c920136SShawn Guo 	mxs_dma->type = dma_type->type;
669a580b8c5SShawn Guo 
670a580b8c5SShawn Guo 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
671a580b8c5SShawn Guo 
672a580b8c5SShawn Guo 	if (!request_mem_region(iores->start, resource_size(iores),
673a580b8c5SShawn Guo 				pdev->name)) {
674a580b8c5SShawn Guo 		ret = -EBUSY;
675a580b8c5SShawn Guo 		goto err_request_region;
676a580b8c5SShawn Guo 	}
677a580b8c5SShawn Guo 
678a580b8c5SShawn Guo 	mxs_dma->base = ioremap(iores->start, resource_size(iores));
679a580b8c5SShawn Guo 	if (!mxs_dma->base) {
680a580b8c5SShawn Guo 		ret = -ENOMEM;
681a580b8c5SShawn Guo 		goto err_ioremap;
682a580b8c5SShawn Guo 	}
683a580b8c5SShawn Guo 
684a580b8c5SShawn Guo 	mxs_dma->clk = clk_get(&pdev->dev, NULL);
685a580b8c5SShawn Guo 	if (IS_ERR(mxs_dma->clk)) {
686a580b8c5SShawn Guo 		ret = PTR_ERR(mxs_dma->clk);
687a580b8c5SShawn Guo 		goto err_clk;
688a580b8c5SShawn Guo 	}
689a580b8c5SShawn Guo 
690a580b8c5SShawn Guo 	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
691a580b8c5SShawn Guo 	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
692a580b8c5SShawn Guo 
693a580b8c5SShawn Guo 	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);
694a580b8c5SShawn Guo 
695a580b8c5SShawn Guo 	/* Initialize channel parameters */
696a580b8c5SShawn Guo 	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
697a580b8c5SShawn Guo 		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
698a580b8c5SShawn Guo 
699a580b8c5SShawn Guo 		mxs_chan->mxs_dma = mxs_dma;
700a580b8c5SShawn Guo 		mxs_chan->chan.device = &mxs_dma->dma_device;
7018ac69546SRussell King - ARM Linux 		dma_cookie_init(&mxs_chan->chan);
702a580b8c5SShawn Guo 
703a580b8c5SShawn Guo 		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
704a580b8c5SShawn Guo 			     (unsigned long) mxs_chan);
705a580b8c5SShawn Guo 
706a580b8c5SShawn Guo 
707a580b8c5SShawn Guo 		/* Add the channel to mxs_chan list */
708a580b8c5SShawn Guo 		list_add_tail(&mxs_chan->chan.device_node,
709a580b8c5SShawn Guo 			&mxs_dma->dma_device.channels);
710a580b8c5SShawn Guo 	}
711a580b8c5SShawn Guo 
712a580b8c5SShawn Guo 	ret = mxs_dma_init(mxs_dma);
713a580b8c5SShawn Guo 	if (ret)
714a580b8c5SShawn Guo 		goto err_init;
715a580b8c5SShawn Guo 
716a580b8c5SShawn Guo 	mxs_dma->dma_device.dev = &pdev->dev;
717a580b8c5SShawn Guo 
718a580b8c5SShawn Guo 	/* mxs_dma gets 65535 bytes maximum sg size */
719a580b8c5SShawn Guo 	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
720a580b8c5SShawn Guo 	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
721a580b8c5SShawn Guo 
722a580b8c5SShawn Guo 	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
723a580b8c5SShawn Guo 	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
724a580b8c5SShawn Guo 	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
725a580b8c5SShawn Guo 	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
726a580b8c5SShawn Guo 	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
727a580b8c5SShawn Guo 	mxs_dma->dma_device.device_control = mxs_dma_control;
728a580b8c5SShawn Guo 	mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;
729a580b8c5SShawn Guo 
730a580b8c5SShawn Guo 	ret = dma_async_device_register(&mxs_dma->dma_device);
731a580b8c5SShawn Guo 	if (ret) {
732a580b8c5SShawn Guo 		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
733a580b8c5SShawn Guo 		goto err_init;
734a580b8c5SShawn Guo 	}
735a580b8c5SShawn Guo 
736a580b8c5SShawn Guo 	dev_info(mxs_dma->dma_device.dev, "initialized\n");
737a580b8c5SShawn Guo 
738a580b8c5SShawn Guo 	return 0;
739a580b8c5SShawn Guo 
740a580b8c5SShawn Guo err_init:
741a580b8c5SShawn Guo 	clk_put(mxs_dma->clk);
742a580b8c5SShawn Guo err_clk:
743a580b8c5SShawn Guo 	iounmap(mxs_dma->base);
744a580b8c5SShawn Guo err_ioremap:
745a580b8c5SShawn Guo 	release_mem_region(iores->start, resource_size(iores));
746a580b8c5SShawn Guo err_request_region:
747a580b8c5SShawn Guo 	kfree(mxs_dma);
748a580b8c5SShawn Guo 	return ret;
749a580b8c5SShawn Guo }
750a580b8c5SShawn Guo 
751a580b8c5SShawn Guo static struct platform_driver mxs_dma_driver = {
752a580b8c5SShawn Guo 	.driver		= {
753a580b8c5SShawn Guo 		.name	= "mxs-dma",
754a580b8c5SShawn Guo 	},
7558c920136SShawn Guo 	.id_table	= mxs_dma_ids,
756a580b8c5SShawn Guo };
757a580b8c5SShawn Guo 
758a580b8c5SShawn Guo static int __init mxs_dma_module_init(void)
759a580b8c5SShawn Guo {
760a580b8c5SShawn Guo 	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
761a580b8c5SShawn Guo }
762a580b8c5SShawn Guo subsys_initcall(mxs_dma_module_init);
763