/* xref: /openbmc/linux/drivers/dma/fsldma.c (revision 897500c7) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and others.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/fsldma.h>
#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	FSL_DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_mr(struct fsldma_chan *chan, u32 val)
{
	FSL_DMA_OUT(chan, &chan->regs->mr, val, 32);
}

static u32 get_mr(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->mr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_bcr(struct fsldma_chan *chan, u32 val)
{
	FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return FSL_DMA_IN(chan, &chan->regs->bcr, 32);
}

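/*
 * Note: the FSL_DMA_OUT/FSL_DMA_IN accessors (defined in fsldma.h) select
 * big- or little-endian register access based on the channel's feature
 * flags, so the helpers above work on both 85xx and 83xx controllers.
 */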
/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

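/*
 * set_ld_eol - mark a descriptor as the last link in its chain
 *
 * Sets the end-of-links (EOL) bit in the descriptor's next-link address,
 * preserving the 83xx snoop-enable bit which shares that word.
 */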
static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	set_mr(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
			| FSL_DMA_MR_EOLNIE);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
		break;
	}
}

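/*
 * dma_is_idle - report whether the channel has stopped processing
 *
 * The channel is idle when the channel-busy (CB) status bit is clear, or
 * when the channel has been halted (CH).
 */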
static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = get_mr(chan);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		set_bcr(chan, 0);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	set_mr(chan, mode);
}

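/*
 * dma_halt - stop the DMA controller and wait for it to go idle
 *
 * On 85xx this also aborts the in-flight transfer; the wait below allows
 * up to roughly 1ms (100 polls x 10us) before reporting a timeout.
 */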
static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = get_mr(chan);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		set_mr(chan, mode);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	set_mr(chan, mode);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While transferring
 * data from the source address (SA), the DMA wraps within a window of
 * this size: with a loop size of 4, it reads from SA, SA + 1, SA + 2,
 * SA + 3, then loops back to SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode &= ~FSL_DMA_MR_SAHTS_MASK;
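		/* SAHTS (MR bits 14-15) holds log2 of the source hold size */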
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	set_mr(chan, mode);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While
 * transferring data to the destination address (TA), the DMA wraps
 * within a window of this size: with a loop size of 4, it writes to TA,
 * TA + 1, TA + 2, TA + 3, then loops back to TA, TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode &= ~FSL_DMA_MR_DAHTS_MASK;
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	set_mr(chan, mode);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = get_mr(chan);
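	/* encode log2 of the request size into the BWC field at MR bit 24 */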
	mode &= ~FSL_DMA_MR_BWC_MASK;
	mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;

	set_mr(chan, mode);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately; the channel waits until the control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

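/**
 * fsl_dma_external_start - enable or disable external start on a channel
 * @dchan : generic DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * Exported wrapper around fsl_chan_toggle_ext_start() for users of the
 * generic dma_chan API; the declaration lives in <linux/fsldma.h>.
 */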
int fsl_dma_external_start(struct dma_chan *dchan, int enable)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	fsl_chan_toggle_ext_start(chan, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_dma_external_start);

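/*
 * append_ld_queue - add a descriptor chain to the tail of ld_pending
 *
 * LOCKING: must hold chan->desc_lock
 */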
static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

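/*
 * fsl_dma_tx_submit - assign cookies to a transaction's descriptors and
 * queue them on ld_pending; the transfer does not start until
 * fsl_dma_memcpy_issue_pending() is called.
 */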
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	dma_cookie_t cookie = -EINVAL;

	spin_lock_bh(&chan->desc_lock);

#ifdef CONFIG_PM
	if (unlikely(chan->pm_state != RUNNING)) {
		chan_dbg(chan, "cannot submit due to suspend\n");
		spin_unlock_bh(&chan->desc_lock);
		return -1;
	}
#endif

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_bh(&chan->desc_lock);

	return cookie;
}

/**
 * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 * @desc: descriptor to be freed
 */
static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor; NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}

/**
 * fsldma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan: Freescale DMA channel
 *
 * This function is used on all completed and acked descriptors.
 * All descriptors should only be freed in this function.
 */
static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;

	/* Free each descriptor which has been acked, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
		if (async_tx_test_ack(&desc->async_tx))
			fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsldma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 * @cookie: Freescale DMA transaction identifier
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies.
 */
static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	dma_cookie_t ret = cookie;

	BUG_ON(txd->cookie < 0);

	if (txd->cookie > 0) {
		ret = txd->cookie;

		dma_descriptor_unmap(txd);
		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback_invoke(txd, NULL);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	return ret;
}

/**
 * fsldma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: Freescale DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if acked by async_tx api, or move it to
 * queue ld_completed.
 */
static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	/* Remove from the list of transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/*
		 * Move this descriptor to the list of descriptors which have
		 * completed, but are still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsldma_cleanup_descriptors - clean up link descriptors which have
 * completed, moving them to ld_completed until the 'ack' flag is set
 * and they can be freed
 * @chan: Freescale DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors once the 'ack' flag is set.
 */
static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	dma_addr_t curr_phys = get_cdar(chan);
	int seen_current = 0;

	fsldma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		/*
		 * do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/*
		 * stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (desc->async_tx.phys == curr_phys) {
			seen_current = 1;
			if (!dma_is_idle(chan))
				break;
		}

		cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);

		fsldma_clean_running_descriptor(chan, desc);
	}

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);

	if (cookie > 0)
		chan->common.completed_cookie = cookie;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * The descriptor must be 32-byte aligned to meet the FSL DMA
	 * specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	spin_unlock_bh(&chan->desc_lock);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

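/*
 * fsl_dma_prep_memcpy - prepare a memcpy transaction, splitting the copy
 * into link descriptors of at most FSL_DMA_BCR_MAX_CNT bytes each
 */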
static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

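/*
 * fsl_dma_device_terminate_all - halt the channel and free all queued
 * descriptors (pending, running, and completed)
 */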
static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);

	/* Halt the DMA engine */
	dma_halt(chan);

	/* Remove and free all of the descriptors in the LD queue */
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	chan->idle = true;

	spin_unlock_bh(&chan->desc_lock);
	return 0;
}

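/*
 * fsl_dma_device_config - program the external-control burst size from a
 * dma_slave_config; the size is width * maxburst bytes for the configured
 * direction
 */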
static int fsl_dma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct fsldma_chan *chan;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	/* make sure the channel supports setting burst size */
	if (!chan->set_request_count)
		return -ENXIO;

	/* we set the controller burst size depending on direction */
	if (config->direction == DMA_MEM_TO_DEV)
		size = config->dst_addr_width * config->dst_maxburst;
	else
		size = config->src_addr_width * config->src_maxburst;

	chan->set_request_count(chan, size);
	return 0;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_bh(&chan->desc_lock);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	spin_unlock_bh(&chan->desc_lock);

	return dma_cookie_status(dchan, cookie, txstate);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

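/*
 * fsldma_chan_irq - per-channel interrupt handler
 *
 * Acknowledges the channel status bits and schedules the tasklet which
 * performs the actual descriptor cleanup.
 */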
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * On the MPC8349, the EOCDI event requires updating the cookie
	 * and starting the next transfer, if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end of the transfer list, the
	 * Channel Start bit must be cleared to prepare for the next
	 * transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

static void dma_do_tasklet(struct tasklet_struct *t)
{
	struct fsldma_chan *chan = from_tasklet(chan, t, tasklet);

	chan_dbg(chan, "tasklet entry\n");

	spin_lock(&chan->desc_lock);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Run all cleanup for descriptors which have been completed */
	fsldma_cleanup_descriptors(chan);

	spin_unlock(&chan->desc_lock);

	chan_dbg(chan, "tasklet exit\n");
}

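/*
 * fsldma_ctrl_irq - controller-level interrupt handler
 *
 * Used when the controller has a single shared IRQ: reads the global
 * status register and dispatches to the per-channel handler for every
 * channel with a pending event.
 */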
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (!chan->irq) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (!chan->irq)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

fsl_dma_chan_probe(struct fsldma_device * fdev,struct device_node * node,u32 feature,const char * compatible)1106463a1f8bSBill Pemberton static int fsl_dma_chan_probe(struct fsldma_device *fdev,
110777cd62e8STimur Tabi 	struct device_node *node, u32 feature, const char *compatible)
1108173acc7cSZhang Wei {
1109a1c03319SIra Snyder 	struct fsldma_chan *chan;
11104ce0e953SIra Snyder 	struct resource res;
1111173acc7cSZhang Wei 	int err;
1112173acc7cSZhang Wei 
1113173acc7cSZhang Wei 	/* alloc channel */
1114a1c03319SIra Snyder 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1115a1c03319SIra Snyder 	if (!chan) {
1116e7a29151SIra Snyder 		err = -ENOMEM;
1117e7a29151SIra Snyder 		goto out_return;
1118173acc7cSZhang Wei 	}
1119173acc7cSZhang Wei 
1120e7a29151SIra Snyder 	/* ioremap registers for use */
1121a1c03319SIra Snyder 	chan->regs = of_iomap(node, 0);
1122a1c03319SIra Snyder 	if (!chan->regs) {
1123e7a29151SIra Snyder 		dev_err(fdev->dev, "unable to ioremap registers\n");
1124e7a29151SIra Snyder 		err = -ENOMEM;
1125a1c03319SIra Snyder 		goto out_free_chan;
1126e7a29151SIra Snyder 	}
1127e7a29151SIra Snyder 
11284ce0e953SIra Snyder 	err = of_address_to_resource(node, 0, &res);
1129173acc7cSZhang Wei 	if (err) {
1130e7a29151SIra Snyder 		dev_err(fdev->dev, "unable to find 'reg' property\n");
1131e7a29151SIra Snyder 		goto out_iounmap_regs;
1132173acc7cSZhang Wei 	}
1133173acc7cSZhang Wei 
1134a1c03319SIra Snyder 	chan->feature = feature;
1135173acc7cSZhang Wei 	if (!fdev->feature)
1136a1c03319SIra Snyder 		fdev->feature = chan->feature;
1137173acc7cSZhang Wei 
1138e7a29151SIra Snyder 	/*
1139e7a29151SIra Snyder 	 * If the DMA device's feature is different than the feature
1140e7a29151SIra Snyder 	 * of its channels, report the bug
1141173acc7cSZhang Wei 	 */
1142a1c03319SIra Snyder 	WARN_ON(fdev->feature != chan->feature);
1143173acc7cSZhang Wei 
1144a1c03319SIra Snyder 	chan->dev = fdev->dev;
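	/*
	 * Channel register blocks are 0x80 bytes apart, so the offset of a
	 * block within the controller, divided by 0x80 (>> 7), yields the
	 * channel index. On Elo3 the eight channels sit in two groups
	 * (offsets 0x100-0x280 and 0x400-0x580 in the elo3-dma device
	 * trees), which the 0x300 test below maps onto ids 0..7, e.g.
	 * 0x180 -> (0x180 - 0x100) >> 7 = 1 and
	 * 0x480 -> (0x480 - 0x200) >> 7 = 5.
	 */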
	chan->id = (res.start & 0xfff) < 0x300 ?
		   ((res.start - 0x100) & 0xfff) >> 7 :
		   ((res.start - 0x200) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_setup(&chan->tasklet, dma_do_tasklet);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

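	/*
	 * Hook up the feature callbacks. The 85xx (Elo-plus/Elo3) block
	 * supports external pause on top of everything the 83xx (Elo)
	 * block provides, hence the fallthrough below.
	 */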
	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		fallthrough;
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	chan->idle = true;
#ifdef CONFIG_PM
	chan->pm_state = RUNNING;
#endif

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to the DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	unsigned int i;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free;
	}

	/* map the channel IRQ if it exists, but don't hook up the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_config = fsl_dma_device_config;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &op->dev;

	fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

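	/*
	 * These parts can generate 36-bit physical addresses, so advertise
	 * a DMA mask wider than the usual 32 bits.
	 */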
	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
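	/*
	 * Illustrative shape of the nodes this loop walks (addresses and
	 * property values are made up; see the fsl,elo* bindings):
	 *
	 *	dma@21300 {
	 *		compatible = "fsl,eloplus-dma";
	 *		reg = <0x21300 0x4>;
	 *		ranges;
	 *
	 *		dma-channel@21100 {
	 *			compatible = "fsl,eloplus-dma-channel";
	 *			reg = <0x21100 0x80>;
	 *		};
	 *	};
	 */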
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hook up the IRQ handler(s).
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts, to reduce the number of shared interrupt
	 * handlers on the same IRQ line.
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}
	irq_dispose_mapping(fdev->irq);
	iounmap(fdev->regs);
out_free:
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}
	irq_dispose_mapping(fdev->irq);

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

#ifdef CONFIG_PM
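/*
 * Late suspend refuses to run unless every channel is idle, saving each
 * channel's mode register while the descriptor lock is held; early resume
 * writes the saved value back with the start/continue/abort bits cleared
 * so that no transfer restarts behind the dmaengine core's back.
 */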
static int fsldma_suspend_late(struct device *dev)
{
	struct fsldma_device *fdev = dev_get_drvdata(dev);
	struct fsldma_chan *chan;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		if (unlikely(!chan->idle))
			goto out;
		chan->regs_save.mr = get_mr(chan);
		chan->pm_state = SUSPENDED;
		spin_unlock_bh(&chan->desc_lock);
	}
	return 0;

out:
	/*
	 * Drop the lock still held on the busy channel, then walk back
	 * and mark every channel visited so far as RUNNING again.
	 */
	spin_unlock_bh(&chan->desc_lock);

	for (; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}
	return -EBUSY;
}

static int fsldma_resume_early(struct device *dev)
{
	struct fsldma_device *fdev = dev_get_drvdata(dev);
	struct fsldma_chan *chan;
	u32 mode;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
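		/*
		 * Restore the saved mode register, minus the channel
		 * start (CS), channel continue (CC) and channel abort
		 * (CA) control bits, so the channel comes back quiescent.
		 */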
		mode = chan->regs_save.mr
			& ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
		set_mr(chan, mode);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}

	return 0;
}

static const struct dev_pm_ops fsldma_pm_ops = {
	.suspend_late	= fsldma_suspend_late,
	.resume_early	= fsldma_resume_early,
};
#endif

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,elo3-dma", },
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.of_match_table = fsldma_of_ids,
#ifdef CONFIG_PM
		.pm = &fsldma_pm_ops,
#endif
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo series DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");
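
/*
 * Illustrative client-side sketch (not part of this driver): with the
 * controller registered, a consumer could drive a memcpy through the
 * generic dmaengine API roughly as below. Error handling is elided and
 * dst_phys/src_phys/len are placeholder names.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *	dma_release_channel(chan);
 */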