xref: /openbmc/linux/drivers/dma/fsldma.c (revision ccc07729)
1173acc7cSZhang Wei /*
2173acc7cSZhang Wei  * Freescale MPC85xx, MPC83xx DMA Engine support
3173acc7cSZhang Wei  *
4e2c8e425SLi Yang  * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
5173acc7cSZhang Wei  *
6173acc7cSZhang Wei  * Author:
7173acc7cSZhang Wei  *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
8173acc7cSZhang Wei  *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
9173acc7cSZhang Wei  *
10173acc7cSZhang Wei  * Description:
11173acc7cSZhang Wei  *   DMA engine driver for Freescale MPC8540 DMA controller, which is
12173acc7cSZhang Wei  *   also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc.
13c2e07b3aSStefan Weil  *   The support for MPC8349 DMA controller is also added.
14173acc7cSZhang Wei  *
15a7aea373SIra W. Snyder  * This driver instructs the DMA controller to issue the PCI Read Multiple
16a7aea373SIra W. Snyder  * command for PCI read operations, instead of using the default PCI Read Line
17a7aea373SIra W. Snyder  * command. Please be aware that this setting may result in read pre-fetching
18a7aea373SIra W. Snyder  * on some platforms.
19a7aea373SIra W. Snyder  *
20173acc7cSZhang Wei  * This is free software; you can redistribute it and/or modify
21173acc7cSZhang Wei  * it under the terms of the GNU General Public License as published by
22173acc7cSZhang Wei  * the Free Software Foundation; either version 2 of the License, or
23173acc7cSZhang Wei  * (at your option) any later version.
24173acc7cSZhang Wei  *
25173acc7cSZhang Wei  */
26173acc7cSZhang Wei 
27173acc7cSZhang Wei #include <linux/init.h>
28173acc7cSZhang Wei #include <linux/module.h>
29173acc7cSZhang Wei #include <linux/pci.h>
305a0e3ad6STejun Heo #include <linux/slab.h>
31173acc7cSZhang Wei #include <linux/interrupt.h>
32173acc7cSZhang Wei #include <linux/dmaengine.h>
33173acc7cSZhang Wei #include <linux/delay.h>
34173acc7cSZhang Wei #include <linux/dma-mapping.h>
35173acc7cSZhang Wei #include <linux/dmapool.h>
365af50730SRob Herring #include <linux/of_address.h>
375af50730SRob Herring #include <linux/of_irq.h>
38173acc7cSZhang Wei #include <linux/of_platform.h>
390a5642beSVinod Koul #include <linux/fsldma.h>
40d2ebfb33SRussell King - ARM Linux #include "dmaengine.h"
41173acc7cSZhang Wei #include "fsldma.h"
42173acc7cSZhang Wei 
/* Per-channel printk helpers: prefix every message with the channel name */
#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

/* Shared message for link-descriptor allocation failures */
static const char msg_ld_oom[] = "No free memory for link descriptor";
49173acc7cSZhang Wei 
50e8bd84dfSIra Snyder /*
51e8bd84dfSIra Snyder  * Register Helpers
52173acc7cSZhang Wei  */
53173acc7cSZhang Wei 
/* Status register (SR) accessors, 32-bit MMIO */
static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

/* Mode register (MR) accessors, 32-bit MMIO */
static void set_mr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->mr, val, 32);
}

static u32 get_mr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->mr, 32);
}

/*
 * Current descriptor address register (CDAR) accessors, 64-bit MMIO.
 * The snoop-enable bit (FSL_DMA_SNEN) is carried in the register value,
 * so it is OR'd in on write and masked off on read.
 */
static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

/* Byte count register (BCR) accessors, 32-bit MMIO */
static void set_bcr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->bcr, val, 32);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}
93f79abb62SZhang Wei 
94e8bd84dfSIra Snyder /*
95e8bd84dfSIra Snyder  * Descriptor Helpers
96e8bd84dfSIra Snyder  */
97e8bd84dfSIra Snyder 
/* Fill in the transfer byte count field of a hardware link descriptor */
static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}
103173acc7cSZhang Wei 
104173acc7cSZhang Wei static void set_desc_src(struct fsldma_chan *chan,
105173acc7cSZhang Wei 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
106173acc7cSZhang Wei {
107173acc7cSZhang Wei 	u64 snoop_bits;
108900325a6SDan Williams 
109173acc7cSZhang Wei 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
110173acc7cSZhang Wei 		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
111173acc7cSZhang Wei 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
112900325a6SDan Williams }
113272ca655SIra Snyder 
114173acc7cSZhang Wei static void set_desc_dst(struct fsldma_chan *chan,
115173acc7cSZhang Wei 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
116173acc7cSZhang Wei {
117173acc7cSZhang Wei 	u64 snoop_bits;
118173acc7cSZhang Wei 
119173acc7cSZhang Wei 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
120173acc7cSZhang Wei 		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
121173acc7cSZhang Wei 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
122173acc7cSZhang Wei }
123173acc7cSZhang Wei 
124173acc7cSZhang Wei static void set_desc_next(struct fsldma_chan *chan,
125173acc7cSZhang Wei 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
126173acc7cSZhang Wei {
127173acc7cSZhang Wei 	u64 snoop_bits;
128173acc7cSZhang Wei 
129173acc7cSZhang Wei 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
130173acc7cSZhang Wei 		? FSL_DMA_SNEN : 0;
131173acc7cSZhang Wei 	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
132173acc7cSZhang Wei }
133173acc7cSZhang Wei 
13431f4306cSIra Snyder static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
135173acc7cSZhang Wei {
136e8bd84dfSIra Snyder 	u64 snoop_bits;
137e8bd84dfSIra Snyder 
138a1c03319SIra Snyder 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
139776c8943SIra Snyder 		? FSL_DMA_SNEN : 0;
140776c8943SIra Snyder 
141a1c03319SIra Snyder 	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
142a1c03319SIra Snyder 		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
143776c8943SIra Snyder 			| snoop_bits, 64);
144173acc7cSZhang Wei }
145173acc7cSZhang Wei 
146e8bd84dfSIra Snyder /*
147e8bd84dfSIra Snyder  * DMA Engine Hardware Control Helpers
148e8bd84dfSIra Snyder  */
149173acc7cSZhang Wei 
150e8bd84dfSIra Snyder static void dma_init(struct fsldma_chan *chan)
151173acc7cSZhang Wei {
152e8bd84dfSIra Snyder 	/* Reset the channel */
153ccdce9a0SHongbo Zhang 	set_mr(chan, 0);
154173acc7cSZhang Wei 
155e8bd84dfSIra Snyder 	switch (chan->feature & FSL_DMA_IP_MASK) {
156e8bd84dfSIra Snyder 	case FSL_DMA_IP_85XX:
157e8bd84dfSIra Snyder 		/* Set the channel to below modes:
158e8bd84dfSIra Snyder 		 * EIE - Error interrupt enable
159e8bd84dfSIra Snyder 		 * EOLNIE - End of links interrupt enable
160e8bd84dfSIra Snyder 		 * BWC - Bandwidth sharing among channels
161e8bd84dfSIra Snyder 		 */
162ccdce9a0SHongbo Zhang 		set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
163ccdce9a0SHongbo Zhang 			| FSL_DMA_MR_EOLNIE);
164e8bd84dfSIra Snyder 		break;
165e8bd84dfSIra Snyder 	case FSL_DMA_IP_83XX:
166e8bd84dfSIra Snyder 		/* Set the channel to below modes:
167e8bd84dfSIra Snyder 		 * EOTIE - End-of-transfer interrupt enable
168e8bd84dfSIra Snyder 		 * PRC_RM - PCI read multiple
169e8bd84dfSIra Snyder 		 */
170ccdce9a0SHongbo Zhang 		set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
171e8bd84dfSIra Snyder 		break;
172e8bd84dfSIra Snyder 	}
173173acc7cSZhang Wei }
174173acc7cSZhang Wei 
175173acc7cSZhang Wei static int dma_is_idle(struct fsldma_chan *chan)
176173acc7cSZhang Wei {
177173acc7cSZhang Wei 	u32 sr = get_sr(chan);
178173acc7cSZhang Wei 	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
179173acc7cSZhang Wei }
180173acc7cSZhang Wei 
181f04cd407SIra Snyder /*
182f04cd407SIra Snyder  * Start the DMA controller
183f04cd407SIra Snyder  *
184f04cd407SIra Snyder  * Preconditions:
185f04cd407SIra Snyder  * - the CDAR register must point to the start descriptor
186f04cd407SIra Snyder  * - the MRn[CS] bit must be cleared
187f04cd407SIra Snyder  */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = get_mr(chan);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		/* external pause: clear the byte counter before enabling EMP */
		set_bcr(chan, 0);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		/* the external DMA start pin will kick off the transfer */
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		/* start immediately by setting CS (cleared beforehand, see above) */
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	set_mr(chan, mode);
}
210173acc7cSZhang Wei 
211173acc7cSZhang Wei static void dma_halt(struct fsldma_chan *chan)
212173acc7cSZhang Wei {
213173acc7cSZhang Wei 	u32 mode;
214173acc7cSZhang Wei 	int i;
215173acc7cSZhang Wei 
216a00ae34aSIra Snyder 	/* read the mode register */
217ccdce9a0SHongbo Zhang 	mode = get_mr(chan);
218a00ae34aSIra Snyder 
219a00ae34aSIra Snyder 	/*
220a00ae34aSIra Snyder 	 * The 85xx controller supports channel abort, which will stop
221a00ae34aSIra Snyder 	 * the current transfer. On 83xx, this bit is the transfer error
222a00ae34aSIra Snyder 	 * mask bit, which should not be changed.
223a00ae34aSIra Snyder 	 */
224a00ae34aSIra Snyder 	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
225173acc7cSZhang Wei 		mode |= FSL_DMA_MR_CA;
226ccdce9a0SHongbo Zhang 		set_mr(chan, mode);
227173acc7cSZhang Wei 
228a00ae34aSIra Snyder 		mode &= ~FSL_DMA_MR_CA;
229a00ae34aSIra Snyder 	}
230a00ae34aSIra Snyder 
231a00ae34aSIra Snyder 	/* stop the DMA controller */
232a00ae34aSIra Snyder 	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
233ccdce9a0SHongbo Zhang 	set_mr(chan, mode);
234173acc7cSZhang Wei 
235a00ae34aSIra Snyder 	/* wait for the DMA controller to become idle */
236173acc7cSZhang Wei 	for (i = 0; i < 100; i++) {
237173acc7cSZhang Wei 		if (dma_is_idle(chan))
238173acc7cSZhang Wei 			return;
239173acc7cSZhang Wei 
240173acc7cSZhang Wei 		udelay(10);
241173acc7cSZhang Wei 	}
242173acc7cSZhang Wei 
243173acc7cSZhang Wei 	if (!dma_is_idle(chan))
244b158471eSIra Snyder 		chan_err(chan, "DMA halt timeout!\n");
245173acc7cSZhang Wei }
246173acc7cSZhang Wei 
247173acc7cSZhang Wei /**
248173acc7cSZhang Wei  * fsl_chan_set_src_loop_size - Set source address hold transfer size
249a1c03319SIra Snyder  * @chan : Freescale DMA channel
250173acc7cSZhang Wei  * @size     : Address loop size, 0 for disable loop
251173acc7cSZhang Wei  *
252173acc7cSZhang Wei  * The set source address hold transfer size. The source
253173acc7cSZhang Wei  * address hold or loop transfer size is when the DMA transfer
254173acc7cSZhang Wei  * data from source address (SA), if the loop size is 4, the DMA will
255173acc7cSZhang Wei  * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
256173acc7cSZhang Wei  * SA + 1 ... and so on.
257173acc7cSZhang Wei  */
258a1c03319SIra Snyder static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
259173acc7cSZhang Wei {
260272ca655SIra Snyder 	u32 mode;
261272ca655SIra Snyder 
262ccdce9a0SHongbo Zhang 	mode = get_mr(chan);
263272ca655SIra Snyder 
264173acc7cSZhang Wei 	switch (size) {
265173acc7cSZhang Wei 	case 0:
266272ca655SIra Snyder 		mode &= ~FSL_DMA_MR_SAHE;
267173acc7cSZhang Wei 		break;
268173acc7cSZhang Wei 	case 1:
269173acc7cSZhang Wei 	case 2:
270173acc7cSZhang Wei 	case 4:
271173acc7cSZhang Wei 	case 8:
272ccc07729SThomas Breitung 		mode &= ~FSL_DMA_MR_SAHTS_MASK;
273272ca655SIra Snyder 		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
274173acc7cSZhang Wei 		break;
275173acc7cSZhang Wei 	}
276272ca655SIra Snyder 
277ccdce9a0SHongbo Zhang 	set_mr(chan, mode);
278173acc7cSZhang Wei }
279173acc7cSZhang Wei 
280173acc7cSZhang Wei /**
281738f5f7eSIra Snyder  * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
282a1c03319SIra Snyder  * @chan : Freescale DMA channel
283173acc7cSZhang Wei  * @size     : Address loop size, 0 for disable loop
284173acc7cSZhang Wei  *
285173acc7cSZhang Wei  * The set destination address hold transfer size. The destination
286173acc7cSZhang Wei  * address hold or loop transfer size is when the DMA transfer
287173acc7cSZhang Wei  * data to destination address (TA), if the loop size is 4, the DMA will
288173acc7cSZhang Wei  * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
289173acc7cSZhang Wei  * TA + 1 ... and so on.
290173acc7cSZhang Wei  */
291a1c03319SIra Snyder static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
292173acc7cSZhang Wei {
293272ca655SIra Snyder 	u32 mode;
294272ca655SIra Snyder 
295ccdce9a0SHongbo Zhang 	mode = get_mr(chan);
296272ca655SIra Snyder 
297173acc7cSZhang Wei 	switch (size) {
298173acc7cSZhang Wei 	case 0:
299272ca655SIra Snyder 		mode &= ~FSL_DMA_MR_DAHE;
300173acc7cSZhang Wei 		break;
301173acc7cSZhang Wei 	case 1:
302173acc7cSZhang Wei 	case 2:
303173acc7cSZhang Wei 	case 4:
304173acc7cSZhang Wei 	case 8:
305ccc07729SThomas Breitung 		mode &= ~FSL_DMA_MR_DAHTS_MASK;
306272ca655SIra Snyder 		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
307173acc7cSZhang Wei 		break;
308173acc7cSZhang Wei 	}
309272ca655SIra Snyder 
310ccdce9a0SHongbo Zhang 	set_mr(chan, mode);
311173acc7cSZhang Wei }
312173acc7cSZhang Wei 
313173acc7cSZhang Wei /**
314e6c7ecb6SIra Snyder  * fsl_chan_set_request_count - Set DMA Request Count for external control
315a1c03319SIra Snyder  * @chan : Freescale DMA channel
316e6c7ecb6SIra Snyder  * @size     : Number of bytes to transfer in a single request
317173acc7cSZhang Wei  *
318e6c7ecb6SIra Snyder  * The Freescale DMA channel can be controlled by the external signal DREQ#.
319e6c7ecb6SIra Snyder  * The DMA request count is how many bytes are allowed to transfer before
320e6c7ecb6SIra Snyder  * pausing the channel, after which a new assertion of DREQ# resumes channel
321e6c7ecb6SIra Snyder  * operation.
322e6c7ecb6SIra Snyder  *
323e6c7ecb6SIra Snyder  * A size of 0 disables external pause control. The maximum size is 1024.
324173acc7cSZhang Wei  */
325a1c03319SIra Snyder static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
326173acc7cSZhang Wei {
327272ca655SIra Snyder 	u32 mode;
328272ca655SIra Snyder 
329e6c7ecb6SIra Snyder 	BUG_ON(size > 1024);
330272ca655SIra Snyder 
331ccdce9a0SHongbo Zhang 	mode = get_mr(chan);
332ccc07729SThomas Breitung 	mode &= ~FSL_DMA_MR_BWC_MASK;
333ccc07729SThomas Breitung 	mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;
334272ca655SIra Snyder 
335ccdce9a0SHongbo Zhang 	set_mr(chan, mode);
336e6c7ecb6SIra Snyder }
337e6c7ecb6SIra Snyder 
338e6c7ecb6SIra Snyder /**
339e6c7ecb6SIra Snyder  * fsl_chan_toggle_ext_pause - Toggle channel external pause status
340a1c03319SIra Snyder  * @chan : Freescale DMA channel
341e6c7ecb6SIra Snyder  * @enable   : 0 is disabled, 1 is enabled.
342e6c7ecb6SIra Snyder  *
343e6c7ecb6SIra Snyder  * The Freescale DMA channel can be controlled by the external signal DREQ#.
344e6c7ecb6SIra Snyder  * The DMA Request Count feature should be used in addition to this feature
345e6c7ecb6SIra Snyder  * to set the number of bytes to transfer before pausing the channel.
346e6c7ecb6SIra Snyder  */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	/* only records the flag; dma_start() applies EMP_EN from it */
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
354173acc7cSZhang Wei 
355173acc7cSZhang Wei /**
356173acc7cSZhang Wei  * fsl_chan_toggle_ext_start - Toggle channel external start status
357a1c03319SIra Snyder  * @chan : Freescale DMA channel
358173acc7cSZhang Wei  * @enable   : 0 is disabled, 1 is enabled.
359173acc7cSZhang Wei  *
360173acc7cSZhang Wei  * If enable the external start, the channel can be started by an
361173acc7cSZhang Wei  * external DMA start pin. So the dma_start() does not start the
362173acc7cSZhang Wei  * transfer immediately. The DMA channel will wait for the
363173acc7cSZhang Wei  * control pin asserted.
364173acc7cSZhang Wei  */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	/* only records the flag; dma_start() applies EMS_EN/CS from it */
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
372173acc7cSZhang Wei 
3730a5642beSVinod Koul int fsl_dma_external_start(struct dma_chan *dchan, int enable)
3740a5642beSVinod Koul {
3750a5642beSVinod Koul 	struct fsldma_chan *chan;
3760a5642beSVinod Koul 
3770a5642beSVinod Koul 	if (!dchan)
3780a5642beSVinod Koul 		return -EINVAL;
3790a5642beSVinod Koul 
3800a5642beSVinod Koul 	chan = to_fsl_chan(dchan);
3810a5642beSVinod Koul 
3820a5642beSVinod Koul 	fsl_chan_toggle_ext_start(chan, enable);
3830a5642beSVinod Koul 	return 0;
3840a5642beSVinod Koul }
3850a5642beSVinod Koul EXPORT_SYMBOL_GPL(fsl_dma_external_start);
3860a5642beSVinod Koul 
38731f4306cSIra Snyder static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
3889c3a50b7SIra Snyder {
3899c3a50b7SIra Snyder 	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
3909c3a50b7SIra Snyder 
3919c3a50b7SIra Snyder 	if (list_empty(&chan->ld_pending))
3929c3a50b7SIra Snyder 		goto out_splice;
3939c3a50b7SIra Snyder 
3949c3a50b7SIra Snyder 	/*
3959c3a50b7SIra Snyder 	 * Add the hardware descriptor to the chain of hardware descriptors
3969c3a50b7SIra Snyder 	 * that already exists in memory.
3979c3a50b7SIra Snyder 	 *
3989c3a50b7SIra Snyder 	 * This will un-set the EOL bit of the existing transaction, and the
3999c3a50b7SIra Snyder 	 * last link in this transaction will become the EOL descriptor.
4009c3a50b7SIra Snyder 	 */
4019c3a50b7SIra Snyder 	set_desc_next(chan, &tail->hw, desc->async_tx.phys);
4029c3a50b7SIra Snyder 
4039c3a50b7SIra Snyder 	/*
4049c3a50b7SIra Snyder 	 * Add the software descriptor and all children to the list
4059c3a50b7SIra Snyder 	 * of pending transactions
4069c3a50b7SIra Snyder 	 */
4079c3a50b7SIra Snyder out_splice:
4089c3a50b7SIra Snyder 	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
4099c3a50b7SIra Snyder }
4109c3a50b7SIra Snyder 
/*
 * fsl_dma_tx_submit - queue a prepared transaction on the pending list
 * @tx: the async_tx descriptor returned by a prep routine
 *
 * Assigns a cookie to every software descriptor making up the transaction
 * and appends the chain to the pending queue under chan->desc_lock. The
 * hardware is not started here; fsl_chan_xfer_ld_queue() does that later.
 *
 * Returns the cookie of the last descriptor, or -1 while suspended.
 */
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	dma_cookie_t cookie = -EINVAL;

	spin_lock_bh(&chan->desc_lock);

#ifdef CONFIG_PM
	/* reject submissions while the controller is suspended */
	if (unlikely(chan->pm_state != RUNNING)) {
		chan_dbg(chan, "cannot submit due to suspend\n");
		spin_unlock_bh(&chan->desc_lock);
		return -1;
	}
#endif

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_bh(&chan->desc_lock);

	return cookie;
}
443173acc7cSZhang Wei 
444173acc7cSZhang Wei /**
44586d19a54SHongbo Zhang  * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
44686d19a54SHongbo Zhang  * @chan : Freescale DMA channel
44786d19a54SHongbo Zhang  * @desc: descriptor to be freed
44886d19a54SHongbo Zhang  */
static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	/* unlink from whichever channel list currently holds the descriptor */
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	/* return the software+hardware descriptor pair to the DMA pool */
	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
45686d19a54SHongbo Zhang 
45786d19a54SHongbo Zhang /**
458173acc7cSZhang Wei  * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
459a1c03319SIra Snyder  * @chan : Freescale DMA channel
460173acc7cSZhang Wei  *
461173acc7cSZhang Wei  * Return - The descriptor allocated. NULL for failed.
462173acc7cSZhang Wei  */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	/* GFP_ATOMIC — presumably callable from atomic context; confirm against callers */
	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	/* initialize the async_tx bookkeeping; pdesc is the hw descriptor address */
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}
483173acc7cSZhang Wei 
484173acc7cSZhang Wei /**
48543452fadSHongbo Zhang  * fsldma_clean_completed_descriptor - free all descriptors which
48643452fadSHongbo Zhang  * has been completed and acked
48743452fadSHongbo Zhang  * @chan: Freescale DMA channel
48843452fadSHongbo Zhang  *
48943452fadSHongbo Zhang  * This function is used on all completed and acked descriptors.
49043452fadSHongbo Zhang  * All descriptors should only be freed in this function.
49143452fadSHongbo Zhang  */
49243452fadSHongbo Zhang static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
49343452fadSHongbo Zhang {
49443452fadSHongbo Zhang 	struct fsl_desc_sw *desc, *_desc;
49543452fadSHongbo Zhang 
49643452fadSHongbo Zhang 	/* Run the callback for each descriptor, in order */
49743452fadSHongbo Zhang 	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
49843452fadSHongbo Zhang 		if (async_tx_test_ack(&desc->async_tx))
49943452fadSHongbo Zhang 			fsl_dma_free_descriptor(chan, desc);
50043452fadSHongbo Zhang }
50143452fadSHongbo Zhang 
50243452fadSHongbo Zhang /**
50343452fadSHongbo Zhang  * fsldma_run_tx_complete_actions - cleanup a single link descriptor
50443452fadSHongbo Zhang  * @chan: Freescale DMA channel
50543452fadSHongbo Zhang  * @desc: descriptor to cleanup and free
50643452fadSHongbo Zhang  * @cookie: Freescale DMA transaction identifier
50743452fadSHongbo Zhang  *
50843452fadSHongbo Zhang  * This function is used on a descriptor which has been executed by the DMA
50943452fadSHongbo Zhang  * controller. It will run any callbacks, submit any dependencies.
51043452fadSHongbo Zhang  */
51143452fadSHongbo Zhang static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
51243452fadSHongbo Zhang 		struct fsl_desc_sw *desc, dma_cookie_t cookie)
51343452fadSHongbo Zhang {
51443452fadSHongbo Zhang 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
51543452fadSHongbo Zhang 	dma_cookie_t ret = cookie;
51643452fadSHongbo Zhang 
51743452fadSHongbo Zhang 	BUG_ON(txd->cookie < 0);
51843452fadSHongbo Zhang 
51943452fadSHongbo Zhang 	if (txd->cookie > 0) {
52043452fadSHongbo Zhang 		ret = txd->cookie;
52143452fadSHongbo Zhang 
5229b335978SDave Jiang 		dma_descriptor_unmap(txd);
52343452fadSHongbo Zhang 		/* Run the link descriptor callback function */
524af1a5a51SDave Jiang 		dmaengine_desc_get_callback_invoke(txd, NULL);
52543452fadSHongbo Zhang 	}
52643452fadSHongbo Zhang 
52743452fadSHongbo Zhang 	/* Run any dependencies */
52843452fadSHongbo Zhang 	dma_run_dependencies(txd);
52943452fadSHongbo Zhang 
53043452fadSHongbo Zhang 	return ret;
53143452fadSHongbo Zhang }
53243452fadSHongbo Zhang 
53343452fadSHongbo Zhang /**
53443452fadSHongbo Zhang  * fsldma_clean_running_descriptor - move the completed descriptor from
53543452fadSHongbo Zhang  * ld_running to ld_completed
53643452fadSHongbo Zhang  * @chan: Freescale DMA channel
53743452fadSHongbo Zhang  * @desc: the descriptor which is completed
53843452fadSHongbo Zhang  *
53943452fadSHongbo Zhang  * Free the descriptor directly if acked by async_tx api, or move it to
54043452fadSHongbo Zhang  * queue ld_completed.
54143452fadSHongbo Zhang  */
54243452fadSHongbo Zhang static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
54343452fadSHongbo Zhang 		struct fsl_desc_sw *desc)
54443452fadSHongbo Zhang {
54543452fadSHongbo Zhang 	/* Remove from the list of transactions */
54643452fadSHongbo Zhang 	list_del(&desc->node);
54743452fadSHongbo Zhang 
54843452fadSHongbo Zhang 	/*
54943452fadSHongbo Zhang 	 * the client is allowed to attach dependent operations
55043452fadSHongbo Zhang 	 * until 'ack' is set
55143452fadSHongbo Zhang 	 */
55243452fadSHongbo Zhang 	if (!async_tx_test_ack(&desc->async_tx)) {
55343452fadSHongbo Zhang 		/*
55443452fadSHongbo Zhang 		 * Move this descriptor to the list of descriptors which is
55543452fadSHongbo Zhang 		 * completed, but still awaiting the 'ack' bit to be set.
55643452fadSHongbo Zhang 		 */
55743452fadSHongbo Zhang 		list_add_tail(&desc->node, &chan->ld_completed);
55843452fadSHongbo Zhang 		return;
55943452fadSHongbo Zhang 	}
56043452fadSHongbo Zhang 
56143452fadSHongbo Zhang 	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
56243452fadSHongbo Zhang }
56343452fadSHongbo Zhang 
56443452fadSHongbo Zhang /**
5652a5ecb79SHongbo Zhang  * fsl_chan_xfer_ld_queue - transfer any pending transactions
5662a5ecb79SHongbo Zhang  * @chan : Freescale DMA channel
5672a5ecb79SHongbo Zhang  *
5682a5ecb79SHongbo Zhang  * HARDWARE STATE: idle
5692a5ecb79SHongbo Zhang  * LOCKING: must hold chan->desc_lock
5702a5ecb79SHongbo Zhang  */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	/* read back — presumably flushes the posted CDAR write; confirm */
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}
6302a5ecb79SHongbo Zhang 
6312a5ecb79SHongbo Zhang /**
63243452fadSHongbo Zhang  * fsldma_cleanup_descriptors - cleanup link descriptors which are completed
63343452fadSHongbo Zhang  * and move them to ld_completed to free until flag 'ack' is set
6342a5ecb79SHongbo Zhang  * @chan: Freescale DMA channel
6352a5ecb79SHongbo Zhang  *
63643452fadSHongbo Zhang  * This function is used on descriptors which have been executed by the DMA
63743452fadSHongbo Zhang  * controller. It will run any callbacks, submit any dependencies, then
63843452fadSHongbo Zhang  * free these descriptors if flag 'ack' is set.
6392a5ecb79SHongbo Zhang  */
static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	/* snapshot of the descriptor the hardware is currently executing */
	dma_addr_t curr_phys = get_cdar(chan);
	int seen_current = 0;

	/*
	 * NOTE: fsl_chan_xfer_ld_queue() below requires chan->desc_lock
	 * to be held, so callers of this function must hold it as well.
	 */
	fsldma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		/*
		 * do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/*
		 * stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (desc->async_tx.phys == curr_phys) {
			seen_current = 1;
			if (!dma_is_idle(chan))
				break;
		}

		cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);

		fsldma_clean_running_descriptor(chan, desc);
	}

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);

	/* advance the dmaengine completed-cookie marker if we made progress */
	if (cookie > 0)
		chan->common.completed_cookie = cookie;
}
6852a5ecb79SHongbo Zhang 
6862a5ecb79SHongbo Zhang /**
687173acc7cSZhang Wei  * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
688a1c03319SIra Snyder  * @chan : Freescale DMA channel
689173acc7cSZhang Wei  *
690173acc7cSZhang Wei  * This function will create a dma pool for descriptor allocation.
691173acc7cSZhang Wei  *
692173acc7cSZhang Wei  * Return - The number of descriptors allocated.
693173acc7cSZhang Wei  */
694a1c03319SIra Snyder static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
695173acc7cSZhang Wei {
696a1c03319SIra Snyder 	struct fsldma_chan *chan = to_fsl_chan(dchan);
69777cd62e8STimur Tabi 
69877cd62e8STimur Tabi 	/* Has this channel already been allocated? */
699a1c03319SIra Snyder 	if (chan->desc_pool)
70077cd62e8STimur Tabi 		return 1;
701173acc7cSZhang Wei 
7029c3a50b7SIra Snyder 	/*
7039c3a50b7SIra Snyder 	 * We need the descriptor to be aligned to 32bytes
704173acc7cSZhang Wei 	 * for meeting FSL DMA specification requirement.
705173acc7cSZhang Wei 	 */
706b158471eSIra Snyder 	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
7079c3a50b7SIra Snyder 					  sizeof(struct fsl_desc_sw),
7089c3a50b7SIra Snyder 					  __alignof__(struct fsl_desc_sw), 0);
709a1c03319SIra Snyder 	if (!chan->desc_pool) {
710b158471eSIra Snyder 		chan_err(chan, "unable to allocate descriptor pool\n");
7119c3a50b7SIra Snyder 		return -ENOMEM;
712173acc7cSZhang Wei 	}
713173acc7cSZhang Wei 
7149c3a50b7SIra Snyder 	/* there is at least one descriptor free to be allocated */
715173acc7cSZhang Wei 	return 1;
716173acc7cSZhang Wei }
717173acc7cSZhang Wei 
718173acc7cSZhang Wei /**
7199c3a50b7SIra Snyder  * fsldma_free_desc_list - Free all descriptors in a queue
7209c3a50b7SIra Snyder  * @chan: Freescae DMA channel
7219c3a50b7SIra Snyder  * @list: the list to free
7229c3a50b7SIra Snyder  *
7239c3a50b7SIra Snyder  * LOCKING: must hold chan->desc_lock
7249c3a50b7SIra Snyder  */
7259c3a50b7SIra Snyder static void fsldma_free_desc_list(struct fsldma_chan *chan,
7269c3a50b7SIra Snyder 				  struct list_head *list)
7279c3a50b7SIra Snyder {
7289c3a50b7SIra Snyder 	struct fsl_desc_sw *desc, *_desc;
7299c3a50b7SIra Snyder 
73086d19a54SHongbo Zhang 	list_for_each_entry_safe(desc, _desc, list, node)
73186d19a54SHongbo Zhang 		fsl_dma_free_descriptor(chan, desc);
7329c3a50b7SIra Snyder }
7339c3a50b7SIra Snyder 
7349c3a50b7SIra Snyder static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
7359c3a50b7SIra Snyder 					  struct list_head *list)
7369c3a50b7SIra Snyder {
7379c3a50b7SIra Snyder 	struct fsl_desc_sw *desc, *_desc;
7389c3a50b7SIra Snyder 
73986d19a54SHongbo Zhang 	list_for_each_entry_safe_reverse(desc, _desc, list, node)
74086d19a54SHongbo Zhang 		fsl_dma_free_descriptor(chan, desc);
7419c3a50b7SIra Snyder }
7429c3a50b7SIra Snyder 
7439c3a50b7SIra Snyder /**
744173acc7cSZhang Wei  * fsl_dma_free_chan_resources - Free all resources of the channel.
745a1c03319SIra Snyder  * @chan : Freescale DMA channel
746173acc7cSZhang Wei  */
747a1c03319SIra Snyder static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
748173acc7cSZhang Wei {
749a1c03319SIra Snyder 	struct fsldma_chan *chan = to_fsl_chan(dchan);
750173acc7cSZhang Wei 
751b158471eSIra Snyder 	chan_dbg(chan, "free all channel resources\n");
7522baff570SHongbo Zhang 	spin_lock_bh(&chan->desc_lock);
75343452fadSHongbo Zhang 	fsldma_cleanup_descriptors(chan);
7549c3a50b7SIra Snyder 	fsldma_free_desc_list(chan, &chan->ld_pending);
7559c3a50b7SIra Snyder 	fsldma_free_desc_list(chan, &chan->ld_running);
75643452fadSHongbo Zhang 	fsldma_free_desc_list(chan, &chan->ld_completed);
7572baff570SHongbo Zhang 	spin_unlock_bh(&chan->desc_lock);
75877cd62e8STimur Tabi 
7599c3a50b7SIra Snyder 	dma_pool_destroy(chan->desc_pool);
760a1c03319SIra Snyder 	chan->desc_pool = NULL;
761173acc7cSZhang Wei }
762173acc7cSZhang Wei 
7632187c269SZhang Wei static struct dma_async_tx_descriptor *
76431f4306cSIra Snyder fsl_dma_prep_memcpy(struct dma_chan *dchan,
76531f4306cSIra Snyder 	dma_addr_t dma_dst, dma_addr_t dma_src,
766173acc7cSZhang Wei 	size_t len, unsigned long flags)
767173acc7cSZhang Wei {
768a1c03319SIra Snyder 	struct fsldma_chan *chan;
769173acc7cSZhang Wei 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
770173acc7cSZhang Wei 	size_t copy;
771173acc7cSZhang Wei 
772a1c03319SIra Snyder 	if (!dchan)
773173acc7cSZhang Wei 		return NULL;
774173acc7cSZhang Wei 
775173acc7cSZhang Wei 	if (!len)
776173acc7cSZhang Wei 		return NULL;
777173acc7cSZhang Wei 
778a1c03319SIra Snyder 	chan = to_fsl_chan(dchan);
779173acc7cSZhang Wei 
780173acc7cSZhang Wei 	do {
781173acc7cSZhang Wei 
782173acc7cSZhang Wei 		/* Allocate the link descriptor from DMA pool */
783a1c03319SIra Snyder 		new = fsl_dma_alloc_descriptor(chan);
784173acc7cSZhang Wei 		if (!new) {
785b158471eSIra Snyder 			chan_err(chan, "%s\n", msg_ld_oom);
7862e077f8eSIra Snyder 			goto fail;
787173acc7cSZhang Wei 		}
788173acc7cSZhang Wei 
78956822843SZhang Wei 		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
790173acc7cSZhang Wei 
791a1c03319SIra Snyder 		set_desc_cnt(chan, &new->hw, copy);
792a1c03319SIra Snyder 		set_desc_src(chan, &new->hw, dma_src);
793a1c03319SIra Snyder 		set_desc_dst(chan, &new->hw, dma_dst);
794173acc7cSZhang Wei 
795173acc7cSZhang Wei 		if (!first)
796173acc7cSZhang Wei 			first = new;
797173acc7cSZhang Wei 		else
798a1c03319SIra Snyder 			set_desc_next(chan, &prev->hw, new->async_tx.phys);
799173acc7cSZhang Wei 
800173acc7cSZhang Wei 		new->async_tx.cookie = 0;
801636bdeaaSDan Williams 		async_tx_ack(&new->async_tx);
802173acc7cSZhang Wei 
803173acc7cSZhang Wei 		prev = new;
804173acc7cSZhang Wei 		len -= copy;
805173acc7cSZhang Wei 		dma_src += copy;
806738f5f7eSIra Snyder 		dma_dst += copy;
807173acc7cSZhang Wei 
808173acc7cSZhang Wei 		/* Insert the link descriptor to the LD ring */
809eda34234SDan Williams 		list_add_tail(&new->node, &first->tx_list);
810173acc7cSZhang Wei 	} while (len);
811173acc7cSZhang Wei 
812636bdeaaSDan Williams 	new->async_tx.flags = flags; /* client is in control of this ack */
813173acc7cSZhang Wei 	new->async_tx.cookie = -EBUSY;
814173acc7cSZhang Wei 
815173acc7cSZhang Wei 	/* Set End-of-link to the last link descriptor of new list */
816a1c03319SIra Snyder 	set_ld_eol(chan, new);
817173acc7cSZhang Wei 
8182e077f8eSIra Snyder 	return &first->async_tx;
8192e077f8eSIra Snyder 
8202e077f8eSIra Snyder fail:
8212e077f8eSIra Snyder 	if (!first)
8222e077f8eSIra Snyder 		return NULL;
8232e077f8eSIra Snyder 
8249c3a50b7SIra Snyder 	fsldma_free_desc_list_reverse(chan, &first->tx_list);
8252e077f8eSIra Snyder 	return NULL;
826173acc7cSZhang Wei }
827173acc7cSZhang Wei 
828c1433041SIra Snyder static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
829c1433041SIra Snyder 	struct scatterlist *dst_sg, unsigned int dst_nents,
830c1433041SIra Snyder 	struct scatterlist *src_sg, unsigned int src_nents,
831c1433041SIra Snyder 	unsigned long flags)
832c1433041SIra Snyder {
833c1433041SIra Snyder 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
834c1433041SIra Snyder 	struct fsldma_chan *chan = to_fsl_chan(dchan);
835c1433041SIra Snyder 	size_t dst_avail, src_avail;
836c1433041SIra Snyder 	dma_addr_t dst, src;
837c1433041SIra Snyder 	size_t len;
838c1433041SIra Snyder 
839c1433041SIra Snyder 	/* basic sanity checks */
840c1433041SIra Snyder 	if (dst_nents == 0 || src_nents == 0)
841c1433041SIra Snyder 		return NULL;
842c1433041SIra Snyder 
843c1433041SIra Snyder 	if (dst_sg == NULL || src_sg == NULL)
844c1433041SIra Snyder 		return NULL;
845c1433041SIra Snyder 
846c1433041SIra Snyder 	/*
847c1433041SIra Snyder 	 * TODO: should we check that both scatterlists have the same
848c1433041SIra Snyder 	 * TODO: number of bytes in total? Is that really an error?
849c1433041SIra Snyder 	 */
850c1433041SIra Snyder 
851c1433041SIra Snyder 	/* get prepared for the loop */
852c1433041SIra Snyder 	dst_avail = sg_dma_len(dst_sg);
853c1433041SIra Snyder 	src_avail = sg_dma_len(src_sg);
854c1433041SIra Snyder 
855c1433041SIra Snyder 	/* run until we are out of scatterlist entries */
856c1433041SIra Snyder 	while (true) {
857c1433041SIra Snyder 
858c1433041SIra Snyder 		/* create the largest transaction possible */
859c1433041SIra Snyder 		len = min_t(size_t, src_avail, dst_avail);
860c1433041SIra Snyder 		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
861c1433041SIra Snyder 		if (len == 0)
862c1433041SIra Snyder 			goto fetch;
863c1433041SIra Snyder 
864c1433041SIra Snyder 		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
865c1433041SIra Snyder 		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
866c1433041SIra Snyder 
867c1433041SIra Snyder 		/* allocate and populate the descriptor */
868c1433041SIra Snyder 		new = fsl_dma_alloc_descriptor(chan);
869c1433041SIra Snyder 		if (!new) {
870b158471eSIra Snyder 			chan_err(chan, "%s\n", msg_ld_oom);
871c1433041SIra Snyder 			goto fail;
872c1433041SIra Snyder 		}
873c1433041SIra Snyder 
874c1433041SIra Snyder 		set_desc_cnt(chan, &new->hw, len);
875c1433041SIra Snyder 		set_desc_src(chan, &new->hw, src);
876c1433041SIra Snyder 		set_desc_dst(chan, &new->hw, dst);
877c1433041SIra Snyder 
878c1433041SIra Snyder 		if (!first)
879c1433041SIra Snyder 			first = new;
880c1433041SIra Snyder 		else
881c1433041SIra Snyder 			set_desc_next(chan, &prev->hw, new->async_tx.phys);
882c1433041SIra Snyder 
883c1433041SIra Snyder 		new->async_tx.cookie = 0;
884c1433041SIra Snyder 		async_tx_ack(&new->async_tx);
885c1433041SIra Snyder 		prev = new;
886c1433041SIra Snyder 
887c1433041SIra Snyder 		/* Insert the link descriptor to the LD ring */
888c1433041SIra Snyder 		list_add_tail(&new->node, &first->tx_list);
889c1433041SIra Snyder 
890c1433041SIra Snyder 		/* update metadata */
891c1433041SIra Snyder 		dst_avail -= len;
892c1433041SIra Snyder 		src_avail -= len;
893c1433041SIra Snyder 
894c1433041SIra Snyder fetch:
895c1433041SIra Snyder 		/* fetch the next dst scatterlist entry */
896c1433041SIra Snyder 		if (dst_avail == 0) {
897c1433041SIra Snyder 
898c1433041SIra Snyder 			/* no more entries: we're done */
899c1433041SIra Snyder 			if (dst_nents == 0)
900c1433041SIra Snyder 				break;
901c1433041SIra Snyder 
902c1433041SIra Snyder 			/* fetch the next entry: if there are no more: done */
903c1433041SIra Snyder 			dst_sg = sg_next(dst_sg);
904c1433041SIra Snyder 			if (dst_sg == NULL)
905c1433041SIra Snyder 				break;
906c1433041SIra Snyder 
907c1433041SIra Snyder 			dst_nents--;
908c1433041SIra Snyder 			dst_avail = sg_dma_len(dst_sg);
909c1433041SIra Snyder 		}
910c1433041SIra Snyder 
911c1433041SIra Snyder 		/* fetch the next src scatterlist entry */
912c1433041SIra Snyder 		if (src_avail == 0) {
913c1433041SIra Snyder 
914c1433041SIra Snyder 			/* no more entries: we're done */
915c1433041SIra Snyder 			if (src_nents == 0)
916c1433041SIra Snyder 				break;
917c1433041SIra Snyder 
918c1433041SIra Snyder 			/* fetch the next entry: if there are no more: done */
919c1433041SIra Snyder 			src_sg = sg_next(src_sg);
920c1433041SIra Snyder 			if (src_sg == NULL)
921c1433041SIra Snyder 				break;
922c1433041SIra Snyder 
923c1433041SIra Snyder 			src_nents--;
924c1433041SIra Snyder 			src_avail = sg_dma_len(src_sg);
925c1433041SIra Snyder 		}
926c1433041SIra Snyder 	}
927c1433041SIra Snyder 
928c1433041SIra Snyder 	new->async_tx.flags = flags; /* client is in control of this ack */
929c1433041SIra Snyder 	new->async_tx.cookie = -EBUSY;
930c1433041SIra Snyder 
931c1433041SIra Snyder 	/* Set End-of-link to the last link descriptor of new list */
932c1433041SIra Snyder 	set_ld_eol(chan, new);
933c1433041SIra Snyder 
934c1433041SIra Snyder 	return &first->async_tx;
935c1433041SIra Snyder 
936c1433041SIra Snyder fail:
937c1433041SIra Snyder 	if (!first)
938c1433041SIra Snyder 		return NULL;
939c1433041SIra Snyder 
940c1433041SIra Snyder 	fsldma_free_desc_list_reverse(chan, &first->tx_list);
941c1433041SIra Snyder 	return NULL;
942c1433041SIra Snyder }
943c1433041SIra Snyder 
944b7f7552bSMaxime Ripard static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
945bbea0b6eSIra Snyder {
946a1c03319SIra Snyder 	struct fsldma_chan *chan;
947c3635c78SLinus Walleij 
948a1c03319SIra Snyder 	if (!dchan)
949c3635c78SLinus Walleij 		return -EINVAL;
950bbea0b6eSIra Snyder 
951a1c03319SIra Snyder 	chan = to_fsl_chan(dchan);
952bbea0b6eSIra Snyder 
9532baff570SHongbo Zhang 	spin_lock_bh(&chan->desc_lock);
954f04cd407SIra Snyder 
955bbea0b6eSIra Snyder 	/* Halt the DMA engine */
956a1c03319SIra Snyder 	dma_halt(chan);
957bbea0b6eSIra Snyder 
958bbea0b6eSIra Snyder 	/* Remove and free all of the descriptors in the LD queue */
9599c3a50b7SIra Snyder 	fsldma_free_desc_list(chan, &chan->ld_pending);
9609c3a50b7SIra Snyder 	fsldma_free_desc_list(chan, &chan->ld_running);
96143452fadSHongbo Zhang 	fsldma_free_desc_list(chan, &chan->ld_completed);
962f04cd407SIra Snyder 	chan->idle = true;
963bbea0b6eSIra Snyder 
9642baff570SHongbo Zhang 	spin_unlock_bh(&chan->desc_lock);
965968f19aeSIra Snyder 	return 0;
966b7f7552bSMaxime Ripard }
967968f19aeSIra Snyder 
968b7f7552bSMaxime Ripard static int fsl_dma_device_config(struct dma_chan *dchan,
969b7f7552bSMaxime Ripard 				 struct dma_slave_config *config)
970b7f7552bSMaxime Ripard {
971b7f7552bSMaxime Ripard 	struct fsldma_chan *chan;
972b7f7552bSMaxime Ripard 	int size;
973b7f7552bSMaxime Ripard 
974b7f7552bSMaxime Ripard 	if (!dchan)
975b7f7552bSMaxime Ripard 		return -EINVAL;
976b7f7552bSMaxime Ripard 
977b7f7552bSMaxime Ripard 	chan = to_fsl_chan(dchan);
978968f19aeSIra Snyder 
979968f19aeSIra Snyder 	/* make sure the channel supports setting burst size */
980968f19aeSIra Snyder 	if (!chan->set_request_count)
981968f19aeSIra Snyder 		return -ENXIO;
982968f19aeSIra Snyder 
983968f19aeSIra Snyder 	/* we set the controller burst size depending on direction */
984db8196dfSVinod Koul 	if (config->direction == DMA_MEM_TO_DEV)
985968f19aeSIra Snyder 		size = config->dst_addr_width * config->dst_maxburst;
986968f19aeSIra Snyder 	else
987968f19aeSIra Snyder 		size = config->src_addr_width * config->src_maxburst;
988968f19aeSIra Snyder 
989968f19aeSIra Snyder 	chan->set_request_count(chan, size);
990968f19aeSIra Snyder 	return 0;
991968f19aeSIra Snyder }
992c3635c78SLinus Walleij 
993bbea0b6eSIra Snyder 
994bbea0b6eSIra Snyder /**
995173acc7cSZhang Wei  * fsl_dma_memcpy_issue_pending - Issue the DMA start command
996a1c03319SIra Snyder  * @chan : Freescale DMA channel
997173acc7cSZhang Wei  */
998a1c03319SIra Snyder static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
999173acc7cSZhang Wei {
1000a1c03319SIra Snyder 	struct fsldma_chan *chan = to_fsl_chan(dchan);
1001dc8d4091SIra Snyder 
10022baff570SHongbo Zhang 	spin_lock_bh(&chan->desc_lock);
1003a1c03319SIra Snyder 	fsl_chan_xfer_ld_queue(chan);
10042baff570SHongbo Zhang 	spin_unlock_bh(&chan->desc_lock);
1005173acc7cSZhang Wei }
1006173acc7cSZhang Wei 
1007173acc7cSZhang Wei /**
100807934481SLinus Walleij  * fsl_tx_status - Determine the DMA status
1009a1c03319SIra Snyder  * @chan : Freescale DMA channel
1010173acc7cSZhang Wei  */
101107934481SLinus Walleij static enum dma_status fsl_tx_status(struct dma_chan *dchan,
1012173acc7cSZhang Wei 					dma_cookie_t cookie,
101307934481SLinus Walleij 					struct dma_tx_state *txstate)
1014173acc7cSZhang Wei {
101543452fadSHongbo Zhang 	struct fsldma_chan *chan = to_fsl_chan(dchan);
101643452fadSHongbo Zhang 	enum dma_status ret;
101743452fadSHongbo Zhang 
101843452fadSHongbo Zhang 	ret = dma_cookie_status(dchan, cookie, txstate);
101943452fadSHongbo Zhang 	if (ret == DMA_COMPLETE)
102043452fadSHongbo Zhang 		return ret;
102143452fadSHongbo Zhang 
102243452fadSHongbo Zhang 	spin_lock_bh(&chan->desc_lock);
102343452fadSHongbo Zhang 	fsldma_cleanup_descriptors(chan);
102443452fadSHongbo Zhang 	spin_unlock_bh(&chan->desc_lock);
102543452fadSHongbo Zhang 
10269b0b0bdcSAndy Shevchenko 	return dma_cookie_status(dchan, cookie, txstate);
1027173acc7cSZhang Wei }
1028173acc7cSZhang Wei 
1029d3f620b2SIra Snyder /*----------------------------------------------------------------------------*/
1030d3f620b2SIra Snyder /* Interrupt Handling                                                         */
1031d3f620b2SIra Snyder /*----------------------------------------------------------------------------*/
1032d3f620b2SIra Snyder 
1033e7a29151SIra Snyder static irqreturn_t fsldma_chan_irq(int irq, void *data)
1034173acc7cSZhang Wei {
1035a1c03319SIra Snyder 	struct fsldma_chan *chan = data;
1036a1c03319SIra Snyder 	u32 stat;
1037173acc7cSZhang Wei 
10389c3a50b7SIra Snyder 	/* save and clear the status register */
1039a1c03319SIra Snyder 	stat = get_sr(chan);
10409c3a50b7SIra Snyder 	set_sr(chan, stat);
1041b158471eSIra Snyder 	chan_dbg(chan, "irq: stat = 0x%x\n", stat);
1042173acc7cSZhang Wei 
1043f04cd407SIra Snyder 	/* check that this was really our device */
1044173acc7cSZhang Wei 	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
1045173acc7cSZhang Wei 	if (!stat)
1046173acc7cSZhang Wei 		return IRQ_NONE;
1047173acc7cSZhang Wei 
1048173acc7cSZhang Wei 	if (stat & FSL_DMA_SR_TE)
1049b158471eSIra Snyder 		chan_err(chan, "Transfer Error!\n");
1050173acc7cSZhang Wei 
10519c3a50b7SIra Snyder 	/*
10529c3a50b7SIra Snyder 	 * Programming Error
1053f79abb62SZhang Wei 	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
1054d73111c6SMasanari Iida 	 * trigger a PE interrupt.
1055f79abb62SZhang Wei 	 */
1056f79abb62SZhang Wei 	if (stat & FSL_DMA_SR_PE) {
1057b158471eSIra Snyder 		chan_dbg(chan, "irq: Programming Error INT\n");
1058f79abb62SZhang Wei 		stat &= ~FSL_DMA_SR_PE;
1059f04cd407SIra Snyder 		if (get_bcr(chan) != 0)
1060f04cd407SIra Snyder 			chan_err(chan, "Programming Error!\n");
10611c62979eSZhang Wei 	}
10621c62979eSZhang Wei 
10639c3a50b7SIra Snyder 	/*
10649c3a50b7SIra Snyder 	 * For MPC8349, EOCDI event need to update cookie
10651c62979eSZhang Wei 	 * and start the next transfer if it exist.
10661c62979eSZhang Wei 	 */
10671c62979eSZhang Wei 	if (stat & FSL_DMA_SR_EOCDI) {
1068b158471eSIra Snyder 		chan_dbg(chan, "irq: End-of-Chain link INT\n");
10691c62979eSZhang Wei 		stat &= ~FSL_DMA_SR_EOCDI;
1070173acc7cSZhang Wei 	}
1071173acc7cSZhang Wei 
10729c3a50b7SIra Snyder 	/*
10739c3a50b7SIra Snyder 	 * If it current transfer is the end-of-transfer,
1074173acc7cSZhang Wei 	 * we should clear the Channel Start bit for
1075173acc7cSZhang Wei 	 * prepare next transfer.
1076173acc7cSZhang Wei 	 */
10771c62979eSZhang Wei 	if (stat & FSL_DMA_SR_EOLNI) {
1078b158471eSIra Snyder 		chan_dbg(chan, "irq: End-of-link INT\n");
1079173acc7cSZhang Wei 		stat &= ~FSL_DMA_SR_EOLNI;
1080173acc7cSZhang Wei 	}
1081173acc7cSZhang Wei 
1082f04cd407SIra Snyder 	/* check that the DMA controller is really idle */
1083f04cd407SIra Snyder 	if (!dma_is_idle(chan))
1084f04cd407SIra Snyder 		chan_err(chan, "irq: controller not idle!\n");
1085173acc7cSZhang Wei 
1086f04cd407SIra Snyder 	/* check that we handled all of the bits */
1087f04cd407SIra Snyder 	if (stat)
1088f04cd407SIra Snyder 		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);
1089f04cd407SIra Snyder 
1090f04cd407SIra Snyder 	/*
1091f04cd407SIra Snyder 	 * Schedule the tasklet to handle all cleanup of the current
1092f04cd407SIra Snyder 	 * transaction. It will start a new transaction if there is
1093f04cd407SIra Snyder 	 * one pending.
1094f04cd407SIra Snyder 	 */
1095a1c03319SIra Snyder 	tasklet_schedule(&chan->tasklet);
1096f04cd407SIra Snyder 	chan_dbg(chan, "irq: Exit\n");
1097173acc7cSZhang Wei 	return IRQ_HANDLED;
1098173acc7cSZhang Wei }
1099173acc7cSZhang Wei 
1100173acc7cSZhang Wei static void dma_do_tasklet(unsigned long data)
1101173acc7cSZhang Wei {
1102a1c03319SIra Snyder 	struct fsldma_chan *chan = (struct fsldma_chan *)data;
1103f04cd407SIra Snyder 
1104f04cd407SIra Snyder 	chan_dbg(chan, "tasklet entry\n");
1105f04cd407SIra Snyder 
11062baff570SHongbo Zhang 	spin_lock_bh(&chan->desc_lock);
1107dc8d4091SIra Snyder 
1108dc8d4091SIra Snyder 	/* the hardware is now idle and ready for more */
1109f04cd407SIra Snyder 	chan->idle = true;
1110dc8d4091SIra Snyder 
111143452fadSHongbo Zhang 	/* Run all cleanup for descriptors which have been completed */
111243452fadSHongbo Zhang 	fsldma_cleanup_descriptors(chan);
111343452fadSHongbo Zhang 
11142baff570SHongbo Zhang 	spin_unlock_bh(&chan->desc_lock);
1115f04cd407SIra Snyder 
1116f04cd407SIra Snyder 	chan_dbg(chan, "tasklet exit\n");
1117173acc7cSZhang Wei }
1118173acc7cSZhang Wei 
1119d3f620b2SIra Snyder static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
1120d3f620b2SIra Snyder {
1121d3f620b2SIra Snyder 	struct fsldma_device *fdev = data;
1122d3f620b2SIra Snyder 	struct fsldma_chan *chan;
1123d3f620b2SIra Snyder 	unsigned int handled = 0;
1124d3f620b2SIra Snyder 	u32 gsr, mask;
1125d3f620b2SIra Snyder 	int i;
1126d3f620b2SIra Snyder 
1127d3f620b2SIra Snyder 	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
1128d3f620b2SIra Snyder 						   : in_le32(fdev->regs);
1129d3f620b2SIra Snyder 	mask = 0xff000000;
1130d3f620b2SIra Snyder 	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
1131d3f620b2SIra Snyder 
1132d3f620b2SIra Snyder 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1133d3f620b2SIra Snyder 		chan = fdev->chan[i];
1134d3f620b2SIra Snyder 		if (!chan)
1135d3f620b2SIra Snyder 			continue;
1136d3f620b2SIra Snyder 
1137d3f620b2SIra Snyder 		if (gsr & mask) {
1138d3f620b2SIra Snyder 			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
1139d3f620b2SIra Snyder 			fsldma_chan_irq(irq, chan);
1140d3f620b2SIra Snyder 			handled++;
1141d3f620b2SIra Snyder 		}
1142d3f620b2SIra Snyder 
1143d3f620b2SIra Snyder 		gsr &= ~mask;
1144d3f620b2SIra Snyder 		mask >>= 8;
1145d3f620b2SIra Snyder 	}
1146d3f620b2SIra Snyder 
1147d3f620b2SIra Snyder 	return IRQ_RETVAL(handled);
1148d3f620b2SIra Snyder }
1149d3f620b2SIra Snyder 
1150d3f620b2SIra Snyder static void fsldma_free_irqs(struct fsldma_device *fdev)
1151d3f620b2SIra Snyder {
1152d3f620b2SIra Snyder 	struct fsldma_chan *chan;
1153d3f620b2SIra Snyder 	int i;
1154d3f620b2SIra Snyder 
1155aa570be6SMichael Ellerman 	if (fdev->irq) {
1156d3f620b2SIra Snyder 		dev_dbg(fdev->dev, "free per-controller IRQ\n");
1157d3f620b2SIra Snyder 		free_irq(fdev->irq, fdev);
1158d3f620b2SIra Snyder 		return;
1159d3f620b2SIra Snyder 	}
1160d3f620b2SIra Snyder 
1161d3f620b2SIra Snyder 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1162d3f620b2SIra Snyder 		chan = fdev->chan[i];
1163aa570be6SMichael Ellerman 		if (chan && chan->irq) {
1164b158471eSIra Snyder 			chan_dbg(chan, "free per-channel IRQ\n");
1165d3f620b2SIra Snyder 			free_irq(chan->irq, chan);
1166d3f620b2SIra Snyder 		}
1167d3f620b2SIra Snyder 	}
1168d3f620b2SIra Snyder }
1169d3f620b2SIra Snyder 
1170d3f620b2SIra Snyder static int fsldma_request_irqs(struct fsldma_device *fdev)
1171d3f620b2SIra Snyder {
1172d3f620b2SIra Snyder 	struct fsldma_chan *chan;
1173d3f620b2SIra Snyder 	int ret;
1174d3f620b2SIra Snyder 	int i;
1175d3f620b2SIra Snyder 
1176d3f620b2SIra Snyder 	/* if we have a per-controller IRQ, use that */
1177aa570be6SMichael Ellerman 	if (fdev->irq) {
1178d3f620b2SIra Snyder 		dev_dbg(fdev->dev, "request per-controller IRQ\n");
1179d3f620b2SIra Snyder 		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
1180d3f620b2SIra Snyder 				  "fsldma-controller", fdev);
1181d3f620b2SIra Snyder 		return ret;
1182d3f620b2SIra Snyder 	}
1183d3f620b2SIra Snyder 
1184d3f620b2SIra Snyder 	/* no per-controller IRQ, use the per-channel IRQs */
1185d3f620b2SIra Snyder 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1186d3f620b2SIra Snyder 		chan = fdev->chan[i];
1187d3f620b2SIra Snyder 		if (!chan)
1188d3f620b2SIra Snyder 			continue;
1189d3f620b2SIra Snyder 
1190aa570be6SMichael Ellerman 		if (!chan->irq) {
1191b158471eSIra Snyder 			chan_err(chan, "interrupts property missing in device tree\n");
1192d3f620b2SIra Snyder 			ret = -ENODEV;
1193d3f620b2SIra Snyder 			goto out_unwind;
1194d3f620b2SIra Snyder 		}
1195d3f620b2SIra Snyder 
1196b158471eSIra Snyder 		chan_dbg(chan, "request per-channel IRQ\n");
1197d3f620b2SIra Snyder 		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
1198d3f620b2SIra Snyder 				  "fsldma-chan", chan);
1199d3f620b2SIra Snyder 		if (ret) {
1200b158471eSIra Snyder 			chan_err(chan, "unable to request per-channel IRQ\n");
1201d3f620b2SIra Snyder 			goto out_unwind;
1202d3f620b2SIra Snyder 		}
1203d3f620b2SIra Snyder 	}
1204d3f620b2SIra Snyder 
1205d3f620b2SIra Snyder 	return 0;
1206d3f620b2SIra Snyder 
1207d3f620b2SIra Snyder out_unwind:
1208d3f620b2SIra Snyder 	for (/* none */; i >= 0; i--) {
1209d3f620b2SIra Snyder 		chan = fdev->chan[i];
1210d3f620b2SIra Snyder 		if (!chan)
1211d3f620b2SIra Snyder 			continue;
1212d3f620b2SIra Snyder 
1213aa570be6SMichael Ellerman 		if (!chan->irq)
1214d3f620b2SIra Snyder 			continue;
1215d3f620b2SIra Snyder 
1216d3f620b2SIra Snyder 		free_irq(chan->irq, chan);
1217d3f620b2SIra Snyder 	}
1218d3f620b2SIra Snyder 
1219d3f620b2SIra Snyder 	return ret;
1220d3f620b2SIra Snyder }
1221d3f620b2SIra Snyder 
1222a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/
1223a4f56d4bSIra Snyder /* OpenFirmware Subsystem                                                     */
1224a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/
1225a4f56d4bSIra Snyder 
1226463a1f8bSBill Pemberton static int fsl_dma_chan_probe(struct fsldma_device *fdev,
122777cd62e8STimur Tabi 	struct device_node *node, u32 feature, const char *compatible)
1228173acc7cSZhang Wei {
1229a1c03319SIra Snyder 	struct fsldma_chan *chan;
12304ce0e953SIra Snyder 	struct resource res;
1231173acc7cSZhang Wei 	int err;
1232173acc7cSZhang Wei 
1233173acc7cSZhang Wei 	/* alloc channel */
1234a1c03319SIra Snyder 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1235a1c03319SIra Snyder 	if (!chan) {
1236e7a29151SIra Snyder 		err = -ENOMEM;
1237e7a29151SIra Snyder 		goto out_return;
1238173acc7cSZhang Wei 	}
1239173acc7cSZhang Wei 
1240e7a29151SIra Snyder 	/* ioremap registers for use */
1241a1c03319SIra Snyder 	chan->regs = of_iomap(node, 0);
1242a1c03319SIra Snyder 	if (!chan->regs) {
1243e7a29151SIra Snyder 		dev_err(fdev->dev, "unable to ioremap registers\n");
1244e7a29151SIra Snyder 		err = -ENOMEM;
1245a1c03319SIra Snyder 		goto out_free_chan;
1246e7a29151SIra Snyder 	}
1247e7a29151SIra Snyder 
12484ce0e953SIra Snyder 	err = of_address_to_resource(node, 0, &res);
1249173acc7cSZhang Wei 	if (err) {
1250e7a29151SIra Snyder 		dev_err(fdev->dev, "unable to find 'reg' property\n");
1251e7a29151SIra Snyder 		goto out_iounmap_regs;
1252173acc7cSZhang Wei 	}
1253173acc7cSZhang Wei 
1254a1c03319SIra Snyder 	chan->feature = feature;
1255173acc7cSZhang Wei 	if (!fdev->feature)
1256a1c03319SIra Snyder 		fdev->feature = chan->feature;
1257173acc7cSZhang Wei 
1258e7a29151SIra Snyder 	/*
1259e7a29151SIra Snyder 	 * If the DMA device's feature is different than the feature
1260e7a29151SIra Snyder 	 * of its channels, report the bug
1261173acc7cSZhang Wei 	 */
1262a1c03319SIra Snyder 	WARN_ON(fdev->feature != chan->feature);
1263173acc7cSZhang Wei 
1264a1c03319SIra Snyder 	chan->dev = fdev->dev;
12658de7a7d9SHongbo Zhang 	chan->id = (res.start & 0xfff) < 0x300 ?
12668de7a7d9SHongbo Zhang 		   ((res.start - 0x100) & 0xfff) >> 7 :
12678de7a7d9SHongbo Zhang 		   ((res.start - 0x200) & 0xfff) >> 7;
1268a1c03319SIra Snyder 	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
1269e7a29151SIra Snyder 		dev_err(fdev->dev, "too many channels for device\n");
1270173acc7cSZhang Wei 		err = -EINVAL;
1271e7a29151SIra Snyder 		goto out_iounmap_regs;
1272173acc7cSZhang Wei 	}
1273173acc7cSZhang Wei 
1274a1c03319SIra Snyder 	fdev->chan[chan->id] = chan;
1275a1c03319SIra Snyder 	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
1276b158471eSIra Snyder 	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
1277e7a29151SIra Snyder 
1278e7a29151SIra Snyder 	/* Initialize the channel */
1279a1c03319SIra Snyder 	dma_init(chan);
1280173acc7cSZhang Wei 
1281173acc7cSZhang Wei 	/* Clear cdar registers */
1282a1c03319SIra Snyder 	set_cdar(chan, 0);
1283173acc7cSZhang Wei 
1284a1c03319SIra Snyder 	switch (chan->feature & FSL_DMA_IP_MASK) {
1285173acc7cSZhang Wei 	case FSL_DMA_IP_85XX:
1286a1c03319SIra Snyder 		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
1287173acc7cSZhang Wei 	case FSL_DMA_IP_83XX:
1288a1c03319SIra Snyder 		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
1289a1c03319SIra Snyder 		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
1290a1c03319SIra Snyder 		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
1291a1c03319SIra Snyder 		chan->set_request_count = fsl_chan_set_request_count;
1292173acc7cSZhang Wei 	}
1293173acc7cSZhang Wei 
1294a1c03319SIra Snyder 	spin_lock_init(&chan->desc_lock);
12959c3a50b7SIra Snyder 	INIT_LIST_HEAD(&chan->ld_pending);
12969c3a50b7SIra Snyder 	INIT_LIST_HEAD(&chan->ld_running);
129743452fadSHongbo Zhang 	INIT_LIST_HEAD(&chan->ld_completed);
1298f04cd407SIra Snyder 	chan->idle = true;
129914c6a333SHongbo Zhang #ifdef CONFIG_PM
130014c6a333SHongbo Zhang 	chan->pm_state = RUNNING;
130114c6a333SHongbo Zhang #endif
1302173acc7cSZhang Wei 
1303a1c03319SIra Snyder 	chan->common.device = &fdev->common;
13048ac69546SRussell King - ARM Linux 	dma_cookie_init(&chan->common);
1305173acc7cSZhang Wei 
1306d3f620b2SIra Snyder 	/* find the IRQ line, if it exists in the device tree */
1307a1c03319SIra Snyder 	chan->irq = irq_of_parse_and_map(node, 0);
1308d3f620b2SIra Snyder 
1309173acc7cSZhang Wei 	/* Add the channel to DMA device channel list */
1310a1c03319SIra Snyder 	list_add_tail(&chan->common.device_node, &fdev->common.channels);
1311173acc7cSZhang Wei 
1312a1c03319SIra Snyder 	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
1313aa570be6SMichael Ellerman 		 chan->irq ? chan->irq : fdev->irq);
1314173acc7cSZhang Wei 
1315173acc7cSZhang Wei 	return 0;
131651ee87f2SLi Yang 
1317e7a29151SIra Snyder out_iounmap_regs:
1318a1c03319SIra Snyder 	iounmap(chan->regs);
1319a1c03319SIra Snyder out_free_chan:
1320a1c03319SIra Snyder 	kfree(chan);
1321e7a29151SIra Snyder out_return:
1322173acc7cSZhang Wei 	return err;
1323173acc7cSZhang Wei }
1324173acc7cSZhang Wei 
1325a1c03319SIra Snyder static void fsl_dma_chan_remove(struct fsldma_chan *chan)
1326173acc7cSZhang Wei {
1327a1c03319SIra Snyder 	irq_dispose_mapping(chan->irq);
1328a1c03319SIra Snyder 	list_del(&chan->common.device_node);
1329a1c03319SIra Snyder 	iounmap(chan->regs);
1330a1c03319SIra Snyder 	kfree(chan);
1331173acc7cSZhang Wei }
1332173acc7cSZhang Wei 
1333463a1f8bSBill Pemberton static int fsldma_of_probe(struct platform_device *op)
1334173acc7cSZhang Wei {
1335a4f56d4bSIra Snyder 	struct fsldma_device *fdev;
133677cd62e8STimur Tabi 	struct device_node *child;
1337e7a29151SIra Snyder 	int err;
1338173acc7cSZhang Wei 
1339a4f56d4bSIra Snyder 	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
1340173acc7cSZhang Wei 	if (!fdev) {
1341e7a29151SIra Snyder 		err = -ENOMEM;
1342e7a29151SIra Snyder 		goto out_return;
1343173acc7cSZhang Wei 	}
1344e7a29151SIra Snyder 
1345e7a29151SIra Snyder 	fdev->dev = &op->dev;
1346173acc7cSZhang Wei 	INIT_LIST_HEAD(&fdev->common.channels);
1347173acc7cSZhang Wei 
1348e7a29151SIra Snyder 	/* ioremap the registers for use */
134961c7a080SGrant Likely 	fdev->regs = of_iomap(op->dev.of_node, 0);
1350e7a29151SIra Snyder 	if (!fdev->regs) {
1351e7a29151SIra Snyder 		dev_err(&op->dev, "unable to ioremap registers\n");
1352e7a29151SIra Snyder 		err = -ENOMEM;
1353585a1db1SArvind Yadav 		goto out_free;
1354173acc7cSZhang Wei 	}
1355173acc7cSZhang Wei 
1356d3f620b2SIra Snyder 	/* map the channel IRQ if it exists, but don't hookup the handler yet */
135761c7a080SGrant Likely 	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
1358d3f620b2SIra Snyder 
1359173acc7cSZhang Wei 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
1360c1433041SIra Snyder 	dma_cap_set(DMA_SG, fdev->common.cap_mask);
1361bbea0b6eSIra Snyder 	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
1362173acc7cSZhang Wei 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
1363173acc7cSZhang Wei 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
1364173acc7cSZhang Wei 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
1365c1433041SIra Snyder 	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
136607934481SLinus Walleij 	fdev->common.device_tx_status = fsl_tx_status;
1367173acc7cSZhang Wei 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1368b7f7552bSMaxime Ripard 	fdev->common.device_config = fsl_dma_device_config;
1369b7f7552bSMaxime Ripard 	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
1370e7a29151SIra Snyder 	fdev->common.dev = &op->dev;
1371173acc7cSZhang Wei 
137275dc1775SKevin Hao 	fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
137375dc1775SKevin Hao 	fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
137475dc1775SKevin Hao 	fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
137575dc1775SKevin Hao 	fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
137675dc1775SKevin Hao 
1377e2c8e425SLi Yang 	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
1378e2c8e425SLi Yang 
1379dd3daca1SJingoo Han 	platform_set_drvdata(op, fdev);
138077cd62e8STimur Tabi 
1381e7a29151SIra Snyder 	/*
1382e7a29151SIra Snyder 	 * We cannot use of_platform_bus_probe() because there is no
1383e7a29151SIra Snyder 	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
138477cd62e8STimur Tabi 	 * channel object.
138577cd62e8STimur Tabi 	 */
138661c7a080SGrant Likely 	for_each_child_of_node(op->dev.of_node, child) {
1387e7a29151SIra Snyder 		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
138877cd62e8STimur Tabi 			fsl_dma_chan_probe(fdev, child,
138977cd62e8STimur Tabi 				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
139077cd62e8STimur Tabi 				"fsl,eloplus-dma-channel");
1391e7a29151SIra Snyder 		}
1392e7a29151SIra Snyder 
1393e7a29151SIra Snyder 		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
139477cd62e8STimur Tabi 			fsl_dma_chan_probe(fdev, child,
139577cd62e8STimur Tabi 				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
139677cd62e8STimur Tabi 				"fsl,elo-dma-channel");
139777cd62e8STimur Tabi 		}
1398e7a29151SIra Snyder 	}
1399173acc7cSZhang Wei 
1400d3f620b2SIra Snyder 	/*
1401d3f620b2SIra Snyder 	 * Hookup the IRQ handler(s)
1402d3f620b2SIra Snyder 	 *
1403d3f620b2SIra Snyder 	 * If we have a per-controller interrupt, we prefer that to the
1404d3f620b2SIra Snyder 	 * per-channel interrupts to reduce the number of shared interrupt
1405d3f620b2SIra Snyder 	 * handlers on the same IRQ line
1406d3f620b2SIra Snyder 	 */
1407d3f620b2SIra Snyder 	err = fsldma_request_irqs(fdev);
1408d3f620b2SIra Snyder 	if (err) {
1409d3f620b2SIra Snyder 		dev_err(fdev->dev, "unable to request IRQs\n");
1410d3f620b2SIra Snyder 		goto out_free_fdev;
1411d3f620b2SIra Snyder 	}
1412d3f620b2SIra Snyder 
1413173acc7cSZhang Wei 	dma_async_device_register(&fdev->common);
1414173acc7cSZhang Wei 	return 0;
1415173acc7cSZhang Wei 
1416e7a29151SIra Snyder out_free_fdev:
1417d3f620b2SIra Snyder 	irq_dispose_mapping(fdev->irq);
1418585a1db1SArvind Yadav 	iounmap(fdev->regs);
1419585a1db1SArvind Yadav out_free:
1420173acc7cSZhang Wei 	kfree(fdev);
1421e7a29151SIra Snyder out_return:
1422173acc7cSZhang Wei 	return err;
1423173acc7cSZhang Wei }
1424173acc7cSZhang Wei 
14252dc11581SGrant Likely static int fsldma_of_remove(struct platform_device *op)
142677cd62e8STimur Tabi {
1427a4f56d4bSIra Snyder 	struct fsldma_device *fdev;
142877cd62e8STimur Tabi 	unsigned int i;
142977cd62e8STimur Tabi 
1430dd3daca1SJingoo Han 	fdev = platform_get_drvdata(op);
143177cd62e8STimur Tabi 	dma_async_device_unregister(&fdev->common);
143277cd62e8STimur Tabi 
1433d3f620b2SIra Snyder 	fsldma_free_irqs(fdev);
1434d3f620b2SIra Snyder 
1435e7a29151SIra Snyder 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
143677cd62e8STimur Tabi 		if (fdev->chan[i])
143777cd62e8STimur Tabi 			fsl_dma_chan_remove(fdev->chan[i]);
1438e7a29151SIra Snyder 	}
143977cd62e8STimur Tabi 
1440e7a29151SIra Snyder 	iounmap(fdev->regs);
144177cd62e8STimur Tabi 	kfree(fdev);
144277cd62e8STimur Tabi 
144377cd62e8STimur Tabi 	return 0;
144477cd62e8STimur Tabi }
144577cd62e8STimur Tabi 
144614c6a333SHongbo Zhang #ifdef CONFIG_PM
144714c6a333SHongbo Zhang static int fsldma_suspend_late(struct device *dev)
144814c6a333SHongbo Zhang {
144914c6a333SHongbo Zhang 	struct platform_device *pdev = to_platform_device(dev);
145014c6a333SHongbo Zhang 	struct fsldma_device *fdev = platform_get_drvdata(pdev);
145114c6a333SHongbo Zhang 	struct fsldma_chan *chan;
145214c6a333SHongbo Zhang 	int i;
145314c6a333SHongbo Zhang 
145414c6a333SHongbo Zhang 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
145514c6a333SHongbo Zhang 		chan = fdev->chan[i];
145614c6a333SHongbo Zhang 		if (!chan)
145714c6a333SHongbo Zhang 			continue;
145814c6a333SHongbo Zhang 
145914c6a333SHongbo Zhang 		spin_lock_bh(&chan->desc_lock);
146014c6a333SHongbo Zhang 		if (unlikely(!chan->idle))
146114c6a333SHongbo Zhang 			goto out;
146214c6a333SHongbo Zhang 		chan->regs_save.mr = get_mr(chan);
146314c6a333SHongbo Zhang 		chan->pm_state = SUSPENDED;
146414c6a333SHongbo Zhang 		spin_unlock_bh(&chan->desc_lock);
146514c6a333SHongbo Zhang 	}
146614c6a333SHongbo Zhang 	return 0;
146714c6a333SHongbo Zhang 
146814c6a333SHongbo Zhang out:
146914c6a333SHongbo Zhang 	for (; i >= 0; i--) {
147014c6a333SHongbo Zhang 		chan = fdev->chan[i];
147114c6a333SHongbo Zhang 		if (!chan)
147214c6a333SHongbo Zhang 			continue;
147314c6a333SHongbo Zhang 		chan->pm_state = RUNNING;
147414c6a333SHongbo Zhang 		spin_unlock_bh(&chan->desc_lock);
147514c6a333SHongbo Zhang 	}
147614c6a333SHongbo Zhang 	return -EBUSY;
147714c6a333SHongbo Zhang }
147814c6a333SHongbo Zhang 
147914c6a333SHongbo Zhang static int fsldma_resume_early(struct device *dev)
148014c6a333SHongbo Zhang {
148114c6a333SHongbo Zhang 	struct platform_device *pdev = to_platform_device(dev);
148214c6a333SHongbo Zhang 	struct fsldma_device *fdev = platform_get_drvdata(pdev);
148314c6a333SHongbo Zhang 	struct fsldma_chan *chan;
148414c6a333SHongbo Zhang 	u32 mode;
148514c6a333SHongbo Zhang 	int i;
148614c6a333SHongbo Zhang 
148714c6a333SHongbo Zhang 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
148814c6a333SHongbo Zhang 		chan = fdev->chan[i];
148914c6a333SHongbo Zhang 		if (!chan)
149014c6a333SHongbo Zhang 			continue;
149114c6a333SHongbo Zhang 
149214c6a333SHongbo Zhang 		spin_lock_bh(&chan->desc_lock);
149314c6a333SHongbo Zhang 		mode = chan->regs_save.mr
149414c6a333SHongbo Zhang 			& ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
149514c6a333SHongbo Zhang 		set_mr(chan, mode);
149614c6a333SHongbo Zhang 		chan->pm_state = RUNNING;
149714c6a333SHongbo Zhang 		spin_unlock_bh(&chan->desc_lock);
149814c6a333SHongbo Zhang 	}
149914c6a333SHongbo Zhang 
150014c6a333SHongbo Zhang 	return 0;
150114c6a333SHongbo Zhang }
150214c6a333SHongbo Zhang 
/*
 * Power-management callbacks: save the channel mode registers late in
 * suspend and restore them early in resume (see the two handlers above).
 */
static const struct dev_pm_ops fsldma_pm_ops = {
	.suspend_late	= fsldma_suspend_late,
	.resume_early	= fsldma_resume_early,
};
150714c6a333SHongbo Zhang #endif
150814c6a333SHongbo Zhang 
/* Device tree compatibles for the supported Elo-family DMA controllers */
static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,elo3-dma", },
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);
1516173acc7cSZhang Wei 
/* Platform driver glue binding the OF match table to probe/remove */
static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.of_match_table = fsldma_of_ids,
#ifdef CONFIG_PM
		.pm = &fsldma_pm_ops,
#endif
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};
1528173acc7cSZhang Wei 
1529a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/
1530a4f56d4bSIra Snyder /* Module Init / Exit                                                         */
1531a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/
1532a4f56d4bSIra Snyder 
/* Module entry point: register the platform driver with the core */
static __init int fsldma_init(void)
{
	pr_info("Freescale Elo series DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}
1538173acc7cSZhang Wei 
/* Module exit point: unregister the platform driver */
static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}
154377cd62e8STimur Tabi 
1544a4f56d4bSIra Snyder subsys_initcall(fsldma_init);
1545a4f56d4bSIra Snyder module_exit(fsldma_exit);
154677cd62e8STimur Tabi 
15478de7a7d9SHongbo Zhang MODULE_DESCRIPTION("Freescale Elo series DMA driver");
154877cd62e8STimur Tabi MODULE_LICENSE("GPL");
1549