xref: /openbmc/linux/drivers/dma/fsldma.c (revision dc8d4091)
1173acc7cSZhang Wei /*
2173acc7cSZhang Wei  * Freescale MPC85xx, MPC83xx DMA Engine support
3173acc7cSZhang Wei  *
4e2c8e425SLi Yang  * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
5173acc7cSZhang Wei  *
6173acc7cSZhang Wei  * Author:
7173acc7cSZhang Wei  *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
8173acc7cSZhang Wei  *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
9173acc7cSZhang Wei  *
10173acc7cSZhang Wei  * Description:
11173acc7cSZhang Wei  *   DMA engine driver for Freescale MPC8540 DMA controller, which is
 *   also fits MPC8560, MPC8555, MPC8548, MPC8641, etc.
13c2e07b3aSStefan Weil  *   The support for MPC8349 DMA controller is also added.
14173acc7cSZhang Wei  *
15a7aea373SIra W. Snyder  * This driver instructs the DMA controller to issue the PCI Read Multiple
16a7aea373SIra W. Snyder  * command for PCI read operations, instead of using the default PCI Read Line
17a7aea373SIra W. Snyder  * command. Please be aware that this setting may result in read pre-fetching
18a7aea373SIra W. Snyder  * on some platforms.
19a7aea373SIra W. Snyder  *
20173acc7cSZhang Wei  * This is free software; you can redistribute it and/or modify
21173acc7cSZhang Wei  * it under the terms of the GNU General Public License as published by
22173acc7cSZhang Wei  * the Free Software Foundation; either version 2 of the License, or
23173acc7cSZhang Wei  * (at your option) any later version.
24173acc7cSZhang Wei  *
25173acc7cSZhang Wei  */
26173acc7cSZhang Wei 
27173acc7cSZhang Wei #include <linux/init.h>
28173acc7cSZhang Wei #include <linux/module.h>
29173acc7cSZhang Wei #include <linux/pci.h>
305a0e3ad6STejun Heo #include <linux/slab.h>
31173acc7cSZhang Wei #include <linux/interrupt.h>
32173acc7cSZhang Wei #include <linux/dmaengine.h>
33173acc7cSZhang Wei #include <linux/delay.h>
34173acc7cSZhang Wei #include <linux/dma-mapping.h>
35173acc7cSZhang Wei #include <linux/dmapool.h>
36173acc7cSZhang Wei #include <linux/of_platform.h>
37173acc7cSZhang Wei 
38173acc7cSZhang Wei #include "fsldma.h"
39173acc7cSZhang Wei 
/* Per-channel logging helpers: prefix every message with the channel name */
#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

/* Shared out-of-memory message for link descriptor allocation failures */
static const char msg_ld_oom[] = "No free memory for link descriptor";
46c1433041SIra Snyder 
47e8bd84dfSIra Snyder /*
48e8bd84dfSIra Snyder  * Register Helpers
49173acc7cSZhang Wei  */
50173acc7cSZhang Wei 
/* Write @val to the channel's status register (SR) */
static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}
55173acc7cSZhang Wei 
/* Read the channel's status register (SR) */
static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}
60173acc7cSZhang Wei 
/*
 * Program the current descriptor address register (CDAR).
 * The snoop-enable bit (FSL_DMA_SNEN) is OR'd into the descriptor address.
 */
static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}
65e8bd84dfSIra Snyder 
/*
 * Read the current descriptor address register (CDAR), masking off the
 * snoop-enable bit so only the plain descriptor address is returned.
 */
static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}
70e8bd84dfSIra Snyder 
/* Read the channel's byte count register (BCR) */
static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}
75e8bd84dfSIra Snyder 
76e8bd84dfSIra Snyder /*
77e8bd84dfSIra Snyder  * Descriptor Helpers
78e8bd84dfSIra Snyder  */
79e8bd84dfSIra Snyder 
/* Store the transfer byte count into a hardware link descriptor */
static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}
85173acc7cSZhang Wei 
/* Read back the transfer byte count from a software descriptor's hw part */
static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}
909c4d1e7bSIra Snyder 
91a1c03319SIra Snyder static void set_desc_src(struct fsldma_chan *chan,
92173acc7cSZhang Wei 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
93173acc7cSZhang Wei {
94173acc7cSZhang Wei 	u64 snoop_bits;
95173acc7cSZhang Wei 
96a1c03319SIra Snyder 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
97173acc7cSZhang Wei 		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
98a1c03319SIra Snyder 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
99173acc7cSZhang Wei }
100173acc7cSZhang Wei 
1019c4d1e7bSIra Snyder static dma_addr_t get_desc_src(struct fsldma_chan *chan,
1029c4d1e7bSIra Snyder 			       struct fsl_desc_sw *desc)
1039c4d1e7bSIra Snyder {
1049c4d1e7bSIra Snyder 	u64 snoop_bits;
1059c4d1e7bSIra Snyder 
1069c4d1e7bSIra Snyder 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
1079c4d1e7bSIra Snyder 		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
1089c4d1e7bSIra Snyder 	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
1099c4d1e7bSIra Snyder }
1109c4d1e7bSIra Snyder 
111a1c03319SIra Snyder static void set_desc_dst(struct fsldma_chan *chan,
112738f5f7eSIra Snyder 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
113173acc7cSZhang Wei {
114173acc7cSZhang Wei 	u64 snoop_bits;
115173acc7cSZhang Wei 
116a1c03319SIra Snyder 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
117173acc7cSZhang Wei 		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
118a1c03319SIra Snyder 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
119173acc7cSZhang Wei }
120173acc7cSZhang Wei 
1219c4d1e7bSIra Snyder static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
1229c4d1e7bSIra Snyder 			       struct fsl_desc_sw *desc)
1239c4d1e7bSIra Snyder {
1249c4d1e7bSIra Snyder 	u64 snoop_bits;
1259c4d1e7bSIra Snyder 
1269c4d1e7bSIra Snyder 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
1279c4d1e7bSIra Snyder 		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
1289c4d1e7bSIra Snyder 	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
1299c4d1e7bSIra Snyder }
1309c4d1e7bSIra Snyder 
131a1c03319SIra Snyder static void set_desc_next(struct fsldma_chan *chan,
132173acc7cSZhang Wei 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
133173acc7cSZhang Wei {
134173acc7cSZhang Wei 	u64 snoop_bits;
135173acc7cSZhang Wei 
136a1c03319SIra Snyder 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
137173acc7cSZhang Wei 		? FSL_DMA_SNEN : 0;
138a1c03319SIra Snyder 	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
139173acc7cSZhang Wei }
140173acc7cSZhang Wei 
14131f4306cSIra Snyder static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
142173acc7cSZhang Wei {
143e8bd84dfSIra Snyder 	u64 snoop_bits;
144e8bd84dfSIra Snyder 
145e8bd84dfSIra Snyder 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
146e8bd84dfSIra Snyder 		? FSL_DMA_SNEN : 0;
147e8bd84dfSIra Snyder 
148e8bd84dfSIra Snyder 	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
149e8bd84dfSIra Snyder 		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
150e8bd84dfSIra Snyder 			| snoop_bits, 64);
151173acc7cSZhang Wei }
152173acc7cSZhang Wei 
153e8bd84dfSIra Snyder /*
154e8bd84dfSIra Snyder  * DMA Engine Hardware Control Helpers
155e8bd84dfSIra Snyder  */
156173acc7cSZhang Wei 
/*
 * Reset the channel and program the default mode register for the
 * detected IP block (85xx vs 83xx).
 */
static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}
182f79abb62SZhang Wei 
183a1c03319SIra Snyder static int dma_is_idle(struct fsldma_chan *chan)
184173acc7cSZhang Wei {
185a1c03319SIra Snyder 	u32 sr = get_sr(chan);
186173acc7cSZhang Wei 	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
187173acc7cSZhang Wei }
188173acc7cSZhang Wei 
/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		/* external pause: zero BCR and enable external master pause */
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		/* external start: transfer begins when the start pin asserts */
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		/* software start: setting CS begins the transfer immediately */
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
218173acc7cSZhang Wei 
/*
 * Halt the DMA controller: abort the current transfer, clear the start
 * bits, then poll for the channel to go idle.  Logs an error if the
 * channel does not become idle within the timeout.
 */
static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* abort the current transfer via the CA bit */
	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* clear start/abort bits so the channel stays stopped */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* poll up to ~1ms (100 x 10us) for the controller to go idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}
241173acc7cSZhang Wei 
242173acc7cSZhang Wei /**
243173acc7cSZhang Wei  * fsl_chan_set_src_loop_size - Set source address hold transfer size
244a1c03319SIra Snyder  * @chan : Freescale DMA channel
245173acc7cSZhang Wei  * @size     : Address loop size, 0 for disable loop
246173acc7cSZhang Wei  *
247173acc7cSZhang Wei  * The set source address hold transfer size. The source
248173acc7cSZhang Wei  * address hold or loop transfer size is when the DMA transfer
249173acc7cSZhang Wei  * data from source address (SA), if the loop size is 4, the DMA will
250173acc7cSZhang Wei  * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
251173acc7cSZhang Wei  * SA + 1 ... and so on.
252173acc7cSZhang Wei  */
253a1c03319SIra Snyder static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
254173acc7cSZhang Wei {
255272ca655SIra Snyder 	u32 mode;
256272ca655SIra Snyder 
257a1c03319SIra Snyder 	mode = DMA_IN(chan, &chan->regs->mr, 32);
258272ca655SIra Snyder 
259173acc7cSZhang Wei 	switch (size) {
260173acc7cSZhang Wei 	case 0:
261272ca655SIra Snyder 		mode &= ~FSL_DMA_MR_SAHE;
262173acc7cSZhang Wei 		break;
263173acc7cSZhang Wei 	case 1:
264173acc7cSZhang Wei 	case 2:
265173acc7cSZhang Wei 	case 4:
266173acc7cSZhang Wei 	case 8:
267272ca655SIra Snyder 		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
268173acc7cSZhang Wei 		break;
269173acc7cSZhang Wei 	}
270272ca655SIra Snyder 
271a1c03319SIra Snyder 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
272173acc7cSZhang Wei }
273173acc7cSZhang Wei 
274173acc7cSZhang Wei /**
275738f5f7eSIra Snyder  * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
276a1c03319SIra Snyder  * @chan : Freescale DMA channel
277173acc7cSZhang Wei  * @size     : Address loop size, 0 for disable loop
278173acc7cSZhang Wei  *
279173acc7cSZhang Wei  * The set destination address hold transfer size. The destination
280173acc7cSZhang Wei  * address hold or loop transfer size is when the DMA transfer
281173acc7cSZhang Wei  * data to destination address (TA), if the loop size is 4, the DMA will
282173acc7cSZhang Wei  * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
283173acc7cSZhang Wei  * TA + 1 ... and so on.
284173acc7cSZhang Wei  */
285a1c03319SIra Snyder static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
286173acc7cSZhang Wei {
287272ca655SIra Snyder 	u32 mode;
288272ca655SIra Snyder 
289a1c03319SIra Snyder 	mode = DMA_IN(chan, &chan->regs->mr, 32);
290272ca655SIra Snyder 
291173acc7cSZhang Wei 	switch (size) {
292173acc7cSZhang Wei 	case 0:
293272ca655SIra Snyder 		mode &= ~FSL_DMA_MR_DAHE;
294173acc7cSZhang Wei 		break;
295173acc7cSZhang Wei 	case 1:
296173acc7cSZhang Wei 	case 2:
297173acc7cSZhang Wei 	case 4:
298173acc7cSZhang Wei 	case 8:
299272ca655SIra Snyder 		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
300173acc7cSZhang Wei 		break;
301173acc7cSZhang Wei 	}
302272ca655SIra Snyder 
303a1c03319SIra Snyder 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
304173acc7cSZhang Wei }
305173acc7cSZhang Wei 
306173acc7cSZhang Wei /**
307e6c7ecb6SIra Snyder  * fsl_chan_set_request_count - Set DMA Request Count for external control
308a1c03319SIra Snyder  * @chan : Freescale DMA channel
309e6c7ecb6SIra Snyder  * @size     : Number of bytes to transfer in a single request
310173acc7cSZhang Wei  *
311e6c7ecb6SIra Snyder  * The Freescale DMA channel can be controlled by the external signal DREQ#.
312e6c7ecb6SIra Snyder  * The DMA request count is how many bytes are allowed to transfer before
313e6c7ecb6SIra Snyder  * pausing the channel, after which a new assertion of DREQ# resumes channel
314e6c7ecb6SIra Snyder  * operation.
315e6c7ecb6SIra Snyder  *
316e6c7ecb6SIra Snyder  * A size of 0 disables external pause control. The maximum size is 1024.
317173acc7cSZhang Wei  */
318a1c03319SIra Snyder static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
319173acc7cSZhang Wei {
320272ca655SIra Snyder 	u32 mode;
321272ca655SIra Snyder 
322e6c7ecb6SIra Snyder 	BUG_ON(size > 1024);
323272ca655SIra Snyder 
324a1c03319SIra Snyder 	mode = DMA_IN(chan, &chan->regs->mr, 32);
325272ca655SIra Snyder 	mode |= (__ilog2(size) << 24) & 0x0f000000;
326272ca655SIra Snyder 
327a1c03319SIra Snyder 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
328e6c7ecb6SIra Snyder }
329e6c7ecb6SIra Snyder 
330e6c7ecb6SIra Snyder /**
331e6c7ecb6SIra Snyder  * fsl_chan_toggle_ext_pause - Toggle channel external pause status
332a1c03319SIra Snyder  * @chan : Freescale DMA channel
333e6c7ecb6SIra Snyder  * @enable   : 0 is disabled, 1 is enabled.
334e6c7ecb6SIra Snyder  *
335e6c7ecb6SIra Snyder  * The Freescale DMA channel can be controlled by the external signal DREQ#.
336e6c7ecb6SIra Snyder  * The DMA Request Count feature should be used in addition to this feature
337e6c7ecb6SIra Snyder  * to set the number of bytes to transfer before pausing the channel.
338e6c7ecb6SIra Snyder  */
339a1c03319SIra Snyder static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
340e6c7ecb6SIra Snyder {
341e6c7ecb6SIra Snyder 	if (enable)
342a1c03319SIra Snyder 		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
343e6c7ecb6SIra Snyder 	else
344a1c03319SIra Snyder 		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
345173acc7cSZhang Wei }
346173acc7cSZhang Wei 
347173acc7cSZhang Wei /**
348173acc7cSZhang Wei  * fsl_chan_toggle_ext_start - Toggle channel external start status
349a1c03319SIra Snyder  * @chan : Freescale DMA channel
350173acc7cSZhang Wei  * @enable   : 0 is disabled, 1 is enabled.
351173acc7cSZhang Wei  *
352173acc7cSZhang Wei  * If enable the external start, the channel can be started by an
353173acc7cSZhang Wei  * external DMA start pin. So the dma_start() does not start the
354173acc7cSZhang Wei  * transfer immediately. The DMA channel will wait for the
355173acc7cSZhang Wei  * control pin asserted.
356173acc7cSZhang Wei  */
357a1c03319SIra Snyder static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
358173acc7cSZhang Wei {
359173acc7cSZhang Wei 	if (enable)
360a1c03319SIra Snyder 		chan->feature |= FSL_DMA_CHAN_START_EXT;
361173acc7cSZhang Wei 	else
362a1c03319SIra Snyder 		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
363173acc7cSZhang Wei }
364173acc7cSZhang Wei 
36531f4306cSIra Snyder static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
3669c3a50b7SIra Snyder {
3679c3a50b7SIra Snyder 	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
3689c3a50b7SIra Snyder 
3699c3a50b7SIra Snyder 	if (list_empty(&chan->ld_pending))
3709c3a50b7SIra Snyder 		goto out_splice;
3719c3a50b7SIra Snyder 
3729c3a50b7SIra Snyder 	/*
3739c3a50b7SIra Snyder 	 * Add the hardware descriptor to the chain of hardware descriptors
3749c3a50b7SIra Snyder 	 * that already exists in memory.
3759c3a50b7SIra Snyder 	 *
3769c3a50b7SIra Snyder 	 * This will un-set the EOL bit of the existing transaction, and the
3779c3a50b7SIra Snyder 	 * last link in this transaction will become the EOL descriptor.
3789c3a50b7SIra Snyder 	 */
3799c3a50b7SIra Snyder 	set_desc_next(chan, &tail->hw, desc->async_tx.phys);
3809c3a50b7SIra Snyder 
3819c3a50b7SIra Snyder 	/*
3829c3a50b7SIra Snyder 	 * Add the software descriptor and all children to the list
3839c3a50b7SIra Snyder 	 * of pending transactions
3849c3a50b7SIra Snyder 	 */
3859c3a50b7SIra Snyder out_splice:
3869c3a50b7SIra Snyder 	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
3879c3a50b7SIra Snyder }
3889c3a50b7SIra Snyder 
/*
 * fsl_dma_tx_submit - queue a prepared transaction on the pending list
 * @tx: the transaction's async_tx descriptor
 *
 * Assigns a cookie to every software descriptor in the transaction and
 * appends the whole chain to the channel's pending queue, all under the
 * descriptor lock.  The hardware is not started here.
 *
 * Return: the cookie assigned to the last descriptor.
 */
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		/* on wraparound, restart at DMA_MIN_COOKIE (skip 0/negatives) */
		if (cookie < DMA_MIN_COOKIE)
			cookie = DMA_MIN_COOKIE;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}
421173acc7cSZhang Wei 
422173acc7cSZhang Wei /**
423173acc7cSZhang Wei  * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
424a1c03319SIra Snyder  * @chan : Freescale DMA channel
425173acc7cSZhang Wei  *
426173acc7cSZhang Wei  * Return - The descriptor allocated. NULL for failed.
427173acc7cSZhang Wei  */
42831f4306cSIra Snyder static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
429173acc7cSZhang Wei {
4309c3a50b7SIra Snyder 	struct fsl_desc_sw *desc;
431173acc7cSZhang Wei 	dma_addr_t pdesc;
432173acc7cSZhang Wei 
4339c3a50b7SIra Snyder 	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
4349c3a50b7SIra Snyder 	if (!desc) {
435b158471eSIra Snyder 		chan_dbg(chan, "out of memory for link descriptor\n");
4369c3a50b7SIra Snyder 		return NULL;
437173acc7cSZhang Wei 	}
438173acc7cSZhang Wei 
4399c3a50b7SIra Snyder 	memset(desc, 0, sizeof(*desc));
4409c3a50b7SIra Snyder 	INIT_LIST_HEAD(&desc->tx_list);
4419c3a50b7SIra Snyder 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
4429c3a50b7SIra Snyder 	desc->async_tx.tx_submit = fsl_dma_tx_submit;
4439c3a50b7SIra Snyder 	desc->async_tx.phys = pdesc;
4449c3a50b7SIra Snyder 
4450ab09c36SIra Snyder #ifdef FSL_DMA_LD_DEBUG
4460ab09c36SIra Snyder 	chan_dbg(chan, "LD %p allocated\n", desc);
4470ab09c36SIra Snyder #endif
4480ab09c36SIra Snyder 
4499c3a50b7SIra Snyder 	return desc;
450173acc7cSZhang Wei }
451173acc7cSZhang Wei 
452173acc7cSZhang Wei /**
453173acc7cSZhang Wei  * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
454a1c03319SIra Snyder  * @chan : Freescale DMA channel
455173acc7cSZhang Wei  *
456173acc7cSZhang Wei  * This function will create a dma pool for descriptor allocation.
457173acc7cSZhang Wei  *
458173acc7cSZhang Wei  * Return - The number of descriptors allocated.
459173acc7cSZhang Wei  */
460a1c03319SIra Snyder static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
461173acc7cSZhang Wei {
462a1c03319SIra Snyder 	struct fsldma_chan *chan = to_fsl_chan(dchan);
46377cd62e8STimur Tabi 
46477cd62e8STimur Tabi 	/* Has this channel already been allocated? */
465a1c03319SIra Snyder 	if (chan->desc_pool)
46677cd62e8STimur Tabi 		return 1;
467173acc7cSZhang Wei 
4689c3a50b7SIra Snyder 	/*
4699c3a50b7SIra Snyder 	 * We need the descriptor to be aligned to 32bytes
470173acc7cSZhang Wei 	 * for meeting FSL DMA specification requirement.
471173acc7cSZhang Wei 	 */
472b158471eSIra Snyder 	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
4739c3a50b7SIra Snyder 					  sizeof(struct fsl_desc_sw),
4749c3a50b7SIra Snyder 					  __alignof__(struct fsl_desc_sw), 0);
475a1c03319SIra Snyder 	if (!chan->desc_pool) {
476b158471eSIra Snyder 		chan_err(chan, "unable to allocate descriptor pool\n");
4779c3a50b7SIra Snyder 		return -ENOMEM;
478173acc7cSZhang Wei 	}
479173acc7cSZhang Wei 
4809c3a50b7SIra Snyder 	/* there is at least one descriptor free to be allocated */
481173acc7cSZhang Wei 	return 1;
482173acc7cSZhang Wei }
483173acc7cSZhang Wei 
484173acc7cSZhang Wei /**
4859c3a50b7SIra Snyder  * fsldma_free_desc_list - Free all descriptors in a queue
4869c3a50b7SIra Snyder  * @chan: Freescae DMA channel
4879c3a50b7SIra Snyder  * @list: the list to free
4889c3a50b7SIra Snyder  *
4899c3a50b7SIra Snyder  * LOCKING: must hold chan->desc_lock
4909c3a50b7SIra Snyder  */
4919c3a50b7SIra Snyder static void fsldma_free_desc_list(struct fsldma_chan *chan,
4929c3a50b7SIra Snyder 				  struct list_head *list)
4939c3a50b7SIra Snyder {
4949c3a50b7SIra Snyder 	struct fsl_desc_sw *desc, *_desc;
4959c3a50b7SIra Snyder 
4969c3a50b7SIra Snyder 	list_for_each_entry_safe(desc, _desc, list, node) {
4979c3a50b7SIra Snyder 		list_del(&desc->node);
4980ab09c36SIra Snyder #ifdef FSL_DMA_LD_DEBUG
4990ab09c36SIra Snyder 		chan_dbg(chan, "LD %p free\n", desc);
5000ab09c36SIra Snyder #endif
5019c3a50b7SIra Snyder 		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
5029c3a50b7SIra Snyder 	}
5039c3a50b7SIra Snyder }
5049c3a50b7SIra Snyder 
5059c3a50b7SIra Snyder static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
5069c3a50b7SIra Snyder 					  struct list_head *list)
5079c3a50b7SIra Snyder {
5089c3a50b7SIra Snyder 	struct fsl_desc_sw *desc, *_desc;
5099c3a50b7SIra Snyder 
5109c3a50b7SIra Snyder 	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
5119c3a50b7SIra Snyder 		list_del(&desc->node);
5120ab09c36SIra Snyder #ifdef FSL_DMA_LD_DEBUG
5130ab09c36SIra Snyder 		chan_dbg(chan, "LD %p free\n", desc);
5140ab09c36SIra Snyder #endif
5159c3a50b7SIra Snyder 		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
5169c3a50b7SIra Snyder 	}
5179c3a50b7SIra Snyder }
5189c3a50b7SIra Snyder 
5199c3a50b7SIra Snyder /**
520173acc7cSZhang Wei  * fsl_dma_free_chan_resources - Free all resources of the channel.
521a1c03319SIra Snyder  * @chan : Freescale DMA channel
522173acc7cSZhang Wei  */
523a1c03319SIra Snyder static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
524173acc7cSZhang Wei {
525a1c03319SIra Snyder 	struct fsldma_chan *chan = to_fsl_chan(dchan);
526173acc7cSZhang Wei 	unsigned long flags;
527173acc7cSZhang Wei 
528b158471eSIra Snyder 	chan_dbg(chan, "free all channel resources\n");
529a1c03319SIra Snyder 	spin_lock_irqsave(&chan->desc_lock, flags);
5309c3a50b7SIra Snyder 	fsldma_free_desc_list(chan, &chan->ld_pending);
5319c3a50b7SIra Snyder 	fsldma_free_desc_list(chan, &chan->ld_running);
532a1c03319SIra Snyder 	spin_unlock_irqrestore(&chan->desc_lock, flags);
53377cd62e8STimur Tabi 
5349c3a50b7SIra Snyder 	dma_pool_destroy(chan->desc_pool);
535a1c03319SIra Snyder 	chan->desc_pool = NULL;
536173acc7cSZhang Wei }
537173acc7cSZhang Wei 
5382187c269SZhang Wei static struct dma_async_tx_descriptor *
539a1c03319SIra Snyder fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
5402187c269SZhang Wei {
541a1c03319SIra Snyder 	struct fsldma_chan *chan;
5422187c269SZhang Wei 	struct fsl_desc_sw *new;
5432187c269SZhang Wei 
544a1c03319SIra Snyder 	if (!dchan)
5452187c269SZhang Wei 		return NULL;
5462187c269SZhang Wei 
547a1c03319SIra Snyder 	chan = to_fsl_chan(dchan);
5482187c269SZhang Wei 
549a1c03319SIra Snyder 	new = fsl_dma_alloc_descriptor(chan);
5502187c269SZhang Wei 	if (!new) {
551b158471eSIra Snyder 		chan_err(chan, "%s\n", msg_ld_oom);
5522187c269SZhang Wei 		return NULL;
5532187c269SZhang Wei 	}
5542187c269SZhang Wei 
5552187c269SZhang Wei 	new->async_tx.cookie = -EBUSY;
556636bdeaaSDan Williams 	new->async_tx.flags = flags;
5572187c269SZhang Wei 
558f79abb62SZhang Wei 	/* Insert the link descriptor to the LD ring */
559eda34234SDan Williams 	list_add_tail(&new->node, &new->tx_list);
560f79abb62SZhang Wei 
5612187c269SZhang Wei 	/* Set End-of-link to the last link descriptor of new list */
562a1c03319SIra Snyder 	set_ld_eol(chan, new);
5632187c269SZhang Wei 
5642187c269SZhang Wei 	return &new->async_tx;
5652187c269SZhang Wei }
5662187c269SZhang Wei 
/*
 * fsl_dma_prep_memcpy - prepare a chained memcpy transaction
 * @dchan: generic DMA channel
 * @dma_dst: destination DMA address
 * @dma_src: source DMA address
 * @len: total number of bytes to copy
 * @flags: async_tx flags for the transaction
 *
 * Splits @len into FSL_DMA_BCR_MAX_CNT-sized link descriptors, chains
 * them together, and returns the first.  Returns NULL on bad arguments
 * or allocation failure; descriptors already allocated are returned to
 * the pool (in reverse order) on the failure path.
 */
static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		/* each descriptor moves at most FSL_DMA_BCR_MAX_CNT bytes */
		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		/* chain each descriptor after the previous one */
		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
631173acc7cSZhang Wei 
632c1433041SIra Snyder static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
633c1433041SIra Snyder 	struct scatterlist *dst_sg, unsigned int dst_nents,
634c1433041SIra Snyder 	struct scatterlist *src_sg, unsigned int src_nents,
635c1433041SIra Snyder 	unsigned long flags)
636c1433041SIra Snyder {
637c1433041SIra Snyder 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
638c1433041SIra Snyder 	struct fsldma_chan *chan = to_fsl_chan(dchan);
639c1433041SIra Snyder 	size_t dst_avail, src_avail;
640c1433041SIra Snyder 	dma_addr_t dst, src;
641c1433041SIra Snyder 	size_t len;
642c1433041SIra Snyder 
643c1433041SIra Snyder 	/* basic sanity checks */
644c1433041SIra Snyder 	if (dst_nents == 0 || src_nents == 0)
645c1433041SIra Snyder 		return NULL;
646c1433041SIra Snyder 
647c1433041SIra Snyder 	if (dst_sg == NULL || src_sg == NULL)
648c1433041SIra Snyder 		return NULL;
649c1433041SIra Snyder 
650c1433041SIra Snyder 	/*
651c1433041SIra Snyder 	 * TODO: should we check that both scatterlists have the same
652c1433041SIra Snyder 	 * TODO: number of bytes in total? Is that really an error?
653c1433041SIra Snyder 	 */
654c1433041SIra Snyder 
655c1433041SIra Snyder 	/* get prepared for the loop */
656c1433041SIra Snyder 	dst_avail = sg_dma_len(dst_sg);
657c1433041SIra Snyder 	src_avail = sg_dma_len(src_sg);
658c1433041SIra Snyder 
659c1433041SIra Snyder 	/* run until we are out of scatterlist entries */
660c1433041SIra Snyder 	while (true) {
661c1433041SIra Snyder 
662c1433041SIra Snyder 		/* create the largest transaction possible */
663c1433041SIra Snyder 		len = min_t(size_t, src_avail, dst_avail);
664c1433041SIra Snyder 		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
665c1433041SIra Snyder 		if (len == 0)
666c1433041SIra Snyder 			goto fetch;
667c1433041SIra Snyder 
668c1433041SIra Snyder 		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
669c1433041SIra Snyder 		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
670c1433041SIra Snyder 
671c1433041SIra Snyder 		/* allocate and populate the descriptor */
672c1433041SIra Snyder 		new = fsl_dma_alloc_descriptor(chan);
673c1433041SIra Snyder 		if (!new) {
674b158471eSIra Snyder 			chan_err(chan, "%s\n", msg_ld_oom);
675c1433041SIra Snyder 			goto fail;
676c1433041SIra Snyder 		}
677c1433041SIra Snyder 
678c1433041SIra Snyder 		set_desc_cnt(chan, &new->hw, len);
679c1433041SIra Snyder 		set_desc_src(chan, &new->hw, src);
680c1433041SIra Snyder 		set_desc_dst(chan, &new->hw, dst);
681c1433041SIra Snyder 
682c1433041SIra Snyder 		if (!first)
683c1433041SIra Snyder 			first = new;
684c1433041SIra Snyder 		else
685c1433041SIra Snyder 			set_desc_next(chan, &prev->hw, new->async_tx.phys);
686c1433041SIra Snyder 
687c1433041SIra Snyder 		new->async_tx.cookie = 0;
688c1433041SIra Snyder 		async_tx_ack(&new->async_tx);
689c1433041SIra Snyder 		prev = new;
690c1433041SIra Snyder 
691c1433041SIra Snyder 		/* Insert the link descriptor to the LD ring */
692c1433041SIra Snyder 		list_add_tail(&new->node, &first->tx_list);
693c1433041SIra Snyder 
694c1433041SIra Snyder 		/* update metadata */
695c1433041SIra Snyder 		dst_avail -= len;
696c1433041SIra Snyder 		src_avail -= len;
697c1433041SIra Snyder 
698c1433041SIra Snyder fetch:
699c1433041SIra Snyder 		/* fetch the next dst scatterlist entry */
700c1433041SIra Snyder 		if (dst_avail == 0) {
701c1433041SIra Snyder 
702c1433041SIra Snyder 			/* no more entries: we're done */
703c1433041SIra Snyder 			if (dst_nents == 0)
704c1433041SIra Snyder 				break;
705c1433041SIra Snyder 
706c1433041SIra Snyder 			/* fetch the next entry: if there are no more: done */
707c1433041SIra Snyder 			dst_sg = sg_next(dst_sg);
708c1433041SIra Snyder 			if (dst_sg == NULL)
709c1433041SIra Snyder 				break;
710c1433041SIra Snyder 
711c1433041SIra Snyder 			dst_nents--;
712c1433041SIra Snyder 			dst_avail = sg_dma_len(dst_sg);
713c1433041SIra Snyder 		}
714c1433041SIra Snyder 
715c1433041SIra Snyder 		/* fetch the next src scatterlist entry */
716c1433041SIra Snyder 		if (src_avail == 0) {
717c1433041SIra Snyder 
718c1433041SIra Snyder 			/* no more entries: we're done */
719c1433041SIra Snyder 			if (src_nents == 0)
720c1433041SIra Snyder 				break;
721c1433041SIra Snyder 
722c1433041SIra Snyder 			/* fetch the next entry: if there are no more: done */
723c1433041SIra Snyder 			src_sg = sg_next(src_sg);
724c1433041SIra Snyder 			if (src_sg == NULL)
725c1433041SIra Snyder 				break;
726c1433041SIra Snyder 
727c1433041SIra Snyder 			src_nents--;
728c1433041SIra Snyder 			src_avail = sg_dma_len(src_sg);
729c1433041SIra Snyder 		}
730c1433041SIra Snyder 	}
731c1433041SIra Snyder 
732c1433041SIra Snyder 	new->async_tx.flags = flags; /* client is in control of this ack */
733c1433041SIra Snyder 	new->async_tx.cookie = -EBUSY;
734c1433041SIra Snyder 
735c1433041SIra Snyder 	/* Set End-of-link to the last link descriptor of new list */
736c1433041SIra Snyder 	set_ld_eol(chan, new);
737c1433041SIra Snyder 
738c1433041SIra Snyder 	return &first->async_tx;
739c1433041SIra Snyder 
740c1433041SIra Snyder fail:
741c1433041SIra Snyder 	if (!first)
742c1433041SIra Snyder 		return NULL;
743c1433041SIra Snyder 
744c1433041SIra Snyder 	fsldma_free_desc_list_reverse(chan, &first->tx_list);
745c1433041SIra Snyder 	return NULL;
746c1433041SIra Snyder }
747c1433041SIra Snyder 
748173acc7cSZhang Wei /**
749bbea0b6eSIra Snyder  * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
750bbea0b6eSIra Snyder  * @chan: DMA channel
751bbea0b6eSIra Snyder  * @sgl: scatterlist to transfer to/from
752bbea0b6eSIra Snyder  * @sg_len: number of entries in @scatterlist
753bbea0b6eSIra Snyder  * @direction: DMA direction
754bbea0b6eSIra Snyder  * @flags: DMAEngine flags
755bbea0b6eSIra Snyder  *
756bbea0b6eSIra Snyder  * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
757bbea0b6eSIra Snyder  * DMA_SLAVE API, this gets the device-specific information from the
758bbea0b6eSIra Snyder  * chan->private variable.
759bbea0b6eSIra Snyder  */
760bbea0b6eSIra Snyder static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
761a1c03319SIra Snyder 	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
762bbea0b6eSIra Snyder 	enum dma_data_direction direction, unsigned long flags)
763bbea0b6eSIra Snyder {
764bbea0b6eSIra Snyder 	/*
765968f19aeSIra Snyder 	 * This operation is not supported on the Freescale DMA controller
766bbea0b6eSIra Snyder 	 *
767968f19aeSIra Snyder 	 * However, we need to provide the function pointer to allow the
768968f19aeSIra Snyder 	 * device_control() method to work.
769bbea0b6eSIra Snyder 	 */
770bbea0b6eSIra Snyder 	return NULL;
771bbea0b6eSIra Snyder }
772bbea0b6eSIra Snyder 
773c3635c78SLinus Walleij static int fsl_dma_device_control(struct dma_chan *dchan,
77405827630SLinus Walleij 				  enum dma_ctrl_cmd cmd, unsigned long arg)
775bbea0b6eSIra Snyder {
776968f19aeSIra Snyder 	struct dma_slave_config *config;
777a1c03319SIra Snyder 	struct fsldma_chan *chan;
778bbea0b6eSIra Snyder 	unsigned long flags;
779968f19aeSIra Snyder 	int size;
780c3635c78SLinus Walleij 
781a1c03319SIra Snyder 	if (!dchan)
782c3635c78SLinus Walleij 		return -EINVAL;
783bbea0b6eSIra Snyder 
784a1c03319SIra Snyder 	chan = to_fsl_chan(dchan);
785bbea0b6eSIra Snyder 
786968f19aeSIra Snyder 	switch (cmd) {
787968f19aeSIra Snyder 	case DMA_TERMINATE_ALL:
788f04cd407SIra Snyder 		spin_lock_irqsave(&chan->desc_lock, flags);
789f04cd407SIra Snyder 
790bbea0b6eSIra Snyder 		/* Halt the DMA engine */
791a1c03319SIra Snyder 		dma_halt(chan);
792bbea0b6eSIra Snyder 
793bbea0b6eSIra Snyder 		/* Remove and free all of the descriptors in the LD queue */
7949c3a50b7SIra Snyder 		fsldma_free_desc_list(chan, &chan->ld_pending);
7959c3a50b7SIra Snyder 		fsldma_free_desc_list(chan, &chan->ld_running);
796f04cd407SIra Snyder 		chan->idle = true;
797bbea0b6eSIra Snyder 
798a1c03319SIra Snyder 		spin_unlock_irqrestore(&chan->desc_lock, flags);
799968f19aeSIra Snyder 		return 0;
800968f19aeSIra Snyder 
801968f19aeSIra Snyder 	case DMA_SLAVE_CONFIG:
802968f19aeSIra Snyder 		config = (struct dma_slave_config *)arg;
803968f19aeSIra Snyder 
804968f19aeSIra Snyder 		/* make sure the channel supports setting burst size */
805968f19aeSIra Snyder 		if (!chan->set_request_count)
806968f19aeSIra Snyder 			return -ENXIO;
807968f19aeSIra Snyder 
808968f19aeSIra Snyder 		/* we set the controller burst size depending on direction */
809968f19aeSIra Snyder 		if (config->direction == DMA_TO_DEVICE)
810968f19aeSIra Snyder 			size = config->dst_addr_width * config->dst_maxburst;
811968f19aeSIra Snyder 		else
812968f19aeSIra Snyder 			size = config->src_addr_width * config->src_maxburst;
813968f19aeSIra Snyder 
814968f19aeSIra Snyder 		chan->set_request_count(chan, size);
815968f19aeSIra Snyder 		return 0;
816968f19aeSIra Snyder 
817968f19aeSIra Snyder 	case FSLDMA_EXTERNAL_START:
818968f19aeSIra Snyder 
819968f19aeSIra Snyder 		/* make sure the channel supports external start */
820968f19aeSIra Snyder 		if (!chan->toggle_ext_start)
821968f19aeSIra Snyder 			return -ENXIO;
822968f19aeSIra Snyder 
823968f19aeSIra Snyder 		chan->toggle_ext_start(chan, arg);
824968f19aeSIra Snyder 		return 0;
825968f19aeSIra Snyder 
826968f19aeSIra Snyder 	default:
827968f19aeSIra Snyder 		return -ENXIO;
828968f19aeSIra Snyder 	}
829c3635c78SLinus Walleij 
830c3635c78SLinus Walleij 	return 0;
831bbea0b6eSIra Snyder }
832bbea0b6eSIra Snyder 
833bbea0b6eSIra Snyder /**
8349c4d1e7bSIra Snyder  * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
8359c4d1e7bSIra Snyder  * @chan: Freescale DMA channel
8369c4d1e7bSIra Snyder  * @desc: descriptor to cleanup and free
8379c4d1e7bSIra Snyder  *
8389c4d1e7bSIra Snyder  * This function is used on a descriptor which has been executed by the DMA
8399c4d1e7bSIra Snyder  * controller. It will run any callbacks, submit any dependencies, and then
8409c4d1e7bSIra Snyder  * free the descriptor.
8419c4d1e7bSIra Snyder  */
8429c4d1e7bSIra Snyder static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
8439c4d1e7bSIra Snyder 				      struct fsl_desc_sw *desc)
8449c4d1e7bSIra Snyder {
8459c4d1e7bSIra Snyder 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
8469c4d1e7bSIra Snyder 	struct device *dev = chan->common.device->dev;
8479c4d1e7bSIra Snyder 	dma_addr_t src = get_desc_src(chan, desc);
8489c4d1e7bSIra Snyder 	dma_addr_t dst = get_desc_dst(chan, desc);
8499c4d1e7bSIra Snyder 	u32 len = get_desc_cnt(chan, desc);
8509c4d1e7bSIra Snyder 
8519c4d1e7bSIra Snyder 	/* Run the link descriptor callback function */
8529c4d1e7bSIra Snyder 	if (txd->callback) {
8539c4d1e7bSIra Snyder #ifdef FSL_DMA_LD_DEBUG
8549c4d1e7bSIra Snyder 		chan_dbg(chan, "LD %p callback\n", desc);
8559c4d1e7bSIra Snyder #endif
8569c4d1e7bSIra Snyder 		txd->callback(txd->callback_param);
8579c4d1e7bSIra Snyder 	}
8589c4d1e7bSIra Snyder 
8599c4d1e7bSIra Snyder 	/* Run any dependencies */
8609c4d1e7bSIra Snyder 	dma_run_dependencies(txd);
8619c4d1e7bSIra Snyder 
8629c4d1e7bSIra Snyder 	/* Unmap the dst buffer, if requested */
8639c4d1e7bSIra Snyder 	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
8649c4d1e7bSIra Snyder 		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
8659c4d1e7bSIra Snyder 			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
8669c4d1e7bSIra Snyder 		else
8679c4d1e7bSIra Snyder 			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
8689c4d1e7bSIra Snyder 	}
8699c4d1e7bSIra Snyder 
8709c4d1e7bSIra Snyder 	/* Unmap the src buffer, if requested */
8719c4d1e7bSIra Snyder 	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
8729c4d1e7bSIra Snyder 		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
8739c4d1e7bSIra Snyder 			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
8749c4d1e7bSIra Snyder 		else
8759c4d1e7bSIra Snyder 			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
8769c4d1e7bSIra Snyder 	}
8779c4d1e7bSIra Snyder 
8789c4d1e7bSIra Snyder #ifdef FSL_DMA_LD_DEBUG
8799c4d1e7bSIra Snyder 	chan_dbg(chan, "LD %p free\n", desc);
8809c4d1e7bSIra Snyder #endif
8819c4d1e7bSIra Snyder 	dma_pool_free(chan->desc_pool, desc, txd->phys);
8829c4d1e7bSIra Snyder }
8839c4d1e7bSIra Snyder 
8849c4d1e7bSIra Snyder /**
8859c3a50b7SIra Snyder  * fsl_chan_xfer_ld_queue - transfer any pending transactions
886a1c03319SIra Snyder  * @chan : Freescale DMA channel
8879c3a50b7SIra Snyder  *
888f04cd407SIra Snyder  * HARDWARE STATE: idle
889dc8d4091SIra Snyder  * LOCKING: must hold chan->desc_lock
890173acc7cSZhang Wei  */
891a1c03319SIra Snyder static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
892173acc7cSZhang Wei {
8939c3a50b7SIra Snyder 	struct fsl_desc_sw *desc;
894138ef018SIra Snyder 
8959c3a50b7SIra Snyder 	/*
8969c3a50b7SIra Snyder 	 * If the list of pending descriptors is empty, then we
8979c3a50b7SIra Snyder 	 * don't need to do any work at all
8989c3a50b7SIra Snyder 	 */
8999c3a50b7SIra Snyder 	if (list_empty(&chan->ld_pending)) {
900b158471eSIra Snyder 		chan_dbg(chan, "no pending LDs\n");
901dc8d4091SIra Snyder 		return;
9029c3a50b7SIra Snyder 	}
903173acc7cSZhang Wei 
9049c3a50b7SIra Snyder 	/*
905f04cd407SIra Snyder 	 * The DMA controller is not idle, which means that the interrupt
906f04cd407SIra Snyder 	 * handler will start any queued transactions when it runs after
907f04cd407SIra Snyder 	 * this transaction finishes
9089c3a50b7SIra Snyder 	 */
909f04cd407SIra Snyder 	if (!chan->idle) {
910b158471eSIra Snyder 		chan_dbg(chan, "DMA controller still busy\n");
911dc8d4091SIra Snyder 		return;
9129c3a50b7SIra Snyder 	}
9139c3a50b7SIra Snyder 
9149c3a50b7SIra Snyder 	/*
9159c3a50b7SIra Snyder 	 * If there are some link descriptors which have not been
9169c3a50b7SIra Snyder 	 * transferred, we need to start the controller
917173acc7cSZhang Wei 	 */
918173acc7cSZhang Wei 
9199c3a50b7SIra Snyder 	/*
9209c3a50b7SIra Snyder 	 * Move all elements from the queue of pending transactions
9219c3a50b7SIra Snyder 	 * onto the list of running transactions
9229c3a50b7SIra Snyder 	 */
923f04cd407SIra Snyder 	chan_dbg(chan, "idle, starting controller\n");
9249c3a50b7SIra Snyder 	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
9259c3a50b7SIra Snyder 	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
926173acc7cSZhang Wei 
9279c3a50b7SIra Snyder 	/*
928f04cd407SIra Snyder 	 * The 85xx DMA controller doesn't clear the channel start bit
929f04cd407SIra Snyder 	 * automatically at the end of a transfer. Therefore we must clear
930f04cd407SIra Snyder 	 * it in software before starting the transfer.
931f04cd407SIra Snyder 	 */
932f04cd407SIra Snyder 	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
933f04cd407SIra Snyder 		u32 mode;
934f04cd407SIra Snyder 
935f04cd407SIra Snyder 		mode = DMA_IN(chan, &chan->regs->mr, 32);
936f04cd407SIra Snyder 		mode &= ~FSL_DMA_MR_CS;
937f04cd407SIra Snyder 		DMA_OUT(chan, &chan->regs->mr, mode, 32);
938f04cd407SIra Snyder 	}
939f04cd407SIra Snyder 
940f04cd407SIra Snyder 	/*
9419c3a50b7SIra Snyder 	 * Program the descriptor's address into the DMA controller,
9429c3a50b7SIra Snyder 	 * then start the DMA transaction
9439c3a50b7SIra Snyder 	 */
9449c3a50b7SIra Snyder 	set_cdar(chan, desc->async_tx.phys);
945f04cd407SIra Snyder 	get_cdar(chan);
946f04cd407SIra Snyder 
947a1c03319SIra Snyder 	dma_start(chan);
948f04cd407SIra Snyder 	chan->idle = false;
949173acc7cSZhang Wei }
950173acc7cSZhang Wei 
951173acc7cSZhang Wei /**
952173acc7cSZhang Wei  * fsl_dma_memcpy_issue_pending - Issue the DMA start command
953a1c03319SIra Snyder  * @chan : Freescale DMA channel
954173acc7cSZhang Wei  */
955a1c03319SIra Snyder static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
956173acc7cSZhang Wei {
957a1c03319SIra Snyder 	struct fsldma_chan *chan = to_fsl_chan(dchan);
958dc8d4091SIra Snyder 	unsigned long flags;
959dc8d4091SIra Snyder 
960dc8d4091SIra Snyder 	spin_lock_irqsave(&chan->desc_lock, flags);
961a1c03319SIra Snyder 	fsl_chan_xfer_ld_queue(chan);
962dc8d4091SIra Snyder 	spin_unlock_irqrestore(&chan->desc_lock, flags);
963173acc7cSZhang Wei }
964173acc7cSZhang Wei 
965173acc7cSZhang Wei /**
96607934481SLinus Walleij  * fsl_tx_status - Determine the DMA status
967a1c03319SIra Snyder  * @chan : Freescale DMA channel
968173acc7cSZhang Wei  */
96907934481SLinus Walleij static enum dma_status fsl_tx_status(struct dma_chan *dchan,
970173acc7cSZhang Wei 					dma_cookie_t cookie,
97107934481SLinus Walleij 					struct dma_tx_state *txstate)
972173acc7cSZhang Wei {
973a1c03319SIra Snyder 	struct fsldma_chan *chan = to_fsl_chan(dchan);
974173acc7cSZhang Wei 	dma_cookie_t last_complete;
975f04cd407SIra Snyder 	dma_cookie_t last_used;
976f04cd407SIra Snyder 	unsigned long flags;
977173acc7cSZhang Wei 
978f04cd407SIra Snyder 	spin_lock_irqsave(&chan->desc_lock, flags);
979173acc7cSZhang Wei 
980a1c03319SIra Snyder 	last_complete = chan->completed_cookie;
981f04cd407SIra Snyder 	last_used = dchan->cookie;
982f04cd407SIra Snyder 
983f04cd407SIra Snyder 	spin_unlock_irqrestore(&chan->desc_lock, flags);
984173acc7cSZhang Wei 
985bca34692SDan Williams 	dma_set_tx_state(txstate, last_complete, last_used, 0);
986173acc7cSZhang Wei 	return dma_async_is_complete(cookie, last_complete, last_used);
987173acc7cSZhang Wei }
988173acc7cSZhang Wei 
989d3f620b2SIra Snyder /*----------------------------------------------------------------------------*/
990d3f620b2SIra Snyder /* Interrupt Handling                                                         */
991d3f620b2SIra Snyder /*----------------------------------------------------------------------------*/
992d3f620b2SIra Snyder 
/*
 * fsldma_chan_irq - per-channel interrupt handler
 *
 * Acknowledges the channel status register, logs/clears the individual
 * event bits, and defers all descriptor cleanup and queue restarting to
 * the tasklet.
 */
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		/* NOTE(review): BCR != 0 presumably distinguishes a genuine
		 * programming error from the expected NULL-transfer PE --
		 * confirm against the DMA block reference manual */
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, EOCDI event need to update cookie
	 * and start the next transfer if it exist.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If it current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit for
	 * prepare next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}
1059173acc7cSZhang Wei 
/*
 * dma_do_tasklet - deferred per-channel completion work
 *
 * Runs after the interrupt handler: advances completed_cookie, restarts
 * any pending descriptors, then runs the completion callbacks for all
 * finished descriptors outside the lock.
 */
static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->ld_running)) {
		dma_cookie_t cookie;

		/* the tail of ld_running carries the most recent cookie */
		desc = to_fsl_desc(chan->ld_running.prev);
		cookie = desc->async_tx.cookie;

		chan->completed_cookie = cookie;
		chan_dbg(chan, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/*
	 * Run the callback for each descriptor, in order. The descriptors
	 * were moved onto the private ld_cleanup list above precisely so
	 * this can happen without holding chan->desc_lock.
	 */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {

		/* Remove from the list of transactions */
		list_del(&desc->node);

		/* Run all cleanup for this descriptor */
		fsldma_cleanup_descriptor(chan, desc);
	}

	chan_dbg(chan, "tasklet exit\n");
}
1112173acc7cSZhang Wei 
1113d3f620b2SIra Snyder static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
1114d3f620b2SIra Snyder {
1115d3f620b2SIra Snyder 	struct fsldma_device *fdev = data;
1116d3f620b2SIra Snyder 	struct fsldma_chan *chan;
1117d3f620b2SIra Snyder 	unsigned int handled = 0;
1118d3f620b2SIra Snyder 	u32 gsr, mask;
1119d3f620b2SIra Snyder 	int i;
1120d3f620b2SIra Snyder 
1121d3f620b2SIra Snyder 	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
1122d3f620b2SIra Snyder 						   : in_le32(fdev->regs);
1123d3f620b2SIra Snyder 	mask = 0xff000000;
1124d3f620b2SIra Snyder 	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
1125d3f620b2SIra Snyder 
1126d3f620b2SIra Snyder 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1127d3f620b2SIra Snyder 		chan = fdev->chan[i];
1128d3f620b2SIra Snyder 		if (!chan)
1129d3f620b2SIra Snyder 			continue;
1130d3f620b2SIra Snyder 
1131d3f620b2SIra Snyder 		if (gsr & mask) {
1132d3f620b2SIra Snyder 			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
1133d3f620b2SIra Snyder 			fsldma_chan_irq(irq, chan);
1134d3f620b2SIra Snyder 			handled++;
1135d3f620b2SIra Snyder 		}
1136d3f620b2SIra Snyder 
1137d3f620b2SIra Snyder 		gsr &= ~mask;
1138d3f620b2SIra Snyder 		mask >>= 8;
1139d3f620b2SIra Snyder 	}
1140d3f620b2SIra Snyder 
1141d3f620b2SIra Snyder 	return IRQ_RETVAL(handled);
1142d3f620b2SIra Snyder }
1143d3f620b2SIra Snyder 
1144d3f620b2SIra Snyder static void fsldma_free_irqs(struct fsldma_device *fdev)
1145d3f620b2SIra Snyder {
1146d3f620b2SIra Snyder 	struct fsldma_chan *chan;
1147d3f620b2SIra Snyder 	int i;
1148d3f620b2SIra Snyder 
1149d3f620b2SIra Snyder 	if (fdev->irq != NO_IRQ) {
1150d3f620b2SIra Snyder 		dev_dbg(fdev->dev, "free per-controller IRQ\n");
1151d3f620b2SIra Snyder 		free_irq(fdev->irq, fdev);
1152d3f620b2SIra Snyder 		return;
1153d3f620b2SIra Snyder 	}
1154d3f620b2SIra Snyder 
1155d3f620b2SIra Snyder 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1156d3f620b2SIra Snyder 		chan = fdev->chan[i];
1157d3f620b2SIra Snyder 		if (chan && chan->irq != NO_IRQ) {
1158b158471eSIra Snyder 			chan_dbg(chan, "free per-channel IRQ\n");
1159d3f620b2SIra Snyder 			free_irq(chan->irq, chan);
1160d3f620b2SIra Snyder 		}
1161d3f620b2SIra Snyder 	}
1162d3f620b2SIra Snyder }
1163d3f620b2SIra Snyder 
1164d3f620b2SIra Snyder static int fsldma_request_irqs(struct fsldma_device *fdev)
1165d3f620b2SIra Snyder {
1166d3f620b2SIra Snyder 	struct fsldma_chan *chan;
1167d3f620b2SIra Snyder 	int ret;
1168d3f620b2SIra Snyder 	int i;
1169d3f620b2SIra Snyder 
1170d3f620b2SIra Snyder 	/* if we have a per-controller IRQ, use that */
1171d3f620b2SIra Snyder 	if (fdev->irq != NO_IRQ) {
1172d3f620b2SIra Snyder 		dev_dbg(fdev->dev, "request per-controller IRQ\n");
1173d3f620b2SIra Snyder 		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
1174d3f620b2SIra Snyder 				  "fsldma-controller", fdev);
1175d3f620b2SIra Snyder 		return ret;
1176d3f620b2SIra Snyder 	}
1177d3f620b2SIra Snyder 
1178d3f620b2SIra Snyder 	/* no per-controller IRQ, use the per-channel IRQs */
1179d3f620b2SIra Snyder 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1180d3f620b2SIra Snyder 		chan = fdev->chan[i];
1181d3f620b2SIra Snyder 		if (!chan)
1182d3f620b2SIra Snyder 			continue;
1183d3f620b2SIra Snyder 
1184d3f620b2SIra Snyder 		if (chan->irq == NO_IRQ) {
1185b158471eSIra Snyder 			chan_err(chan, "interrupts property missing in device tree\n");
1186d3f620b2SIra Snyder 			ret = -ENODEV;
1187d3f620b2SIra Snyder 			goto out_unwind;
1188d3f620b2SIra Snyder 		}
1189d3f620b2SIra Snyder 
1190b158471eSIra Snyder 		chan_dbg(chan, "request per-channel IRQ\n");
1191d3f620b2SIra Snyder 		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
1192d3f620b2SIra Snyder 				  "fsldma-chan", chan);
1193d3f620b2SIra Snyder 		if (ret) {
1194b158471eSIra Snyder 			chan_err(chan, "unable to request per-channel IRQ\n");
1195d3f620b2SIra Snyder 			goto out_unwind;
1196d3f620b2SIra Snyder 		}
1197d3f620b2SIra Snyder 	}
1198d3f620b2SIra Snyder 
1199d3f620b2SIra Snyder 	return 0;
1200d3f620b2SIra Snyder 
1201d3f620b2SIra Snyder out_unwind:
1202d3f620b2SIra Snyder 	for (/* none */; i >= 0; i--) {
1203d3f620b2SIra Snyder 		chan = fdev->chan[i];
1204d3f620b2SIra Snyder 		if (!chan)
1205d3f620b2SIra Snyder 			continue;
1206d3f620b2SIra Snyder 
1207d3f620b2SIra Snyder 		if (chan->irq == NO_IRQ)
1208d3f620b2SIra Snyder 			continue;
1209d3f620b2SIra Snyder 
1210d3f620b2SIra Snyder 		free_irq(chan->irq, chan);
1211d3f620b2SIra Snyder 	}
1212d3f620b2SIra Snyder 
1213d3f620b2SIra Snyder 	return ret;
1214d3f620b2SIra Snyder }
1215d3f620b2SIra Snyder 
1216a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/
1217a4f56d4bSIra Snyder /* OpenFirmware Subsystem                                                     */
1218a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/
1219a4f56d4bSIra Snyder 
/*
 * fsl_dma_chan_probe - initialize one DMA channel from the device tree
 *
 * Allocates the channel structure, maps its registers, derives the
 * channel ID from the register offset, wires up the feature-dependent
 * callbacks, and registers the channel with the DMA device.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	/* the physical register address is needed to compute the channel id */
	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	/* channel register blocks start at offset 0x100, 0x80 bytes apart */
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fallthrough: 85xx channels also get all 83xx callbacks */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	/* no transfer is in flight yet */
	chan->idle = true;

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}
1313173acc7cSZhang Wei 
/*
 * fsl_dma_chan_remove - undo fsl_dma_chan_probe for a single channel
 *
 * Releases the IRQ mapping, unlinks the channel from the DMA device's
 * channel list, unmaps its registers and frees the channel structure.
 */
static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}
1321173acc7cSZhang Wei 
13222dc11581SGrant Likely static int __devinit fsldma_of_probe(struct platform_device *op,
1323173acc7cSZhang Wei 			const struct of_device_id *match)
1324173acc7cSZhang Wei {
1325a4f56d4bSIra Snyder 	struct fsldma_device *fdev;
132677cd62e8STimur Tabi 	struct device_node *child;
1327e7a29151SIra Snyder 	int err;
1328173acc7cSZhang Wei 
1329a4f56d4bSIra Snyder 	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
1330173acc7cSZhang Wei 	if (!fdev) {
1331e7a29151SIra Snyder 		dev_err(&op->dev, "No enough memory for 'priv'\n");
1332e7a29151SIra Snyder 		err = -ENOMEM;
1333e7a29151SIra Snyder 		goto out_return;
1334173acc7cSZhang Wei 	}
1335e7a29151SIra Snyder 
1336e7a29151SIra Snyder 	fdev->dev = &op->dev;
1337173acc7cSZhang Wei 	INIT_LIST_HEAD(&fdev->common.channels);
1338173acc7cSZhang Wei 
1339e7a29151SIra Snyder 	/* ioremap the registers for use */
134061c7a080SGrant Likely 	fdev->regs = of_iomap(op->dev.of_node, 0);
1341e7a29151SIra Snyder 	if (!fdev->regs) {
1342e7a29151SIra Snyder 		dev_err(&op->dev, "unable to ioremap registers\n");
1343e7a29151SIra Snyder 		err = -ENOMEM;
1344e7a29151SIra Snyder 		goto out_free_fdev;
1345173acc7cSZhang Wei 	}
1346173acc7cSZhang Wei 
1347d3f620b2SIra Snyder 	/* map the channel IRQ if it exists, but don't hookup the handler yet */
134861c7a080SGrant Likely 	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
1349d3f620b2SIra Snyder 
1350173acc7cSZhang Wei 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
1351173acc7cSZhang Wei 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
1352c1433041SIra Snyder 	dma_cap_set(DMA_SG, fdev->common.cap_mask);
1353bbea0b6eSIra Snyder 	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
1354173acc7cSZhang Wei 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
1355173acc7cSZhang Wei 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
13562187c269SZhang Wei 	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
1357173acc7cSZhang Wei 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
1358c1433041SIra Snyder 	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
135907934481SLinus Walleij 	fdev->common.device_tx_status = fsl_tx_status;
1360173acc7cSZhang Wei 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1361bbea0b6eSIra Snyder 	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
1362c3635c78SLinus Walleij 	fdev->common.device_control = fsl_dma_device_control;
1363e7a29151SIra Snyder 	fdev->common.dev = &op->dev;
1364173acc7cSZhang Wei 
1365e2c8e425SLi Yang 	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
1366e2c8e425SLi Yang 
1367e7a29151SIra Snyder 	dev_set_drvdata(&op->dev, fdev);
136877cd62e8STimur Tabi 
1369e7a29151SIra Snyder 	/*
1370e7a29151SIra Snyder 	 * We cannot use of_platform_bus_probe() because there is no
1371e7a29151SIra Snyder 	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
137277cd62e8STimur Tabi 	 * channel object.
137377cd62e8STimur Tabi 	 */
137461c7a080SGrant Likely 	for_each_child_of_node(op->dev.of_node, child) {
1375e7a29151SIra Snyder 		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
137677cd62e8STimur Tabi 			fsl_dma_chan_probe(fdev, child,
137777cd62e8STimur Tabi 				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
137877cd62e8STimur Tabi 				"fsl,eloplus-dma-channel");
1379e7a29151SIra Snyder 		}
1380e7a29151SIra Snyder 
1381e7a29151SIra Snyder 		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
138277cd62e8STimur Tabi 			fsl_dma_chan_probe(fdev, child,
138377cd62e8STimur Tabi 				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
138477cd62e8STimur Tabi 				"fsl,elo-dma-channel");
138577cd62e8STimur Tabi 		}
1386e7a29151SIra Snyder 	}
1387173acc7cSZhang Wei 
1388d3f620b2SIra Snyder 	/*
1389d3f620b2SIra Snyder 	 * Hookup the IRQ handler(s)
1390d3f620b2SIra Snyder 	 *
1391d3f620b2SIra Snyder 	 * If we have a per-controller interrupt, we prefer that to the
1392d3f620b2SIra Snyder 	 * per-channel interrupts to reduce the number of shared interrupt
1393d3f620b2SIra Snyder 	 * handlers on the same IRQ line
1394d3f620b2SIra Snyder 	 */
1395d3f620b2SIra Snyder 	err = fsldma_request_irqs(fdev);
1396d3f620b2SIra Snyder 	if (err) {
1397d3f620b2SIra Snyder 		dev_err(fdev->dev, "unable to request IRQs\n");
1398d3f620b2SIra Snyder 		goto out_free_fdev;
1399d3f620b2SIra Snyder 	}
1400d3f620b2SIra Snyder 
1401173acc7cSZhang Wei 	dma_async_device_register(&fdev->common);
1402173acc7cSZhang Wei 	return 0;
1403173acc7cSZhang Wei 
1404e7a29151SIra Snyder out_free_fdev:
1405d3f620b2SIra Snyder 	irq_dispose_mapping(fdev->irq);
1406173acc7cSZhang Wei 	kfree(fdev);
1407e7a29151SIra Snyder out_return:
1408173acc7cSZhang Wei 	return err;
1409173acc7cSZhang Wei }
1410173acc7cSZhang Wei 
14112dc11581SGrant Likely static int fsldma_of_remove(struct platform_device *op)
141277cd62e8STimur Tabi {
1413a4f56d4bSIra Snyder 	struct fsldma_device *fdev;
141477cd62e8STimur Tabi 	unsigned int i;
141577cd62e8STimur Tabi 
1416e7a29151SIra Snyder 	fdev = dev_get_drvdata(&op->dev);
141777cd62e8STimur Tabi 	dma_async_device_unregister(&fdev->common);
141877cd62e8STimur Tabi 
1419d3f620b2SIra Snyder 	fsldma_free_irqs(fdev);
1420d3f620b2SIra Snyder 
1421e7a29151SIra Snyder 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
142277cd62e8STimur Tabi 		if (fdev->chan[i])
142377cd62e8STimur Tabi 			fsl_dma_chan_remove(fdev->chan[i]);
1424e7a29151SIra Snyder 	}
142577cd62e8STimur Tabi 
1426e7a29151SIra Snyder 	iounmap(fdev->regs);
1427e7a29151SIra Snyder 	dev_set_drvdata(&op->dev, NULL);
142877cd62e8STimur Tabi 	kfree(fdev);
142977cd62e8STimur Tabi 
143077cd62e8STimur Tabi 	return 0;
143177cd62e8STimur Tabi }
143277cd62e8STimur Tabi 
/* device tree "compatible" strings this driver binds against */
static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
1438173acc7cSZhang Wei 
/* OF platform driver glue: one probe/remove pair per DMA controller node */
static struct of_platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};
1448173acc7cSZhang Wei 
1449a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/
1450a4f56d4bSIra Snyder /* Module Init / Exit                                                         */
1451a4f56d4bSIra Snyder /*----------------------------------------------------------------------------*/
1452a4f56d4bSIra Snyder 
1453a4f56d4bSIra Snyder static __init int fsldma_init(void)
1454173acc7cSZhang Wei {
145577cd62e8STimur Tabi 	int ret;
145677cd62e8STimur Tabi 
145777cd62e8STimur Tabi 	pr_info("Freescale Elo / Elo Plus DMA driver\n");
145877cd62e8STimur Tabi 
1459a4f56d4bSIra Snyder 	ret = of_register_platform_driver(&fsldma_of_driver);
146077cd62e8STimur Tabi 	if (ret)
146177cd62e8STimur Tabi 		pr_err("fsldma: failed to register platform driver\n");
146277cd62e8STimur Tabi 
146377cd62e8STimur Tabi 	return ret;
1464173acc7cSZhang Wei }
1465173acc7cSZhang Wei 
/* module exit point: unregister the OF platform driver */
static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}
147077cd62e8STimur Tabi 
/* registered at subsys_initcall level, i.e. before regular module_init() */
subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");
1476