xref: /openbmc/linux/drivers/dma/at_hdmac.c (revision 4483320e)
1dc78baa2SNicolas Ferre /*
2dc78baa2SNicolas Ferre  * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
3dc78baa2SNicolas Ferre  *
4dc78baa2SNicolas Ferre  * Copyright (C) 2008 Atmel Corporation
5dc78baa2SNicolas Ferre  *
6dc78baa2SNicolas Ferre  * This program is free software; you can redistribute it and/or modify
7dc78baa2SNicolas Ferre  * it under the terms of the GNU General Public License as published by
8dc78baa2SNicolas Ferre  * the Free Software Foundation; either version 2 of the License, or
9dc78baa2SNicolas Ferre  * (at your option) any later version.
10dc78baa2SNicolas Ferre  *
11dc78baa2SNicolas Ferre  *
129102d871SNicolas Ferre  * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
139102d871SNicolas Ferre  * The only Atmel DMA Controller that is not covered by this driver is the one
149102d871SNicolas Ferre  * found on AT91SAM9263.
15dc78baa2SNicolas Ferre  */
16dc78baa2SNicolas Ferre 
1762971b29SLudovic Desroches #include <dt-bindings/dma/at91.h>
18dc78baa2SNicolas Ferre #include <linux/clk.h>
19dc78baa2SNicolas Ferre #include <linux/dmaengine.h>
20dc78baa2SNicolas Ferre #include <linux/dma-mapping.h>
21dc78baa2SNicolas Ferre #include <linux/dmapool.h>
22dc78baa2SNicolas Ferre #include <linux/interrupt.h>
23dc78baa2SNicolas Ferre #include <linux/module.h>
24dc78baa2SNicolas Ferre #include <linux/platform_device.h>
255a0e3ad6STejun Heo #include <linux/slab.h>
26c5115953SNicolas Ferre #include <linux/of.h>
27c5115953SNicolas Ferre #include <linux/of_device.h>
28bbe89c8eSLudovic Desroches #include <linux/of_dma.h>
29dc78baa2SNicolas Ferre 
30dc78baa2SNicolas Ferre #include "at_hdmac_regs.h"
31d2ebfb33SRussell King - ARM Linux #include "dmaengine.h"
32dc78baa2SNicolas Ferre 
33dc78baa2SNicolas Ferre /*
34dc78baa2SNicolas Ferre  * Glossary
35dc78baa2SNicolas Ferre  * --------
36dc78baa2SNicolas Ferre  *
37dc78baa2SNicolas Ferre  * at_hdmac		: Name of the ATmel AHB DMA Controller
38dc78baa2SNicolas Ferre  * at_dma_ / atdma	: ATmel DMA controller entity related
39dc78baa2SNicolas Ferre  * atc_	/ atchan	: ATmel DMA Channel entity related
40dc78baa2SNicolas Ferre  */
41dc78baa2SNicolas Ferre 
42dc78baa2SNicolas Ferre #define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
43ae14d4b5SNicolas Ferre #define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
44ae14d4b5SNicolas Ferre 				|ATC_DIF(AT_DMA_MEM_IF))
45816070edSLudovic Desroches #define ATC_DMA_BUSWIDTHS\
46816070edSLudovic Desroches 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
47816070edSLudovic Desroches 	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
48816070edSLudovic Desroches 	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
49816070edSLudovic Desroches 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
50dc78baa2SNicolas Ferre 
51dc78baa2SNicolas Ferre /*
52dc78baa2SNicolas Ferre  * Initial number of descriptors to allocate for each channel. This could
53dc78baa2SNicolas Ferre  * be increased during dma usage.
54dc78baa2SNicolas Ferre  */
55dc78baa2SNicolas Ferre static unsigned int init_nr_desc_per_channel = 64;
56dc78baa2SNicolas Ferre module_param(init_nr_desc_per_channel, uint, 0644);
57dc78baa2SNicolas Ferre MODULE_PARM_DESC(init_nr_desc_per_channel,
58dc78baa2SNicolas Ferre 		 "initial descriptors per channel (default: 64)");
59dc78baa2SNicolas Ferre 
60dc78baa2SNicolas Ferre 
61dc78baa2SNicolas Ferre /* prototypes */
62dc78baa2SNicolas Ferre static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
63d48de6f1SElen Song static void atc_issue_pending(struct dma_chan *chan);
64dc78baa2SNicolas Ferre 
65dc78baa2SNicolas Ferre 
66dc78baa2SNicolas Ferre /*----------------------------------------------------------------------*/
67dc78baa2SNicolas Ferre 
68265567fbSTorsten Fleischer static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
69265567fbSTorsten Fleischer 						size_t len)
70265567fbSTorsten Fleischer {
71265567fbSTorsten Fleischer 	unsigned int width;
72265567fbSTorsten Fleischer 
73265567fbSTorsten Fleischer 	if (!((src | dst  | len) & 3))
74265567fbSTorsten Fleischer 		width = 2;
75265567fbSTorsten Fleischer 	else if (!((src | dst | len) & 1))
76265567fbSTorsten Fleischer 		width = 1;
77265567fbSTorsten Fleischer 	else
78265567fbSTorsten Fleischer 		width = 0;
79265567fbSTorsten Fleischer 
80265567fbSTorsten Fleischer 	return width;
81265567fbSTorsten Fleischer }
82265567fbSTorsten Fleischer 
/*
 * Return the first descriptor on the channel's active list, i.e. the chain
 * currently owned by the hardware. The caller must guarantee that
 * active_list is not empty (list_first_entry on an empty list yields a
 * bogus pointer) and must hold the channel lock where required.
 */
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}
88dc78baa2SNicolas Ferre 
/*
 * Return the first descriptor on the channel's queue list (submitted but
 * not yet handed to the hardware). The caller must guarantee that the
 * queue is not empty.
 */
static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}
94dc78baa2SNicolas Ferre 
95dc78baa2SNicolas Ferre /**
96421f91d2SUwe Kleine-König  * atc_alloc_descriptor - allocate and return an initialized descriptor
97dc78baa2SNicolas Ferre  * @chan: the channel to allocate descriptors for
98dc78baa2SNicolas Ferre  * @gfp_flags: GFP allocation flags
99dc78baa2SNicolas Ferre  *
100dc78baa2SNicolas Ferre  * Note: The ack-bit is positioned in the descriptor flag at creation time
101dc78baa2SNicolas Ferre  *       to make initial allocation more convenient. This bit will be cleared
102dc78baa2SNicolas Ferre  *       and control will be given to client at usage time (during
103dc78baa2SNicolas Ferre  *       preparation functions).
104dc78baa2SNicolas Ferre  */
105dc78baa2SNicolas Ferre static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
106dc78baa2SNicolas Ferre 					    gfp_t gfp_flags)
107dc78baa2SNicolas Ferre {
108dc78baa2SNicolas Ferre 	struct at_desc	*desc = NULL;
109dc78baa2SNicolas Ferre 	struct at_dma	*atdma = to_at_dma(chan->device);
110dc78baa2SNicolas Ferre 	dma_addr_t phys;
111dc78baa2SNicolas Ferre 
112dc78baa2SNicolas Ferre 	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
113dc78baa2SNicolas Ferre 	if (desc) {
114dc78baa2SNicolas Ferre 		memset(desc, 0, sizeof(struct at_desc));
115285a3c71SDan Williams 		INIT_LIST_HEAD(&desc->tx_list);
116dc78baa2SNicolas Ferre 		dma_async_tx_descriptor_init(&desc->txd, chan);
117dc78baa2SNicolas Ferre 		/* txd.flags will be overwritten in prep functions */
118dc78baa2SNicolas Ferre 		desc->txd.flags = DMA_CTRL_ACK;
119dc78baa2SNicolas Ferre 		desc->txd.tx_submit = atc_tx_submit;
120dc78baa2SNicolas Ferre 		desc->txd.phys = phys;
121dc78baa2SNicolas Ferre 	}
122dc78baa2SNicolas Ferre 
123dc78baa2SNicolas Ferre 	return desc;
124dc78baa2SNicolas Ferre }
125dc78baa2SNicolas Ferre 
126dc78baa2SNicolas Ferre /**
127af901ca1SAndré Goddard Rosa  * atc_desc_get - get an unused descriptor from free_list
128dc78baa2SNicolas Ferre  * @atchan: channel we want a new descriptor for
129dc78baa2SNicolas Ferre  */
130dc78baa2SNicolas Ferre static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
131dc78baa2SNicolas Ferre {
132dc78baa2SNicolas Ferre 	struct at_desc *desc, *_desc;
133dc78baa2SNicolas Ferre 	struct at_desc *ret = NULL;
134d8cb04b0SNicolas Ferre 	unsigned long flags;
135dc78baa2SNicolas Ferre 	unsigned int i = 0;
136dc78baa2SNicolas Ferre 	LIST_HEAD(tmp_list);
137dc78baa2SNicolas Ferre 
138d8cb04b0SNicolas Ferre 	spin_lock_irqsave(&atchan->lock, flags);
139dc78baa2SNicolas Ferre 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
140dc78baa2SNicolas Ferre 		i++;
141dc78baa2SNicolas Ferre 		if (async_tx_test_ack(&desc->txd)) {
142dc78baa2SNicolas Ferre 			list_del(&desc->desc_node);
143dc78baa2SNicolas Ferre 			ret = desc;
144dc78baa2SNicolas Ferre 			break;
145dc78baa2SNicolas Ferre 		}
146dc78baa2SNicolas Ferre 		dev_dbg(chan2dev(&atchan->chan_common),
147dc78baa2SNicolas Ferre 				"desc %p not ACKed\n", desc);
148dc78baa2SNicolas Ferre 	}
149d8cb04b0SNicolas Ferre 	spin_unlock_irqrestore(&atchan->lock, flags);
150dc78baa2SNicolas Ferre 	dev_vdbg(chan2dev(&atchan->chan_common),
151dc78baa2SNicolas Ferre 		"scanned %u descriptors on freelist\n", i);
152dc78baa2SNicolas Ferre 
153dc78baa2SNicolas Ferre 	/* no more descriptor available in initial pool: create one more */
154dc78baa2SNicolas Ferre 	if (!ret) {
155dc78baa2SNicolas Ferre 		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
156dc78baa2SNicolas Ferre 		if (ret) {
157d8cb04b0SNicolas Ferre 			spin_lock_irqsave(&atchan->lock, flags);
158dc78baa2SNicolas Ferre 			atchan->descs_allocated++;
159d8cb04b0SNicolas Ferre 			spin_unlock_irqrestore(&atchan->lock, flags);
160dc78baa2SNicolas Ferre 		} else {
161dc78baa2SNicolas Ferre 			dev_err(chan2dev(&atchan->chan_common),
162dc78baa2SNicolas Ferre 					"not enough descriptors available\n");
163dc78baa2SNicolas Ferre 		}
164dc78baa2SNicolas Ferre 	}
165dc78baa2SNicolas Ferre 
166dc78baa2SNicolas Ferre 	return ret;
167dc78baa2SNicolas Ferre }
168dc78baa2SNicolas Ferre 
169dc78baa2SNicolas Ferre /**
170dc78baa2SNicolas Ferre  * atc_desc_put - move a descriptor, including any children, to the free list
171dc78baa2SNicolas Ferre  * @atchan: channel we work on
172dc78baa2SNicolas Ferre  * @desc: descriptor, at the head of a chain, to move to free list
173dc78baa2SNicolas Ferre  */
174dc78baa2SNicolas Ferre static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
175dc78baa2SNicolas Ferre {
176dc78baa2SNicolas Ferre 	if (desc) {
177dc78baa2SNicolas Ferre 		struct at_desc *child;
178d8cb04b0SNicolas Ferre 		unsigned long flags;
179dc78baa2SNicolas Ferre 
180d8cb04b0SNicolas Ferre 		spin_lock_irqsave(&atchan->lock, flags);
181285a3c71SDan Williams 		list_for_each_entry(child, &desc->tx_list, desc_node)
182dc78baa2SNicolas Ferre 			dev_vdbg(chan2dev(&atchan->chan_common),
183dc78baa2SNicolas Ferre 					"moving child desc %p to freelist\n",
184dc78baa2SNicolas Ferre 					child);
185285a3c71SDan Williams 		list_splice_init(&desc->tx_list, &atchan->free_list);
186dc78baa2SNicolas Ferre 		dev_vdbg(chan2dev(&atchan->chan_common),
187dc78baa2SNicolas Ferre 			 "moving desc %p to freelist\n", desc);
188dc78baa2SNicolas Ferre 		list_add(&desc->desc_node, &atchan->free_list);
189d8cb04b0SNicolas Ferre 		spin_unlock_irqrestore(&atchan->lock, flags);
190dc78baa2SNicolas Ferre 	}
191dc78baa2SNicolas Ferre }
192dc78baa2SNicolas Ferre 
193dc78baa2SNicolas Ferre /**
194d73111c6SMasanari Iida  * atc_desc_chain - build chain adding a descriptor
195d73111c6SMasanari Iida  * @first: address of first descriptor of the chain
196d73111c6SMasanari Iida  * @prev: address of previous descriptor of the chain
19753830cc7SNicolas Ferre  * @desc: descriptor to queue
19853830cc7SNicolas Ferre  *
19953830cc7SNicolas Ferre  * Called from prep_* functions
20053830cc7SNicolas Ferre  */
20153830cc7SNicolas Ferre static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
20253830cc7SNicolas Ferre 			   struct at_desc *desc)
20353830cc7SNicolas Ferre {
20453830cc7SNicolas Ferre 	if (!(*first)) {
20553830cc7SNicolas Ferre 		*first = desc;
20653830cc7SNicolas Ferre 	} else {
20753830cc7SNicolas Ferre 		/* inform the HW lli about chaining */
20853830cc7SNicolas Ferre 		(*prev)->lli.dscr = desc->txd.phys;
20953830cc7SNicolas Ferre 		/* insert the link descriptor to the LD ring */
21053830cc7SNicolas Ferre 		list_add_tail(&desc->desc_node,
21153830cc7SNicolas Ferre 				&(*first)->tx_list);
21253830cc7SNicolas Ferre 	}
21353830cc7SNicolas Ferre 	*prev = desc;
21453830cc7SNicolas Ferre }
21553830cc7SNicolas Ferre 
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/*
	 * Clear the per-channel transfer registers and hand the descriptor
	 * chain to the controller via DSCR, which points at the first
	 * hardware linked-list item (first->txd.phys).
	 */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	/* program source/destination hole and boundary (interleaved xfers) */
	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
		       ATC_SPIP_BOUNDARY(first->boundary));
	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
		       ATC_DPIP_BOUNDARY(first->boundary));
	/* enable the channel: the transfer starts from here */
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
258dc78baa2SNicolas Ferre 
259d48de6f1SElen Song /*
260bdf6c792STorsten Fleischer  * atc_get_desc_by_cookie - get the descriptor of a cookie
261bdf6c792STorsten Fleischer  * @atchan: the DMA channel
262bdf6c792STorsten Fleischer  * @cookie: the cookie to get the descriptor for
263d48de6f1SElen Song  */
264bdf6c792STorsten Fleischer static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
265bdf6c792STorsten Fleischer 						dma_cookie_t cookie)
266d48de6f1SElen Song {
267bdf6c792STorsten Fleischer 	struct at_desc *desc, *_desc;
268bdf6c792STorsten Fleischer 
269bdf6c792STorsten Fleischer 	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
270bdf6c792STorsten Fleischer 		if (desc->txd.cookie == cookie)
271bdf6c792STorsten Fleischer 			return desc;
272bdf6c792STorsten Fleischer 	}
273d48de6f1SElen Song 
274d48de6f1SElen Song 	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
275bdf6c792STorsten Fleischer 		if (desc->txd.cookie == cookie)
276bdf6c792STorsten Fleischer 			return desc;
277d48de6f1SElen Song 	}
278d48de6f1SElen Song 
279bdf6c792STorsten Fleischer 	return NULL;
280d48de6f1SElen Song }
281d48de6f1SElen Song 
/**
 * atc_calc_bytes_left - calculates the number of bytes left according to the
 * value read from CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 * @desc: the descriptor containing the transfer width
 *
 * The value masked out of CTRLA (ATC_BTSIZE_MAX) is scaled to bytes by
 * shifting it left by the descriptor's transfer width before being
 * subtracted from @current_len.
 */
static inline int atc_calc_bytes_left(int current_len, u32 ctrla,
					struct at_desc *desc)
{
	return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
}
295bdf6c792STorsten Fleischer 
/**
 * atc_calc_bytes_left_from_reg - calculates the number of bytes left according
 * to the current value of CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @atchan: the channel to read CTRLA for
 * @desc: the descriptor containing the transfer width
 *
 * Convenience wrapper around atc_calc_bytes_left() that samples the live
 * CTRLA register of @atchan instead of taking a caller-provided snapshot.
 */
static inline int atc_calc_bytes_left_from_reg(int current_len,
			struct at_dma_chan *atchan, struct at_desc *desc)
{
	u32 ctrla = channel_readl(atchan, CTRLA);

	return atc_calc_bytes_left(current_len, ctrla, desc);
}
311bdf6c792STorsten Fleischer 
/**
 * atc_get_bytes_left - get the number of bytes residue for a cookie
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 *
 * Returns the number of bytes not yet transferred for the transaction
 * identified by @cookie, or -EINVAL when no descriptor carries that cookie.
 * For transfers that are still queued, the full total length is returned.
 */
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct at_dma_chan      *atchan = to_at_dma_chan(chan);
	struct at_desc *desc_first = atc_first_active(atchan);
	struct at_desc *desc;
	int ret;
	u32 ctrla, dscr;

	/*
	 * If the cookie doesn't match to the currently running transfer then
	 * we can return the total length of the associated DMA transfer,
	 * because it is still queued.
	 */
	desc = atc_get_desc_by_cookie(atchan, cookie);
	if (desc == NULL)
		return -EINVAL;
	else if (desc != desc_first)
		return desc->total_len;

	/* cookie matches to the currently running transfer */
	ret = desc_first->total_len;

	if (desc_first->lli.dscr) {
		/* hardware linked list transfer */

		/*
		 * Calculate the residue by removing the length of the child
		 * descriptors already transferred from the total length.
		 * To get the current child descriptor we can use the value of
		 * the channel's DSCR register and compare it against the value
		 * of the hardware linked list structure of each child
		 * descriptor.
		 */

		ctrla = channel_readl(atchan, CTRLA);
		rmb(); /* ensure CTRLA is read before DSCR */
		dscr = channel_readl(atchan, DSCR);

		/* for the first descriptor we can be more accurate */
		if (desc_first->lli.dscr == dscr)
			return atc_calc_bytes_left(ret, ctrla, desc_first);

		/* walk the chain, subtracting each fully-transferred child */
		ret -= desc_first->len;
		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
			if (desc->lli.dscr == dscr)
				break;

			ret -= desc->len;
		}

		/*
		 * For the last descriptor in the chain we can calculate
		 * the remaining bytes using the channel's register.
		 * Note that the transfer width of the first and last
		 * descriptor may differ.
		 */
		if (!desc->lli.dscr)
			ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
	} else {
		/* single transfer */
		ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first);
	}

	return ret;
}
382d48de6f1SElen Song 
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want do complete
 *
 * Completes the cookie, recycles @desc and all of its children onto the
 * free list, unmaps the descriptor and runs the client callback (the
 * latter two steps are skipped for cyclic channels).
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* release any DMA mappings the dmaengine core set up for this txd */
	dma_descriptor_unmap(txd);
	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}
423dc78baa2SNicolas Ferre 
/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	/* run completion handling for every descriptor we detached above */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}
454dc78baa2SNicolas Ferre 
455dc78baa2SNicolas Ferre /**
456dc78baa2SNicolas Ferre  * atc_advance_work - at the end of a transaction, move forward
457dc78baa2SNicolas Ferre  * @atchan: channel where the transaction ended
458dc78baa2SNicolas Ferre  *
459dc78baa2SNicolas Ferre  * Called with atchan->lock held and bh disabled
460dc78baa2SNicolas Ferre  */
461dc78baa2SNicolas Ferre static void atc_advance_work(struct at_dma_chan *atchan)
462dc78baa2SNicolas Ferre {
463dc78baa2SNicolas Ferre 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
464dc78baa2SNicolas Ferre 
465d202f051SLudovic Desroches 	if (atc_chan_is_enabled(atchan))
466d202f051SLudovic Desroches 		return;
467d202f051SLudovic Desroches 
468dc78baa2SNicolas Ferre 	if (list_empty(&atchan->active_list) ||
469dc78baa2SNicolas Ferre 	    list_is_singular(&atchan->active_list)) {
470dc78baa2SNicolas Ferre 		atc_complete_all(atchan);
471dc78baa2SNicolas Ferre 	} else {
472dc78baa2SNicolas Ferre 		atc_chain_complete(atchan, atc_first_active(atchan));
473dc78baa2SNicolas Ferre 		/* advance work */
474dc78baa2SNicolas Ferre 		atc_dostart(atchan, atc_first_active(atchan));
475dc78baa2SNicolas Ferre 	}
476dc78baa2SNicolas Ferre }
477dc78baa2SNicolas Ferre 
478dc78baa2SNicolas Ferre 
/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Drops the failing head descriptor, restarts the channel with whatever
 * work remains, and reports the bad descriptor loudly before pretending
 * it completed.
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	/* dump every hardware LLI of the chain for post-mortem analysis */
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}
524dc78baa2SNicolas Ferre 
52553830cc7SNicolas Ferre /**
52653830cc7SNicolas Ferre  * atc_handle_cyclic - at the end of a period, run callback function
52753830cc7SNicolas Ferre  * @atchan: channel used for cyclic operations
52853830cc7SNicolas Ferre  *
52953830cc7SNicolas Ferre  * Called with atchan->lock held and bh disabled
53053830cc7SNicolas Ferre  */
53153830cc7SNicolas Ferre static void atc_handle_cyclic(struct at_dma_chan *atchan)
53253830cc7SNicolas Ferre {
53353830cc7SNicolas Ferre 	struct at_desc			*first = atc_first_active(atchan);
53453830cc7SNicolas Ferre 	struct dma_async_tx_descriptor	*txd = &first->txd;
53553830cc7SNicolas Ferre 	dma_async_tx_callback		callback = txd->callback;
53653830cc7SNicolas Ferre 	void				*param = txd->callback_param;
53753830cc7SNicolas Ferre 
53853830cc7SNicolas Ferre 	dev_vdbg(chan2dev(&atchan->chan_common),
53953830cc7SNicolas Ferre 			"new cyclic period llp 0x%08x\n",
54053830cc7SNicolas Ferre 			channel_readl(atchan, DSCR));
54153830cc7SNicolas Ferre 
54253830cc7SNicolas Ferre 	if (callback)
54353830cc7SNicolas Ferre 		callback(param);
54453830cc7SNicolas Ferre }
545dc78baa2SNicolas Ferre 
546dc78baa2SNicolas Ferre /*--  IRQ & Tasklet  ---------------------------------------------------*/
547dc78baa2SNicolas Ferre 
548dc78baa2SNicolas Ferre static void atc_tasklet(unsigned long data)
549dc78baa2SNicolas Ferre {
550dc78baa2SNicolas Ferre 	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
551d8cb04b0SNicolas Ferre 	unsigned long flags;
552dc78baa2SNicolas Ferre 
553d8cb04b0SNicolas Ferre 	spin_lock_irqsave(&atchan->lock, flags);
55453830cc7SNicolas Ferre 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
555dc78baa2SNicolas Ferre 		atc_handle_error(atchan);
5563c477482SNicolas Ferre 	else if (atc_chan_is_cyclic(atchan))
55753830cc7SNicolas Ferre 		atc_handle_cyclic(atchan);
558dc78baa2SNicolas Ferre 	else
559dc78baa2SNicolas Ferre 		atc_advance_work(atchan);
560dc78baa2SNicolas Ferre 
561d8cb04b0SNicolas Ferre 	spin_unlock_irqrestore(&atchan->lock, flags);
562dc78baa2SNicolas Ferre }
563dc78baa2SNicolas Ferre 
/*
 * at_dma_interrupt - shared interrupt handler for the whole controller.
 *
 * Loops reading the masked status (EBCISR & EBCIMR) until no event is
 * pending; on AHB error it disables the channel and flags the error for
 * the tasklet, which does the actual completion work.
 */
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		/* only react to interrupt sources that are not masked */
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}
603dc78baa2SNicolas Ferre 
604dc78baa2SNicolas Ferre 
605dc78baa2SNicolas Ferre /*--  DMA Engine API  --------------------------------------------------*/
606dc78baa2SNicolas Ferre 
/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Assigns the DMA cookie, then either starts the transfer immediately (if
 * the channel is idle) or appends the chain to the software queue (if the
 * engine is working already).
 *
 * Cookie increment and adding to active_list or queue must be atomic, so
 * both happen under the channel lock.
 *
 * Return: the cookie assigned to this transaction.
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		/* channel idle: kick off the transfer right now */
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		/* channel busy: defer until the current work completes */
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}
640dc78baa2SNicolas Ferre 
641dc78baa2SNicolas Ferre /**
6425abecfa5SMaxime Ripard  * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
6435abecfa5SMaxime Ripard  * @chan: the channel to prepare operation on
6445abecfa5SMaxime Ripard  * @xt: Interleaved transfer template
6455abecfa5SMaxime Ripard  * @flags: tx descriptor status flags
6465abecfa5SMaxime Ripard  */
6475abecfa5SMaxime Ripard static struct dma_async_tx_descriptor *
6485abecfa5SMaxime Ripard atc_prep_dma_interleaved(struct dma_chan *chan,
6495abecfa5SMaxime Ripard 			 struct dma_interleaved_template *xt,
6505abecfa5SMaxime Ripard 			 unsigned long flags)
6515abecfa5SMaxime Ripard {
6525abecfa5SMaxime Ripard 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
6535abecfa5SMaxime Ripard 	struct data_chunk	*first = xt->sgl;
6545abecfa5SMaxime Ripard 	struct at_desc		*desc = NULL;
6555abecfa5SMaxime Ripard 	size_t			xfer_count;
6565abecfa5SMaxime Ripard 	unsigned int		dwidth;
6575abecfa5SMaxime Ripard 	u32			ctrla;
6585abecfa5SMaxime Ripard 	u32			ctrlb;
6595abecfa5SMaxime Ripard 	size_t			len = 0;
6605abecfa5SMaxime Ripard 	int			i;
6615abecfa5SMaxime Ripard 
6624483320eSManinder Singh 	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
6634483320eSManinder Singh 		return NULL;
6644483320eSManinder Singh 
6655abecfa5SMaxime Ripard 	dev_info(chan2dev(chan),
6665abecfa5SMaxime Ripard 		 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
6675abecfa5SMaxime Ripard 		__func__, xt->src_start, xt->dst_start, xt->numf,
6685abecfa5SMaxime Ripard 		xt->frame_size, flags);
6695abecfa5SMaxime Ripard 
6705abecfa5SMaxime Ripard 	/*
6715abecfa5SMaxime Ripard 	 * The controller can only "skip" X bytes every Y bytes, so we
6725abecfa5SMaxime Ripard 	 * need to make sure we are given a template that fit that
6735abecfa5SMaxime Ripard 	 * description, ie a template with chunks that always have the
6745abecfa5SMaxime Ripard 	 * same size, with the same ICGs.
6755abecfa5SMaxime Ripard 	 */
6765abecfa5SMaxime Ripard 	for (i = 0; i < xt->frame_size; i++) {
6775abecfa5SMaxime Ripard 		struct data_chunk *chunk = xt->sgl + i;
6785abecfa5SMaxime Ripard 
6795abecfa5SMaxime Ripard 		if ((chunk->size != xt->sgl->size) ||
6805abecfa5SMaxime Ripard 		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
6815abecfa5SMaxime Ripard 		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
6825abecfa5SMaxime Ripard 			dev_err(chan2dev(chan),
6835abecfa5SMaxime Ripard 				"%s: the controller can transfer only identical chunks\n",
6845abecfa5SMaxime Ripard 				__func__);
6855abecfa5SMaxime Ripard 			return NULL;
6865abecfa5SMaxime Ripard 		}
6875abecfa5SMaxime Ripard 
6885abecfa5SMaxime Ripard 		len += chunk->size;
6895abecfa5SMaxime Ripard 	}
6905abecfa5SMaxime Ripard 
6915abecfa5SMaxime Ripard 	dwidth = atc_get_xfer_width(xt->src_start,
6925abecfa5SMaxime Ripard 				    xt->dst_start, len);
6935abecfa5SMaxime Ripard 
6945abecfa5SMaxime Ripard 	xfer_count = len >> dwidth;
6955abecfa5SMaxime Ripard 	if (xfer_count > ATC_BTSIZE_MAX) {
6965abecfa5SMaxime Ripard 		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
6975abecfa5SMaxime Ripard 		return NULL;
6985abecfa5SMaxime Ripard 	}
6995abecfa5SMaxime Ripard 
7005abecfa5SMaxime Ripard 	ctrla = ATC_SRC_WIDTH(dwidth) |
7015abecfa5SMaxime Ripard 		ATC_DST_WIDTH(dwidth);
7025abecfa5SMaxime Ripard 
7035abecfa5SMaxime Ripard 	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
7045abecfa5SMaxime Ripard 		| ATC_SRC_ADDR_MODE_INCR
7055abecfa5SMaxime Ripard 		| ATC_DST_ADDR_MODE_INCR
7065abecfa5SMaxime Ripard 		| ATC_SRC_PIP
7075abecfa5SMaxime Ripard 		| ATC_DST_PIP
7085abecfa5SMaxime Ripard 		| ATC_FC_MEM2MEM;
7095abecfa5SMaxime Ripard 
7105abecfa5SMaxime Ripard 	/* create the transfer */
7115abecfa5SMaxime Ripard 	desc = atc_desc_get(atchan);
7125abecfa5SMaxime Ripard 	if (!desc) {
7135abecfa5SMaxime Ripard 		dev_err(chan2dev(chan),
7145abecfa5SMaxime Ripard 			"%s: couldn't allocate our descriptor\n", __func__);
7155abecfa5SMaxime Ripard 		return NULL;
7165abecfa5SMaxime Ripard 	}
7175abecfa5SMaxime Ripard 
7185abecfa5SMaxime Ripard 	desc->lli.saddr = xt->src_start;
7195abecfa5SMaxime Ripard 	desc->lli.daddr = xt->dst_start;
7205abecfa5SMaxime Ripard 	desc->lli.ctrla = ctrla | xfer_count;
7215abecfa5SMaxime Ripard 	desc->lli.ctrlb = ctrlb;
7225abecfa5SMaxime Ripard 
7235abecfa5SMaxime Ripard 	desc->boundary = first->size >> dwidth;
7245abecfa5SMaxime Ripard 	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
7255abecfa5SMaxime Ripard 	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
7265abecfa5SMaxime Ripard 
7275abecfa5SMaxime Ripard 	desc->txd.cookie = -EBUSY;
7285abecfa5SMaxime Ripard 	desc->total_len = desc->len = len;
7295abecfa5SMaxime Ripard 	desc->tx_width = dwidth;
7305abecfa5SMaxime Ripard 
7315abecfa5SMaxime Ripard 	/* set end-of-link to the last link descriptor of list*/
7325abecfa5SMaxime Ripard 	set_desc_eol(desc);
7335abecfa5SMaxime Ripard 
7345abecfa5SMaxime Ripard 	desc->txd.flags = flags; /* client is in control of this ack */
7355abecfa5SMaxime Ripard 
7365abecfa5SMaxime Ripard 	return &desc->txd;
7375abecfa5SMaxime Ripard }
7385abecfa5SMaxime Ripard 
7395abecfa5SMaxime Ripard /**
740dc78baa2SNicolas Ferre  * atc_prep_dma_memcpy - prepare a memcpy operation
741dc78baa2SNicolas Ferre  * @chan: the channel to prepare operation on
742dc78baa2SNicolas Ferre  * @dest: operation virtual destination address
743dc78baa2SNicolas Ferre  * @src: operation virtual source address
744dc78baa2SNicolas Ferre  * @len: operation length
745dc78baa2SNicolas Ferre  * @flags: tx descriptor status flags
746dc78baa2SNicolas Ferre  */
747dc78baa2SNicolas Ferre static struct dma_async_tx_descriptor *
748dc78baa2SNicolas Ferre atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
749dc78baa2SNicolas Ferre 		size_t len, unsigned long flags)
750dc78baa2SNicolas Ferre {
751dc78baa2SNicolas Ferre 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
752dc78baa2SNicolas Ferre 	struct at_desc		*desc = NULL;
753dc78baa2SNicolas Ferre 	struct at_desc		*first = NULL;
754dc78baa2SNicolas Ferre 	struct at_desc		*prev = NULL;
755dc78baa2SNicolas Ferre 	size_t			xfer_count;
756dc78baa2SNicolas Ferre 	size_t			offset;
757dc78baa2SNicolas Ferre 	unsigned int		src_width;
758dc78baa2SNicolas Ferre 	unsigned int		dst_width;
759dc78baa2SNicolas Ferre 	u32			ctrla;
760dc78baa2SNicolas Ferre 	u32			ctrlb;
761dc78baa2SNicolas Ferre 
762dc78baa2SNicolas Ferre 	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
763dc78baa2SNicolas Ferre 			dest, src, len, flags);
764dc78baa2SNicolas Ferre 
765dc78baa2SNicolas Ferre 	if (unlikely(!len)) {
766dc78baa2SNicolas Ferre 		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
767dc78baa2SNicolas Ferre 		return NULL;
768dc78baa2SNicolas Ferre 	}
769dc78baa2SNicolas Ferre 
7709b3aa589SNicolas Ferre 	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
771dc78baa2SNicolas Ferre 		| ATC_SRC_ADDR_MODE_INCR
772dc78baa2SNicolas Ferre 		| ATC_DST_ADDR_MODE_INCR
773dc78baa2SNicolas Ferre 		| ATC_FC_MEM2MEM;
774dc78baa2SNicolas Ferre 
775dc78baa2SNicolas Ferre 	/*
776dc78baa2SNicolas Ferre 	 * We can be a lot more clever here, but this should take care
777dc78baa2SNicolas Ferre 	 * of the most common optimization.
778dc78baa2SNicolas Ferre 	 */
779265567fbSTorsten Fleischer 	src_width = dst_width = atc_get_xfer_width(src, dest, len);
780265567fbSTorsten Fleischer 
781265567fbSTorsten Fleischer 	ctrla = ATC_SRC_WIDTH(src_width) |
782265567fbSTorsten Fleischer 		ATC_DST_WIDTH(dst_width);
783dc78baa2SNicolas Ferre 
784dc78baa2SNicolas Ferre 	for (offset = 0; offset < len; offset += xfer_count << src_width) {
785dc78baa2SNicolas Ferre 		xfer_count = min_t(size_t, (len - offset) >> src_width,
786dc78baa2SNicolas Ferre 				ATC_BTSIZE_MAX);
787dc78baa2SNicolas Ferre 
788dc78baa2SNicolas Ferre 		desc = atc_desc_get(atchan);
789dc78baa2SNicolas Ferre 		if (!desc)
790dc78baa2SNicolas Ferre 			goto err_desc_get;
791dc78baa2SNicolas Ferre 
792dc78baa2SNicolas Ferre 		desc->lli.saddr = src + offset;
793dc78baa2SNicolas Ferre 		desc->lli.daddr = dest + offset;
794dc78baa2SNicolas Ferre 		desc->lli.ctrla = ctrla | xfer_count;
795dc78baa2SNicolas Ferre 		desc->lli.ctrlb = ctrlb;
796dc78baa2SNicolas Ferre 
797dc78baa2SNicolas Ferre 		desc->txd.cookie = 0;
798bdf6c792STorsten Fleischer 		desc->len = xfer_count << src_width;
799dc78baa2SNicolas Ferre 
800e257e156SNicolas Ferre 		atc_desc_chain(&first, &prev, desc);
801dc78baa2SNicolas Ferre 	}
802dc78baa2SNicolas Ferre 
803dc78baa2SNicolas Ferre 	/* First descriptor of the chain embedds additional information */
804dc78baa2SNicolas Ferre 	first->txd.cookie = -EBUSY;
805bdf6c792STorsten Fleischer 	first->total_len = len;
806bdf6c792STorsten Fleischer 
807bdf6c792STorsten Fleischer 	/* set transfer width for the calculation of the residue */
808d088c33bSElen Song 	first->tx_width = src_width;
809bdf6c792STorsten Fleischer 	prev->tx_width = src_width;
810dc78baa2SNicolas Ferre 
811dc78baa2SNicolas Ferre 	/* set end-of-link to the last link descriptor of list*/
812dc78baa2SNicolas Ferre 	set_desc_eol(desc);
813dc78baa2SNicolas Ferre 
814568f7f0cSNicolas Ferre 	first->txd.flags = flags; /* client is in control of this ack */
815dc78baa2SNicolas Ferre 
816dc78baa2SNicolas Ferre 	return &first->txd;
817dc78baa2SNicolas Ferre 
818dc78baa2SNicolas Ferre err_desc_get:
819dc78baa2SNicolas Ferre 	atc_desc_put(atchan, first);
820dc78baa2SNicolas Ferre 	return NULL;
821dc78baa2SNicolas Ferre }
822dc78baa2SNicolas Ferre 
823808347f6SNicolas Ferre 
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 *
 * Builds one link-list descriptor per scatterlist entry.  The peripheral
 * side uses the address, register width and maxburst values from the
 * channel's slave configuration; the memory side uses 32-bit accesses when
 * a segment's address and length are word aligned, byte accesses otherwise.
 *
 * Return: the first descriptor of the chain, or NULL on error.
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	/* common burst-size setup from the slave configuration */
	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* memory (incrementing) -> peripheral register (fixed) */
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			/* word (32-bit) memory access if aligned, else bytes */
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		/* peripheral register (fixed) -> memory (incrementing) */
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			/* word (32-bit) memory access if aligned, else bytes */
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(prev);

	/* First descriptor of the chain embedds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set transfer width for the calculation of the residue */
	first->tx_width = reg_width;
	prev->tx_width = reg_width;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}
974808347f6SNicolas Ferre 
97553830cc7SNicolas Ferre /**
976265567fbSTorsten Fleischer  * atc_prep_dma_sg - prepare memory to memory scather-gather operation
977265567fbSTorsten Fleischer  * @chan: the channel to prepare operation on
978265567fbSTorsten Fleischer  * @dst_sg: destination scatterlist
979265567fbSTorsten Fleischer  * @dst_nents: number of destination scatterlist entries
980265567fbSTorsten Fleischer  * @src_sg: source scatterlist
981265567fbSTorsten Fleischer  * @src_nents: number of source scatterlist entries
982265567fbSTorsten Fleischer  * @flags: tx descriptor status flags
983265567fbSTorsten Fleischer  */
984265567fbSTorsten Fleischer static struct dma_async_tx_descriptor *
985265567fbSTorsten Fleischer atc_prep_dma_sg(struct dma_chan *chan,
986265567fbSTorsten Fleischer 		struct scatterlist *dst_sg, unsigned int dst_nents,
987265567fbSTorsten Fleischer 		struct scatterlist *src_sg, unsigned int src_nents,
988265567fbSTorsten Fleischer 		unsigned long flags)
989265567fbSTorsten Fleischer {
990265567fbSTorsten Fleischer 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
991265567fbSTorsten Fleischer 	struct at_desc		*desc = NULL;
992265567fbSTorsten Fleischer 	struct at_desc		*first = NULL;
993265567fbSTorsten Fleischer 	struct at_desc		*prev = NULL;
994265567fbSTorsten Fleischer 	unsigned int		src_width;
995265567fbSTorsten Fleischer 	unsigned int		dst_width;
996265567fbSTorsten Fleischer 	size_t			xfer_count;
997265567fbSTorsten Fleischer 	u32			ctrla;
998265567fbSTorsten Fleischer 	u32			ctrlb;
999265567fbSTorsten Fleischer 	size_t			dst_len = 0, src_len = 0;
1000265567fbSTorsten Fleischer 	dma_addr_t		dst = 0, src = 0;
1001265567fbSTorsten Fleischer 	size_t			len = 0, total_len = 0;
1002265567fbSTorsten Fleischer 
1003265567fbSTorsten Fleischer 	if (unlikely(dst_nents == 0 || src_nents == 0))
1004265567fbSTorsten Fleischer 		return NULL;
1005265567fbSTorsten Fleischer 
1006265567fbSTorsten Fleischer 	if (unlikely(dst_sg == NULL || src_sg == NULL))
1007265567fbSTorsten Fleischer 		return NULL;
1008265567fbSTorsten Fleischer 
1009265567fbSTorsten Fleischer 	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
1010265567fbSTorsten Fleischer 		| ATC_SRC_ADDR_MODE_INCR
1011265567fbSTorsten Fleischer 		| ATC_DST_ADDR_MODE_INCR
1012265567fbSTorsten Fleischer 		| ATC_FC_MEM2MEM;
1013265567fbSTorsten Fleischer 
1014265567fbSTorsten Fleischer 	/*
1015265567fbSTorsten Fleischer 	 * loop until there is either no more source or no more destination
1016265567fbSTorsten Fleischer 	 * scatterlist entry
1017265567fbSTorsten Fleischer 	 */
1018265567fbSTorsten Fleischer 	while (true) {
1019265567fbSTorsten Fleischer 
1020265567fbSTorsten Fleischer 		/* prepare the next transfer */
1021265567fbSTorsten Fleischer 		if (dst_len == 0) {
1022265567fbSTorsten Fleischer 
1023265567fbSTorsten Fleischer 			/* no more destination scatterlist entries */
1024265567fbSTorsten Fleischer 			if (!dst_sg || !dst_nents)
1025265567fbSTorsten Fleischer 				break;
1026265567fbSTorsten Fleischer 
1027265567fbSTorsten Fleischer 			dst = sg_dma_address(dst_sg);
1028265567fbSTorsten Fleischer 			dst_len = sg_dma_len(dst_sg);
1029265567fbSTorsten Fleischer 
1030265567fbSTorsten Fleischer 			dst_sg = sg_next(dst_sg);
1031265567fbSTorsten Fleischer 			dst_nents--;
1032265567fbSTorsten Fleischer 		}
1033265567fbSTorsten Fleischer 
1034265567fbSTorsten Fleischer 		if (src_len == 0) {
1035265567fbSTorsten Fleischer 
1036265567fbSTorsten Fleischer 			/* no more source scatterlist entries */
1037265567fbSTorsten Fleischer 			if (!src_sg || !src_nents)
1038265567fbSTorsten Fleischer 				break;
1039265567fbSTorsten Fleischer 
1040265567fbSTorsten Fleischer 			src = sg_dma_address(src_sg);
1041265567fbSTorsten Fleischer 			src_len = sg_dma_len(src_sg);
1042265567fbSTorsten Fleischer 
1043265567fbSTorsten Fleischer 			src_sg = sg_next(src_sg);
1044265567fbSTorsten Fleischer 			src_nents--;
1045265567fbSTorsten Fleischer 		}
1046265567fbSTorsten Fleischer 
1047265567fbSTorsten Fleischer 		len = min_t(size_t, src_len, dst_len);
1048265567fbSTorsten Fleischer 		if (len == 0)
1049265567fbSTorsten Fleischer 			continue;
1050265567fbSTorsten Fleischer 
1051265567fbSTorsten Fleischer 		/* take care for the alignment */
1052265567fbSTorsten Fleischer 		src_width = dst_width = atc_get_xfer_width(src, dst, len);
1053265567fbSTorsten Fleischer 
1054265567fbSTorsten Fleischer 		ctrla = ATC_SRC_WIDTH(src_width) |
1055265567fbSTorsten Fleischer 			ATC_DST_WIDTH(dst_width);
1056265567fbSTorsten Fleischer 
1057265567fbSTorsten Fleischer 		/*
1058265567fbSTorsten Fleischer 		 * The number of transfers to set up refer to the source width
1059265567fbSTorsten Fleischer 		 * that depends on the alignment.
1060265567fbSTorsten Fleischer 		 */
1061265567fbSTorsten Fleischer 		xfer_count = len >> src_width;
1062265567fbSTorsten Fleischer 		if (xfer_count > ATC_BTSIZE_MAX) {
1063265567fbSTorsten Fleischer 			xfer_count = ATC_BTSIZE_MAX;
1064265567fbSTorsten Fleischer 			len = ATC_BTSIZE_MAX << src_width;
1065265567fbSTorsten Fleischer 		}
1066265567fbSTorsten Fleischer 
1067265567fbSTorsten Fleischer 		/* create the transfer */
1068265567fbSTorsten Fleischer 		desc = atc_desc_get(atchan);
1069265567fbSTorsten Fleischer 		if (!desc)
1070265567fbSTorsten Fleischer 			goto err_desc_get;
1071265567fbSTorsten Fleischer 
1072265567fbSTorsten Fleischer 		desc->lli.saddr = src;
1073265567fbSTorsten Fleischer 		desc->lli.daddr = dst;
1074265567fbSTorsten Fleischer 		desc->lli.ctrla = ctrla | xfer_count;
1075265567fbSTorsten Fleischer 		desc->lli.ctrlb = ctrlb;
1076265567fbSTorsten Fleischer 
1077265567fbSTorsten Fleischer 		desc->txd.cookie = 0;
1078265567fbSTorsten Fleischer 		desc->len = len;
1079265567fbSTorsten Fleischer 
1080265567fbSTorsten Fleischer 		/*
1081265567fbSTorsten Fleischer 		 * Although we only need the transfer width for the first and
1082265567fbSTorsten Fleischer 		 * the last descriptor, its easier to set it to all descriptors.
1083265567fbSTorsten Fleischer 		 */
1084265567fbSTorsten Fleischer 		desc->tx_width = src_width;
1085265567fbSTorsten Fleischer 
1086265567fbSTorsten Fleischer 		atc_desc_chain(&first, &prev, desc);
1087265567fbSTorsten Fleischer 
1088265567fbSTorsten Fleischer 		/* update the lengths and addresses for the next loop cycle */
1089265567fbSTorsten Fleischer 		dst_len -= len;
1090265567fbSTorsten Fleischer 		src_len -= len;
1091265567fbSTorsten Fleischer 		dst += len;
1092265567fbSTorsten Fleischer 		src += len;
1093265567fbSTorsten Fleischer 
1094265567fbSTorsten Fleischer 		total_len += len;
1095265567fbSTorsten Fleischer 	}
1096265567fbSTorsten Fleischer 
1097265567fbSTorsten Fleischer 	/* First descriptor of the chain embedds additional information */
1098265567fbSTorsten Fleischer 	first->txd.cookie = -EBUSY;
1099265567fbSTorsten Fleischer 	first->total_len = total_len;
1100265567fbSTorsten Fleischer 
1101265567fbSTorsten Fleischer 	/* set end-of-link to the last link descriptor of list*/
1102265567fbSTorsten Fleischer 	set_desc_eol(desc);
1103265567fbSTorsten Fleischer 
1104265567fbSTorsten Fleischer 	first->txd.flags = flags; /* client is in control of this ack */
1105265567fbSTorsten Fleischer 
1106265567fbSTorsten Fleischer 	return &first->txd;
1107265567fbSTorsten Fleischer 
1108265567fbSTorsten Fleischer err_desc_get:
1109265567fbSTorsten Fleischer 	atc_desc_put(atchan, first);
1110265567fbSTorsten Fleischer 	return NULL;
1111265567fbSTorsten Fleischer }
1112265567fbSTorsten Fleischer 
1113265567fbSTorsten Fleischer /**
111453830cc7SNicolas Ferre  * atc_dma_cyclic_check_values
111553830cc7SNicolas Ferre  * Check for too big/unaligned periods and unaligned DMA buffer
111653830cc7SNicolas Ferre  */
111753830cc7SNicolas Ferre static int
111853830cc7SNicolas Ferre atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
11190e7264ccSAndy Shevchenko 		size_t period_len)
112053830cc7SNicolas Ferre {
112153830cc7SNicolas Ferre 	if (period_len > (ATC_BTSIZE_MAX << reg_width))
112253830cc7SNicolas Ferre 		goto err_out;
112353830cc7SNicolas Ferre 	if (unlikely(period_len & ((1 << reg_width) - 1)))
112453830cc7SNicolas Ferre 		goto err_out;
112553830cc7SNicolas Ferre 	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
112653830cc7SNicolas Ferre 		goto err_out;
112753830cc7SNicolas Ferre 
112853830cc7SNicolas Ferre 	return 0;
112953830cc7SNicolas Ferre 
113053830cc7SNicolas Ferre err_out:
113153830cc7SNicolas Ferre 	return -EINVAL;
113253830cc7SNicolas Ferre }
113353830cc7SNicolas Ferre 
/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 * @chan: the DMA channel to prepare
 * @desc: descriptor to fill in
 * @period_index: index of this period within the cyclic buffer
 * @buf_addr: physical start address of the whole cyclic buffer
 * @reg_width: peripheral register width, as a log2 of the number of bytes
 * @period_len: number of bytes per period
 * @direction: transfer direction, to or from device
 *
 * Programs the link-list entry of @desc so it moves one period of data
 * between the cyclic memory buffer and the peripheral address taken from
 * the channel's slave configuration.
 *
 * Return: 0 on success, -EINVAL for an unsupported direction.
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CRTLA value */
	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* memory (incrementing) -> peripheral register (fixed) */
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		desc->len = period_len;
		break;

	case DMA_DEV_TO_MEM:
		/* peripheral register (fixed) -> memory (incrementing) */
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		desc->len = period_len;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
118553830cc7SNicolas Ferre 
118653830cc7SNicolas Ferre /**
118753830cc7SNicolas Ferre  * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
118853830cc7SNicolas Ferre  * @chan: the DMA channel to prepare
118953830cc7SNicolas Ferre  * @buf_addr: physical DMA address where the buffer starts
119053830cc7SNicolas Ferre  * @buf_len: total number of bytes for the entire buffer
119153830cc7SNicolas Ferre  * @period_len: number of bytes for each period
119253830cc7SNicolas Ferre  * @direction: transfer direction, to or from device
1193ec8b5e48SPeter Ujfalusi  * @flags: tx descriptor status flags
119453830cc7SNicolas Ferre  */
119553830cc7SNicolas Ferre static struct dma_async_tx_descriptor *
119653830cc7SNicolas Ferre atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1197185ecb5fSAlexandre Bounine 		size_t period_len, enum dma_transfer_direction direction,
119831c1e5a1SLaurent Pinchart 		unsigned long flags)
119953830cc7SNicolas Ferre {
120053830cc7SNicolas Ferre 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
120153830cc7SNicolas Ferre 	struct at_dma_slave	*atslave = chan->private;
1202beeaa103SNicolas Ferre 	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
120353830cc7SNicolas Ferre 	struct at_desc		*first = NULL;
120453830cc7SNicolas Ferre 	struct at_desc		*prev = NULL;
120553830cc7SNicolas Ferre 	unsigned long		was_cyclic;
1206beeaa103SNicolas Ferre 	unsigned int		reg_width;
120753830cc7SNicolas Ferre 	unsigned int		periods = buf_len / period_len;
120853830cc7SNicolas Ferre 	unsigned int		i;
120953830cc7SNicolas Ferre 
121053830cc7SNicolas Ferre 	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
1211db8196dfSVinod Koul 			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
121253830cc7SNicolas Ferre 			buf_addr,
121353830cc7SNicolas Ferre 			periods, buf_len, period_len);
121453830cc7SNicolas Ferre 
121553830cc7SNicolas Ferre 	if (unlikely(!atslave || !buf_len || !period_len)) {
121653830cc7SNicolas Ferre 		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
121753830cc7SNicolas Ferre 		return NULL;
121853830cc7SNicolas Ferre 	}
121953830cc7SNicolas Ferre 
122053830cc7SNicolas Ferre 	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
122153830cc7SNicolas Ferre 	if (was_cyclic) {
122253830cc7SNicolas Ferre 		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
122353830cc7SNicolas Ferre 		return NULL;
122453830cc7SNicolas Ferre 	}
122553830cc7SNicolas Ferre 
12260e7264ccSAndy Shevchenko 	if (unlikely(!is_slave_direction(direction)))
12270e7264ccSAndy Shevchenko 		goto err_out;
12280e7264ccSAndy Shevchenko 
1229beeaa103SNicolas Ferre 	if (sconfig->direction == DMA_MEM_TO_DEV)
1230beeaa103SNicolas Ferre 		reg_width = convert_buswidth(sconfig->dst_addr_width);
1231beeaa103SNicolas Ferre 	else
1232beeaa103SNicolas Ferre 		reg_width = convert_buswidth(sconfig->src_addr_width);
1233beeaa103SNicolas Ferre 
123453830cc7SNicolas Ferre 	/* Check for too big/unaligned periods and unaligned DMA buffer */
12350e7264ccSAndy Shevchenko 	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
123653830cc7SNicolas Ferre 		goto err_out;
123753830cc7SNicolas Ferre 
123853830cc7SNicolas Ferre 	/* build cyclic linked list */
123953830cc7SNicolas Ferre 	for (i = 0; i < periods; i++) {
124053830cc7SNicolas Ferre 		struct at_desc	*desc;
124153830cc7SNicolas Ferre 
124253830cc7SNicolas Ferre 		desc = atc_desc_get(atchan);
124353830cc7SNicolas Ferre 		if (!desc)
124453830cc7SNicolas Ferre 			goto err_desc_get;
124553830cc7SNicolas Ferre 
1246beeaa103SNicolas Ferre 		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
1247beeaa103SNicolas Ferre 					     reg_width, period_len, direction))
124853830cc7SNicolas Ferre 			goto err_desc_get;
124953830cc7SNicolas Ferre 
125053830cc7SNicolas Ferre 		atc_desc_chain(&first, &prev, desc);
125153830cc7SNicolas Ferre 	}
125253830cc7SNicolas Ferre 
125353830cc7SNicolas Ferre 	/* lets make a cyclic list */
125453830cc7SNicolas Ferre 	prev->lli.dscr = first->txd.phys;
125553830cc7SNicolas Ferre 
125653830cc7SNicolas Ferre 	/* First descriptor of the chain embedds additional information */
125753830cc7SNicolas Ferre 	first->txd.cookie = -EBUSY;
1258bdf6c792STorsten Fleischer 	first->total_len = buf_len;
1259d088c33bSElen Song 	first->tx_width = reg_width;
126053830cc7SNicolas Ferre 
126153830cc7SNicolas Ferre 	return &first->txd;
126253830cc7SNicolas Ferre 
126353830cc7SNicolas Ferre err_desc_get:
126453830cc7SNicolas Ferre 	dev_err(chan2dev(chan), "not enough descriptors available\n");
126553830cc7SNicolas Ferre 	atc_desc_put(atchan, first);
126653830cc7SNicolas Ferre err_out:
126753830cc7SNicolas Ferre 	clear_bit(ATC_IS_CYCLIC, &atchan->status);
126853830cc7SNicolas Ferre 	return NULL;
126953830cc7SNicolas Ferre }
127053830cc7SNicolas Ferre 
12714facfe7fSMaxime Ripard static int atc_config(struct dma_chan *chan,
1272beeaa103SNicolas Ferre 		      struct dma_slave_config *sconfig)
1273beeaa103SNicolas Ferre {
1274beeaa103SNicolas Ferre 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1275beeaa103SNicolas Ferre 
12764facfe7fSMaxime Ripard 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
12774facfe7fSMaxime Ripard 
1278beeaa103SNicolas Ferre 	/* Check if it is chan is configured for slave transfers */
1279beeaa103SNicolas Ferre 	if (!chan->private)
1280beeaa103SNicolas Ferre 		return -EINVAL;
1281beeaa103SNicolas Ferre 
1282beeaa103SNicolas Ferre 	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1283beeaa103SNicolas Ferre 
1284beeaa103SNicolas Ferre 	convert_burst(&atchan->dma_sconfig.src_maxburst);
1285beeaa103SNicolas Ferre 	convert_burst(&atchan->dma_sconfig.dst_maxburst);
1286beeaa103SNicolas Ferre 
1287beeaa103SNicolas Ferre 	return 0;
1288beeaa103SNicolas Ferre }
1289beeaa103SNicolas Ferre 
12904facfe7fSMaxime Ripard static int atc_pause(struct dma_chan *chan)
1291808347f6SNicolas Ferre {
1292808347f6SNicolas Ferre 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1293808347f6SNicolas Ferre 	struct at_dma		*atdma = to_at_dma(chan->device);
129423b5e3adSNicolas Ferre 	int			chan_id = atchan->chan_common.chan_id;
1295d8cb04b0SNicolas Ferre 	unsigned long		flags;
129623b5e3adSNicolas Ferre 
1297808347f6SNicolas Ferre 	LIST_HEAD(list);
1298808347f6SNicolas Ferre 
12994facfe7fSMaxime Ripard 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1300c3635c78SLinus Walleij 
1301d8cb04b0SNicolas Ferre 	spin_lock_irqsave(&atchan->lock, flags);
130223b5e3adSNicolas Ferre 
130323b5e3adSNicolas Ferre 	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
130423b5e3adSNicolas Ferre 	set_bit(ATC_IS_PAUSED, &atchan->status);
130523b5e3adSNicolas Ferre 
1306d8cb04b0SNicolas Ferre 	spin_unlock_irqrestore(&atchan->lock, flags);
13074facfe7fSMaxime Ripard 
13084facfe7fSMaxime Ripard 	return 0;
13094facfe7fSMaxime Ripard }
13104facfe7fSMaxime Ripard 
13114facfe7fSMaxime Ripard static int atc_resume(struct dma_chan *chan)
13124facfe7fSMaxime Ripard {
13134facfe7fSMaxime Ripard 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
13144facfe7fSMaxime Ripard 	struct at_dma		*atdma = to_at_dma(chan->device);
13154facfe7fSMaxime Ripard 	int			chan_id = atchan->chan_common.chan_id;
13164facfe7fSMaxime Ripard 	unsigned long		flags;
13174facfe7fSMaxime Ripard 
13184facfe7fSMaxime Ripard 	LIST_HEAD(list);
13194facfe7fSMaxime Ripard 
13204facfe7fSMaxime Ripard 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
13214facfe7fSMaxime Ripard 
13223c477482SNicolas Ferre 	if (!atc_chan_is_paused(atchan))
132323b5e3adSNicolas Ferre 		return 0;
132423b5e3adSNicolas Ferre 
1325d8cb04b0SNicolas Ferre 	spin_lock_irqsave(&atchan->lock, flags);
132623b5e3adSNicolas Ferre 
132723b5e3adSNicolas Ferre 	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
132823b5e3adSNicolas Ferre 	clear_bit(ATC_IS_PAUSED, &atchan->status);
132923b5e3adSNicolas Ferre 
1330d8cb04b0SNicolas Ferre 	spin_unlock_irqrestore(&atchan->lock, flags);
13314facfe7fSMaxime Ripard 
13324facfe7fSMaxime Ripard 	return 0;
13334facfe7fSMaxime Ripard }
13344facfe7fSMaxime Ripard 
/*
 * atc_terminate_all - abort every transfer on @chan.
 *
 * Force-disables the channel in hardware, then completes all active and
 * queued descriptors and clears the paused/cyclic state.  Returns 0.
 */
static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	struct at_desc		*desc, *_desc;
	unsigned long		flags;

	/* Temporary holder for all descriptors being flushed. */
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_irqsave(&atchan->lock, flags);

	/* disabling channel: must also remove suspend state */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
1378808347f6SNicolas Ferre 
1379dc78baa2SNicolas Ferre /**
138007934481SLinus Walleij  * atc_tx_status - poll for transaction completion
1381dc78baa2SNicolas Ferre  * @chan: DMA channel
1382dc78baa2SNicolas Ferre  * @cookie: transaction identifier to check status of
138307934481SLinus Walleij  * @txstate: if not %NULL updated with transaction state
1384dc78baa2SNicolas Ferre  *
138507934481SLinus Walleij  * If @txstate is passed in, upon return it reflect the driver
1386dc78baa2SNicolas Ferre  * internal state and can be used with dma_async_is_complete() to check
1387dc78baa2SNicolas Ferre  * the status of multiple cookies without re-checking hardware state.
1388dc78baa2SNicolas Ferre  */
1389dc78baa2SNicolas Ferre static enum dma_status
139007934481SLinus Walleij atc_tx_status(struct dma_chan *chan,
1391dc78baa2SNicolas Ferre 		dma_cookie_t cookie,
139207934481SLinus Walleij 		struct dma_tx_state *txstate)
1393dc78baa2SNicolas Ferre {
1394dc78baa2SNicolas Ferre 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1395d8cb04b0SNicolas Ferre 	unsigned long		flags;
1396dc78baa2SNicolas Ferre 	enum dma_status		ret;
1397d48de6f1SElen Song 	int bytes = 0;
1398d48de6f1SElen Song 
1399d48de6f1SElen Song 	ret = dma_cookie_status(chan, cookie, txstate);
14006d203d1eSVinod Koul 	if (ret == DMA_COMPLETE)
1401d48de6f1SElen Song 		return ret;
1402d48de6f1SElen Song 	/*
1403d48de6f1SElen Song 	 * There's no point calculating the residue if there's
1404d48de6f1SElen Song 	 * no txstate to store the value.
1405d48de6f1SElen Song 	 */
1406d48de6f1SElen Song 	if (!txstate)
1407d48de6f1SElen Song 		return DMA_ERROR;
1408dc78baa2SNicolas Ferre 
1409d8cb04b0SNicolas Ferre 	spin_lock_irqsave(&atchan->lock, flags);
1410dc78baa2SNicolas Ferre 
1411d48de6f1SElen Song 	/*  Get number of bytes left in the active transactions */
1412bdf6c792STorsten Fleischer 	bytes = atc_get_bytes_left(chan, cookie);
1413dc78baa2SNicolas Ferre 
1414d8cb04b0SNicolas Ferre 	spin_unlock_irqrestore(&atchan->lock, flags);
1415dc78baa2SNicolas Ferre 
1416d48de6f1SElen Song 	if (unlikely(bytes < 0)) {
1417d48de6f1SElen Song 		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1418d48de6f1SElen Song 		return DMA_ERROR;
1419c3dbc60cSNicolas Ferre 	} else {
1420d48de6f1SElen Song 		dma_set_residue(txstate, bytes);
1421c3dbc60cSNicolas Ferre 	}
1422543aabc7SNicolas Ferre 
1423d48de6f1SElen Song 	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1424d48de6f1SElen Song 		 ret, cookie, bytes);
1425dc78baa2SNicolas Ferre 
1426dc78baa2SNicolas Ferre 	return ret;
1427dc78baa2SNicolas Ferre }
1428dc78baa2SNicolas Ferre 
1429dc78baa2SNicolas Ferre /**
1430dc78baa2SNicolas Ferre  * atc_issue_pending - try to finish work
1431dc78baa2SNicolas Ferre  * @chan: target DMA channel
1432dc78baa2SNicolas Ferre  */
1433dc78baa2SNicolas Ferre static void atc_issue_pending(struct dma_chan *chan)
1434dc78baa2SNicolas Ferre {
1435dc78baa2SNicolas Ferre 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1436d8cb04b0SNicolas Ferre 	unsigned long		flags;
1437dc78baa2SNicolas Ferre 
1438dc78baa2SNicolas Ferre 	dev_vdbg(chan2dev(chan), "issue_pending\n");
1439dc78baa2SNicolas Ferre 
144053830cc7SNicolas Ferre 	/* Not needed for cyclic transfers */
14413c477482SNicolas Ferre 	if (atc_chan_is_cyclic(atchan))
144253830cc7SNicolas Ferre 		return;
144353830cc7SNicolas Ferre 
1444d8cb04b0SNicolas Ferre 	spin_lock_irqsave(&atchan->lock, flags);
1445dc78baa2SNicolas Ferre 	atc_advance_work(atchan);
1446d8cb04b0SNicolas Ferre 	spin_unlock_irqrestore(&atchan->lock, flags);
1447dc78baa2SNicolas Ferre }
1448dc78baa2SNicolas Ferre 
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors
	 * NOTE(review): despite the comment above, this early return skips
	 * the CFG register write below — confirm this is intentional. */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			/* proceed with however many descriptors we got */
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}
1521dc78baa2SNicolas Ferre 
1522dc78baa2SNicolas Ferre /**
1523dc78baa2SNicolas Ferre  * atc_free_chan_resources - free all channel resources
1524dc78baa2SNicolas Ferre  * @chan: DMA channel
1525dc78baa2SNicolas Ferre  */
1526dc78baa2SNicolas Ferre static void atc_free_chan_resources(struct dma_chan *chan)
1527dc78baa2SNicolas Ferre {
1528dc78baa2SNicolas Ferre 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1529dc78baa2SNicolas Ferre 	struct at_dma		*atdma = to_at_dma(chan->device);
1530dc78baa2SNicolas Ferre 	struct at_desc		*desc, *_desc;
1531dc78baa2SNicolas Ferre 	LIST_HEAD(list);
1532dc78baa2SNicolas Ferre 
1533dc78baa2SNicolas Ferre 	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1534dc78baa2SNicolas Ferre 		atchan->descs_allocated);
1535dc78baa2SNicolas Ferre 
1536dc78baa2SNicolas Ferre 	/* ASSERT:  channel is idle */
1537dc78baa2SNicolas Ferre 	BUG_ON(!list_empty(&atchan->active_list));
1538dc78baa2SNicolas Ferre 	BUG_ON(!list_empty(&atchan->queue));
1539dc78baa2SNicolas Ferre 	BUG_ON(atc_chan_is_enabled(atchan));
1540dc78baa2SNicolas Ferre 
1541dc78baa2SNicolas Ferre 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1542dc78baa2SNicolas Ferre 		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1543dc78baa2SNicolas Ferre 		list_del(&desc->desc_node);
1544dc78baa2SNicolas Ferre 		/* free link descriptor */
1545dc78baa2SNicolas Ferre 		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1546dc78baa2SNicolas Ferre 	}
1547dc78baa2SNicolas Ferre 	list_splice_init(&atchan->free_list, &list);
1548dc78baa2SNicolas Ferre 	atchan->descs_allocated = 0;
154953830cc7SNicolas Ferre 	atchan->status = 0;
1550dc78baa2SNicolas Ferre 
1551dc78baa2SNicolas Ferre 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1552dc78baa2SNicolas Ferre }
1553dc78baa2SNicolas Ferre 
1554bbe89c8eSLudovic Desroches #ifdef CONFIG_OF
1555bbe89c8eSLudovic Desroches static bool at_dma_filter(struct dma_chan *chan, void *slave)
1556bbe89c8eSLudovic Desroches {
1557bbe89c8eSLudovic Desroches 	struct at_dma_slave *atslave = slave;
1558bbe89c8eSLudovic Desroches 
1559bbe89c8eSLudovic Desroches 	if (atslave->dma_dev == chan->device->dev) {
1560bbe89c8eSLudovic Desroches 		chan->private = atslave;
1561bbe89c8eSLudovic Desroches 		return true;
1562bbe89c8eSLudovic Desroches 	} else {
1563bbe89c8eSLudovic Desroches 		return false;
1564bbe89c8eSLudovic Desroches 	}
1565bbe89c8eSLudovic Desroches }
1566bbe89c8eSLudovic Desroches 
1567bbe89c8eSLudovic Desroches static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1568bbe89c8eSLudovic Desroches 				     struct of_dma *of_dma)
1569bbe89c8eSLudovic Desroches {
1570bbe89c8eSLudovic Desroches 	struct dma_chan *chan;
1571bbe89c8eSLudovic Desroches 	struct at_dma_chan *atchan;
1572bbe89c8eSLudovic Desroches 	struct at_dma_slave *atslave;
1573bbe89c8eSLudovic Desroches 	dma_cap_mask_t mask;
1574bbe89c8eSLudovic Desroches 	unsigned int per_id;
1575bbe89c8eSLudovic Desroches 	struct platform_device *dmac_pdev;
1576bbe89c8eSLudovic Desroches 
1577bbe89c8eSLudovic Desroches 	if (dma_spec->args_count != 2)
1578bbe89c8eSLudovic Desroches 		return NULL;
1579bbe89c8eSLudovic Desroches 
1580bbe89c8eSLudovic Desroches 	dmac_pdev = of_find_device_by_node(dma_spec->np);
1581bbe89c8eSLudovic Desroches 
1582bbe89c8eSLudovic Desroches 	dma_cap_zero(mask);
1583bbe89c8eSLudovic Desroches 	dma_cap_set(DMA_SLAVE, mask);
1584bbe89c8eSLudovic Desroches 
1585bbe89c8eSLudovic Desroches 	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
1586bbe89c8eSLudovic Desroches 	if (!atslave)
1587bbe89c8eSLudovic Desroches 		return NULL;
158862971b29SLudovic Desroches 
158962971b29SLudovic Desroches 	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1590bbe89c8eSLudovic Desroches 	/*
1591bbe89c8eSLudovic Desroches 	 * We can fill both SRC_PER and DST_PER, one of these fields will be
1592bbe89c8eSLudovic Desroches 	 * ignored depending on DMA transfer direction.
1593bbe89c8eSLudovic Desroches 	 */
159462971b29SLudovic Desroches 	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
159562971b29SLudovic Desroches 	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
15966c22770fSNicolas Ferre 		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
159762971b29SLudovic Desroches 	/*
159862971b29SLudovic Desroches 	 * We have to translate the value we get from the device tree since
159962971b29SLudovic Desroches 	 * the half FIFO configuration value had to be 0 to keep backward
160062971b29SLudovic Desroches 	 * compatibility.
160162971b29SLudovic Desroches 	 */
160262971b29SLudovic Desroches 	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
160362971b29SLudovic Desroches 	case AT91_DMA_CFG_FIFOCFG_ALAP:
160462971b29SLudovic Desroches 		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
160562971b29SLudovic Desroches 		break;
160662971b29SLudovic Desroches 	case AT91_DMA_CFG_FIFOCFG_ASAP:
160762971b29SLudovic Desroches 		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
160862971b29SLudovic Desroches 		break;
160962971b29SLudovic Desroches 	case AT91_DMA_CFG_FIFOCFG_HALF:
161062971b29SLudovic Desroches 	default:
161162971b29SLudovic Desroches 		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
161262971b29SLudovic Desroches 	}
1613bbe89c8eSLudovic Desroches 	atslave->dma_dev = &dmac_pdev->dev;
1614bbe89c8eSLudovic Desroches 
1615bbe89c8eSLudovic Desroches 	chan = dma_request_channel(mask, at_dma_filter, atslave);
1616bbe89c8eSLudovic Desroches 	if (!chan)
1617bbe89c8eSLudovic Desroches 		return NULL;
1618bbe89c8eSLudovic Desroches 
1619bbe89c8eSLudovic Desroches 	atchan = to_at_dma_chan(chan);
1620bbe89c8eSLudovic Desroches 	atchan->per_if = dma_spec->args[0] & 0xff;
1621bbe89c8eSLudovic Desroches 	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1622bbe89c8eSLudovic Desroches 
1623bbe89c8eSLudovic Desroches 	return chan;
1624bbe89c8eSLudovic Desroches }
1625bbe89c8eSLudovic Desroches #else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	/* !CONFIG_OF stub: device-tree channel lookup is unavailable. */
	return NULL;
}
1631bbe89c8eSLudovic Desroches #endif
1632dc78baa2SNicolas Ferre 
1633dc78baa2SNicolas Ferre /*--  Module Management  -----------------------------------------------*/
1634dc78baa2SNicolas Ferre 
/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
/* Per-SoC controller parameters; cap_mask bits are set in at_dma_probe(). */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,	/* 2-channel DMA controller */
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,	/* 8-channel DMA controller */
};
164202f88be9SNicolas Ferre 
#if defined(CONFIG_OF)
/* Device-tree match table: maps compatibles to the per-SoC config above. */
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif
1658c5115953SNicolas Ferre 
/* Non-DT platform device ids; driver_data points at the per-SoC config. */
static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};
167067348450SNicolas Ferre 
16717fd63ccdSUwe Kleine-König static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1672c5115953SNicolas Ferre 						struct platform_device *pdev)
1673c5115953SNicolas Ferre {
1674c5115953SNicolas Ferre 	if (pdev->dev.of_node) {
1675c5115953SNicolas Ferre 		const struct of_device_id *match;
1676c5115953SNicolas Ferre 		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1677c5115953SNicolas Ferre 		if (match == NULL)
167802f88be9SNicolas Ferre 			return NULL;
167902f88be9SNicolas Ferre 		return match->data;
1680c5115953SNicolas Ferre 	}
168102f88be9SNicolas Ferre 	return (struct at_dma_platform_data *)
168202f88be9SNicolas Ferre 			platform_get_device_id(pdev)->driver_data;
1683c5115953SNicolas Ferre }
1684c5115953SNicolas Ferre 
/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDAMC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	/* Clear the global enable bit first so no new transfers start. */
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}
1700dc78baa2SNicolas Ferre 
1701dc78baa2SNicolas Ferre static int __init at_dma_probe(struct platform_device *pdev)
1702dc78baa2SNicolas Ferre {
1703dc78baa2SNicolas Ferre 	struct resource		*io;
1704dc78baa2SNicolas Ferre 	struct at_dma		*atdma;
1705dc78baa2SNicolas Ferre 	size_t			size;
1706dc78baa2SNicolas Ferre 	int			irq;
1707dc78baa2SNicolas Ferre 	int			err;
1708dc78baa2SNicolas Ferre 	int			i;
17097fd63ccdSUwe Kleine-König 	const struct at_dma_platform_data *plat_dat;
1710dc78baa2SNicolas Ferre 
171102f88be9SNicolas Ferre 	/* setup platform data for each SoC */
171202f88be9SNicolas Ferre 	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1713265567fbSTorsten Fleischer 	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
17145abecfa5SMaxime Ripard 	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
171502f88be9SNicolas Ferre 	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
171602f88be9SNicolas Ferre 	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1717265567fbSTorsten Fleischer 	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
171867348450SNicolas Ferre 
171967348450SNicolas Ferre 	/* get DMA parameters from controller type */
172002f88be9SNicolas Ferre 	plat_dat = at_dma_get_driver_data(pdev);
172102f88be9SNicolas Ferre 	if (!plat_dat)
172202f88be9SNicolas Ferre 		return -ENODEV;
1723dc78baa2SNicolas Ferre 
1724dc78baa2SNicolas Ferre 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1725dc78baa2SNicolas Ferre 	if (!io)
1726dc78baa2SNicolas Ferre 		return -EINVAL;
1727dc78baa2SNicolas Ferre 
1728dc78baa2SNicolas Ferre 	irq = platform_get_irq(pdev, 0);
1729dc78baa2SNicolas Ferre 	if (irq < 0)
1730dc78baa2SNicolas Ferre 		return irq;
1731dc78baa2SNicolas Ferre 
1732dc78baa2SNicolas Ferre 	size = sizeof(struct at_dma);
173302f88be9SNicolas Ferre 	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1734dc78baa2SNicolas Ferre 	atdma = kzalloc(size, GFP_KERNEL);
1735dc78baa2SNicolas Ferre 	if (!atdma)
1736dc78baa2SNicolas Ferre 		return -ENOMEM;
1737dc78baa2SNicolas Ferre 
173867348450SNicolas Ferre 	/* discover transaction capabilities */
173902f88be9SNicolas Ferre 	atdma->dma_common.cap_mask = plat_dat->cap_mask;
174002f88be9SNicolas Ferre 	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1741dc78baa2SNicolas Ferre 
1742114df7d6SH Hartley Sweeten 	size = resource_size(io);
1743dc78baa2SNicolas Ferre 	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1744dc78baa2SNicolas Ferre 		err = -EBUSY;
1745dc78baa2SNicolas Ferre 		goto err_kfree;
1746dc78baa2SNicolas Ferre 	}
1747dc78baa2SNicolas Ferre 
1748dc78baa2SNicolas Ferre 	atdma->regs = ioremap(io->start, size);
1749dc78baa2SNicolas Ferre 	if (!atdma->regs) {
1750dc78baa2SNicolas Ferre 		err = -ENOMEM;
1751dc78baa2SNicolas Ferre 		goto err_release_r;
1752dc78baa2SNicolas Ferre 	}
1753dc78baa2SNicolas Ferre 
1754dc78baa2SNicolas Ferre 	atdma->clk = clk_get(&pdev->dev, "dma_clk");
1755dc78baa2SNicolas Ferre 	if (IS_ERR(atdma->clk)) {
1756dc78baa2SNicolas Ferre 		err = PTR_ERR(atdma->clk);
1757dc78baa2SNicolas Ferre 		goto err_clk;
1758dc78baa2SNicolas Ferre 	}
1759f784d9c9SBoris BREZILLON 	err = clk_prepare_enable(atdma->clk);
1760f784d9c9SBoris BREZILLON 	if (err)
1761f784d9c9SBoris BREZILLON 		goto err_clk_prepare;
1762dc78baa2SNicolas Ferre 
1763dc78baa2SNicolas Ferre 	/* force dma off, just in case */
1764dc78baa2SNicolas Ferre 	at_dma_off(atdma);
1765dc78baa2SNicolas Ferre 
1766dc78baa2SNicolas Ferre 	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1767dc78baa2SNicolas Ferre 	if (err)
1768dc78baa2SNicolas Ferre 		goto err_irq;
1769dc78baa2SNicolas Ferre 
1770dc78baa2SNicolas Ferre 	platform_set_drvdata(pdev, atdma);
1771dc78baa2SNicolas Ferre 
1772dc78baa2SNicolas Ferre 	/* create a pool of consistent memory blocks for hardware descriptors */
1773dc78baa2SNicolas Ferre 	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1774dc78baa2SNicolas Ferre 			&pdev->dev, sizeof(struct at_desc),
1775dc78baa2SNicolas Ferre 			4 /* word alignment */, 0);
1776dc78baa2SNicolas Ferre 	if (!atdma->dma_desc_pool) {
1777dc78baa2SNicolas Ferre 		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1778dc78baa2SNicolas Ferre 		err = -ENOMEM;
1779dc78baa2SNicolas Ferre 		goto err_pool_create;
1780dc78baa2SNicolas Ferre 	}
1781dc78baa2SNicolas Ferre 
1782dc78baa2SNicolas Ferre 	/* clear any pending interrupt */
1783dc78baa2SNicolas Ferre 	while (dma_readl(atdma, EBCISR))
1784dc78baa2SNicolas Ferre 		cpu_relax();
1785dc78baa2SNicolas Ferre 
1786dc78baa2SNicolas Ferre 	/* initialize channels related values */
1787dc78baa2SNicolas Ferre 	INIT_LIST_HEAD(&atdma->dma_common.channels);
178802f88be9SNicolas Ferre 	for (i = 0; i < plat_dat->nr_channels; i++) {
1789dc78baa2SNicolas Ferre 		struct at_dma_chan	*atchan = &atdma->chan[i];
1790dc78baa2SNicolas Ferre 
1791bbe89c8eSLudovic Desroches 		atchan->mem_if = AT_DMA_MEM_IF;
1792bbe89c8eSLudovic Desroches 		atchan->per_if = AT_DMA_PER_IF;
1793dc78baa2SNicolas Ferre 		atchan->chan_common.device = &atdma->dma_common;
1794d3ee98cdSRussell King - ARM Linux 		dma_cookie_init(&atchan->chan_common);
1795dc78baa2SNicolas Ferre 		list_add_tail(&atchan->chan_common.device_node,
1796dc78baa2SNicolas Ferre 				&atdma->dma_common.channels);
1797dc78baa2SNicolas Ferre 
1798dc78baa2SNicolas Ferre 		atchan->ch_regs = atdma->regs + ch_regs(i);
1799dc78baa2SNicolas Ferre 		spin_lock_init(&atchan->lock);
1800dc78baa2SNicolas Ferre 		atchan->mask = 1 << i;
1801dc78baa2SNicolas Ferre 
1802dc78baa2SNicolas Ferre 		INIT_LIST_HEAD(&atchan->active_list);
1803dc78baa2SNicolas Ferre 		INIT_LIST_HEAD(&atchan->queue);
1804dc78baa2SNicolas Ferre 		INIT_LIST_HEAD(&atchan->free_list);
1805dc78baa2SNicolas Ferre 
1806dc78baa2SNicolas Ferre 		tasklet_init(&atchan->tasklet, atc_tasklet,
1807dc78baa2SNicolas Ferre 				(unsigned long)atchan);
1808bda3a47cSNikolaus Voss 		atc_enable_chan_irq(atdma, i);
1809dc78baa2SNicolas Ferre 	}
1810dc78baa2SNicolas Ferre 
1811dc78baa2SNicolas Ferre 	/* set base routines */
1812dc78baa2SNicolas Ferre 	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1813dc78baa2SNicolas Ferre 	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
181407934481SLinus Walleij 	atdma->dma_common.device_tx_status = atc_tx_status;
1815dc78baa2SNicolas Ferre 	atdma->dma_common.device_issue_pending = atc_issue_pending;
1816dc78baa2SNicolas Ferre 	atdma->dma_common.dev = &pdev->dev;
1817dc78baa2SNicolas Ferre 
1818dc78baa2SNicolas Ferre 	/* set prep routines based on capability */
18195abecfa5SMaxime Ripard 	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
18205abecfa5SMaxime Ripard 		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
18215abecfa5SMaxime Ripard 
1822dc78baa2SNicolas Ferre 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1823dc78baa2SNicolas Ferre 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1824dc78baa2SNicolas Ferre 
1825d7db8080SNicolas Ferre 	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1826808347f6SNicolas Ferre 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1827d7db8080SNicolas Ferre 		/* controller can do slave DMA: can trigger cyclic transfers */
1828d7db8080SNicolas Ferre 		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
182953830cc7SNicolas Ferre 		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
18304facfe7fSMaxime Ripard 		atdma->dma_common.device_config = atc_config;
18314facfe7fSMaxime Ripard 		atdma->dma_common.device_pause = atc_pause;
18324facfe7fSMaxime Ripard 		atdma->dma_common.device_resume = atc_resume;
18334facfe7fSMaxime Ripard 		atdma->dma_common.device_terminate_all = atc_terminate_all;
1834816070edSLudovic Desroches 		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1835816070edSLudovic Desroches 		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1836816070edSLudovic Desroches 		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1837816070edSLudovic Desroches 		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1838d7db8080SNicolas Ferre 	}
1839808347f6SNicolas Ferre 
1840265567fbSTorsten Fleischer 	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
1841265567fbSTorsten Fleischer 		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
1842265567fbSTorsten Fleischer 
1843dc78baa2SNicolas Ferre 	dma_writel(atdma, EN, AT_DMA_ENABLE);
1844dc78baa2SNicolas Ferre 
1845265567fbSTorsten Fleischer 	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
1846dc78baa2SNicolas Ferre 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1847dc78baa2SNicolas Ferre 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
1848265567fbSTorsten Fleischer 	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
184902f88be9SNicolas Ferre 	  plat_dat->nr_channels);
1850dc78baa2SNicolas Ferre 
1851dc78baa2SNicolas Ferre 	dma_async_device_register(&atdma->dma_common);
1852dc78baa2SNicolas Ferre 
1853bbe89c8eSLudovic Desroches 	/*
1854bbe89c8eSLudovic Desroches 	 * Do not return an error if the dmac node is not present in order to
1855bbe89c8eSLudovic Desroches 	 * not break the existing way of requesting channel with
1856bbe89c8eSLudovic Desroches 	 * dma_request_channel().
1857bbe89c8eSLudovic Desroches 	 */
1858bbe89c8eSLudovic Desroches 	if (pdev->dev.of_node) {
1859bbe89c8eSLudovic Desroches 		err = of_dma_controller_register(pdev->dev.of_node,
1860bbe89c8eSLudovic Desroches 						 at_dma_xlate, atdma);
1861bbe89c8eSLudovic Desroches 		if (err) {
1862bbe89c8eSLudovic Desroches 			dev_err(&pdev->dev, "could not register of_dma_controller\n");
1863bbe89c8eSLudovic Desroches 			goto err_of_dma_controller_register;
1864bbe89c8eSLudovic Desroches 		}
1865bbe89c8eSLudovic Desroches 	}
1866bbe89c8eSLudovic Desroches 
1867dc78baa2SNicolas Ferre 	return 0;
1868dc78baa2SNicolas Ferre 
1869bbe89c8eSLudovic Desroches err_of_dma_controller_register:
1870bbe89c8eSLudovic Desroches 	dma_async_device_unregister(&atdma->dma_common);
1871bbe89c8eSLudovic Desroches 	dma_pool_destroy(atdma->dma_desc_pool);
1872dc78baa2SNicolas Ferre err_pool_create:
1873dc78baa2SNicolas Ferre 	free_irq(platform_get_irq(pdev, 0), atdma);
1874dc78baa2SNicolas Ferre err_irq:
1875f784d9c9SBoris BREZILLON 	clk_disable_unprepare(atdma->clk);
1876f784d9c9SBoris BREZILLON err_clk_prepare:
1877dc78baa2SNicolas Ferre 	clk_put(atdma->clk);
1878dc78baa2SNicolas Ferre err_clk:
1879dc78baa2SNicolas Ferre 	iounmap(atdma->regs);
1880dc78baa2SNicolas Ferre 	atdma->regs = NULL;
1881dc78baa2SNicolas Ferre err_release_r:
1882dc78baa2SNicolas Ferre 	release_mem_region(io->start, size);
1883dc78baa2SNicolas Ferre err_kfree:
1884dc78baa2SNicolas Ferre 	kfree(atdma);
1885dc78baa2SNicolas Ferre 	return err;
1886dc78baa2SNicolas Ferre }
1887dc78baa2SNicolas Ferre 
/*
 * at_dma_remove - unbind the driver from the platform device.
 *
 * Releases everything acquired by at_dma_probe(), largely in reverse
 * order.  The sequencing matters: the controller is switched off and the
 * DMA device unregistered before the IRQ is freed and the per-channel
 * tasklets are killed, so no new work can arrive while teardown runs.
 *
 * Always returns 0 (platform remove callbacks' return value is ignored
 * for error purposes anyway).
 */
static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	/* Quiesce the hardware first, then drop the dmaengine registration. */
	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	/* _safe iteration required: list_del() below removes each entry. */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);

		/* IRQ is already freed above, so the tasklet cannot be re-armed. */
		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	/* Mirror the probe-time request_mem_region()/ioremap() pair. */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}
1924dc78baa2SNicolas Ferre 
1925dc78baa2SNicolas Ferre static void at_dma_shutdown(struct platform_device *pdev)
1926dc78baa2SNicolas Ferre {
1927dc78baa2SNicolas Ferre 	struct at_dma	*atdma = platform_get_drvdata(pdev);
1928dc78baa2SNicolas Ferre 
1929dc78baa2SNicolas Ferre 	at_dma_off(platform_get_drvdata(pdev));
1930f784d9c9SBoris BREZILLON 	clk_disable_unprepare(atdma->clk);
1931dc78baa2SNicolas Ferre }
1932dc78baa2SNicolas Ferre 
1933c0ba5947SNicolas Ferre static int at_dma_prepare(struct device *dev)
1934c0ba5947SNicolas Ferre {
1935c0ba5947SNicolas Ferre 	struct platform_device *pdev = to_platform_device(dev);
1936c0ba5947SNicolas Ferre 	struct at_dma *atdma = platform_get_drvdata(pdev);
1937c0ba5947SNicolas Ferre 	struct dma_chan *chan, *_chan;
1938c0ba5947SNicolas Ferre 
1939c0ba5947SNicolas Ferre 	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1940c0ba5947SNicolas Ferre 			device_node) {
1941c0ba5947SNicolas Ferre 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
1942c0ba5947SNicolas Ferre 		/* wait for transaction completion (except in cyclic case) */
19433c477482SNicolas Ferre 		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
1944c0ba5947SNicolas Ferre 			return -EAGAIN;
1945c0ba5947SNicolas Ferre 	}
1946c0ba5947SNicolas Ferre 	return 0;
1947c0ba5947SNicolas Ferre }
1948c0ba5947SNicolas Ferre 
1949c0ba5947SNicolas Ferre static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1950c0ba5947SNicolas Ferre {
1951c0ba5947SNicolas Ferre 	struct dma_chan	*chan = &atchan->chan_common;
1952c0ba5947SNicolas Ferre 
1953c0ba5947SNicolas Ferre 	/* Channel should be paused by user
1954c0ba5947SNicolas Ferre 	 * do it anyway even if it is not done already */
19553c477482SNicolas Ferre 	if (!atc_chan_is_paused(atchan)) {
1956c0ba5947SNicolas Ferre 		dev_warn(chan2dev(chan),
1957c0ba5947SNicolas Ferre 		"cyclic channel not paused, should be done by channel user\n");
19584facfe7fSMaxime Ripard 		atc_pause(chan);
1959c0ba5947SNicolas Ferre 	}
1960c0ba5947SNicolas Ferre 
1961c0ba5947SNicolas Ferre 	/* now preserve additional data for cyclic operations */
1962c0ba5947SNicolas Ferre 	/* next descriptor address in the cyclic list */
1963c0ba5947SNicolas Ferre 	atchan->save_dscr = channel_readl(atchan, DSCR);
1964c0ba5947SNicolas Ferre 
1965c0ba5947SNicolas Ferre 	vdbg_dump_regs(atchan);
1966c0ba5947SNicolas Ferre }
1967c0ba5947SNicolas Ferre 
/*
 * at_dma_suspend_noirq - PM suspend, noirq phase.
 *
 * Saves the state that the controller loses while powered down: each
 * channel's CFG register (plus, for cyclic channels, the next-descriptor
 * pointer via atc_suspend_cyclic()) and the global interrupt mask
 * (EBCIMR).  Only then is the controller switched off and its clock
 * gated.  Counterpart of at_dma_resume_noirq().
 */
static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		/* cyclic channels need extra state saved before CFG */
		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}
1990dc78baa2SNicolas Ferre 
/*
 * atc_resume_cyclic - re-arm one cyclic channel after controller resume.
 *
 * Programs the channel so that it restarts from the descriptor saved by
 * atc_suspend_cyclic(): the per-channel registers are zeroed, DSCR is
 * pointed at the saved next descriptor, and only then is the channel
 * enabled again via the controller's CHER register.  The write order is
 * deliberate — DSCR must be valid before the enable.
 */
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	/* set this channel's bit in the channel-enable register */
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}
2009c0ba5947SNicolas Ferre 
201033f82d14SDan Williams static int at_dma_resume_noirq(struct device *dev)
2011dc78baa2SNicolas Ferre {
201233f82d14SDan Williams 	struct platform_device *pdev = to_platform_device(dev);
2013dc78baa2SNicolas Ferre 	struct at_dma *atdma = platform_get_drvdata(pdev);
2014c0ba5947SNicolas Ferre 	struct dma_chan *chan, *_chan;
2015dc78baa2SNicolas Ferre 
2016c0ba5947SNicolas Ferre 	/* bring back DMA controller */
2017f784d9c9SBoris BREZILLON 	clk_prepare_enable(atdma->clk);
2018dc78baa2SNicolas Ferre 	dma_writel(atdma, EN, AT_DMA_ENABLE);
2019c0ba5947SNicolas Ferre 
2020c0ba5947SNicolas Ferre 	/* clear any pending interrupt */
2021c0ba5947SNicolas Ferre 	while (dma_readl(atdma, EBCISR))
2022c0ba5947SNicolas Ferre 		cpu_relax();
2023c0ba5947SNicolas Ferre 
2024c0ba5947SNicolas Ferre 	/* restore saved data */
2025c0ba5947SNicolas Ferre 	dma_writel(atdma, EBCIER, atdma->save_imr);
2026c0ba5947SNicolas Ferre 	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2027c0ba5947SNicolas Ferre 			device_node) {
2028c0ba5947SNicolas Ferre 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2029c0ba5947SNicolas Ferre 
2030c0ba5947SNicolas Ferre 		channel_writel(atchan, CFG, atchan->save_cfg);
20313c477482SNicolas Ferre 		if (atc_chan_is_cyclic(atchan))
2032c0ba5947SNicolas Ferre 			atc_resume_cyclic(atchan);
2033c0ba5947SNicolas Ferre 	}
2034dc78baa2SNicolas Ferre 	return 0;
2035dc78baa2SNicolas Ferre }
2036dc78baa2SNicolas Ferre 
/*
 * Power-management callbacks: ->prepare vetoes suspend while non-cyclic
 * transfers are still running; the noirq hooks save and restore the
 * controller and per-channel state around the power transition.
 */
static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};
204233f82d14SDan Williams 
/*
 * Platform driver description.  There is intentionally no .probe member:
 * the driver is registered with platform_driver_probe() in at_dma_init(),
 * which binds at_dma_probe() once at init time.
 */
static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};
2053dc78baa2SNicolas Ferre 
2054dc78baa2SNicolas Ferre static int __init at_dma_init(void)
2055dc78baa2SNicolas Ferre {
2056dc78baa2SNicolas Ferre 	return platform_driver_probe(&at_dma_driver, at_dma_probe);
2057dc78baa2SNicolas Ferre }
205893d0bec2SEric Xu subsys_initcall(at_dma_init);
2059dc78baa2SNicolas Ferre 
/* Module unload: unregister the platform driver (drives at_dma_remove()). */
static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);
2065dc78baa2SNicolas Ferre 
/* Module metadata and the platform alias used for autoloading. */
MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");
2070