xref: /openbmc/linux/drivers/dma/at_hdmac.c (revision 62257638)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
4  *
5  * Copyright (C) 2008 Atmel Corporation
6  *
7  * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
8  * The only Atmel DMA Controller that is not covered by this driver is the one
9  * found on AT91SAM9263.
10  */
11 
12 #include <dt-bindings/dma/at91.h>
13 #include <linux/clk.h>
14 #include <linux/dmaengine.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmapool.h>
17 #include <linux/interrupt.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <linux/of.h>
22 #include <linux/of_device.h>
23 #include <linux/of_dma.h>
24 
25 #include "at_hdmac_regs.h"
26 #include "dmaengine.h"
27 
28 /*
29  * Glossary
30  * --------
31  *
32  * at_hdmac		: Name of the Atmel AHB DMA Controller
33  * at_dma_ / atdma	: Atmel DMA controller entity related
34  * atc_	/ atchan	: Atmel DMA channel entity related
35  */
36 
37 #define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
38 #define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
39 				|ATC_DIF(AT_DMA_MEM_IF))
40 #define ATC_DMA_BUSWIDTHS\
41 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
42 	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
43 	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
44 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
45 
46 #define ATC_MAX_DSCR_TRIALS	10
47 
48 /*
49  * Initial number of descriptors to allocate for each channel. This could
50  * be increased during dma usage.
51  */
52 static unsigned int init_nr_desc_per_channel = 64;
53 module_param(init_nr_desc_per_channel, uint, 0644);
54 MODULE_PARM_DESC(init_nr_desc_per_channel,
55 		 "initial descriptors per channel (default: 64)");
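
/*
 * Illustrative note (not from the original source): this pool size can be
 * set at module load time, e.g. "modprobe at_hdmac init_nr_desc_per_channel=128",
 * and, since the parameter mode is 0644, tweaked later through
 * /sys/module/at_hdmac/parameters/init_nr_desc_per_channel. If the pool
 * runs dry, atc_desc_get() allocates additional descriptors on demand.
 */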
56 
57 /**
58  * struct at_dma_platform_data - Controller configuration parameters
59  * @nr_channels: Number of channels supported by hardware (max 8)
60  * @cap_mask: dma_capability flags supported by the platform
61  */
62 struct at_dma_platform_data {
63 	unsigned int	nr_channels;
64 	dma_cap_mask_t  cap_mask;
65 };
66 
67 /**
68  * struct at_dma_slave - Controller-specific information about a slave
69  * @dma_dev: required DMA master device
70  * @cfg: Platform-specific initializer for the CFG register
71  */
72 struct at_dma_slave {
73 	struct device		*dma_dev;
74 	u32			cfg;
75 };
76 
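/*
 * Illustrative note (not part of the original source): a struct at_dma_slave
 * is what this driver expects to find in chan->private for slave transfers.
 * at_dma_xlate() below shows the canonical way to build one: allocate it,
 * point ->dma_dev at the DMA controller's device, fill ->cfg with the
 * handshake and FIFO configuration bits (ATC_SRC_PER/ATC_DST_PER,
 * ATC_FIFOCFG_*), then pass it as the filter parameter to
 * dma_request_channel() together with at_dma_filter().
 */
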
77 /* prototypes */
78 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
79 static void atc_issue_pending(struct dma_chan *chan);
80 
81 
82 /*----------------------------------------------------------------------*/
83 
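/*
 * atc_get_xfer_width - pick the widest transfer width usable for a memcpy
 * @src: bus address of the source
 * @dst: bus address of the destination
 * @len: transfer length in bytes
 *
 * Returns the CTRLA width encoding: 2 (32-bit transfers) when source,
 * destination and length are all word aligned, 1 (16-bit) when they are only
 * halfword aligned, 0 (8-bit) otherwise. For example (illustrative),
 * src=0x1000, dst=0x2004, len=64 yields 2, while len=62 only allows 1.
 */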
84 static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
85 						size_t len)
86 {
87 	unsigned int width;
88 
89 	if (!((src | dst | len) & 3))
90 		width = 2;
91 	else if (!((src | dst | len) & 1))
92 		width = 1;
93 	else
94 		width = 0;
95 
96 	return width;
97 }
98 
99 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
100 {
101 	return list_first_entry(&atchan->active_list,
102 				struct at_desc, desc_node);
103 }
104 
105 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
106 {
107 	return list_first_entry(&atchan->queue,
108 				struct at_desc, desc_node);
109 }
110 
111 /**
112  * atc_alloc_descriptor - allocate and return an initialized descriptor
113  * @chan: the channel to allocate descriptors for
114  * @gfp_flags: GFP allocation flags
115  *
116  * Note: The ack bit is set in the descriptor flags at creation time
117  *       to make initial allocation more convenient. This bit will be cleared
118  *       and control will be given to the client at usage time (during
119  *       preparation functions).
120  */
121 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
122 					    gfp_t gfp_flags)
123 {
124 	struct at_desc	*desc = NULL;
125 	struct at_dma	*atdma = to_at_dma(chan->device);
126 	dma_addr_t phys;
127 
128 	desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
129 	if (desc) {
130 		INIT_LIST_HEAD(&desc->tx_list);
131 		dma_async_tx_descriptor_init(&desc->txd, chan);
132 		/* txd.flags will be overwritten in prep functions */
133 		desc->txd.flags = DMA_CTRL_ACK;
134 		desc->txd.tx_submit = atc_tx_submit;
135 		desc->txd.phys = phys;
136 	}
137 
138 	return desc;
139 }
140 
141 /**
142  * atc_desc_get - get an unused descriptor from free_list
143  * @atchan: channel we want a new descriptor for
144  */
145 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
146 {
147 	struct at_desc *desc, *_desc;
148 	struct at_desc *ret = NULL;
149 	unsigned long flags;
150 	unsigned int i = 0;
151 
152 	spin_lock_irqsave(&atchan->lock, flags);
153 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
154 		i++;
155 		if (async_tx_test_ack(&desc->txd)) {
156 			list_del(&desc->desc_node);
157 			ret = desc;
158 			break;
159 		}
160 		dev_dbg(chan2dev(&atchan->chan_common),
161 				"desc %p not ACKed\n", desc);
162 	}
163 	spin_unlock_irqrestore(&atchan->lock, flags);
164 	dev_vdbg(chan2dev(&atchan->chan_common),
165 		"scanned %u descriptors on freelist\n", i);
166 
167 	/* no more descriptors available in the initial pool: create one more */
168 	if (!ret)
169 		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT);
170 
171 	return ret;
172 }
173 
174 /**
175  * atc_desc_put - move a descriptor, including any children, to the free list
176  * @atchan: channel we work on
177  * @desc: descriptor, at the head of a chain, to move to free list
178  */
179 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
180 {
181 	if (desc) {
182 		struct at_desc *child;
183 		unsigned long flags;
184 
185 		spin_lock_irqsave(&atchan->lock, flags);
186 		list_for_each_entry(child, &desc->tx_list, desc_node)
187 			dev_vdbg(chan2dev(&atchan->chan_common),
188 					"moving child desc %p to freelist\n",
189 					child);
190 		list_splice_init(&desc->tx_list, &atchan->free_list);
191 		dev_vdbg(chan2dev(&atchan->chan_common),
192 			 "moving desc %p to freelist\n", desc);
193 		list_add(&desc->desc_node, &atchan->free_list);
194 		spin_unlock_irqrestore(&atchan->lock, flags);
195 	}
196 }
197 
198 /**
199  * atc_desc_chain - build chain adding a descriptor
200  * @first: address of first descriptor of the chain
201  * @prev: address of previous descriptor of the chain
202  * @desc: descriptor to queue
203  *
204  * Called from prep_* functions
205  */
206 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
207 			   struct at_desc *desc)
208 {
209 	if (!(*first)) {
210 		*first = desc;
211 	} else {
212 		/* inform the HW lli about chaining */
213 		(*prev)->lli.dscr = desc->txd.phys;
214 		/* insert the link descriptor to the LD ring */
215 		list_add_tail(&desc->desc_node,
216 				&(*first)->tx_list);
217 	}
218 	*prev = desc;
219 }
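
/*
 * Illustrative note: after chaining descriptors A, B and C with
 * atc_desc_chain(), *first points at A, A->tx_list holds B and C, and the
 * hardware sees A.lli.dscr -> B.txd.phys and B.lli.dscr -> C.txd.phys. The
 * prep functions then terminate the chain with set_desc_eol() on the last
 * descriptor.
 */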
220 
221 /**
222  * atc_dostart - starts the DMA engine for real
223  * @atchan: the channel we want to start
224  * @first: first descriptor in the list we want to begin with
225  *
226  * Called with atchan->lock held and bh disabled
227  */
228 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
229 {
230 	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
231 
232 	/* ASSERT:  channel is idle */
233 	if (atc_chan_is_enabled(atchan)) {
234 		dev_err(chan2dev(&atchan->chan_common),
235 			"BUG: Attempted to start non-idle channel\n");
236 		dev_err(chan2dev(&atchan->chan_common),
237 			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
238 			channel_readl(atchan, SADDR),
239 			channel_readl(atchan, DADDR),
240 			channel_readl(atchan, CTRLA),
241 			channel_readl(atchan, CTRLB),
242 			channel_readl(atchan, DSCR));
243 
244 		/* The tasklet will hopefully advance the queue... */
245 		return;
246 	}
247 
248 	vdbg_dump_regs(atchan);
249 
250 	channel_writel(atchan, SADDR, 0);
251 	channel_writel(atchan, DADDR, 0);
252 	channel_writel(atchan, CTRLA, 0);
253 	channel_writel(atchan, CTRLB, 0);
254 	channel_writel(atchan, DSCR, first->txd.phys);
255 	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
256 		       ATC_SPIP_BOUNDARY(first->boundary));
257 	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
258 		       ATC_DPIP_BOUNDARY(first->boundary));
259 	dma_writel(atdma, CHER, atchan->mask);
260 
261 	vdbg_dump_regs(atchan);
262 }
263 
264 /*
265  * atc_get_desc_by_cookie - get the descriptor of a cookie
266  * @atchan: the DMA channel
267  * @cookie: the cookie to get the descriptor for
268  */
269 static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
270 						dma_cookie_t cookie)
271 {
272 	struct at_desc *desc, *_desc;
273 
274 	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
275 		if (desc->txd.cookie == cookie)
276 			return desc;
277 	}
278 
279 	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
280 		if (desc->txd.cookie == cookie)
281 			return desc;
282 	}
283 
284 	return NULL;
285 }
286 
287 /**
288  * atc_calc_bytes_left - calculates the number of bytes left according to the
289  * value read from CTRLA.
290  *
291  * @current_len: the number of bytes left before reading CTRLA
292  * @ctrla: the value of CTRLA
293  */
294 static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
295 {
296 	u32 btsize = (ctrla & ATC_BTSIZE_MAX);
297 	u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
298 
299 	/*
300 	 * According to the datasheet, when reading the Control A Register
301 	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
302 	 * number of transfers completed on the Source Interface.
303 	 * So btsize is always a number of source width transfers.
304 	 */
305 	return current_len - (btsize << src_width);
306 }
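
/*
 * Worked example (illustrative): with current_len = 4096 and a CTRLA value
 * reporting btsize = 256 completed source transfers at src_width = 2
 * (32-bit), 256 << 2 = 1024 bytes have already been read from the source,
 * so 3072 bytes are still left.
 */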
307 
308 /**
309  * atc_get_bytes_left - get the number of remaining bytes for a cookie
310  * @chan: DMA channel
311  * @cookie: transaction identifier to check status of
312  */
313 static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
314 {
315 	struct at_dma_chan      *atchan = to_at_dma_chan(chan);
316 	struct at_desc *desc_first = atc_first_active(atchan);
317 	struct at_desc *desc;
318 	int ret;
319 	u32 ctrla, dscr, trials;
320 
321 	/*
322 	 * If the cookie doesn't match the currently running transfer then
323 	 * we can return the total length of the associated DMA transfer,
324 	 * because it is still queued.
325 	 */
326 	desc = atc_get_desc_by_cookie(atchan, cookie);
327 	if (desc == NULL)
328 		return -EINVAL;
329 	else if (desc != desc_first)
330 		return desc->total_len;
331 
332 	/* cookie matches to the currently running transfer */
333 	ret = desc_first->total_len;
334 
335 	if (desc_first->lli.dscr) {
336 		/* hardware linked list transfer */
337 
338 		/*
339 		 * Calculate the residue by removing the length of the child
340 		 * descriptors already transferred from the total length.
341 		 * To get the current child descriptor we can use the value of
342 		 * the channel's DSCR register and compare it against the value
343 		 * of the hardware linked list structure of each child
344 		 * descriptor.
345 		 *
346 		 * The CTRLA register provides us with the amount of data
347 		 * already read from the source for the current child
348 		 * descriptor. So we can compute a more accurate residue by also
349 		 * removing the number of bytes corresponding to this amount of
350 		 * data.
351 		 *
352 		 * However, the DSCR and CTRLA registers cannot both be read
353 		 * atomically. Hence a race condition may occur: the first
354 		 * register read may refer to one child descriptor whereas the
355 		 * second read may refer to a later child descriptor in the list
356 		 * because the DMA transfer progresses in between the two
357 		 * reads.
358 		 *
359 		 * One solution could have been to pause the DMA transfer, read
360 		 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
361 		 * this approach presents some drawbacks:
362 		 * - If the DMA transfer is paused, RX overruns or TX underruns
363 		 *   are more likely to occur depending on the system latency.
364 		 *   Taking the USART driver as an example, it uses a cyclic DMA
365 		 *   transfer to read data from the Receive Holding Register
366 		 *   (RHR) to avoid RX overruns since the RHR is not protected
367 		 *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer
368 		 *   to compute the residue would break the USART driver design.
369 		 * - The atc_pause() function masks interrupts but we'd rather
370 		 *   avoid doing so for system latency reasons.
371 		 *
372 		 * Instead we use another solution: the DSCR is read a first
373 		 * time, then the CTRLA, and finally the DSCR a second time. If
374 		 * the two consecutive DSCR values are the same, we assume they
375 		 * both refer to the very same child descriptor, and that the
376 		 * CTRLA value read in between does as well. For cyclic
377 		 * transfers, the assumption is that a full loop is "not so
378 		 * fast".
379 		 * If the two DSCR values are different, we read the CTRLA then
380 		 * the DSCR again, until two consecutive DSCR values are equal
381 		 * or the maximum number of trials is reached.
382 		 * This algorithm is very unlikely not to find a stable value
383 		 * for DSCR.
384 		 */
385 
386 		dscr = channel_readl(atchan, DSCR);
387 		rmb(); /* ensure DSCR is read before CTRLA */
388 		ctrla = channel_readl(atchan, CTRLA);
389 		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
390 			u32 new_dscr;
391 
392 			rmb(); /* ensure DSCR is read after CTRLA */
393 			new_dscr = channel_readl(atchan, DSCR);
394 
395 			/*
396 			 * If the DSCR register value has not changed inside the
397 			 * DMA controller since the previous read, we assume
398 			 * that both the dscr and ctrla values refer to the
399 			 * very same descriptor.
400 			 */
401 			if (likely(new_dscr == dscr))
402 				break;
403 
404 			/*
405 			 * DSCR has changed inside the DMA controller, so the
406 			 * previously read value of CTRLA may refer to an already
407 			 * processed descriptor and hence be outdated.
408 			 * We need to update ctrla to match the current
409 			 * descriptor.
410 			 */
411 			dscr = new_dscr;
412 			rmb(); /* ensure DSCR is read before CTRLA */
413 			ctrla = channel_readl(atchan, CTRLA);
414 		}
415 		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
416 			return -ETIMEDOUT;
417 
418 		/* for the first descriptor we can be more accurate */
419 		if (desc_first->lli.dscr == dscr)
420 			return atc_calc_bytes_left(ret, ctrla);
421 
422 		ret -= desc_first->len;
423 		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
424 			if (desc->lli.dscr == dscr)
425 				break;
426 
427 			ret -= desc->len;
428 		}
429 
430 		/*
431 		 * For the current descriptor in the chain we can calculate
432 		 * the remaining bytes using the channel's register.
433 		 */
434 		ret = atc_calc_bytes_left(ret, ctrla);
435 	} else {
436 		/* single transfer */
437 		ctrla = channel_readl(atchan, CTRLA);
438 		ret = atc_calc_bytes_left(ret, ctrla);
439 	}
440 
441 	return ret;
442 }
443 
444 /**
445  * atc_chain_complete - finish work for one transaction chain
446  * @atchan: channel we work on
447  * @desc: descriptor at the head of the chain we want to complete
448  */
449 static void
450 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
451 {
452 	struct dma_async_tx_descriptor	*txd = &desc->txd;
453 	struct at_dma			*atdma = to_at_dma(atchan->chan_common.device);
454 	unsigned long flags;
455 
456 	dev_vdbg(chan2dev(&atchan->chan_common),
457 		"descriptor %u complete\n", txd->cookie);
458 
459 	spin_lock_irqsave(&atchan->lock, flags);
460 
461 	/* mark the descriptor as complete for non-cyclic cases only */
462 	if (!atc_chan_is_cyclic(atchan))
463 		dma_cookie_complete(txd);
464 
465 	/* If the transfer was a memset, free our temporary buffer */
466 	if (desc->memset_buffer) {
467 		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
468 			      desc->memset_paddr);
469 		desc->memset_buffer = false;
470 	}
471 
472 	/* move children to free_list */
473 	list_splice_init(&desc->tx_list, &atchan->free_list);
474 	/* move myself to free_list */
475 	list_move(&desc->desc_node, &atchan->free_list);
476 
477 	spin_unlock_irqrestore(&atchan->lock, flags);
478 
479 	dma_descriptor_unmap(txd);
480 	/* for cyclic transfers,
481 	 * no need to replay the callback function while stopping */
482 	if (!atc_chan_is_cyclic(atchan))
483 		dmaengine_desc_get_callback_invoke(txd, NULL);
484 
485 	dma_run_dependencies(txd);
486 }
487 
488 /**
489  * atc_complete_all - finish work for all transactions
490  * @atchan: channel to complete transactions for
491  *
492  * Submits queued descriptors first, if any
493  *
494  * Assume channel is idle while calling this function
495  * Must be called with atchan->lock released; the lock is taken internally
496  */
497 static void atc_complete_all(struct at_dma_chan *atchan)
498 {
499 	struct at_desc *desc, *_desc;
500 	LIST_HEAD(list);
501 	unsigned long flags;
502 
503 	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
504 
505 	spin_lock_irqsave(&atchan->lock, flags);
506 
507 	/*
508 	 * Submit queued descriptors ASAP, i.e. before we go through
509 	 * the completed ones.
510 	 */
511 	if (!list_empty(&atchan->queue))
512 		atc_dostart(atchan, atc_first_queued(atchan));
513 	/* empty active_list now that it is completed */
514 	list_splice_init(&atchan->active_list, &list);
515 	/* empty queue list by moving descriptors (if any) to active_list */
516 	list_splice_init(&atchan->queue, &atchan->active_list);
517 
518 	spin_unlock_irqrestore(&atchan->lock, flags);
519 
520 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
521 		atc_chain_complete(atchan, desc);
522 }
523 
524 /**
525  * atc_advance_work - at the end of a transaction, move forward
526  * @atchan: channel where the transaction ended
527  */
528 static void atc_advance_work(struct at_dma_chan *atchan)
529 {
530 	unsigned long flags;
531 	int ret;
532 
533 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
534 
535 	spin_lock_irqsave(&atchan->lock, flags);
536 	ret = atc_chan_is_enabled(atchan);
537 	spin_unlock_irqrestore(&atchan->lock, flags);
538 	if (ret)
539 		return;
540 
541 	if (list_empty(&atchan->active_list) ||
542 	    list_is_singular(&atchan->active_list))
543 		return atc_complete_all(atchan);
544 
545 	atc_chain_complete(atchan, atc_first_active(atchan));
546 
547 	/* advance work */
548 	spin_lock_irqsave(&atchan->lock, flags);
549 	atc_dostart(atchan, atc_first_active(atchan));
550 	spin_unlock_irqrestore(&atchan->lock, flags);
551 }
552 
553 
554 /**
555  * atc_handle_error - handle errors reported by DMA controller
556  * @atchan: channel where error occurs
557  */
558 static void atc_handle_error(struct at_dma_chan *atchan)
559 {
560 	struct at_desc *bad_desc;
561 	struct at_desc *child;
562 	unsigned long flags;
563 
564 	spin_lock_irqsave(&atchan->lock, flags);
565 	/*
566 	 * The descriptor currently at the head of the active list is
567 	 * broken. Since we don't have any way to report errors, we'll
568 	 * just have to scream loudly and try to carry on.
569 	 */
570 	bad_desc = atc_first_active(atchan);
571 	list_del_init(&bad_desc->desc_node);
572 
573 	/* As we are stopped, take the opportunity to push queued
574 	 * descriptors into the active_list */
575 	list_splice_init(&atchan->queue, atchan->active_list.prev);
576 
577 	/* Try to restart the controller */
578 	if (!list_empty(&atchan->active_list))
579 		atc_dostart(atchan, atc_first_active(atchan));
580 
581 	/*
582 	 * KERN_CRIT may seem harsh, but since this only happens
583 	 * when someone submits a bad physical address in a
584 	 * descriptor, we should consider ourselves lucky that the
585 	 * controller flagged an error instead of scribbling over
586 	 * random memory locations.
587 	 */
588 	dev_crit(chan2dev(&atchan->chan_common),
589 			"Bad descriptor submitted for DMA!\n");
590 	dev_crit(chan2dev(&atchan->chan_common),
591 			"  cookie: %d\n", bad_desc->txd.cookie);
592 	atc_dump_lli(atchan, &bad_desc->lli);
593 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
594 		atc_dump_lli(atchan, &child->lli);
595 
596 	spin_unlock_irqrestore(&atchan->lock, flags);
597 
598 	/* Pretend the descriptor completed successfully */
599 	atc_chain_complete(atchan, bad_desc);
600 }
601 
602 /**
603  * atc_handle_cyclic - at the end of a period, run callback function
604  * @atchan: channel used for cyclic operations
605  */
606 static void atc_handle_cyclic(struct at_dma_chan *atchan)
607 {
608 	struct at_desc			*first = atc_first_active(atchan);
609 	struct dma_async_tx_descriptor	*txd = &first->txd;
610 
611 	dev_vdbg(chan2dev(&atchan->chan_common),
612 			"new cyclic period llp 0x%08x\n",
613 			channel_readl(atchan, DSCR));
614 
615 	dmaengine_desc_get_callback_invoke(txd, NULL);
616 }
617 
618 /*--  IRQ & Tasklet  ---------------------------------------------------*/
619 
620 static void atc_tasklet(struct tasklet_struct *t)
621 {
622 	struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet);
623 
624 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
625 		return atc_handle_error(atchan);
626 
627 	if (atc_chan_is_cyclic(atchan))
628 		return atc_handle_cyclic(atchan);
629 
630 	atc_advance_work(atchan);
631 }
632 
633 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
634 {
635 	struct at_dma		*atdma = (struct at_dma *)dev_id;
636 	struct at_dma_chan	*atchan;
637 	int			i;
638 	u32			status, pending, imr;
639 	int			ret = IRQ_NONE;
640 
641 	do {
642 		imr = dma_readl(atdma, EBCIMR);
643 		status = dma_readl(atdma, EBCISR);
644 		pending = status & imr;
645 
646 		if (!pending)
647 			break;
648 
649 		dev_vdbg(atdma->dma_common.dev,
650 			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
651 			 status, imr, pending);
652 
653 		for (i = 0; i < atdma->dma_common.chancnt; i++) {
654 			atchan = &atdma->chan[i];
655 			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
656 				if (pending & AT_DMA_ERR(i)) {
657 					/* Disable channel on AHB error */
658 					dma_writel(atdma, CHDR,
659 						AT_DMA_RES(i) | atchan->mask);
660 					/* Give information to tasklet */
661 					set_bit(ATC_IS_ERROR, &atchan->status);
662 				}
663 				tasklet_schedule(&atchan->tasklet);
664 				ret = IRQ_HANDLED;
665 			}
666 		}
667 
668 	} while (pending);
669 
670 	return ret;
671 }
672 
673 
674 /*--  DMA Engine API  --------------------------------------------------*/
675 
676 /**
677  * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
678  * @tx: descriptor at the head of the transaction chain
679  *
680  * Queue chain if DMA engine is working already
681  *
682  * Cookie increment and adding to active_list or queue must be atomic
683  */
684 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
685 {
686 	struct at_desc		*desc = txd_to_at_desc(tx);
687 	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
688 	dma_cookie_t		cookie;
689 	unsigned long		flags;
690 
691 	spin_lock_irqsave(&atchan->lock, flags);
692 	cookie = dma_cookie_assign(tx);
693 
694 	if (list_empty(&atchan->active_list)) {
695 		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
696 				desc->txd.cookie);
697 		atc_dostart(atchan, desc);
698 		list_add_tail(&desc->desc_node, &atchan->active_list);
699 	} else {
700 		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
701 				desc->txd.cookie);
702 		list_add_tail(&desc->desc_node, &atchan->queue);
703 	}
704 
705 	spin_unlock_irqrestore(&atchan->lock, flags);
706 
707 	return cookie;
708 }
709 
710 /**
711  * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
712  * @chan: the channel to prepare operation on
713  * @xt: Interleaved transfer template
714  * @flags: tx descriptor status flags
715  */
716 static struct dma_async_tx_descriptor *
717 atc_prep_dma_interleaved(struct dma_chan *chan,
718 			 struct dma_interleaved_template *xt,
719 			 unsigned long flags)
720 {
721 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
722 	struct data_chunk	*first;
723 	struct at_desc		*desc = NULL;
724 	size_t			xfer_count;
725 	unsigned int		dwidth;
726 	u32			ctrla;
727 	u32			ctrlb;
728 	size_t			len = 0;
729 	int			i;
730 
731 	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
732 		return NULL;
733 
734 	first = xt->sgl;
735 
736 	dev_info(chan2dev(chan),
737 		 "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
738 		__func__, &xt->src_start, &xt->dst_start, xt->numf,
739 		xt->frame_size, flags);
740 
741 	/*
742 	 * The controller can only "skip" X bytes every Y bytes, so we
743 	 * need to make sure we are given a template that fits that
744 	 * description, i.e. a template with chunks that always have the
745 	 * same size, with the same ICGs.
746 	 */
747 	for (i = 0; i < xt->frame_size; i++) {
748 		struct data_chunk *chunk = xt->sgl + i;
749 
750 		if ((chunk->size != xt->sgl->size) ||
751 		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
752 		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
753 			dev_err(chan2dev(chan),
754 				"%s: the controller can transfer only identical chunks\n",
755 				__func__);
756 			return NULL;
757 		}
758 
759 		len += chunk->size;
760 	}
761 
762 	dwidth = atc_get_xfer_width(xt->src_start,
763 				    xt->dst_start, len);
764 
765 	xfer_count = len >> dwidth;
766 	if (xfer_count > ATC_BTSIZE_MAX) {
767 		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
768 		return NULL;
769 	}
770 
771 	ctrla = ATC_SRC_WIDTH(dwidth) |
772 		ATC_DST_WIDTH(dwidth);
773 
774 	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
775 		| ATC_SRC_ADDR_MODE_INCR
776 		| ATC_DST_ADDR_MODE_INCR
777 		| ATC_SRC_PIP
778 		| ATC_DST_PIP
779 		| ATC_FC_MEM2MEM;
780 
781 	/* create the transfer */
782 	desc = atc_desc_get(atchan);
783 	if (!desc) {
784 		dev_err(chan2dev(chan),
785 			"%s: couldn't allocate our descriptor\n", __func__);
786 		return NULL;
787 	}
788 
789 	desc->lli.saddr = xt->src_start;
790 	desc->lli.daddr = xt->dst_start;
791 	desc->lli.ctrla = ctrla | xfer_count;
792 	desc->lli.ctrlb = ctrlb;
793 
794 	desc->boundary = first->size >> dwidth;
795 	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
796 	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
797 
798 	desc->txd.cookie = -EBUSY;
799 	desc->total_len = desc->len = len;
800 
801 	/* set end-of-link on the last link descriptor of the list */
802 	set_desc_eol(desc);
803 
804 	desc->txd.flags = flags; /* client is in control of this ack */
805 
806 	return &desc->txd;
807 }
808 
809 /**
810  * atc_prep_dma_memcpy - prepare a memcpy operation
811  * @chan: the channel to prepare operation on
812  * @dest: operation virtual destination address
813  * @src: operation virtual source address
814  * @len: operation length
815  * @flags: tx descriptor status flags
816  */
817 static struct dma_async_tx_descriptor *
818 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
819 		size_t len, unsigned long flags)
820 {
821 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
822 	struct at_desc		*desc = NULL;
823 	struct at_desc		*first = NULL;
824 	struct at_desc		*prev = NULL;
825 	size_t			xfer_count;
826 	size_t			offset;
827 	unsigned int		src_width;
828 	unsigned int		dst_width;
829 	u32			ctrla;
830 	u32			ctrlb;
831 
832 	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
833 			&dest, &src, len, flags);
834 
835 	if (unlikely(!len)) {
836 		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
837 		return NULL;
838 	}
839 
840 	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
841 		| ATC_SRC_ADDR_MODE_INCR
842 		| ATC_DST_ADDR_MODE_INCR
843 		| ATC_FC_MEM2MEM;
844 
845 	/*
846 	 * We can be a lot more clever here, but this should take care
847 	 * of the most common optimization.
848 	 */
849 	src_width = dst_width = atc_get_xfer_width(src, dest, len);
850 
851 	ctrla = ATC_SRC_WIDTH(src_width) |
852 		ATC_DST_WIDTH(dst_width);
853 
854 	for (offset = 0; offset < len; offset += xfer_count << src_width) {
855 		xfer_count = min_t(size_t, (len - offset) >> src_width,
856 				ATC_BTSIZE_MAX);
857 
858 		desc = atc_desc_get(atchan);
859 		if (!desc)
860 			goto err_desc_get;
861 
862 		desc->lli.saddr = src + offset;
863 		desc->lli.daddr = dest + offset;
864 		desc->lli.ctrla = ctrla | xfer_count;
865 		desc->lli.ctrlb = ctrlb;
866 
867 		desc->txd.cookie = 0;
868 		desc->len = xfer_count << src_width;
869 
870 		atc_desc_chain(&first, &prev, desc);
871 	}
872 
873 	/* First descriptor of the chain embeds additional information */
874 	first->txd.cookie = -EBUSY;
875 	first->total_len = len;
876 
877 	/* set end-of-link on the last link descriptor of the list */
878 	set_desc_eol(desc);
879 
880 	first->txd.flags = flags; /* client is in control of this ack */
881 
882 	return &first->txd;
883 
884 err_desc_get:
885 	atc_desc_put(atchan, first);
886 	return NULL;
887 }
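
/*
 * Illustrative client-side sketch (assuming "chan" was obtained through
 * dma_request_chan() and that dst/src are DMA bus addresses):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * dmaengine_submit() lands in atc_tx_submit() above and
 * dma_async_issue_pending() in atc_issue_pending() below.
 */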
888 
889 static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
890 					      dma_addr_t psrc,
891 					      dma_addr_t pdst,
892 					      size_t len)
893 {
894 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
895 	struct at_desc *desc;
896 	size_t xfer_count;
897 
898 	u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
899 	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
900 		ATC_SRC_ADDR_MODE_FIXED |
901 		ATC_DST_ADDR_MODE_INCR |
902 		ATC_FC_MEM2MEM;
903 
904 	xfer_count = len >> 2;
905 	if (xfer_count > ATC_BTSIZE_MAX) {
906 		dev_err(chan2dev(chan), "%s: buffer is too big\n",
907 			__func__);
908 		return NULL;
909 	}
910 
911 	desc = atc_desc_get(atchan);
912 	if (!desc) {
913 		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
914 			__func__);
915 		return NULL;
916 	}
917 
918 	desc->lli.saddr = psrc;
919 	desc->lli.daddr = pdst;
920 	desc->lli.ctrla = ctrla | xfer_count;
921 	desc->lli.ctrlb = ctrlb;
922 
923 	desc->txd.cookie = 0;
924 	desc->len = len;
925 
926 	return desc;
927 }
928 
929 /**
930  * atc_prep_dma_memset - prepare a memset operation
931  * @chan: the channel to prepare operation on
932  * @dest: operation virtual destination address
933  * @value: value to set memory buffer to
934  * @len: operation length
935  * @flags: tx descriptor status flags
936  */
937 static struct dma_async_tx_descriptor *
938 atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
939 		    size_t len, unsigned long flags)
940 {
941 	struct at_dma		*atdma = to_at_dma(chan->device);
942 	struct at_desc		*desc;
943 	void __iomem		*vaddr;
944 	dma_addr_t		paddr;
945 	char			fill_pattern;
946 
947 	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
948 		&dest, value, len, flags);
949 
950 	if (unlikely(!len)) {
951 		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
952 		return NULL;
953 	}
954 
955 	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
956 		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
957 			__func__);
958 		return NULL;
959 	}
960 
961 	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
962 	if (!vaddr) {
963 		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
964 			__func__);
965 		return NULL;
966 	}
967 
968 	/* Only the first byte of value is to be used according to dmaengine */
969 	fill_pattern = (char)value;
970 
971 	*(u32*)vaddr = (fill_pattern << 24) |
972 		       (fill_pattern << 16) |
973 		       (fill_pattern << 8) |
974 		       fill_pattern;
975 
976 	desc = atc_create_memset_desc(chan, paddr, dest, len);
977 	if (!desc) {
978 		dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
979 			__func__);
980 		goto err_free_buffer;
981 	}
982 
983 	desc->memset_paddr = paddr;
984 	desc->memset_vaddr = vaddr;
985 	desc->memset_buffer = true;
986 
987 	desc->txd.cookie = -EBUSY;
988 	desc->total_len = len;
989 
990 	/* set end-of-link on the descriptor */
991 	set_desc_eol(desc);
992 
993 	desc->txd.flags = flags;
994 
995 	return &desc->txd;
996 
997 err_free_buffer:
998 	dma_pool_free(atdma->memset_pool, vaddr, paddr);
999 	return NULL;
1000 }
1001 
1002 static struct dma_async_tx_descriptor *
1003 atc_prep_dma_memset_sg(struct dma_chan *chan,
1004 		       struct scatterlist *sgl,
1005 		       unsigned int sg_len, int value,
1006 		       unsigned long flags)
1007 {
1008 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1009 	struct at_dma		*atdma = to_at_dma(chan->device);
1010 	struct at_desc		*desc = NULL, *first = NULL, *prev = NULL;
1011 	struct scatterlist	*sg;
1012 	void __iomem		*vaddr;
1013 	dma_addr_t		paddr;
1014 	size_t			total_len = 0;
1015 	int			i;
1016 
1017 	dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%x f0x%lx\n", __func__,
1018 		 value, sg_len, flags);
1019 
1020 	if (unlikely(!sgl || !sg_len)) {
1021 		dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
1022 			__func__);
1023 		return NULL;
1024 	}
1025 
1026 	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
1027 	if (!vaddr) {
1028 		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
1029 			__func__);
1030 		return NULL;
1031 	}
1032 	*(u32*)vaddr = value;
1033 
1034 	for_each_sg(sgl, sg, sg_len, i) {
1035 		dma_addr_t dest = sg_dma_address(sg);
1036 		size_t len = sg_dma_len(sg);
1037 
1038 		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1039 			 __func__, &dest, len);
1040 
1041 		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1042 			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
1043 				__func__);
1044 			goto err_put_desc;
1045 		}
1046 
1047 		desc = atc_create_memset_desc(chan, paddr, dest, len);
1048 		if (!desc)
1049 			goto err_put_desc;
1050 
1051 		atc_desc_chain(&first, &prev, desc);
1052 
1053 		total_len += len;
1054 	}
1055 
1056 	/*
1057 	 * Only set the buffer pointers on the last descriptor to
1058 	 * avoid freeing it while the transfer is still in progress
1059 	 */
1060 	desc->memset_paddr = paddr;
1061 	desc->memset_vaddr = vaddr;
1062 	desc->memset_buffer = true;
1063 
1064 	first->txd.cookie = -EBUSY;
1065 	first->total_len = total_len;
1066 
1067 	/* set end-of-link on the descriptor */
1068 	set_desc_eol(desc);
1069 
1070 	first->txd.flags = flags;
1071 
1072 	return &first->txd;
1073 
1074 err_put_desc:
1075 	atc_desc_put(atchan, first);
1076 	return NULL;
1077 }
1078 
1079 /**
1080  * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1081  * @chan: DMA channel
1082  * @sgl: scatterlist to transfer to/from
1083  * @sg_len: number of entries in @sgl
1084  * @direction: DMA direction
1085  * @flags: tx descriptor status flags
1086  * @context: transaction context (ignored)
1087  */
1088 static struct dma_async_tx_descriptor *
1089 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1090 		unsigned int sg_len, enum dma_transfer_direction direction,
1091 		unsigned long flags, void *context)
1092 {
1093 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1094 	struct at_dma_slave	*atslave = chan->private;
1095 	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
1096 	struct at_desc		*first = NULL;
1097 	struct at_desc		*prev = NULL;
1098 	u32			ctrla;
1099 	u32			ctrlb;
1100 	dma_addr_t		reg;
1101 	unsigned int		reg_width;
1102 	unsigned int		mem_width;
1103 	unsigned int		i;
1104 	struct scatterlist	*sg;
1105 	size_t			total_len = 0;
1106 
1107 	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
1108 			sg_len,
1109 			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1110 			flags);
1111 
1112 	if (unlikely(!atslave || !sg_len)) {
1113 		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
1114 		return NULL;
1115 	}
1116 
1117 	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
1118 		| ATC_DCSIZE(sconfig->dst_maxburst);
1119 	ctrlb = ATC_IEN;
1120 
1121 	switch (direction) {
1122 	case DMA_MEM_TO_DEV:
1123 		reg_width = convert_buswidth(sconfig->dst_addr_width);
1124 		ctrla |=  ATC_DST_WIDTH(reg_width);
1125 		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
1126 			| ATC_SRC_ADDR_MODE_INCR
1127 			| ATC_FC_MEM2PER
1128 			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
1129 		reg = sconfig->dst_addr;
1130 		for_each_sg(sgl, sg, sg_len, i) {
1131 			struct at_desc	*desc;
1132 			u32		len;
1133 			u32		mem;
1134 
1135 			desc = atc_desc_get(atchan);
1136 			if (!desc)
1137 				goto err_desc_get;
1138 
1139 			mem = sg_dma_address(sg);
1140 			len = sg_dma_len(sg);
1141 			if (unlikely(!len)) {
1142 				dev_dbg(chan2dev(chan),
1143 					"prep_slave_sg: sg(%d) data length is zero\n", i);
1144 				goto err;
1145 			}
1146 			mem_width = 2;
1147 			if (unlikely(mem & 3 || len & 3))
1148 				mem_width = 0;
1149 
1150 			desc->lli.saddr = mem;
1151 			desc->lli.daddr = reg;
1152 			desc->lli.ctrla = ctrla
1153 					| ATC_SRC_WIDTH(mem_width)
1154 					| len >> mem_width;
1155 			desc->lli.ctrlb = ctrlb;
1156 			desc->len = len;
1157 
1158 			atc_desc_chain(&first, &prev, desc);
1159 			total_len += len;
1160 		}
1161 		break;
1162 	case DMA_DEV_TO_MEM:
1163 		reg_width = convert_buswidth(sconfig->src_addr_width);
1164 		ctrla |=  ATC_SRC_WIDTH(reg_width);
1165 		ctrlb |=  ATC_DST_ADDR_MODE_INCR
1166 			| ATC_SRC_ADDR_MODE_FIXED
1167 			| ATC_FC_PER2MEM
1168 			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
1169 
1170 		reg = sconfig->src_addr;
1171 		for_each_sg(sgl, sg, sg_len, i) {
1172 			struct at_desc	*desc;
1173 			u32		len;
1174 			u32		mem;
1175 
1176 			desc = atc_desc_get(atchan);
1177 			if (!desc)
1178 				goto err_desc_get;
1179 
1180 			mem = sg_dma_address(sg);
1181 			len = sg_dma_len(sg);
1182 			if (unlikely(!len)) {
1183 				dev_dbg(chan2dev(chan),
1184 					"prep_slave_sg: sg(%d) data length is zero\n", i);
1185 				goto err;
1186 			}
1187 			mem_width = 2;
1188 			if (unlikely(mem & 3 || len & 3))
1189 				mem_width = 0;
1190 
1191 			desc->lli.saddr = reg;
1192 			desc->lli.daddr = mem;
1193 			desc->lli.ctrla = ctrla
1194 					| ATC_DST_WIDTH(mem_width)
1195 					| len >> reg_width;
1196 			desc->lli.ctrlb = ctrlb;
1197 			desc->len = len;
1198 
1199 			atc_desc_chain(&first, &prev, desc);
1200 			total_len += len;
1201 		}
1202 		break;
1203 	default:
1204 		return NULL;
1205 	}
1206 
1207 	/* set end-of-link on the last link descriptor of the list */
1208 	set_desc_eol(prev);
1209 
1210 	/* First descriptor of the chain embeds additional information */
1211 	first->txd.cookie = -EBUSY;
1212 	first->total_len = total_len;
1213 
1214 	/* first link descriptor of the list is responsible for flags */
1215 	first->txd.flags = flags; /* client is in control of this ack */
1216 
1217 	return &first->txd;
1218 
1219 err_desc_get:
1220 	dev_err(chan2dev(chan), "not enough descriptors available\n");
1221 err:
1222 	atc_desc_put(atchan, first);
1223 	return NULL;
1224 }
1225 
1226 /*
1227  * atc_dma_cyclic_check_values
1228  * Check for too big/unaligned periods and unaligned DMA buffer
1229  */
1230 static int
1231 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
1232 		size_t period_len)
1233 {
1234 	if (period_len > (ATC_BTSIZE_MAX << reg_width))
1235 		goto err_out;
1236 	if (unlikely(period_len & ((1 << reg_width) - 1)))
1237 		goto err_out;
1238 	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1239 		goto err_out;
1240 
1241 	return 0;
1242 
1243 err_out:
1244 	return -EINVAL;
1245 }
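
/*
 * Illustrative numbers: with reg_width = 2 (32-bit transfers) a period may
 * be at most ATC_BTSIZE_MAX << 2 bytes long, and both period_len and
 * buf_addr must be word aligned, otherwise the checks above reject the
 * request with -EINVAL.
 */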
1246 
1247 /*
1248  * atc_dma_cyclic_fill_desc - Fill one period descriptor
1249  */
1250 static int
1251 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
1252 		unsigned int period_index, dma_addr_t buf_addr,
1253 		unsigned int reg_width, size_t period_len,
1254 		enum dma_transfer_direction direction)
1255 {
1256 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1257 	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
1258 	u32			ctrla;
1259 
1260 	/* prepare common CTRLA value */
1261 	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
1262 		| ATC_DCSIZE(sconfig->dst_maxburst)
1263 		| ATC_DST_WIDTH(reg_width)
1264 		| ATC_SRC_WIDTH(reg_width)
1265 		| period_len >> reg_width;
1266 
1267 	switch (direction) {
1268 	case DMA_MEM_TO_DEV:
1269 		desc->lli.saddr = buf_addr + (period_len * period_index);
1270 		desc->lli.daddr = sconfig->dst_addr;
1271 		desc->lli.ctrla = ctrla;
1272 		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
1273 				| ATC_SRC_ADDR_MODE_INCR
1274 				| ATC_FC_MEM2PER
1275 				| ATC_SIF(atchan->mem_if)
1276 				| ATC_DIF(atchan->per_if);
1277 		desc->len = period_len;
1278 		break;
1279 
1280 	case DMA_DEV_TO_MEM:
1281 		desc->lli.saddr = sconfig->src_addr;
1282 		desc->lli.daddr = buf_addr + (period_len * period_index);
1283 		desc->lli.ctrla = ctrla;
1284 		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
1285 				| ATC_SRC_ADDR_MODE_FIXED
1286 				| ATC_FC_PER2MEM
1287 				| ATC_SIF(atchan->per_if)
1288 				| ATC_DIF(atchan->mem_if);
1289 		desc->len = period_len;
1290 		break;
1291 
1292 	default:
1293 		return -EINVAL;
1294 	}
1295 
1296 	return 0;
1297 }
1298 
1299 /**
1300  * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
1301  * @chan: the DMA channel to prepare
1302  * @buf_addr: physical DMA address where the buffer starts
1303  * @buf_len: total number of bytes for the entire buffer
1304  * @period_len: number of bytes for each period
1305  * @direction: transfer direction, to or from device
1306  * @flags: tx descriptor status flags
1307  */
1308 static struct dma_async_tx_descriptor *
1309 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1310 		size_t period_len, enum dma_transfer_direction direction,
1311 		unsigned long flags)
1312 {
1313 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1314 	struct at_dma_slave	*atslave = chan->private;
1315 	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
1316 	struct at_desc		*first = NULL;
1317 	struct at_desc		*prev = NULL;
1318 	unsigned long		was_cyclic;
1319 	unsigned int		reg_width;
1320 	unsigned int		periods = buf_len / period_len;
1321 	unsigned int		i;
1322 
1323 	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %u (%zu/%zu)\n",
1324 			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1325 			&buf_addr,
1326 			periods, buf_len, period_len);
1327 
1328 	if (unlikely(!atslave || !buf_len || !period_len)) {
1329 		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
1330 		return NULL;
1331 	}
1332 
1333 	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
1334 	if (was_cyclic) {
1335 		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
1336 		return NULL;
1337 	}
1338 
1339 	if (unlikely(!is_slave_direction(direction)))
1340 		goto err_out;
1341 
1342 	if (direction == DMA_MEM_TO_DEV)
1343 		reg_width = convert_buswidth(sconfig->dst_addr_width);
1344 	else
1345 		reg_width = convert_buswidth(sconfig->src_addr_width);
1346 
1347 	/* Check for too big/unaligned periods and unaligned DMA buffer */
1348 	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
1349 		goto err_out;
1350 
1351 	/* build cyclic linked list */
1352 	for (i = 0; i < periods; i++) {
1353 		struct at_desc	*desc;
1354 
1355 		desc = atc_desc_get(atchan);
1356 		if (!desc)
1357 			goto err_desc_get;
1358 
1359 		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
1360 					     reg_width, period_len, direction))
1361 			goto err_desc_get;
1362 
1363 		atc_desc_chain(&first, &prev, desc);
1364 	}
1365 
1366 	/* let's make a cyclic list */
1367 	prev->lli.dscr = first->txd.phys;
1368 
1369 	/* First descriptor of the chain embeds additional information */
1370 	first->txd.cookie = -EBUSY;
1371 	first->total_len = buf_len;
1372 
1373 	return &first->txd;
1374 
1375 err_desc_get:
1376 	dev_err(chan2dev(chan), "not enough descriptors available\n");
1377 	atc_desc_put(atchan, first);
1378 err_out:
1379 	clear_bit(ATC_IS_CYCLIC, &atchan->status);
1380 	return NULL;
1381 }
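
/*
 * Illustrative client-side sketch (assuming a peripheral driver that has
 * already issued dmaengine_slave_config() on the channel; the callback name
 * is hypothetical):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -EBUSY;
 *	tx->callback = my_period_callback;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * Each elapsed period then invokes the callback from atc_handle_cyclic().
 */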
1382 
1383 static int atc_config(struct dma_chan *chan,
1384 		      struct dma_slave_config *sconfig)
1385 {
1386 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1387 
1388 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1389 
1390 	/* Check if chan is configured for slave transfers */
1391 	if (!chan->private)
1392 		return -EINVAL;
1393 
1394 	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1395 
1396 	convert_burst(&atchan->dma_sconfig.src_maxburst);
1397 	convert_burst(&atchan->dma_sconfig.dst_maxburst);
1398 
1399 	return 0;
1400 }
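
/*
 * Illustrative sketch of the client side feeding this callback (all values
 * are hypothetical, for a peripheral whose RX FIFO is read as 32-bit words):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 1,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * atc_config() keeps a copy in atchan->dma_sconfig and converts the maxburst
 * values to the chunk-size encoding expected by CTRLA.
 */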
1401 
1402 static int atc_pause(struct dma_chan *chan)
1403 {
1404 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1405 	struct at_dma		*atdma = to_at_dma(chan->device);
1406 	int			chan_id = atchan->chan_common.chan_id;
1407 	unsigned long		flags;
1408 
1409 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1410 
1411 	spin_lock_irqsave(&atchan->lock, flags);
1412 
1413 	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1414 	set_bit(ATC_IS_PAUSED, &atchan->status);
1415 
1416 	spin_unlock_irqrestore(&atchan->lock, flags);
1417 
1418 	return 0;
1419 }
1420 
1421 static int atc_resume(struct dma_chan *chan)
1422 {
1423 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1424 	struct at_dma		*atdma = to_at_dma(chan->device);
1425 	int			chan_id = atchan->chan_common.chan_id;
1426 	unsigned long		flags;
1427 
1428 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1429 
1430 	if (!atc_chan_is_paused(atchan))
1431 		return 0;
1432 
1433 	spin_lock_irqsave(&atchan->lock, flags);
1434 
1435 	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1436 	clear_bit(ATC_IS_PAUSED, &atchan->status);
1437 
1438 	spin_unlock_irqrestore(&atchan->lock, flags);
1439 
1440 	return 0;
1441 }
1442 
1443 static int atc_terminate_all(struct dma_chan *chan)
1444 {
1445 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1446 	struct at_dma		*atdma = to_at_dma(chan->device);
1447 	int			chan_id = atchan->chan_common.chan_id;
1448 	struct at_desc		*desc, *_desc;
1449 	unsigned long		flags;
1450 
1451 	LIST_HEAD(list);
1452 
1453 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1454 
1455 	/*
1456 	 * This is only called when something went wrong elsewhere, so
1457 	 * we don't really care about the data. Just disable the
1458 	 * channel. We still have to poll the channel enable bit due
1459 	 * to AHB/HSB limitations.
1460 	 */
1461 	spin_lock_irqsave(&atchan->lock, flags);
1462 
1463 	/* disabling channel: must also remove suspend state */
1464 	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1465 
1466 	/* confirm that this channel is disabled */
1467 	while (dma_readl(atdma, CHSR) & atchan->mask)
1468 		cpu_relax();
1469 
1470 	/* active_list entries will end up before queued entries */
1471 	list_splice_init(&atchan->queue, &list);
1472 	list_splice_init(&atchan->active_list, &list);
1473 
1474 	spin_unlock_irqrestore(&atchan->lock, flags);
1475 
1476 	/* Flush all pending and queued descriptors */
1477 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
1478 		atc_chain_complete(atchan, desc);
1479 
1480 	clear_bit(ATC_IS_PAUSED, &atchan->status);
1481 	/* if channel dedicated to cyclic operations, free it */
1482 	clear_bit(ATC_IS_CYCLIC, &atchan->status);
1483 
1484 	return 0;
1485 }
1486 
1487 /**
1488  * atc_tx_status - poll for transaction completion
1489  * @chan: DMA channel
1490  * @cookie: transaction identifier to check status of
1491  * @txstate: if not %NULL updated with transaction state
1492  *
1493  * If @txstate is passed in, upon return it reflects the driver's
1494  * internal state and can be used with dma_async_is_complete() to check
1495  * the status of multiple cookies without re-checking hardware state.
1496  */
1497 static enum dma_status
1498 atc_tx_status(struct dma_chan *chan,
1499 		dma_cookie_t cookie,
1500 		struct dma_tx_state *txstate)
1501 {
1502 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1503 	unsigned long		flags;
1504 	enum dma_status		ret;
1505 	int bytes = 0;
1506 
1507 	ret = dma_cookie_status(chan, cookie, txstate);
1508 	if (ret == DMA_COMPLETE)
1509 		return ret;
1510 	/*
1511 	 * There's no point calculating the residue if there's
1512 	 * no txstate to store the value.
1513 	 */
1514 	if (!txstate)
1515 		return DMA_ERROR;
1516 
1517 	spin_lock_irqsave(&atchan->lock, flags);
1518 
1519 	/* Get number of bytes left in the active transactions */
1520 	bytes = atc_get_bytes_left(chan, cookie);
1521 
1522 	spin_unlock_irqrestore(&atchan->lock, flags);
1523 
1524 	if (unlikely(bytes < 0)) {
1525 		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1526 		return DMA_ERROR;
1527 	} else {
1528 		dma_set_residue(txstate, bytes);
1529 	}
1530 
1531 	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1532 		 ret, cookie, bytes);
1533 
1534 	return ret;
1535 }
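
/*
 * Illustrative note: clients normally reach this through
 * dmaengine_tx_status(chan, cookie, &state) and read the remaining byte
 * count from state.residue, e.g. to work out how much data a cyclic RX
 * buffer currently holds.
 */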
1536 
1537 /**
1538  * atc_issue_pending - try to finish work
1539  * @chan: target DMA channel
1540  */
1541 static void atc_issue_pending(struct dma_chan *chan)
1542 {
1543 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1544 
1545 	dev_vdbg(chan2dev(chan), "issue_pending\n");
1546 
1547 	/* Not needed for cyclic transfers */
1548 	if (atc_chan_is_cyclic(atchan))
1549 		return;
1550 
1551 	atc_advance_work(atchan);
1552 }
1553 
1554 /**
1555  * atc_alloc_chan_resources - allocate resources for DMA channel
1556  * @chan: allocate descriptor resources for this channel
1557  *
1558  * return - the number of allocated descriptors
1559  */
1560 static int atc_alloc_chan_resources(struct dma_chan *chan)
1561 {
1562 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1563 	struct at_dma		*atdma = to_at_dma(chan->device);
1564 	struct at_desc		*desc;
1565 	struct at_dma_slave	*atslave;
1566 	int			i;
1567 	u32			cfg;
1568 
1569 	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1570 
1571 	/* ASSERT:  channel is idle */
1572 	if (atc_chan_is_enabled(atchan)) {
1573 		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1574 		return -EIO;
1575 	}
1576 
1577 	if (!list_empty(&atchan->free_list)) {
1578 		dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
1579 		return -EIO;
1580 	}
1581 
1582 	cfg = ATC_DEFAULT_CFG;
1583 
1584 	atslave = chan->private;
1585 	if (atslave) {
1586 		/*
1587 		 * We need controller-specific data to set up slave
1588 		 * transfers.
1589 		 */
1590 		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1591 
1592 		/* if cfg configuration specified take it instead of default */
1593 		if (atslave->cfg)
1594 			cfg = atslave->cfg;
1595 	}
1596 
1597 	/* Allocate initial pool of descriptors */
1598 	for (i = 0; i < init_nr_desc_per_channel; i++) {
1599 		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1600 		if (!desc) {
1601 			dev_err(atdma->dma_common.dev,
1602 				"Only %d initial descriptors\n", i);
1603 			break;
1604 		}
1605 		list_add_tail(&desc->desc_node, &atchan->free_list);
1606 	}
1607 
1608 	dma_cookie_init(chan);
1609 
1610 	/* channel parameters */
1611 	channel_writel(atchan, CFG, cfg);
1612 
1613 	dev_dbg(chan2dev(chan),
1614 		"alloc_chan_resources: allocated %d descriptors\n", i);
1615 
1616 	return i;
1617 }
1618 
1619 /**
1620  * atc_free_chan_resources - free all channel resources
1621  * @chan: DMA channel
1622  */
1623 static void atc_free_chan_resources(struct dma_chan *chan)
1624 {
1625 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1626 	struct at_dma		*atdma = to_at_dma(chan->device);
1627 	struct at_desc		*desc, *_desc;
1628 	LIST_HEAD(list);
1629 
1630 	/* ASSERT:  channel is idle */
1631 	BUG_ON(!list_empty(&atchan->active_list));
1632 	BUG_ON(!list_empty(&atchan->queue));
1633 	BUG_ON(atc_chan_is_enabled(atchan));
1634 
1635 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1636 		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1637 		list_del(&desc->desc_node);
1638 		/* free link descriptor */
1639 		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1640 	}
1641 	list_splice_init(&atchan->free_list, &list);
1642 	atchan->status = 0;
1643 
1644 	/*
1645 	 * Free atslave allocated in at_dma_xlate()
1646 	 */
1647 	kfree(chan->private);
1648 	chan->private = NULL;
1649 
1650 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1651 }
1652 
1653 #ifdef CONFIG_OF
1654 static bool at_dma_filter(struct dma_chan *chan, void *slave)
1655 {
1656 	struct at_dma_slave *atslave = slave;
1657 
1658 	if (atslave->dma_dev == chan->device->dev) {
1659 		chan->private = atslave;
1660 		return true;
1661 	} else {
1662 		return false;
1663 	}
1664 }
1665 
1666 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1667 				     struct of_dma *of_dma)
1668 {
1669 	struct dma_chan *chan;
1670 	struct at_dma_chan *atchan;
1671 	struct at_dma_slave *atslave;
1672 	dma_cap_mask_t mask;
1673 	unsigned int per_id;
1674 	struct platform_device *dmac_pdev;
1675 
1676 	if (dma_spec->args_count != 2)
1677 		return NULL;
1678 
1679 	dmac_pdev = of_find_device_by_node(dma_spec->np);
1680 	if (!dmac_pdev)
1681 		return NULL;
1682 
1683 	dma_cap_zero(mask);
1684 	dma_cap_set(DMA_SLAVE, mask);
1685 
1686 	atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
1687 	if (!atslave) {
1688 		put_device(&dmac_pdev->dev);
1689 		return NULL;
1690 	}
1691 
1692 	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1693 	/*
1694 	 * We can fill both SRC_PER and DST_PER; one of these fields will be
1695 	 * ignored depending on DMA transfer direction.
1696 	 */
1697 	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1698 	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1699 		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1700 	/*
1701 	 * We have to translate the value we get from the device tree since
1702 	 * the half FIFO configuration value had to be 0 to keep backward
1703 	 * compatibility.
1704 	 */
1705 	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1706 	case AT91_DMA_CFG_FIFOCFG_ALAP:
1707 		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1708 		break;
1709 	case AT91_DMA_CFG_FIFOCFG_ASAP:
1710 		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1711 		break;
1712 	case AT91_DMA_CFG_FIFOCFG_HALF:
1713 	default:
1714 		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1715 	}
1716 	atslave->dma_dev = &dmac_pdev->dev;
1717 
1718 	chan = dma_request_channel(mask, at_dma_filter, atslave);
1719 	if (!chan) {
1720 		put_device(&dmac_pdev->dev);
1721 		kfree(atslave);
1722 		return NULL;
1723 	}
1724 
1725 	atchan = to_at_dma_chan(chan);
1726 	atchan->per_if = dma_spec->args[0] & 0xff;
1727 	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1728 
1729 	return chan;
1730 }
1731 #else
1732 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1733 				     struct of_dma *of_dma)
1734 {
1735 	return NULL;
1736 }
1737 #endif
1738 
1739 /*--  Module Management  -----------------------------------------------*/
1740 
1741 /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
1742 static struct at_dma_platform_data at91sam9rl_config = {
1743 	.nr_channels = 2,
1744 };
1745 static struct at_dma_platform_data at91sam9g45_config = {
1746 	.nr_channels = 8,
1747 };
1748 
1749 #if defined(CONFIG_OF)
1750 static const struct of_device_id atmel_dma_dt_ids[] = {
1751 	{
1752 		.compatible = "atmel,at91sam9rl-dma",
1753 		.data = &at91sam9rl_config,
1754 	}, {
1755 		.compatible = "atmel,at91sam9g45-dma",
1756 		.data = &at91sam9g45_config,
1757 	}, {
1758 		/* sentinel */
1759 	}
1760 };
1761 
1762 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1763 #endif
1764 
1765 static const struct platform_device_id atdma_devtypes[] = {
1766 	{
1767 		.name = "at91sam9rl_dma",
1768 		.driver_data = (unsigned long) &at91sam9rl_config,
1769 	}, {
1770 		.name = "at91sam9g45_dma",
1771 		.driver_data = (unsigned long) &at91sam9g45_config,
1772 	}, {
1773 		/* sentinel */
1774 	}
1775 };
1776 
1777 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1778 						struct platform_device *pdev)
1779 {
1780 	if (pdev->dev.of_node) {
1781 		const struct of_device_id *match;
1782 		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1783 		if (match == NULL)
1784 			return NULL;
1785 		return match->data;
1786 	}
1787 	return (struct at_dma_platform_data *)
1788 			platform_get_device_id(pdev)->driver_data;
1789 }
1790 
1791 /**
1792  * @atdma: the Atmel HDMAC device
1793  * @atdma: the Atmel HDAMC device
1794  */
1795 static void at_dma_off(struct at_dma *atdma)
1796 {
1797 	dma_writel(atdma, EN, 0);
1798 
1799 	/* disable all interrupts */
1800 	dma_writel(atdma, EBCIDR, -1L);
1801 
1802 	/* confirm that all channels are disabled */
1803 	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1804 		cpu_relax();
1805 }
1806 
1807 static int __init at_dma_probe(struct platform_device *pdev)
1808 {
1809 	struct resource		*io;
1810 	struct at_dma		*atdma;
1811 	size_t			size;
1812 	int			irq;
1813 	int			err;
1814 	int			i;
1815 	const struct at_dma_platform_data *plat_dat;
1816 
1817 	/* setup platform data for each SoC */
1818 	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1819 	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
1820 	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1821 	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
1822 	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
1823 	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
1824 	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1825 
1826 	/* get DMA parameters from controller type */
1827 	plat_dat = at_dma_get_driver_data(pdev);
1828 	if (!plat_dat)
1829 		return -ENODEV;
1830 
1831 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1832 	if (!io)
1833 		return -EINVAL;
1834 
1835 	irq = platform_get_irq(pdev, 0);
1836 	if (irq < 0)
1837 		return irq;
1838 
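	/* the per-channel structures are allocated right after struct at_dma */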
1839 	size = sizeof(struct at_dma);
1840 	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1841 	atdma = kzalloc(size, GFP_KERNEL);
1842 	if (!atdma)
1843 		return -ENOMEM;
1844 
1845 	/* discover transaction capabilities */
1846 	atdma->dma_common.cap_mask = plat_dat->cap_mask;
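	/* one bit per channel, e.g. 0xff for the 8-channel variant */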
1847 	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1848 
1849 	size = resource_size(io);
1850 	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1851 		err = -EBUSY;
1852 		goto err_kfree;
1853 	}
1854 
1855 	atdma->regs = ioremap(io->start, size);
1856 	if (!atdma->regs) {
1857 		err = -ENOMEM;
1858 		goto err_release_r;
1859 	}
1860 
1861 	atdma->clk = clk_get(&pdev->dev, "dma_clk");
1862 	if (IS_ERR(atdma->clk)) {
1863 		err = PTR_ERR(atdma->clk);
1864 		goto err_clk;
1865 	}
1866 	err = clk_prepare_enable(atdma->clk);
1867 	if (err)
1868 		goto err_clk_prepare;
1869 
1870 	/* force dma off, just in case */
1871 	at_dma_off(atdma);
1872 
1873 	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1874 	if (err)
1875 		goto err_irq;
1876 
1877 	platform_set_drvdata(pdev, atdma);
1878 
1879 	/* create a pool of consistent memory blocks for hardware descriptors */
1880 	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1881 			&pdev->dev, sizeof(struct at_desc),
1882 			4 /* word alignment */, 0);
1883 	if (!atdma->dma_desc_pool) {
1884 		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1885 		err = -ENOMEM;
1886 		goto err_desc_pool_create;
1887 	}
1888 
1889 	/* create a pool of consistent memory blocks for memset blocks */
	/* create a pool of consistent memory blocks for the memset fill values */
1891 					     &pdev->dev, sizeof(int), 4, 0);
1892 	if (!atdma->memset_pool) {
1893 		dev_err(&pdev->dev, "No memory for memset dma pool\n");
1894 		err = -ENOMEM;
1895 		goto err_memset_pool_create;
1896 	}
1897 
1898 	/* clear any pending interrupt */
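	/* (reading EBCISR acknowledges, i.e. clears, the flagged interrupts) */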
1899 	while (dma_readl(atdma, EBCISR))
1900 		cpu_relax();
1901 
1902 	/* initialize channels related values */
1903 	INIT_LIST_HEAD(&atdma->dma_common.channels);
1904 	for (i = 0; i < plat_dat->nr_channels; i++) {
1905 		struct at_dma_chan	*atchan = &atdma->chan[i];
1906 
1907 		atchan->mem_if = AT_DMA_MEM_IF;
1908 		atchan->per_if = AT_DMA_PER_IF;
1909 		atchan->chan_common.device = &atdma->dma_common;
1910 		dma_cookie_init(&atchan->chan_common);
1911 		list_add_tail(&atchan->chan_common.device_node,
1912 				&atdma->dma_common.channels);
1913 
1914 		atchan->ch_regs = atdma->regs + ch_regs(i);
1915 		spin_lock_init(&atchan->lock);
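		/* per-channel bit used in the CHER/CHDR/CHSR registers */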
1916 		atchan->mask = 1 << i;
1917 
1918 		INIT_LIST_HEAD(&atchan->active_list);
1919 		INIT_LIST_HEAD(&atchan->queue);
1920 		INIT_LIST_HEAD(&atchan->free_list);
1921 
1922 		tasklet_setup(&atchan->tasklet, atc_tasklet);
1923 		atc_enable_chan_irq(atdma, i);
1924 	}
1925 
1926 	/* set base routines */
1927 	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1928 	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1929 	atdma->dma_common.device_tx_status = atc_tx_status;
1930 	atdma->dma_common.device_issue_pending = atc_issue_pending;
1931 	atdma->dma_common.dev = &pdev->dev;
1932 
1933 	/* set prep routines based on capability */
1934 	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
1935 		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
1936 
1937 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1938 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1939 
1940 	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
1941 		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
1942 		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
1943 		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
1944 	}
1945 
1946 	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1947 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1948 		/* controller can do slave DMA: can trigger cyclic transfers */
1949 		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1950 		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1951 		atdma->dma_common.device_config = atc_config;
1952 		atdma->dma_common.device_pause = atc_pause;
1953 		atdma->dma_common.device_resume = atc_resume;
1954 		atdma->dma_common.device_terminate_all = atc_terminate_all;
1955 		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1956 		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1957 		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1958 		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1959 	}
1960 
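	/* enable the controller before registering with the dmaengine core */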
1961 	dma_writel(atdma, EN, AT_DMA_ENABLE);
1962 
1963 	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
1964 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1965 	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
1966 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
1967 	  plat_dat->nr_channels);
1968 
	err = dma_async_device_register(&atdma->dma_common);
	if (err) {
		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
		goto err_dma_async_device_register;
	}
1970 
1971 	/*
	 * Do not return an error if the dmac node is not present, so as not
	 * to break the existing way of requesting a channel with
	 * dma_request_channel().
1975 	 */
1976 	if (pdev->dev.of_node) {
1977 		err = of_dma_controller_register(pdev->dev.of_node,
1978 						 at_dma_xlate, atdma);
1979 		if (err) {
1980 			dev_err(&pdev->dev, "could not register of_dma_controller\n");
1981 			goto err_of_dma_controller_register;
1982 		}
1983 	}
1984 
1985 	return 0;
1986 
err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
err_dma_async_device_register:
	dma_pool_destroy(atdma->memset_pool);
1990 err_memset_pool_create:
1991 	dma_pool_destroy(atdma->dma_desc_pool);
1992 err_desc_pool_create:
	free_irq(irq, atdma);
1994 err_irq:
1995 	clk_disable_unprepare(atdma->clk);
1996 err_clk_prepare:
1997 	clk_put(atdma->clk);
1998 err_clk:
1999 	iounmap(atdma->regs);
2000 	atdma->regs = NULL;
2001 err_release_r:
2002 	release_mem_region(io->start, size);
2003 err_kfree:
2004 	kfree(atdma);
2005 	return err;
2006 }
2007 
2008 static int at_dma_remove(struct platform_device *pdev)
2009 {
2010 	struct at_dma		*atdma = platform_get_drvdata(pdev);
2011 	struct dma_chan		*chan, *_chan;
2012 	struct resource		*io;
2013 
2014 	at_dma_off(atdma);
2015 	if (pdev->dev.of_node)
2016 		of_dma_controller_free(pdev->dev.of_node);
2017 	dma_async_device_unregister(&atdma->dma_common);
2018 
2019 	dma_pool_destroy(atdma->memset_pool);
2020 	dma_pool_destroy(atdma->dma_desc_pool);
2021 	free_irq(platform_get_irq(pdev, 0), atdma);
2022 
2023 	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2024 			device_node) {
2025 		struct at_dma_chan	*atchan = to_at_dma_chan(chan);
2026 
2027 		/* Disable interrupts */
2028 		atc_disable_chan_irq(atdma, chan->chan_id);
2029 
2030 		tasklet_kill(&atchan->tasklet);
2031 		list_del(&chan->device_node);
2032 	}
2033 
2034 	clk_disable_unprepare(atdma->clk);
2035 	clk_put(atdma->clk);
2036 
2037 	iounmap(atdma->regs);
2038 	atdma->regs = NULL;
2039 
2040 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2041 	release_mem_region(io->start, resource_size(io));
2042 
2043 	kfree(atdma);
2044 
2045 	return 0;
2046 }
2047 
2048 static void at_dma_shutdown(struct platform_device *pdev)
2049 {
2050 	struct at_dma	*atdma = platform_get_drvdata(pdev);
2051 
	at_dma_off(atdma);
2053 	clk_disable_unprepare(atdma->clk);
2054 }
2055 
2056 static int at_dma_prepare(struct device *dev)
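/*
 * System PM "prepare" callback: refuse to suspend (-EAGAIN) while a
 * non-cyclic transfer is still active on any channel.
 */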
2057 {
2058 	struct at_dma *atdma = dev_get_drvdata(dev);
2059 	struct dma_chan *chan, *_chan;
2060 
2061 	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2062 			device_node) {
2063 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2064 		/* wait for transaction completion (except in cyclic case) */
		/* a non-cyclic transfer is still running: refuse to suspend */
2066 			return -EAGAIN;
2067 	}
2068 	return 0;
2069 }
2070 
2071 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
2072 {
2073 	struct dma_chan	*chan = &atchan->chan_common;
2074 
	/* The channel should already have been paused by its user;
	 * pause it here anyway in case that was not done. */
2077 	if (!atc_chan_is_paused(atchan)) {
2078 		dev_warn(chan2dev(chan),
2079 		"cyclic channel not paused, should be done by channel user\n");
2080 		atc_pause(chan);
2081 	}
2082 
2083 	/* now preserve additional data for cyclic operations */
2084 	/* next descriptor address in the cyclic list */
2085 	atchan->save_dscr = channel_readl(atchan, DSCR);
2086 
2087 	vdbg_dump_regs(atchan);
2088 }
2089 
2090 static int at_dma_suspend_noirq(struct device *dev)
2091 {
2092 	struct at_dma *atdma = dev_get_drvdata(dev);
2093 	struct dma_chan *chan, *_chan;
2094 
2095 	/* preserve data */
2096 	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2097 			device_node) {
2098 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2099 
2100 		if (atc_chan_is_cyclic(atchan))
2101 			atc_suspend_cyclic(atchan);
2102 		atchan->save_cfg = channel_readl(atchan, CFG);
2103 	}
2104 	atdma->save_imr = dma_readl(atdma, EBCIMR);
2105 
2106 	/* disable DMA controller */
2107 	at_dma_off(atdma);
2108 	clk_disable_unprepare(atdma->clk);
2109 	return 0;
2110 }
2111 
2112 static void atc_resume_cyclic(struct at_dma_chan *atchan)
2113 {
2114 	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
2115 
	/* Restore the channel status for the cyclic descriptor list:
	 * resume from the descriptor that was next at suspend time. */
2118 	channel_writel(atchan, SADDR, 0);
2119 	channel_writel(atchan, DADDR, 0);
2120 	channel_writel(atchan, CTRLA, 0);
2121 	channel_writel(atchan, CTRLB, 0);
2122 	channel_writel(atchan, DSCR, atchan->save_dscr);
2123 	dma_writel(atdma, CHER, atchan->mask);
2124 
	/* The pause status must be cleared by the channel user;
	 * we cannot take the initiative to do it here. */
2127 
2128 	vdbg_dump_regs(atchan);
2129 }
2130 
2131 static int at_dma_resume_noirq(struct device *dev)
2132 {
2133 	struct at_dma *atdma = dev_get_drvdata(dev);
2134 	struct dma_chan *chan, *_chan;
2135 
2136 	/* bring back DMA controller */
2137 	clk_prepare_enable(atdma->clk);
2138 	dma_writel(atdma, EN, AT_DMA_ENABLE);
2139 
2140 	/* clear any pending interrupt */
2141 	while (dma_readl(atdma, EBCISR))
2142 		cpu_relax();
2143 
2144 	/* restore saved data */
2145 	dma_writel(atdma, EBCIER, atdma->save_imr);
2146 	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2147 			device_node) {
2148 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2149 
2150 		channel_writel(atchan, CFG, atchan->save_cfg);
2151 		if (atc_chan_is_cyclic(atchan))
2152 			atc_resume_cyclic(atchan);
2153 	}
2154 	return 0;
2155 }
2156 
2157 static const struct dev_pm_ops at_dma_dev_pm_ops = {
2158 	.prepare = at_dma_prepare,
2159 	.suspend_noirq = at_dma_suspend_noirq,
2160 	.resume_noirq = at_dma_resume_noirq,
2161 };
2162 
2163 static struct platform_driver at_dma_driver = {
2164 	.remove		= at_dma_remove,
2165 	.shutdown	= at_dma_shutdown,
2166 	.id_table	= atdma_devtypes,
2167 	.driver = {
2168 		.name	= "at_hdmac",
2169 		.pm	= &at_dma_dev_pm_ops,
2170 		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
2171 	},
2172 };
2173 
2174 static int __init at_dma_init(void)
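/*
 * at_dma_probe() lives in the __init section, so register the driver with
 * platform_driver_probe() rather than the usual module_platform_driver().
 */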
2175 {
2176 	return platform_driver_probe(&at_dma_driver, at_dma_probe);
2177 }
2178 subsys_initcall(at_dma_init);
2179 
2180 static void __exit at_dma_exit(void)
2181 {
2182 	platform_driver_unregister(&at_dma_driver);
2183 }
2184 module_exit(at_dma_exit);
2185 
2186 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
2187 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
2188 MODULE_LICENSE("GPL");
2189 MODULE_ALIAS("platform:at_hdmac");
2190