xref: /openbmc/linux/drivers/dma/at_hdmac.c (revision 9cb37357)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
4  *
5  * Copyright (C) 2008 Atmel Corporation
6  *
7  * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
8  * The only Atmel DMA Controller that is not covered by this driver is the one
9  * found on AT91SAM9263.
10  */
11 
12 #include <dt-bindings/dma/at91.h>
13 #include <linux/clk.h>
14 #include <linux/dmaengine.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmapool.h>
17 #include <linux/interrupt.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
21 #include <linux/of.h>
22 #include <linux/of_device.h>
23 #include <linux/of_dma.h>
24 
25 #include "at_hdmac_regs.h"
26 #include "dmaengine.h"
27 
28 /*
29  * Glossary
30  * --------
31  *
32  * at_hdmac		: Name of the Atmel AHB DMA Controller
33  * at_dma_ / atdma	: Atmel DMA controller entity related
34  * atc_	/ atchan	: Atmel DMA Channel entity related
35  */
36 
37 #define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
38 #define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
39 				|ATC_DIF(AT_DMA_MEM_IF))
40 #define ATC_DMA_BUSWIDTHS\
41 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
42 	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
43 	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
44 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
45 
46 #define ATC_MAX_DSCR_TRIALS	10
47 
48 /*
49  * Initial number of descriptors to allocate for each channel. This can
50  * be increased during DMA usage.
51  */
52 static unsigned int init_nr_desc_per_channel = 64;
53 module_param(init_nr_desc_per_channel, uint, 0644);
54 MODULE_PARM_DESC(init_nr_desc_per_channel,
55 		 "initial descriptors per channel (default: 64)");
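
/*
 * Illustrative usage (assumes the module keeps its "at_hdmac" name): boot with
 * "at_hdmac.init_nr_desc_per_channel=128", or pass
 * "init_nr_desc_per_channel=128" at modprobe time, to enlarge the initial pool.
 */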
56 
57 /**
58  * struct at_dma_platform_data - Controller configuration parameters
59  * @nr_channels: Number of channels supported by hardware (max 8)
60  * @cap_mask: dma_capability flags supported by the platform
61  */
62 struct at_dma_platform_data {
63 	unsigned int	nr_channels;
64 	dma_cap_mask_t  cap_mask;
65 };
66 
67 /**
68  * struct at_dma_slave - Controller-specific information about a slave
69  * @dma_dev: required DMA master device
70  * @cfg: Platform-specific initializer for the CFG register
71  */
72 struct at_dma_slave {
73 	struct device		*dma_dev;
74 	u32			cfg;
75 };
76 
77 /* prototypes */
78 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
79 static void atc_issue_pending(struct dma_chan *chan);
80 
81 
82 /*----------------------------------------------------------------------*/
83 
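/*
 * atc_get_xfer_width - pick the widest transfer width that the source address,
 * destination address and length all allow: 2 (32-bit) when all three are
 * 4-byte aligned, 1 (16-bit) when they are only 2-byte aligned, 0 (8-bit)
 * otherwise. For example, src = 0x1000, dst = 0x2004 and len = 64 give 2.
 */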
84 static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
85 						size_t len)
86 {
87 	unsigned int width;
88 
89 	if (!((src | dst | len) & 3))
90 		width = 2;
91 	else if (!((src | dst | len) & 1))
92 		width = 1;
93 	else
94 		width = 0;
95 
96 	return width;
97 }
98 
99 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
100 {
101 	return list_first_entry(&atchan->active_list,
102 				struct at_desc, desc_node);
103 }
104 
105 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
106 {
107 	return list_first_entry(&atchan->queue,
108 				struct at_desc, desc_node);
109 }
110 
111 /**
112  * atc_alloc_descriptor - allocate and return an initialized descriptor
113  * @chan: the channel to allocate descriptors for
114  * @gfp_flags: GFP allocation flags
115  *
116  * Note: The ACK bit is set in the descriptor flags at creation time
117  *       to make the initial allocation more convenient. The bit is cleared
118  *       and control is handed to the client at usage time (in the
119  *       preparation functions).
120  */
121 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
122 					    gfp_t gfp_flags)
123 {
124 	struct at_desc	*desc = NULL;
125 	struct at_dma	*atdma = to_at_dma(chan->device);
126 	dma_addr_t phys;
127 
128 	desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
129 	if (desc) {
130 		INIT_LIST_HEAD(&desc->tx_list);
131 		dma_async_tx_descriptor_init(&desc->txd, chan);
132 		/* txd.flags will be overwritten in prep functions */
133 		desc->txd.flags = DMA_CTRL_ACK;
134 		desc->txd.tx_submit = atc_tx_submit;
135 		desc->txd.phys = phys;
136 	}
137 
138 	return desc;
139 }
140 
141 /**
142  * atc_desc_get - get an unused descriptor from free_list
143  * @atchan: channel we want a new descriptor for
144  */
145 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
146 {
147 	struct at_desc *desc, *_desc;
148 	struct at_desc *ret = NULL;
149 	unsigned long flags;
150 	unsigned int i = 0;
151 
152 	spin_lock_irqsave(&atchan->lock, flags);
153 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
154 		i++;
155 		if (async_tx_test_ack(&desc->txd)) {
156 			list_del(&desc->desc_node);
157 			ret = desc;
158 			break;
159 		}
160 		dev_dbg(chan2dev(&atchan->chan_common),
161 				"desc %p not ACKed\n", desc);
162 	}
163 	spin_unlock_irqrestore(&atchan->lock, flags);
164 	dev_vdbg(chan2dev(&atchan->chan_common),
165 		"scanned %u descriptors on freelist\n", i);
166 
167 	/* no more descriptors available in the initial pool: create one more */
168 	if (!ret)
169 		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT);
170 
171 	return ret;
172 }
173 
174 /**
175  * atc_desc_put - move a descriptor, including any children, to the free list
176  * @atchan: channel we work on
177  * @desc: descriptor, at the head of a chain, to move to free list
178  */
179 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
180 {
181 	if (desc) {
182 		struct at_desc *child;
183 		unsigned long flags;
184 
185 		spin_lock_irqsave(&atchan->lock, flags);
186 		list_for_each_entry(child, &desc->tx_list, desc_node)
187 			dev_vdbg(chan2dev(&atchan->chan_common),
188 					"moving child desc %p to freelist\n",
189 					child);
190 		list_splice_init(&desc->tx_list, &atchan->free_list);
191 		dev_vdbg(chan2dev(&atchan->chan_common),
192 			 "moving desc %p to freelist\n", desc);
193 		list_add(&desc->desc_node, &atchan->free_list);
194 		spin_unlock_irqrestore(&atchan->lock, flags);
195 	}
196 }
197 
198 /**
199  * atc_desc_chain - add a descriptor to the transfer chain being built
200  * @first: address of first descriptor of the chain
201  * @prev: address of previous descriptor of the chain
202  * @desc: descriptor to queue
203  *
204  * Called from prep_* functions
205  */
206 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
207 			   struct at_desc *desc)
208 {
209 	if (!(*first)) {
210 		*first = desc;
211 	} else {
212 		/* inform the HW lli about chaining */
213 		(*prev)->lli.dscr = desc->txd.phys;
214 		/* insert the link descriptor to the LD ring */
215 		list_add_tail(&desc->desc_node,
216 				&(*first)->tx_list);
217 	}
218 	*prev = desc;
219 }
220 
221 /**
222  * atc_dostart - starts the DMA engine for real
223  * @atchan: the channel we want to start
224  * @first: first descriptor in the list we want to begin with
225  *
226  * Called with atchan->lock held and bh disabled
227  */
228 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
229 {
230 	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
231 
232 	/* ASSERT:  channel is idle */
233 	if (atc_chan_is_enabled(atchan)) {
234 		dev_err(chan2dev(&atchan->chan_common),
235 			"BUG: Attempted to start non-idle channel\n");
236 		dev_err(chan2dev(&atchan->chan_common),
237 			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
238 			channel_readl(atchan, SADDR),
239 			channel_readl(atchan, DADDR),
240 			channel_readl(atchan, CTRLA),
241 			channel_readl(atchan, CTRLB),
242 			channel_readl(atchan, DSCR));
243 
244 		/* The tasklet will hopefully advance the queue... */
245 		return;
246 	}
247 
248 	vdbg_dump_regs(atchan);
249 
250 	channel_writel(atchan, SADDR, 0);
251 	channel_writel(atchan, DADDR, 0);
252 	channel_writel(atchan, CTRLA, 0);
253 	channel_writel(atchan, CTRLB, 0);
254 	channel_writel(atchan, DSCR, first->txd.phys);
255 	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
256 		       ATC_SPIP_BOUNDARY(first->boundary));
257 	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
258 		       ATC_DPIP_BOUNDARY(first->boundary));
259 	/* Don't allow CPU to reorder channel enable. */
260 	wmb();
261 	dma_writel(atdma, CHER, atchan->mask);
262 
263 	vdbg_dump_regs(atchan);
264 }
265 
266 /*
267  * atc_get_desc_by_cookie - get the descriptor of a cookie
268  * @atchan: the DMA channel
269  * @cookie: the cookie to get the descriptor for
270  */
271 static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
272 						dma_cookie_t cookie)
273 {
274 	struct at_desc *desc, *_desc;
275 
276 	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
277 		if (desc->txd.cookie == cookie)
278 			return desc;
279 	}
280 
281 	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
282 		if (desc->txd.cookie == cookie)
283 			return desc;
284 	}
285 
286 	return NULL;
287 }
288 
289 /**
290  * atc_calc_bytes_left - calculates the number of bytes left according to the
291  * value read from CTRLA.
292  *
293  * @current_len: the number of bytes left before reading CTRLA
294  * @ctrla: the value of CTRLA
295  */
296 static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
297 {
298 	u32 btsize = (ctrla & ATC_BTSIZE_MAX);
299 	u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
300 
301 	/*
302 	 * According to the datasheet, when reading the Control A Register
303 	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
304 	 * number of transfers completed on the Source Interface.
305 	 * So btsize is always a number of source width transfers.
306 	 */
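	/*
	 * Example: with current_len = 512, btsize = 16 and src_width = 2
	 * (32-bit transfers), 16 << 2 = 64 bytes have already been read,
	 * leaving 448 bytes.
	 */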
307 	return current_len - (btsize << src_width);
308 }
309 
310 /**
311  * atc_get_bytes_left - get the number of bytes residue for a cookie
312  * @chan: DMA channel
313  * @cookie: transaction identifier to check status of
314  */
315 static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
316 {
317 	struct at_dma_chan      *atchan = to_at_dma_chan(chan);
318 	struct at_desc *desc_first = atc_first_active(atchan);
319 	struct at_desc *desc;
320 	int ret;
321 	u32 ctrla, dscr;
322 	unsigned int i;
323 
324 	/*
325 	 * If the cookie doesn't match the currently running transfer then
326 	 * we can return the total length of the associated DMA transfer,
327 	 * because it is still queued.
328 	 */
329 	desc = atc_get_desc_by_cookie(atchan, cookie);
330 	if (desc == NULL)
331 		return -EINVAL;
332 	else if (desc != desc_first)
333 		return desc->total_len;
334 
335 	/* cookie matches to the currently running transfer */
336 	ret = desc_first->total_len;
337 
338 	if (desc_first->lli.dscr) {
339 		/* hardware linked list transfer */
340 
341 		/*
342 		 * Calculate the residue by removing the length of the child
343 		 * descriptors already transferred from the total length.
344 		 * To get the current child descriptor we can use the value of
345 		 * the channel's DSCR register and compare it against the value
346 		 * of the hardware linked list structure of each child
347 		 * descriptor.
348 		 *
349 		 * The CTRLA register provides us with the amount of data
350 		 * already read from the source for the current child
351 		 * descriptor. So we can compute a more accurate residue by also
352 		 * removing the number of bytes corresponding to this amount of
353 		 * data.
354 		 *
355 		 * However, the DSCR and CTRLA registers cannot both be read
356 		 * atomically. Hence a race condition may occur: the first
357 		 * register read may refer to one child descriptor whereas the
358 		 * second read may refer to a later child descriptor in the list,
359 		 * because the DMA transfer progresses in between the two
360 		 * reads.
361 		 *
362 		 * One solution could have been to pause the DMA transfer, read
363 		 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
364 		 * this approach presents some drawbacks:
365 		 * - If the DMA transfer is paused, RX overruns or TX underruns
366 		 *   are more likely to occur depending on the system latency.
367 		 *   Taking the USART driver as an example, it uses a cyclic DMA
368 		 *   transfer to read data from the Receive Holding Register
369 		 *   (RHR) to avoid RX overruns since the RHR is not protected
370 		 *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer
371 		 *   to compute the residue would break the USART driver design.
372 		 * - The atc_pause() function masks interrupts, but we'd rather
373 		 *   avoid doing so for system latency reasons.
374 		 *
375 		 * Instead, we use another solution: the DSCR is read a first
376 		 * time, the CTRLA is read in turn, then the DSCR is read a
377 		 * second time. If the two consecutive DSCR values are the same,
378 		 * we assume that both refer to the very same child descriptor,
379 		 * and that the CTRLA value read in between does as well. For
380 		 * cyclic transfers, the assumption is that a full loop is
381 		 * "not so fast".
382 		 * If the two DSCR values differ, we read the CTRLA again, then
383 		 * the DSCR, until two consecutive DSCR reads are equal or until
384 		 * the maximum number of trials is reached.
385 		 * This algorithm is very unlikely to fail to find a stable
386 		 * value for DSCR.
387 		 */
388 
389 		dscr = channel_readl(atchan, DSCR);
390 		rmb(); /* ensure DSCR is read before CTRLA */
391 		ctrla = channel_readl(atchan, CTRLA);
392 		for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
393 			u32 new_dscr;
394 
395 			rmb(); /* ensure DSCR is read after CTRLA */
396 			new_dscr = channel_readl(atchan, DSCR);
397 
398 			/*
399 			 * If the DSCR register value has not changed inside the
400 			 * DMA controller since the previous read, we assume
401 			 * that both the dscr and ctrla values refer to the
402 			 * very same descriptor.
403 			 */
404 			if (likely(new_dscr == dscr))
405 				break;
406 
407 			/*
408 			 * DSCR has changed inside the DMA controller, so the
409 			 * previously read value of CTRLA may refer to an already
410 			 * processed descriptor and hence could be outdated.
411 			 * We need to update ctrla to match the current
412 			 * descriptor.
413 			 */
414 			dscr = new_dscr;
415 			rmb(); /* ensure DSCR is read before CTRLA */
416 			ctrla = channel_readl(atchan, CTRLA);
417 		}
418 		if (unlikely(i == ATC_MAX_DSCR_TRIALS))
419 			return -ETIMEDOUT;
420 
421 		/* for the first descriptor we can be more accurate */
422 		if (desc_first->lli.dscr == dscr)
423 			return atc_calc_bytes_left(ret, ctrla);
424 
425 		ret -= desc_first->len;
426 		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
427 			if (desc->lli.dscr == dscr)
428 				break;
429 
430 			ret -= desc->len;
431 		}
432 
433 		/*
434 		 * For the current descriptor in the chain we can calculate
435 		 * the remaining bytes using the channel's register.
436 		 */
437 		ret = atc_calc_bytes_left(ret, ctrla);
438 	} else {
439 		/* single transfer */
440 		ctrla = channel_readl(atchan, CTRLA);
441 		ret = atc_calc_bytes_left(ret, ctrla);
442 	}
443 
444 	return ret;
445 }
446 
447 /**
448  * atc_chain_complete - finish work for one transaction chain
449  * @atchan: channel we work on
450  * @desc: descriptor, at the head of the chain, we want to complete
451  */
452 static void
453 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
454 {
455 	struct dma_async_tx_descriptor	*txd = &desc->txd;
456 	struct at_dma			*atdma = to_at_dma(atchan->chan_common.device);
457 	unsigned long flags;
458 
459 	dev_vdbg(chan2dev(&atchan->chan_common),
460 		"descriptor %u complete\n", txd->cookie);
461 
462 	spin_lock_irqsave(&atchan->lock, flags);
463 
464 	/* mark the descriptor as complete for non-cyclic cases only */
465 	if (!atc_chan_is_cyclic(atchan))
466 		dma_cookie_complete(txd);
467 
468 	spin_unlock_irqrestore(&atchan->lock, flags);
469 
470 	dma_descriptor_unmap(txd);
471 	/* for cyclic transfers, there is no need to replay
472 	 * the callback function while stopping */
473 	if (!atc_chan_is_cyclic(atchan))
474 		dmaengine_desc_get_callback_invoke(txd, NULL);
475 
476 	dma_run_dependencies(txd);
477 
478 	spin_lock_irqsave(&atchan->lock, flags);
479 	/* move children to free_list */
480 	list_splice_init(&desc->tx_list, &atchan->free_list);
481 	/* add myself to free_list */
482 	list_add(&desc->desc_node, &atchan->free_list);
483 	spin_unlock_irqrestore(&atchan->lock, flags);
484 
485 	/* If the transfer was a memset, free our temporary buffer */
486 	if (desc->memset_buffer) {
487 		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
488 			      desc->memset_paddr);
489 		desc->memset_buffer = false;
490 	}
491 }
492 
493 /**
494  * atc_advance_work - at the end of a transaction, move forward
495  * @atchan: channel where the transaction ended
496  */
497 static void atc_advance_work(struct at_dma_chan *atchan)
498 {
499 	struct at_desc *desc;
500 	unsigned long flags;
501 
502 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
503 
504 	spin_lock_irqsave(&atchan->lock, flags);
505 	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
506 		return spin_unlock_irqrestore(&atchan->lock, flags);
507 
508 	desc = atc_first_active(atchan);
509 	/* Remove the transfer node from the active list. */
510 	list_del_init(&desc->desc_node);
511 	spin_unlock_irqrestore(&atchan->lock, flags);
512 	atc_chain_complete(atchan, desc);
513 
514 	/* advance work */
515 	spin_lock_irqsave(&atchan->lock, flags);
516 	if (!list_empty(&atchan->active_list)) {
517 		desc = atc_first_queued(atchan);
518 		list_move_tail(&desc->desc_node, &atchan->active_list);
519 		atc_dostart(atchan, desc);
520 	}
521 	spin_unlock_irqrestore(&atchan->lock, flags);
522 }
523 
524 
525 /**
526  * atc_handle_error - handle errors reported by DMA controller
527  * @atchan: channel where error occurs
528  */
529 static void atc_handle_error(struct at_dma_chan *atchan)
530 {
531 	struct at_desc *bad_desc;
532 	struct at_desc *desc;
533 	struct at_desc *child;
534 	unsigned long flags;
535 
536 	spin_lock_irqsave(&atchan->lock, flags);
537 	/*
538 	 * The descriptor currently at the head of the active list is
539 	 * broken. Since we don't have any way to report errors, we'll
540 	 * just have to scream loudly and try to carry on.
541 	 */
542 	bad_desc = atc_first_active(atchan);
543 	list_del_init(&bad_desc->desc_node);
544 
545 	/* Try to restart the controller */
546 	if (!list_empty(&atchan->active_list)) {
547 		desc = atc_first_queued(atchan);
548 		list_move_tail(&desc->desc_node, &atchan->active_list);
549 		atc_dostart(atchan, desc);
550 	}
551 
552 	/*
553 	 * KERN_CRIT may seem harsh, but since this only happens
554 	 * when someone submits a bad physical address in a
555 	 * descriptor, we should consider ourselves lucky that the
556 	 * controller flagged an error instead of scribbling over
557 	 * random memory locations.
558 	 */
559 	dev_crit(chan2dev(&atchan->chan_common),
560 			"Bad descriptor submitted for DMA!\n");
561 	dev_crit(chan2dev(&atchan->chan_common),
562 			"  cookie: %d\n", bad_desc->txd.cookie);
563 	atc_dump_lli(atchan, &bad_desc->lli);
564 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
565 		atc_dump_lli(atchan, &child->lli);
566 
567 	spin_unlock_irqrestore(&atchan->lock, flags);
568 
569 	/* Pretend the descriptor completed successfully */
570 	atc_chain_complete(atchan, bad_desc);
571 }
572 
573 /**
574  * atc_handle_cyclic - at the end of a period, run callback function
575  * @atchan: channel used for cyclic operations
576  */
577 static void atc_handle_cyclic(struct at_dma_chan *atchan)
578 {
579 	struct at_desc			*first = atc_first_active(atchan);
580 	struct dma_async_tx_descriptor	*txd = &first->txd;
581 
582 	dev_vdbg(chan2dev(&atchan->chan_common),
583 			"new cyclic period llp 0x%08x\n",
584 			channel_readl(atchan, DSCR));
585 
586 	dmaengine_desc_get_callback_invoke(txd, NULL);
587 }
588 
589 /*--  IRQ & Tasklet  ---------------------------------------------------*/
590 
591 static void atc_tasklet(struct tasklet_struct *t)
592 {
593 	struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet);
594 
595 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
596 		return atc_handle_error(atchan);
597 
598 	if (atc_chan_is_cyclic(atchan))
599 		return atc_handle_cyclic(atchan);
600 
601 	atc_advance_work(atchan);
602 }
603 
604 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
605 {
606 	struct at_dma		*atdma = (struct at_dma *)dev_id;
607 	struct at_dma_chan	*atchan;
608 	int			i;
609 	u32			status, pending, imr;
610 	int			ret = IRQ_NONE;
611 
612 	do {
613 		imr = dma_readl(atdma, EBCIMR);
614 		status = dma_readl(atdma, EBCISR);
615 		pending = status & imr;
616 
617 		if (!pending)
618 			break;
619 
620 		dev_vdbg(atdma->dma_common.dev,
621 			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
622 			 status, imr, pending);
623 
624 		for (i = 0; i < atdma->dma_common.chancnt; i++) {
625 			atchan = &atdma->chan[i];
626 			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
627 				if (pending & AT_DMA_ERR(i)) {
628 					/* Disable channel on AHB error */
629 					dma_writel(atdma, CHDR,
630 						AT_DMA_RES(i) | atchan->mask);
631 					/* Give information to tasklet */
632 					set_bit(ATC_IS_ERROR, &atchan->status);
633 				}
634 				tasklet_schedule(&atchan->tasklet);
635 				ret = IRQ_HANDLED;
636 			}
637 		}
638 
639 	} while (pending);
640 
641 	return ret;
642 }
643 
644 
645 /*--  DMA Engine API  --------------------------------------------------*/
646 
647 /**
648  * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
649  * @tx: descriptor at the head of the transaction chain
650  *
651  * Queue chain if DMA engine is working already
652  *
653  * Cookie increment and adding to active_list or queue must be atomic
654  */
655 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
656 {
657 	struct at_desc		*desc = txd_to_at_desc(tx);
658 	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
659 	dma_cookie_t		cookie;
660 	unsigned long		flags;
661 
662 	spin_lock_irqsave(&atchan->lock, flags);
663 	cookie = dma_cookie_assign(tx);
664 
665 	list_add_tail(&desc->desc_node, &atchan->queue);
666 	spin_unlock_irqrestore(&atchan->lock, flags);
667 
668 	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
669 		 desc->txd.cookie);
670 	return cookie;
671 }
672 
673 /**
674  * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
675  * @chan: the channel to prepare operation on
676  * @xt: Interleaved transfer template
677  * @flags: tx descriptor status flags
678  */
679 static struct dma_async_tx_descriptor *
680 atc_prep_dma_interleaved(struct dma_chan *chan,
681 			 struct dma_interleaved_template *xt,
682 			 unsigned long flags)
683 {
684 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
685 	struct data_chunk	*first;
686 	struct at_desc		*desc = NULL;
687 	size_t			xfer_count;
688 	unsigned int		dwidth;
689 	u32			ctrla;
690 	u32			ctrlb;
691 	size_t			len = 0;
692 	int			i;
693 
694 	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
695 		return NULL;
696 
697 	first = xt->sgl;
698 
699 	dev_info(chan2dev(chan),
700 		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
701 		__func__, &xt->src_start, &xt->dst_start, xt->numf,
702 		xt->frame_size, flags);
703 
704 	/*
705 	 * The controller can only "skip" X bytes every Y bytes, so we
706 	 * need to make sure we are given a template that fits that
707 	 * description, i.e. a template with chunks that always have the
708 	 * same size, with the same ICGs.
709 	 */
710 	for (i = 0; i < xt->frame_size; i++) {
711 		struct data_chunk *chunk = xt->sgl + i;
712 
713 		if ((chunk->size != xt->sgl->size) ||
714 		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
715 		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
716 			dev_err(chan2dev(chan),
717 				"%s: the controller can transfer only identical chunks\n",
718 				__func__);
719 			return NULL;
720 		}
721 
722 		len += chunk->size;
723 	}
724 
725 	dwidth = atc_get_xfer_width(xt->src_start,
726 				    xt->dst_start, len);
727 
728 	xfer_count = len >> dwidth;
729 	if (xfer_count > ATC_BTSIZE_MAX) {
730 		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
731 		return NULL;
732 	}
733 
734 	ctrla = ATC_SRC_WIDTH(dwidth) |
735 		ATC_DST_WIDTH(dwidth);
736 
737 	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
738 		| ATC_SRC_ADDR_MODE_INCR
739 		| ATC_DST_ADDR_MODE_INCR
740 		| ATC_SRC_PIP
741 		| ATC_DST_PIP
742 		| ATC_FC_MEM2MEM;
743 
744 	/* create the transfer */
745 	desc = atc_desc_get(atchan);
746 	if (!desc) {
747 		dev_err(chan2dev(chan),
748 			"%s: couldn't allocate our descriptor\n", __func__);
749 		return NULL;
750 	}
751 
752 	desc->lli.saddr = xt->src_start;
753 	desc->lli.daddr = xt->dst_start;
754 	desc->lli.ctrla = ctrla | xfer_count;
755 	desc->lli.ctrlb = ctrlb;
756 
757 	desc->boundary = first->size >> dwidth;
758 	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
759 	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
760 
761 	desc->txd.cookie = -EBUSY;
762 	desc->total_len = desc->len = len;
763 
764 	/* set end-of-link to the last link descriptor of the list */
765 	set_desc_eol(desc);
766 
767 	desc->txd.flags = flags; /* client is in control of this ack */
768 
769 	return &desc->txd;
770 }
771 
772 /**
773  * atc_prep_dma_memcpy - prepare a memcpy operation
774  * @chan: the channel to prepare operation on
775  * @dest: operation virtual destination address
776  * @src: operation virtual source address
777  * @len: operation length
778  * @flags: tx descriptor status flags
779  */
780 static struct dma_async_tx_descriptor *
781 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
782 		size_t len, unsigned long flags)
783 {
784 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
785 	struct at_desc		*desc = NULL;
786 	struct at_desc		*first = NULL;
787 	struct at_desc		*prev = NULL;
788 	size_t			xfer_count;
789 	size_t			offset;
790 	unsigned int		src_width;
791 	unsigned int		dst_width;
792 	u32			ctrla;
793 	u32			ctrlb;
794 
795 	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
796 			&dest, &src, len, flags);
797 
798 	if (unlikely(!len)) {
799 		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
800 		return NULL;
801 	}
802 
803 	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
804 		| ATC_SRC_ADDR_MODE_INCR
805 		| ATC_DST_ADDR_MODE_INCR
806 		| ATC_FC_MEM2MEM;
807 
808 	/*
809 	 * We can be a lot more clever here, but this should take care
810 	 * of the most common optimization.
811 	 */
812 	src_width = dst_width = atc_get_xfer_width(src, dest, len);
813 
814 	ctrla = ATC_SRC_WIDTH(src_width) |
815 		ATC_DST_WIDTH(dst_width);
816 
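	/*
	 * Split the copy into as many link descriptors as needed: each one
	 * moves at most ATC_BTSIZE_MAX transfer units of 1 << src_width bytes,
	 * so e.g. a large word-aligned copy is cut into chunks of at most
	 * ATC_BTSIZE_MAX * 4 bytes.
	 */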
817 	for (offset = 0; offset < len; offset += xfer_count << src_width) {
818 		xfer_count = min_t(size_t, (len - offset) >> src_width,
819 				ATC_BTSIZE_MAX);
820 
821 		desc = atc_desc_get(atchan);
822 		if (!desc)
823 			goto err_desc_get;
824 
825 		desc->lli.saddr = src + offset;
826 		desc->lli.daddr = dest + offset;
827 		desc->lli.ctrla = ctrla | xfer_count;
828 		desc->lli.ctrlb = ctrlb;
829 
830 		desc->txd.cookie = 0;
831 		desc->len = xfer_count << src_width;
832 
833 		atc_desc_chain(&first, &prev, desc);
834 	}
835 
836 	/* First descriptor of the chain embeds additional information */
837 	first->txd.cookie = -EBUSY;
838 	first->total_len = len;
839 
840 	/* set end-of-link to the last link descriptor of the list */
841 	set_desc_eol(desc);
842 
843 	first->txd.flags = flags; /* client is in control of this ack */
844 
845 	return &first->txd;
846 
847 err_desc_get:
848 	atc_desc_put(atchan, first);
849 	return NULL;
850 }
851 
852 static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
853 					      dma_addr_t psrc,
854 					      dma_addr_t pdst,
855 					      size_t len)
856 {
857 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
858 	struct at_desc *desc;
859 	size_t xfer_count;
860 
861 	u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
862 	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
863 		ATC_SRC_ADDR_MODE_FIXED |
864 		ATC_DST_ADDR_MODE_INCR |
865 		ATC_FC_MEM2MEM;
866 
867 	xfer_count = len >> 2;
868 	if (xfer_count > ATC_BTSIZE_MAX) {
869 		dev_err(chan2dev(chan), "%s: buffer is too big\n",
870 			__func__);
871 		return NULL;
872 	}
873 
874 	desc = atc_desc_get(atchan);
875 	if (!desc) {
876 		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
877 			__func__);
878 		return NULL;
879 	}
880 
881 	desc->lli.saddr = psrc;
882 	desc->lli.daddr = pdst;
883 	desc->lli.ctrla = ctrla | xfer_count;
884 	desc->lli.ctrlb = ctrlb;
885 
886 	desc->txd.cookie = 0;
887 	desc->len = len;
888 
889 	return desc;
890 }
891 
892 /**
893  * atc_prep_dma_memset - prepare a memset operation
894  * @chan: the channel to prepare operation on
895  * @dest: operation virtual destination address
896  * @value: value to set memory buffer to
897  * @len: operation length
898  * @flags: tx descriptor status flags
899  */
900 static struct dma_async_tx_descriptor *
901 atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
902 		    size_t len, unsigned long flags)
903 {
904 	struct at_dma		*atdma = to_at_dma(chan->device);
905 	struct at_desc		*desc;
906 	void __iomem		*vaddr;
907 	dma_addr_t		paddr;
908 	char			fill_pattern;
909 
910 	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
911 		&dest, value, len, flags);
912 
913 	if (unlikely(!len)) {
914 		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
915 		return NULL;
916 	}
917 
918 	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
919 		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
920 			__func__);
921 		return NULL;
922 	}
923 
924 	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
925 	if (!vaddr) {
926 		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
927 			__func__);
928 		return NULL;
929 	}
930 
931 	/* Only the first byte of value is to be used according to dmaengine */
932 	fill_pattern = (char)value;
933 
934 	*(u32*)vaddr = (fill_pattern << 24) |
935 		       (fill_pattern << 16) |
936 		       (fill_pattern << 8) |
937 		       fill_pattern;
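	/*
	 * Example: value = 0x12345678 keeps only 0x78 and yields the 32-bit
	 * source word 0x78787878.
	 */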
938 
939 	desc = atc_create_memset_desc(chan, paddr, dest, len);
940 	if (!desc) {
941 		dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
942 			__func__);
943 		goto err_free_buffer;
944 	}
945 
946 	desc->memset_paddr = paddr;
947 	desc->memset_vaddr = vaddr;
948 	desc->memset_buffer = true;
949 
950 	desc->txd.cookie = -EBUSY;
951 	desc->total_len = len;
952 
953 	/* set end-of-link on the descriptor */
954 	set_desc_eol(desc);
955 
956 	desc->txd.flags = flags;
957 
958 	return &desc->txd;
959 
960 err_free_buffer:
961 	dma_pool_free(atdma->memset_pool, vaddr, paddr);
962 	return NULL;
963 }
964 
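/**
 * atc_prep_dma_memset_sg - prepare a memset operation over a scatterlist
 * @chan: the channel to prepare operation on
 * @sgl: scatterlist describing the destination buffers
 * @sg_len: number of entries in @sgl
 * @value: value to set the memory buffers to
 * @flags: tx descriptor status flags
 */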
965 static struct dma_async_tx_descriptor *
966 atc_prep_dma_memset_sg(struct dma_chan *chan,
967 		       struct scatterlist *sgl,
968 		       unsigned int sg_len, int value,
969 		       unsigned long flags)
970 {
971 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
972 	struct at_dma		*atdma = to_at_dma(chan->device);
973 	struct at_desc		*desc = NULL, *first = NULL, *prev = NULL;
974 	struct scatterlist	*sg;
975 	void __iomem		*vaddr;
976 	dma_addr_t		paddr;
977 	size_t			total_len = 0;
978 	int			i;
979 
980 	dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
981 		 value, sg_len, flags);
982 
983 	if (unlikely(!sgl || !sg_len)) {
984 		dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
985 			__func__);
986 		return NULL;
987 	}
988 
989 	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
990 	if (!vaddr) {
991 		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
992 			__func__);
993 		return NULL;
994 	}
995 	*(u32*)vaddr = value;
996 
997 	for_each_sg(sgl, sg, sg_len, i) {
998 		dma_addr_t dest = sg_dma_address(sg);
999 		size_t len = sg_dma_len(sg);
1000 
1001 		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1002 			 __func__, &dest, len);
1003 
1004 		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1005 			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
1006 				__func__);
1007 			goto err_put_desc;
1008 		}
1009 
1010 		desc = atc_create_memset_desc(chan, paddr, dest, len);
1011 		if (!desc)
1012 			goto err_put_desc;
1013 
1014 		atc_desc_chain(&first, &prev, desc);
1015 
1016 		total_len += len;
1017 	}
1018 
1019 	/*
1020 	 * Only set the buffer pointers on the last descriptor to
1021 	 * avoid freeing it while the transfer is still going
1022 	 */
1023 	desc->memset_paddr = paddr;
1024 	desc->memset_vaddr = vaddr;
1025 	desc->memset_buffer = true;
1026 
1027 	first->txd.cookie = -EBUSY;
1028 	first->total_len = total_len;
1029 
1030 	/* set end-of-link on the descriptor */
1031 	set_desc_eol(desc);
1032 
1033 	first->txd.flags = flags;
1034 
1035 	return &first->txd;
1036 
1037 err_put_desc:
1038 	atc_desc_put(atchan, first);
1039 	return NULL;
1040 }
1041 
1042 /**
1043  * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1044  * @chan: DMA channel
1045  * @sgl: scatterlist to transfer to/from
1046  * @sg_len: number of entries in @sgl
1047  * @direction: DMA direction
1048  * @flags: tx descriptor status flags
1049  * @context: transaction context (ignored)
1050  */
1051 static struct dma_async_tx_descriptor *
1052 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1053 		unsigned int sg_len, enum dma_transfer_direction direction,
1054 		unsigned long flags, void *context)
1055 {
1056 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1057 	struct at_dma_slave	*atslave = chan->private;
1058 	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
1059 	struct at_desc		*first = NULL;
1060 	struct at_desc		*prev = NULL;
1061 	u32			ctrla;
1062 	u32			ctrlb;
1063 	dma_addr_t		reg;
1064 	unsigned int		reg_width;
1065 	unsigned int		mem_width;
1066 	unsigned int		i;
1067 	struct scatterlist	*sg;
1068 	size_t			total_len = 0;
1069 
1070 	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
1071 			sg_len,
1072 			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1073 			flags);
1074 
1075 	if (unlikely(!atslave || !sg_len)) {
1076 		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
1077 		return NULL;
1078 	}
1079 
1080 	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
1081 		| ATC_DCSIZE(sconfig->dst_maxburst);
1082 	ctrlb = ATC_IEN;
1083 
1084 	switch (direction) {
1085 	case DMA_MEM_TO_DEV:
1086 		reg_width = convert_buswidth(sconfig->dst_addr_width);
1087 		ctrla |=  ATC_DST_WIDTH(reg_width);
1088 		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
1089 			| ATC_SRC_ADDR_MODE_INCR
1090 			| ATC_FC_MEM2PER
1091 			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
1092 		reg = sconfig->dst_addr;
1093 		for_each_sg(sgl, sg, sg_len, i) {
1094 			struct at_desc	*desc;
1095 			u32		len;
1096 			u32		mem;
1097 
1098 			desc = atc_desc_get(atchan);
1099 			if (!desc)
1100 				goto err_desc_get;
1101 
1102 			mem = sg_dma_address(sg);
1103 			len = sg_dma_len(sg);
1104 			if (unlikely(!len)) {
1105 				dev_dbg(chan2dev(chan),
1106 					"prep_slave_sg: sg(%d) data length is zero\n", i);
1107 				goto err;
1108 			}
1109 			mem_width = 2;
1110 			if (unlikely(mem & 3 || len & 3))
1111 				mem_width = 0;
1112 
1113 			desc->lli.saddr = mem;
1114 			desc->lli.daddr = reg;
1115 			desc->lli.ctrla = ctrla
1116 					| ATC_SRC_WIDTH(mem_width)
1117 					| len >> mem_width;
1118 			desc->lli.ctrlb = ctrlb;
1119 			desc->len = len;
1120 
1121 			atc_desc_chain(&first, &prev, desc);
1122 			total_len += len;
1123 		}
1124 		break;
1125 	case DMA_DEV_TO_MEM:
1126 		reg_width = convert_buswidth(sconfig->src_addr_width);
1127 		ctrla |=  ATC_SRC_WIDTH(reg_width);
1128 		ctrlb |=  ATC_DST_ADDR_MODE_INCR
1129 			| ATC_SRC_ADDR_MODE_FIXED
1130 			| ATC_FC_PER2MEM
1131 			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
1132 
1133 		reg = sconfig->src_addr;
1134 		for_each_sg(sgl, sg, sg_len, i) {
1135 			struct at_desc	*desc;
1136 			u32		len;
1137 			u32		mem;
1138 
1139 			desc = atc_desc_get(atchan);
1140 			if (!desc)
1141 				goto err_desc_get;
1142 
1143 			mem = sg_dma_address(sg);
1144 			len = sg_dma_len(sg);
1145 			if (unlikely(!len)) {
1146 				dev_dbg(chan2dev(chan),
1147 					"prep_slave_sg: sg(%d) data length is zero\n", i);
1148 				goto err;
1149 			}
1150 			mem_width = 2;
1151 			if (unlikely(mem & 3 || len & 3))
1152 				mem_width = 0;
1153 
1154 			desc->lli.saddr = reg;
1155 			desc->lli.daddr = mem;
1156 			desc->lli.ctrla = ctrla
1157 					| ATC_DST_WIDTH(mem_width)
1158 					| len >> reg_width;
1159 			desc->lli.ctrlb = ctrlb;
1160 			desc->len = len;
1161 
1162 			atc_desc_chain(&first, &prev, desc);
1163 			total_len += len;
1164 		}
1165 		break;
1166 	default:
1167 		return NULL;
1168 	}
1169 
1170 	/* set end-of-link to the last link descriptor of the list */
1171 	set_desc_eol(prev);
1172 
1173 	/* First descriptor of the chain embeds additional information */
1174 	first->txd.cookie = -EBUSY;
1175 	first->total_len = total_len;
1176 
1177 	/* the first link descriptor of the list is responsible for the flags */
1178 	first->txd.flags = flags; /* client is in control of this ack */
1179 
1180 	return &first->txd;
1181 
1182 err_desc_get:
1183 	dev_err(chan2dev(chan), "not enough descriptors available\n");
1184 err:
1185 	atc_desc_put(atchan, first);
1186 	return NULL;
1187 }
1188 
1189 /*
1190  * atc_dma_cyclic_check_values
1191  * Check for too big/unaligned periods and unaligned DMA buffer
1192  */
1193 static int
1194 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
1195 		size_t period_len)
1196 {
1197 	if (period_len > (ATC_BTSIZE_MAX << reg_width))
1198 		goto err_out;
1199 	if (unlikely(period_len & ((1 << reg_width) - 1)))
1200 		goto err_out;
1201 	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1202 		goto err_out;
1203 
1204 	return 0;
1205 
1206 err_out:
1207 	return -EINVAL;
1208 }
1209 
1210 /*
1211  * atc_dma_cyclic_fill_desc - Fill one period descriptor
1212  */
1213 static int
1214 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
1215 		unsigned int period_index, dma_addr_t buf_addr,
1216 		unsigned int reg_width, size_t period_len,
1217 		enum dma_transfer_direction direction)
1218 {
1219 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1220 	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
1221 	u32			ctrla;
1222 
1223 	/* prepare common CTRLA value */
1224 	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
1225 		| ATC_DCSIZE(sconfig->dst_maxburst)
1226 		| ATC_DST_WIDTH(reg_width)
1227 		| ATC_SRC_WIDTH(reg_width)
1228 		| period_len >> reg_width;
1229 
1230 	switch (direction) {
1231 	case DMA_MEM_TO_DEV:
1232 		desc->lli.saddr = buf_addr + (period_len * period_index);
1233 		desc->lli.daddr = sconfig->dst_addr;
1234 		desc->lli.ctrla = ctrla;
1235 		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
1236 				| ATC_SRC_ADDR_MODE_INCR
1237 				| ATC_FC_MEM2PER
1238 				| ATC_SIF(atchan->mem_if)
1239 				| ATC_DIF(atchan->per_if);
1240 		desc->len = period_len;
1241 		break;
1242 
1243 	case DMA_DEV_TO_MEM:
1244 		desc->lli.saddr = sconfig->src_addr;
1245 		desc->lli.daddr = buf_addr + (period_len * period_index);
1246 		desc->lli.ctrla = ctrla;
1247 		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
1248 				| ATC_SRC_ADDR_MODE_FIXED
1249 				| ATC_FC_PER2MEM
1250 				| ATC_SIF(atchan->per_if)
1251 				| ATC_DIF(atchan->mem_if);
1252 		desc->len = period_len;
1253 		break;
1254 
1255 	default:
1256 		return -EINVAL;
1257 	}
1258 
1259 	return 0;
1260 }
1261 
1262 /**
1263  * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
1264  * @chan: the DMA channel to prepare
1265  * @buf_addr: physical DMA address where the buffer starts
1266  * @buf_len: total number of bytes for the entire buffer
1267  * @period_len: number of bytes for each period
1268  * @direction: transfer direction, to or from device
1269  * @flags: tx descriptor status flags
1270  */
1271 static struct dma_async_tx_descriptor *
1272 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1273 		size_t period_len, enum dma_transfer_direction direction,
1274 		unsigned long flags)
1275 {
1276 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1277 	struct at_dma_slave	*atslave = chan->private;
1278 	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
1279 	struct at_desc		*first = NULL;
1280 	struct at_desc		*prev = NULL;
1281 	unsigned long		was_cyclic;
1282 	unsigned int		reg_width;
1283 	unsigned int		periods = buf_len / period_len;
1284 	unsigned int		i;
1285 
1286 	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
1287 			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1288 			&buf_addr,
1289 			periods, buf_len, period_len);
1290 
1291 	if (unlikely(!atslave || !buf_len || !period_len)) {
1292 		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
1293 		return NULL;
1294 	}
1295 
1296 	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
1297 	if (was_cyclic) {
1298 		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
1299 		return NULL;
1300 	}
1301 
1302 	if (unlikely(!is_slave_direction(direction)))
1303 		goto err_out;
1304 
1305 	if (direction == DMA_MEM_TO_DEV)
1306 		reg_width = convert_buswidth(sconfig->dst_addr_width);
1307 	else
1308 		reg_width = convert_buswidth(sconfig->src_addr_width);
1309 
1310 	/* Check for too big/unaligned periods and unaligned DMA buffer */
1311 	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
1312 		goto err_out;
1313 
1314 	/* build cyclic linked list */
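	/*
	 * Example: buf_len = 4096 with period_len = 1024 gives periods = 4,
	 * i.e. four link descriptors chained into a ring below.
	 */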
1315 	for (i = 0; i < periods; i++) {
1316 		struct at_desc	*desc;
1317 
1318 		desc = atc_desc_get(atchan);
1319 		if (!desc)
1320 			goto err_desc_get;
1321 
1322 		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
1323 					     reg_width, period_len, direction))
1324 			goto err_desc_get;
1325 
1326 		atc_desc_chain(&first, &prev, desc);
1327 	}
1328 
1329 	/* let's make the list cyclic */
1330 	prev->lli.dscr = first->txd.phys;
1331 
1332 	/* First descriptor of the chain embeds additional information */
1333 	first->txd.cookie = -EBUSY;
1334 	first->total_len = buf_len;
1335 
1336 	return &first->txd;
1337 
1338 err_desc_get:
1339 	dev_err(chan2dev(chan), "not enough descriptors available\n");
1340 	atc_desc_put(atchan, first);
1341 err_out:
1342 	clear_bit(ATC_IS_CYCLIC, &atchan->status);
1343 	return NULL;
1344 }
1345 
1346 static int atc_config(struct dma_chan *chan,
1347 		      struct dma_slave_config *sconfig)
1348 {
1349 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1350 
1351 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1352 
1353 	/* Check if chan is configured for slave transfers */
1354 	if (!chan->private)
1355 		return -EINVAL;
1356 
1357 	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1358 
1359 	convert_burst(&atchan->dma_sconfig.src_maxburst);
1360 	convert_burst(&atchan->dma_sconfig.dst_maxburst);
1361 
1362 	return 0;
1363 }
1364 
1365 static int atc_pause(struct dma_chan *chan)
1366 {
1367 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1368 	struct at_dma		*atdma = to_at_dma(chan->device);
1369 	int			chan_id = atchan->chan_common.chan_id;
1370 	unsigned long		flags;
1371 
1372 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1373 
1374 	spin_lock_irqsave(&atchan->lock, flags);
1375 
1376 	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1377 	set_bit(ATC_IS_PAUSED, &atchan->status);
1378 
1379 	spin_unlock_irqrestore(&atchan->lock, flags);
1380 
1381 	return 0;
1382 }
1383 
1384 static int atc_resume(struct dma_chan *chan)
1385 {
1386 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1387 	struct at_dma		*atdma = to_at_dma(chan->device);
1388 	int			chan_id = atchan->chan_common.chan_id;
1389 	unsigned long		flags;
1390 
1391 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1392 
1393 	if (!atc_chan_is_paused(atchan))
1394 		return 0;
1395 
1396 	spin_lock_irqsave(&atchan->lock, flags);
1397 
1398 	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1399 	clear_bit(ATC_IS_PAUSED, &atchan->status);
1400 
1401 	spin_unlock_irqrestore(&atchan->lock, flags);
1402 
1403 	return 0;
1404 }
1405 
1406 static int atc_terminate_all(struct dma_chan *chan)
1407 {
1408 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1409 	struct at_dma		*atdma = to_at_dma(chan->device);
1410 	int			chan_id = atchan->chan_common.chan_id;
1411 	unsigned long		flags;
1412 
1413 	dev_vdbg(chan2dev(chan), "%s\n", __func__);
1414 
1415 	/*
1416 	 * This is only called when something went wrong elsewhere, so
1417 	 * we don't really care about the data. Just disable the
1418 	 * channel. We still have to poll the channel enable bit due
1419 	 * to AHB/HSB limitations.
1420 	 */
1421 	spin_lock_irqsave(&atchan->lock, flags);
1422 
1423 	/* disabling channel: must also remove suspend state */
1424 	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1425 
1426 	/* confirm that this channel is disabled */
1427 	while (dma_readl(atdma, CHSR) & atchan->mask)
1428 		cpu_relax();
1429 
1430 	/* active_list entries will end up before queued entries */
1431 	list_splice_tail_init(&atchan->queue, &atchan->free_list);
1432 	list_splice_tail_init(&atchan->active_list, &atchan->free_list);
1433 
1434 	clear_bit(ATC_IS_PAUSED, &atchan->status);
1435 	/* if channel dedicated to cyclic operations, free it */
1436 	clear_bit(ATC_IS_CYCLIC, &atchan->status);
1437 
1438 	spin_unlock_irqrestore(&atchan->lock, flags);
1439 
1440 	return 0;
1441 }
1442 
1443 /**
1444  * atc_tx_status - poll for transaction completion
1445  * @chan: DMA channel
1446  * @cookie: transaction identifier to check status of
1447  * @txstate: if not %NULL updated with transaction state
1448  *
1449  * If @txstate is passed in, upon return it reflects the driver's
1450  * internal state and can be used with dma_async_is_complete() to check
1451  * the status of multiple cookies without re-checking hardware state.
1452  */
1453 static enum dma_status
1454 atc_tx_status(struct dma_chan *chan,
1455 		dma_cookie_t cookie,
1456 		struct dma_tx_state *txstate)
1457 {
1458 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1459 	unsigned long		flags;
1460 	enum dma_status		ret;
1461 	int bytes = 0;
1462 
1463 	ret = dma_cookie_status(chan, cookie, txstate);
1464 	if (ret == DMA_COMPLETE)
1465 		return ret;
1466 	/*
1467 	 * There's no point calculating the residue if there's
1468 	 * no txstate to store the value.
1469 	 */
1470 	if (!txstate)
1471 		return DMA_ERROR;
1472 
1473 	spin_lock_irqsave(&atchan->lock, flags);
1474 
1475 	/*  Get number of bytes left in the active transactions */
1476 	bytes = atc_get_bytes_left(chan, cookie);
1477 
1478 	spin_unlock_irqrestore(&atchan->lock, flags);
1479 
1480 	if (unlikely(bytes < 0)) {
1481 		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1482 		return DMA_ERROR;
1483 	} else {
1484 		dma_set_residue(txstate, bytes);
1485 	}
1486 
1487 	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1488 		 ret, cookie, bytes);
1489 
1490 	return ret;
1491 }
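
/*
 * Illustrative client-side sketch (not part of this driver): a dmaengine
 * consumer holding a valid chan/cookie pair would typically poll the residue
 * like this:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_debug("bytes left: %u\n", state.residue);
 */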
1492 
1493 /**
1494  * atc_issue_pending - takes the first transaction descriptor in the pending
1495  * queue and starts the transfer.
1496  * @chan: target DMA channel
1497  */
1498 static void atc_issue_pending(struct dma_chan *chan)
1499 {
1500 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
1501 	struct at_desc *desc;
1502 	unsigned long flags;
1503 
1504 	dev_vdbg(chan2dev(chan), "issue_pending\n");
1505 
1506 	spin_lock_irqsave(&atchan->lock, flags);
1507 	if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
1508 		return spin_unlock_irqrestore(&atchan->lock, flags);
1509 
1510 	desc = atc_first_queued(atchan);
1511 	list_move_tail(&desc->desc_node, &atchan->active_list);
1512 	atc_dostart(atchan, desc);
1513 	spin_unlock_irqrestore(&atchan->lock, flags);
1514 }
1515 
1516 /**
1517  * atc_alloc_chan_resources - allocate resources for DMA channel
1518  * @chan: allocate descriptor resources for this channel
1519  *
1520  * return - the number of allocated descriptors
1521  */
1522 static int atc_alloc_chan_resources(struct dma_chan *chan)
1523 {
1524 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1525 	struct at_dma		*atdma = to_at_dma(chan->device);
1526 	struct at_desc		*desc;
1527 	struct at_dma_slave	*atslave;
1528 	int			i;
1529 	u32			cfg;
1530 
1531 	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1532 
1533 	/* ASSERT:  channel is idle */
1534 	if (atc_chan_is_enabled(atchan)) {
1535 		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
1536 		return -EIO;
1537 	}
1538 
1539 	if (!list_empty(&atchan->free_list)) {
1540 		dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
1541 		return -EIO;
1542 	}
1543 
1544 	cfg = ATC_DEFAULT_CFG;
1545 
1546 	atslave = chan->private;
1547 	if (atslave) {
1548 		/*
1549 		 * We need controller-specific data to set up slave
1550 		 * transfers.
1551 		 */
1552 		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1553 
1554 		/* if a cfg configuration is specified, take it instead of the default */
1555 		if (atslave->cfg)
1556 			cfg = atslave->cfg;
1557 	}
1558 
1559 	/* Allocate initial pool of descriptors */
1560 	for (i = 0; i < init_nr_desc_per_channel; i++) {
1561 		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1562 		if (!desc) {
1563 			dev_err(atdma->dma_common.dev,
1564 				"Only %d initial descriptors\n", i);
1565 			break;
1566 		}
1567 		list_add_tail(&desc->desc_node, &atchan->free_list);
1568 	}
1569 
1570 	dma_cookie_init(chan);
1571 
1572 	/* channel parameters */
1573 	channel_writel(atchan, CFG, cfg);
1574 
1575 	dev_dbg(chan2dev(chan),
1576 		"alloc_chan_resources: allocated %d descriptors\n", i);
1577 
1578 	return i;
1579 }
1580 
1581 /**
1582  * atc_free_chan_resources - free all channel resources
1583  * @chan: DMA channel
1584  */
1585 static void atc_free_chan_resources(struct dma_chan *chan)
1586 {
1587 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1588 	struct at_dma		*atdma = to_at_dma(chan->device);
1589 	struct at_desc		*desc, *_desc;
1590 	LIST_HEAD(list);
1591 
1592 	/* ASSERT:  channel is idle */
1593 	BUG_ON(!list_empty(&atchan->active_list));
1594 	BUG_ON(!list_empty(&atchan->queue));
1595 	BUG_ON(atc_chan_is_enabled(atchan));
1596 
1597 	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1598 		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1599 		list_del(&desc->desc_node);
1600 		/* free link descriptor */
1601 		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1602 	}
1603 	list_splice_init(&atchan->free_list, &list);
1604 	atchan->status = 0;
1605 
1606 	/*
1607 	 * Free atslave allocated in at_dma_xlate()
1608 	 */
1609 	kfree(chan->private);
1610 	chan->private = NULL;
1611 
1612 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1613 }
1614 
1615 #ifdef CONFIG_OF
1616 static bool at_dma_filter(struct dma_chan *chan, void *slave)
1617 {
1618 	struct at_dma_slave *atslave = slave;
1619 
1620 	if (atslave->dma_dev == chan->device->dev) {
1621 		chan->private = atslave;
1622 		return true;
1623 	} else {
1624 		return false;
1625 	}
1626 }
1627 
1628 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1629 				     struct of_dma *of_dma)
1630 {
1631 	struct dma_chan *chan;
1632 	struct at_dma_chan *atchan;
1633 	struct at_dma_slave *atslave;
1634 	dma_cap_mask_t mask;
1635 	unsigned int per_id;
1636 	struct platform_device *dmac_pdev;
1637 
1638 	if (dma_spec->args_count != 2)
1639 		return NULL;
1640 
1641 	dmac_pdev = of_find_device_by_node(dma_spec->np);
1642 	if (!dmac_pdev)
1643 		return NULL;
1644 
1645 	dma_cap_zero(mask);
1646 	dma_cap_set(DMA_SLAVE, mask);
1647 
1648 	atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
1649 	if (!atslave) {
1650 		put_device(&dmac_pdev->dev);
1651 		return NULL;
1652 	}
1653 
1654 	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1655 	/*
1656 	 * We can fill both SRC_PER and DST_PER; one of these fields will be
1657 	 * ignored depending on the DMA transfer direction.
1658 	 */
1659 	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1660 	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1661 		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1662 	/*
1663 	 * We have to translate the value we get from the device tree since
1664 	 * the half FIFO configuration value had to be 0 to keep backward
1665 	 * compatibility.
1666 	 */
1667 	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1668 	case AT91_DMA_CFG_FIFOCFG_ALAP:
1669 		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1670 		break;
1671 	case AT91_DMA_CFG_FIFOCFG_ASAP:
1672 		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1673 		break;
1674 	case AT91_DMA_CFG_FIFOCFG_HALF:
1675 	default:
1676 		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1677 	}
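	/*
	 * Illustrative example (assuming the usual at91 binding layout, with
	 * the FIFO configuration bits above the peripheral ID byte): a second
	 * dma-cell of (AT91_DMA_CFG_FIFOCFG_ASAP | 5) selects peripheral ID 5
	 * together with the "enough space" FIFO configuration.
	 */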
1678 	atslave->dma_dev = &dmac_pdev->dev;
1679 
1680 	chan = dma_request_channel(mask, at_dma_filter, atslave);
1681 	if (!chan) {
1682 		put_device(&dmac_pdev->dev);
1683 		kfree(atslave);
1684 		return NULL;
1685 	}
1686 
1687 	atchan = to_at_dma_chan(chan);
1688 	atchan->per_if = dma_spec->args[0] & 0xff;
1689 	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
1690 
1691 	return chan;
1692 }
1693 #else
1694 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1695 				     struct of_dma *of_dma)
1696 {
1697 	return NULL;
1698 }
1699 #endif
1700 
1701 /*--  Module Management  -----------------------------------------------*/
1702 
1703 /* cap_mask is a multi-u32 bitfield, so it is filled at run time in at_dma_probe(). */
1704 static struct at_dma_platform_data at91sam9rl_config = {
1705 	.nr_channels = 2,
1706 };
1707 static struct at_dma_platform_data at91sam9g45_config = {
1708 	.nr_channels = 8,
1709 };
1710 
1711 #if defined(CONFIG_OF)
1712 static const struct of_device_id atmel_dma_dt_ids[] = {
1713 	{
1714 		.compatible = "atmel,at91sam9rl-dma",
1715 		.data = &at91sam9rl_config,
1716 	}, {
1717 		.compatible = "atmel,at91sam9g45-dma",
1718 		.data = &at91sam9g45_config,
1719 	}, {
1720 		/* sentinel */
1721 	}
1722 };
1723 
1724 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1725 #endif
1726 
1727 static const struct platform_device_id atdma_devtypes[] = {
1728 	{
1729 		.name = "at91sam9rl_dma",
1730 		.driver_data = (unsigned long) &at91sam9rl_config,
1731 	}, {
1732 		.name = "at91sam9g45_dma",
1733 		.driver_data = (unsigned long) &at91sam9g45_config,
1734 	}, {
1735 		/* sentinel */
1736 	}
1737 };
1738 
1739 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1740 						struct platform_device *pdev)
1741 {
1742 	if (pdev->dev.of_node) {
1743 		const struct of_device_id *match;
1744 		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1745 		if (match == NULL)
1746 			return NULL;
1747 		return match->data;
1748 	}
1749 	return (struct at_dma_platform_data *)
1750 			platform_get_device_id(pdev)->driver_data;
1751 }
1752 
1753 /**
1754  * at_dma_off - disable DMA controller
1755  * @atdma: the Atmel HDMAC device
1756  */
1757 static void at_dma_off(struct at_dma *atdma)
1758 {
1759 	dma_writel(atdma, EN, 0);
1760 
1761 	/* disable all interrupts */
1762 	dma_writel(atdma, EBCIDR, -1L);
1763 
1764 	/* confirm that all channels are disabled */
1765 	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1766 		cpu_relax();
1767 }
1768 
1769 static int __init at_dma_probe(struct platform_device *pdev)
1770 {
1771 	struct resource		*io;
1772 	struct at_dma		*atdma;
1773 	size_t			size;
1774 	int			irq;
1775 	int			err;
1776 	int			i;
1777 	const struct at_dma_platform_data *plat_dat;
1778 
1779 	/* setup platform data for each SoC */
1780 	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1781 	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
1782 	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1783 	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
1784 	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
1785 	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
1786 	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1787 
1788 	/* get DMA parameters from controller type */
1789 	plat_dat = at_dma_get_driver_data(pdev);
1790 	if (!plat_dat)
1791 		return -ENODEV;
1792 
1793 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1794 	if (!io)
1795 		return -EINVAL;
1796 
1797 	irq = platform_get_irq(pdev, 0);
1798 	if (irq < 0)
1799 		return irq;
1800 
1801 	size = sizeof(struct at_dma);
1802 	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1803 	atdma = kzalloc(size, GFP_KERNEL);
1804 	if (!atdma)
1805 		return -ENOMEM;
1806 
1807 	/* discover transaction capabilities */
1808 	atdma->dma_common.cap_mask = plat_dat->cap_mask;
1809 	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1810 
1811 	size = resource_size(io);
1812 	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1813 		err = -EBUSY;
1814 		goto err_kfree;
1815 	}
1816 
1817 	atdma->regs = ioremap(io->start, size);
1818 	if (!atdma->regs) {
1819 		err = -ENOMEM;
1820 		goto err_release_r;
1821 	}
1822 
1823 	atdma->clk = clk_get(&pdev->dev, "dma_clk");
1824 	if (IS_ERR(atdma->clk)) {
1825 		err = PTR_ERR(atdma->clk);
1826 		goto err_clk;
1827 	}
1828 	err = clk_prepare_enable(atdma->clk);
1829 	if (err)
1830 		goto err_clk_prepare;
1831 
1832 	/* force dma off, just in case */
1833 	at_dma_off(atdma);
1834 
1835 	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1836 	if (err)
1837 		goto err_irq;
1838 
1839 	platform_set_drvdata(pdev, atdma);
1840 
1841 	/* create a pool of consistent memory blocks for hardware descriptors */
1842 	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1843 			&pdev->dev, sizeof(struct at_desc),
1844 			4 /* word alignment */, 0);
1845 	if (!atdma->dma_desc_pool) {
1846 		dev_err(&pdev->dev, "No memory for descriptor DMA pool\n");
1847 		err = -ENOMEM;
1848 		goto err_desc_pool_create;
1849 	}
1850 
1851 	/* create a pool of consistent memory blocks for memset blocks */
1852 	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
1853 					     &pdev->dev, sizeof(int), 4, 0);
1854 	if (!atdma->memset_pool) {
1855 		dev_err(&pdev->dev, "No memory for memset dma pool\n");
1856 		err = -ENOMEM;
1857 		goto err_memset_pool_create;
1858 	}
1859 
1860 	/* clear any pending interrupt */
1861 	while (dma_readl(atdma, EBCISR))
1862 		cpu_relax();
1863 
1864 	/* initialize channel-related values */
1865 	INIT_LIST_HEAD(&atdma->dma_common.channels);
1866 	for (i = 0; i < plat_dat->nr_channels; i++) {
1867 		struct at_dma_chan	*atchan = &atdma->chan[i];
1868 
1869 		atchan->mem_if = AT_DMA_MEM_IF;
1870 		atchan->per_if = AT_DMA_PER_IF;
1871 		atchan->chan_common.device = &atdma->dma_common;
1872 		dma_cookie_init(&atchan->chan_common);
1873 		list_add_tail(&atchan->chan_common.device_node,
1874 				&atdma->dma_common.channels);
1875 
1876 		atchan->ch_regs = atdma->regs + ch_regs(i);
1877 		spin_lock_init(&atchan->lock);
1878 		atchan->mask = 1 << i;
1879 
1880 		INIT_LIST_HEAD(&atchan->active_list);
1881 		INIT_LIST_HEAD(&atchan->queue);
1882 		INIT_LIST_HEAD(&atchan->free_list);
1883 
1884 		tasklet_setup(&atchan->tasklet, atc_tasklet);
1885 		atc_enable_chan_irq(atdma, i);
1886 	}
1887 
1888 	/* set base routines */
1889 	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1890 	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1891 	atdma->dma_common.device_tx_status = atc_tx_status;
1892 	atdma->dma_common.device_issue_pending = atc_issue_pending;
1893 	atdma->dma_common.dev = &pdev->dev;
1894 
1895 	/* set prep routines based on capability */
1896 	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
1897 		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
1898 
1899 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1900 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1901 
1902 	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
1903 		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
1904 		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
1905 		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
1906 	}
1907 
1908 	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1909 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1910 		/* controller can do slave DMA: can trigger cyclic transfers */
1911 		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1912 		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1913 		atdma->dma_common.device_config = atc_config;
1914 		atdma->dma_common.device_pause = atc_pause;
1915 		atdma->dma_common.device_resume = atc_resume;
1916 		atdma->dma_common.device_terminate_all = atc_terminate_all;
1917 		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1918 		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1919 		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1920 		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1921 	}
1922 
1923 	dma_writel(atdma, EN, AT_DMA_ENABLE);
1924 
1925 	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
1926 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1927 	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
1928 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
1929 	  plat_dat->nr_channels);
1930 
1931 	err = dma_async_device_register(&atdma->dma_common);
1932 	if (err) {
1933 		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
1934 		goto err_dma_async_device_register;
1935 	}
1936 
1937 	/*
1938 	 * Do not return an error if the dmac device tree node is absent; this
1939 	 * keeps the existing way of requesting a channel with
1940 	 * dma_request_channel() working.
1941 	 */
1942 	if (pdev->dev.of_node) {
1943 		err = of_dma_controller_register(pdev->dev.of_node,
1944 						 at_dma_xlate, atdma);
1945 		if (err) {
1946 			dev_err(&pdev->dev, "could not register of_dma_controller\n");
1947 			goto err_of_dma_controller_register;
1948 		}
1949 	}
1950 
1951 	return 0;
1952 
1953 err_of_dma_controller_register:
1954 	dma_async_device_unregister(&atdma->dma_common);
1955 err_dma_async_device_register:
1956 	dma_pool_destroy(atdma->memset_pool);
1957 err_memset_pool_create:
1958 	dma_pool_destroy(atdma->dma_desc_pool);
1959 err_desc_pool_create:
1960 	free_irq(irq, atdma);
1961 err_irq:
1962 	clk_disable_unprepare(atdma->clk);
1963 err_clk_prepare:
1964 	clk_put(atdma->clk);
1965 err_clk:
1966 	iounmap(atdma->regs);
1967 	atdma->regs = NULL;
1968 err_release_r:
1969 	release_mem_region(io->start, size);
1970 err_kfree:
1971 	kfree(atdma);
1972 	return err;
1973 }
1974 
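/**
 * at_dma_remove - unbind the driver and release its resources
 * @pdev: the platform device being removed
 *
 * Disables the controller, unregisters it from dmaengine (and from the OF
 * DMA helpers when used), then frees the DMA pools, the IRQ, the clock, the
 * register mapping and the memory region.
 */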
1975 static int at_dma_remove(struct platform_device *pdev)
1976 {
1977 	struct at_dma		*atdma = platform_get_drvdata(pdev);
1978 	struct dma_chan		*chan, *_chan;
1979 	struct resource		*io;
1980 
1981 	at_dma_off(atdma);
1982 	if (pdev->dev.of_node)
1983 		of_dma_controller_free(pdev->dev.of_node);
1984 	dma_async_device_unregister(&atdma->dma_common);
1985 
1986 	dma_pool_destroy(atdma->memset_pool);
1987 	dma_pool_destroy(atdma->dma_desc_pool);
1988 	free_irq(platform_get_irq(pdev, 0), atdma);
1989 
1990 	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1991 			device_node) {
1992 		struct at_dma_chan	*atchan = to_at_dma_chan(chan);
1993 
1994 		/* Disable interrupts */
1995 		atc_disable_chan_irq(atdma, chan->chan_id);
1996 
1997 		tasklet_kill(&atchan->tasklet);
1998 		list_del(&chan->device_node);
1999 	}
2000 
2001 	clk_disable_unprepare(atdma->clk);
2002 	clk_put(atdma->clk);
2003 
2004 	iounmap(atdma->regs);
2005 	atdma->regs = NULL;
2006 
2007 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2008 	release_mem_region(io->start, resource_size(io));
2009 
2010 	kfree(atdma);
2011 
2012 	return 0;
2013 }
2014 
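/**
 * at_dma_shutdown - quiesce the controller at system shutdown
 * @pdev: the platform device being shut down
 *
 * Disables the DMA controller and its clock so no transfer is left running
 * across the reboot.
 */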
2015 static void at_dma_shutdown(struct platform_device *pdev)
2016 {
2017 	struct at_dma	*atdma = platform_get_drvdata(pdev);
2018 
2019 	at_dma_off(atdma);
2020 	clk_disable_unprepare(atdma->clk);
2021 }
2022 
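/**
 * at_dma_prepare - system PM .prepare callback
 * @dev: the device entering system suspend
 *
 * Returns -EAGAIN if a non-cyclic transfer is still in progress on any
 * channel, 0 otherwise.
 */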
2023 static int at_dma_prepare(struct device *dev)
2024 {
2025 	struct at_dma *atdma = dev_get_drvdata(dev);
2026 	struct dma_chan *chan, *_chan;
2027 
2028 	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2029 			device_node) {
2030 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2031 		/* do not suspend while a non-cyclic transfer is still in progress */
2032 		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
2033 			return -EAGAIN;
2034 	}
2035 	return 0;
2036 }
2037 
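/**
 * atc_suspend_cyclic - prepare a cyclic channel for system suspend
 * @atchan: the channel running a cyclic transfer
 *
 * Pauses the channel if its user has not already done so and saves the next
 * descriptor address so the cyclic list can be resumed later.
 */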
2038 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
2039 {
2040 	struct dma_chan	*chan = &atchan->chan_common;
2041 
2042 	/* The channel should already be paused by its user; pause it
2043 	 * here anyway if that has not been done. */
2044 	if (!atc_chan_is_paused(atchan)) {
2045 		dev_warn(chan2dev(chan),
2046 		"cyclic channel not paused, should be done by channel user\n");
2047 		atc_pause(chan);
2048 	}
2049 
2050 	/* preserve the additional data needed to resume the cyclic operation:
2051 	 * the address of the next descriptor in the cyclic list */
2052 	atchan->save_dscr = channel_readl(atchan, DSCR);
2053 
2054 	vdbg_dump_regs(atchan);
2055 }
2056 
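/**
 * at_dma_suspend_noirq - save controller state and power it down
 * @dev: the device entering system suspend
 *
 * Suspends any cyclic channel, saves each channel configuration and the
 * interrupt mask, then disables the controller and its clock.
 */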
2057 static int at_dma_suspend_noirq(struct device *dev)
2058 {
2059 	struct at_dma *atdma = dev_get_drvdata(dev);
2060 	struct dma_chan *chan, *_chan;
2061 
2062 	/* preserve data */
2063 	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2064 			device_node) {
2065 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2066 
2067 		if (atc_chan_is_cyclic(atchan))
2068 			atc_suspend_cyclic(atchan);
2069 		atchan->save_cfg = channel_readl(atchan, CFG);
2070 	}
2071 	atdma->save_imr = dma_readl(atdma, EBCIMR);
2072 
2073 	/* disable DMA controller */
2074 	at_dma_off(atdma);
2075 	clk_disable_unprepare(atdma->clk);
2076 	return 0;
2077 }
2078 
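/**
 * atc_resume_cyclic - restart a cyclic transfer after system resume
 * @atchan: the channel whose cyclic transfer was suspended
 *
 * Reprograms the channel from the descriptor saved at suspend time and
 * re-enables it; clearing the pause status is left to the channel user.
 */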
2079 static void atc_resume_cyclic(struct at_dma_chan *atchan)
2080 {
2081 	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);
2082 
2083 	/* restore the channel state for the cyclic descriptor list: restart
2084 	 * from the descriptor that was next at the time of suspend */
2085 	channel_writel(atchan, SADDR, 0);
2086 	channel_writel(atchan, DADDR, 0);
2087 	channel_writel(atchan, CTRLA, 0);
2088 	channel_writel(atchan, CTRLB, 0);
2089 	channel_writel(atchan, DSCR, atchan->save_dscr);
2090 	dma_writel(atdma, CHER, atchan->mask);
2091 
2092 	/* the pause status must be cleared by the channel user;
2093 	 * we cannot take the initiative to resume the channel here */
2094 
2095 	vdbg_dump_regs(atchan);
2096 }
2097 
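/**
 * at_dma_resume_noirq - restore controller state after system resume
 * @dev: the device leaving system suspend
 *
 * Re-enables the clock and the controller, clears pending interrupts, then
 * restores the saved interrupt mask and per-channel configuration and
 * restarts any cyclic transfers.
 */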
2098 static int at_dma_resume_noirq(struct device *dev)
2099 {
2100 	struct at_dma *atdma = dev_get_drvdata(dev);
2101 	struct dma_chan *chan, *_chan;
2102 
2103 	/* bring back DMA controller */
2104 	clk_prepare_enable(atdma->clk);
2105 	dma_writel(atdma, EN, AT_DMA_ENABLE);
2106 
2107 	/* clear any pending interrupt */
2108 	while (dma_readl(atdma, EBCISR))
2109 		cpu_relax();
2110 
2111 	/* restore saved data */
2112 	dma_writel(atdma, EBCIER, atdma->save_imr);
2113 	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2114 			device_node) {
2115 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
2116 
2117 		channel_writel(atchan, CFG, atchan->save_cfg);
2118 		if (atc_chan_is_cyclic(atchan))
2119 			atc_resume_cyclic(atchan);
2120 	}
2121 	return 0;
2122 }
2123 
2124 static const struct dev_pm_ops at_dma_dev_pm_ops = {
2125 	.prepare = at_dma_prepare,
2126 	.suspend_noirq = at_dma_suspend_noirq,
2127 	.resume_noirq = at_dma_resume_noirq,
2128 };
2129 
2130 static struct platform_driver at_dma_driver = {
2131 	.remove		= at_dma_remove,
2132 	.shutdown	= at_dma_shutdown,
2133 	.id_table	= atdma_devtypes,
2134 	.driver = {
2135 		.name	= "at_hdmac",
2136 		.pm	= &at_dma_dev_pm_ops,
2137 		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
2138 	},
2139 };
2140 
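/*
 * Register at subsys_initcall time so the DMA controller is available before
 * the device initcalls of its potential client drivers run.
 */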
2141 static int __init at_dma_init(void)
2142 {
2143 	return platform_driver_probe(&at_dma_driver, at_dma_probe);
2144 }
2145 subsys_initcall(at_dma_init);
2146 
2147 static void __exit at_dma_exit(void)
2148 {
2149 	platform_driver_unregister(&at_dma_driver);
2150 }
2151 module_exit(at_dma_exit);
2152 
2153 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
2154 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
2155 MODULE_LICENSE("GPL");
2156 MODULE_ALIAS("platform:at_hdmac");
2157