xref: /openbmc/linux/drivers/dma/at_hdmac.c (revision fd589a8f)
/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "at_hdmac_regs.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_	/ atchan	: Atmel DMA Channel entity related
 */
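
/*
 * Typical client usage, as an illustrative sketch only (error handling
 * and the channel filter callback are elided, and exact calling
 * conventions depend on the dmaengine core version): a peripheral
 * driver describes its request in a struct at_dma_slave (FIFO
 * addresses, register width, channel cfg), hangs it on chan->private
 * when requesting a DMA_SLAVE channel, and then:
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *						   DMA_TO_DEVICE, flags);
 *	cookie = desc->tx_submit(desc);
 *	chan->device->device_issue_pending(chan);
 */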
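/*
 * Default transfer parameters. ATC_SIF()/ATC_DIF() select which AHB-Lite
 * master interface the controller uses for source and destination
 * accesses; the defaults below put the source on interface 0 and the
 * destination on interface 1.
 */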
#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(0)	\
				|ATC_DIF(1))

/*
 * Initial number of descriptors to allocate for each channel. This can
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 *       to make initial allocation more convenient. This bit will be cleared
 *       and control will be given to client at usage time (during
 *       preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_bh(&atchan->lock);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_bh(&atchan->lock);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&atchan->lock);
			atchan->descs_allocated++;
			spin_unlock_bh(&atchan->lock);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;

		spin_lock_bh(&atchan->lock);
		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->txd.tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_bh(&atchan->lock);
	}
}

/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_cookie_t cookie = atchan->chan_common.cookie;

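	/* valid cookies are positive; restart at 1 on signed wrap-around */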
	if (++cookie < 0)
		cookie = 1;

	atchan->chan_common.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

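	/*
	 * In linked-list mode the controller fetches SADDR/DADDR/CTRLA/
	 * CTRLB from the LLI pointed to by DSCR, so the channel registers
	 * only need to be cleared before pointing DSCR at the first
	 * descriptor and enabling the channel.
	 */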
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	atchan->completed_cookie = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	/* move children to free_list */
	list_splice_init(&txd->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(chan2parent(&atchan->chan_common),
					desc->lli.daddr,
					desc->len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(chan2parent(&atchan->chan_common),
					desc->lli.daddr,
					desc->len, DMA_FROM_DEVICE);
	}
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(chan2parent(&atchan->chan_common),
					desc->lli.saddr,
					desc->len, DMA_TO_DEVICE);
		else
			dma_unmap_page(chan2parent(&atchan->chan_common),
					desc->lli.saddr,
					desc->len, DMA_TO_DEVICE);
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now that it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

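	/*
	 * The controller writes ATC_DONE back into each LLI's CTRLA word
	 * in memory as that buffer transfer finishes, so completion can
	 * be polled here without touching the channel registers.
	 */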
	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

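	/*
	 * With zero or one descriptor on active_list, everything
	 * outstanding has completed; otherwise retire only the finished
	 * head and restart the channel on the new head of the list.
	 */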
	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/*
	 * Since the channel is stopped, take the opportunity to push
	 * queued descriptors onto the active_list.
	 */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}


/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;

	/* Channel cannot be enabled here */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: channel enabled in tasklet\n");
		return;
	}

	spin_lock(&atchan->lock);
	if (test_and_clear_bit(0, &atchan->error_status))
		atc_handle_error(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock(&atchan->lock);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

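	/*
	 * EBCISR is clear-on-read: each pass acknowledges the events it
	 * observes, and the loop exits once no enabled event is pending.
	 */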
	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR, atchan->mask);
					/* Give information to tasklet */
					set_bit(0, &atchan->error_status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;

	spin_lock_bh(&atchan->lock);
	cookie = atc_assign_cookie(atchan, desc);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_bh(&atchan->lock);

	return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

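	/*
	 * src_width is log2 of the transfer width in bytes, so xfer_count
	 * below is expressed in transfer-width units and capped at the
	 * hardware maximum of ATC_BTSIZE_MAX transfers per descriptor.
	 */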
	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		async_tx_ack(&desc->txd);

		if (!first) {
			first = desc;
		} else {
			/* inform the HW lli about chaining */
			prev->lli.dscr = desc->txd.phys;
			/* insert the link descriptor to the LD ring */
			list_add_tail(&desc->desc_node,
					&first->txd.tx_list);
		}
		prev = desc;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of the list */
	set_desc_eol(desc);

	desc->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: missing slave data or zero length!\n");
		return NULL;
	}

	reg_width = atslave->reg_width;

	sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;

	switch (direction) {
	case DMA_TO_DEVICE:
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER;
		reg = atslave->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
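			/* use 32-bit (word) memory accesses when the buffer
			 * address and length are word-aligned, else fall
			 * back to byte accesses */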
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			if (!first) {
				first = desc;
			} else {
				/* inform the HW lli about chaining */
				prev->lli.dscr = desc->txd.phys;
				/* insert the link descriptor to the LD ring */
				list_add_tail(&desc->desc_node,
						&first->txd.tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM;

		reg = atslave->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			/* as above: word accesses if aligned, else bytes */
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			if (!first) {
				first = desc;
			} else {
				/* inform the HW lli about chaining */
				prev->lli.dscr = desc->txd.phys;
				/* insert the link descriptor to the LD ring */
				list_add_tail(&desc->desc_node,
						&first->txd.tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of the list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* last link descriptor of the list is responsible for the flags */
	prev->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}

static void atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&atchan->lock);

	dma_writel(atdma, CHDR, atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	spin_unlock_bh(&atchan->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 *
 * If @done and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_is_tx_complete(struct dma_chan *chan,
		dma_cookie_t cookie,
		dma_cookie_t *done, dma_cookie_t *used)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	enum dma_status		ret;

	dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
			cookie, done ? *done : 0, used ? *used : 0);

	spin_lock_bh(&atchan->lock);

	last_complete = atchan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		last_complete = atchan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	spin_unlock_bh(&atchan->lock);

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return ret;
}

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	if (!atc_chan_is_enabled(atchan)) {
		spin_lock_bh(&atchan->lock);
		atc_advance_work(atchan);
		spin_unlock_bh(&atchan->lock);
	}
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if a cfg is specified, use it instead of the default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&atchan->lock);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	atchan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&atchan->lock);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*--  Module Management  -----------------------------------------------*/

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct at_dma_platform_data *pdata;
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	/* get DMA Controller parameters from platform */
	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += pdata->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities from the platform data */
	atdma->dma_common.cap_mask = pdata->cap_mask;
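	/* all_chan_mask has one bit set per channel; at_dma_off() uses it
	 * to wait for every channel to be disabled */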
	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;

	size = io->end - io->start + 1;
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channel-related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		atchan->chan_common.cookie = atchan->completed_cookie = 1;
		atchan->chan_common.chan_id = i;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_irq(atchan);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller (%s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  atdma->dma_common.chancnt);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_irq(atchan);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, io->end - io->start + 1);

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
	return 0;
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);

	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);
	return 0;
}

static struct dev_pm_ops at_dma_dev_pm_ops = {
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
module_init(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");