xref: /openbmc/linux/drivers/dma/fsldma.c (revision be30b226)
1 /*
2  * Freescale MPC85xx, MPC83xx DMA Engine support
3  *
4  * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
5  *
6  * Author:
7  *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
8  *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
9  *
10  * Description:
11  *   DMA engine driver for the Freescale MPC8540 DMA controller, which
12  *   also works for the MPC8560, MPC8555, MPC8548, MPC8641, and so on.
13  *   Support for the MPC8349 DMA controller has also been added.
14  *
15  * This driver instructs the DMA controller to issue the PCI Read Multiple
16  * command for PCI read operations, instead of using the default PCI Read Line
17  * command. Please be aware that this setting may result in read pre-fetching
18  * on some platforms.
19  *
20  * This is free software; you can redistribute it and/or modify
21  * it under the terms of the GNU General Public License as published by
22  * the Free Software Foundation; either version 2 of the License, or
23  * (at your option) any later version.
24  *
25  */
26 
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <linux/interrupt.h>
31 #include <linux/dmaengine.h>
32 #include <linux/delay.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/dmapool.h>
35 #include <linux/of_platform.h>
36 
37 #include "fsldma.h"
38 
39 static void dma_init(struct fsl_dma_chan *fsl_chan)
40 {
41 	/* Reset the channel */
42 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
43 
44 	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
45 	case FSL_DMA_IP_85XX:
46 		/* Set the channel into the following modes:
47 		 * EIE - Error interrupt enable
48 		 * EOSIE - End of segments interrupt enable (basic mode)
49 		 * EOLNIE - End of links interrupt enable
50 		 */
51 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
52 				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
53 		break;
54 	case FSL_DMA_IP_83XX:
55 		/* Set the channel into the following modes:
56 		 * EOTIE - End-of-transfer interrupt enable
57 		 * PRC_RM - PCI read multiple
58 		 */
59 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
60 				| FSL_DMA_MR_PRC_RM, 32);
61 		break;
62 	}
63 
64 }
65 
66 static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
67 {
68 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
69 }
70 
71 static u32 get_sr(struct fsl_dma_chan *fsl_chan)
72 {
73 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
74 }
75 
76 static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
77 				struct fsl_dma_ld_hw *hw, u32 count)
78 {
79 	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
80 }
81 
82 static void set_desc_src(struct fsl_dma_chan *fsl_chan,
83 				struct fsl_dma_ld_hw *hw, dma_addr_t src)
84 {
85 	u64 snoop_bits;
86 
87 	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
88 		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
89 	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
90 }
91 
92 static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
93 				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
94 {
95 	u64 snoop_bits;
96 
97 	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
98 		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
99 	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
100 }
101 
102 static void set_desc_next(struct fsl_dma_chan *fsl_chan,
103 				struct fsl_dma_ld_hw *hw, dma_addr_t next)
104 {
105 	u64 snoop_bits;
106 
107 	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
108 		? FSL_DMA_SNEN : 0;
109 	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
110 }
111 
112 static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
113 {
114 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
115 }
116 
117 static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
118 {
119 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
120 }
121 
122 static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
123 {
124 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
125 }
126 
127 static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
128 {
129 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
130 }
131 
132 static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
133 {
134 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
135 }
136 
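/* The channel is idle when the channel busy (CB) status bit is clear
 * or the channel halted (CH) bit is set.
 */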
137 static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
138 {
139 	u32 sr = get_sr(fsl_chan);
140 	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
141 }
142 
143 static void dma_start(struct fsl_dma_chan *fsl_chan)
144 {
145 	u32 mr_set = 0;
146 
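	/* If the channel uses external pause control, clear the byte count
	 * register and enable external master pause (EMP_EN) in the mode
	 * register; otherwise make sure external pause is disabled.  Then
	 * either arm external master start (EMS_EN) or kick the channel
	 * directly by setting the channel start (CS) bit.
	 */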
147 	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
148 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
149 		mr_set |= FSL_DMA_MR_EMP_EN;
150 	} else
151 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
152 			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
153 				& ~FSL_DMA_MR_EMP_EN, 32);
154 
155 	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
156 		mr_set |= FSL_DMA_MR_EMS_EN;
157 	else
158 		mr_set |= FSL_DMA_MR_CS;
159 
160 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
161 			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
162 			| mr_set, 32);
163 }
164 
165 static void dma_halt(struct fsl_dma_chan *fsl_chan)
166 {
167 	int i;
168 
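	/* Set the channel abort (CA) bit, then clear the start/abort bits
	 * and poll the status register until the channel reports idle.
	 */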
169 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
170 		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
171 		32);
172 	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
173 		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
174 		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
175 
176 	for (i = 0; i < 100; i++) {
177 		if (dma_is_idle(fsl_chan))
178 			break;
179 		udelay(10);
180 	}
181 	if (i >= 100 && !dma_is_idle(fsl_chan))
182 		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
183 }
184 
185 static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
186 			struct fsl_desc_sw *desc)
187 {
188 	u64 snoop_bits;
189 
190 	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
191 		? FSL_DMA_SNEN : 0;
192 
193 	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
194 		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
195 			| snoop_bits, 64);
196 }
197 
198 static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
199 		struct fsl_desc_sw *new_desc)
200 {
201 	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
202 
203 	if (list_empty(&fsl_chan->ld_queue))
204 		return;
205 
206 	/* Link the tail of the queue to the new descriptor's physical
207 	 * address and enable the End-of-segment interrupt on that
208 	 * last link descriptor
209 	 * (i.e. the previous node's next link descriptor).
210 	 *
211 	 * For FSL_DMA_IP_83XX, the snoop enable bit also needs to be set.
212 	 */
213 	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
214 			new_desc->async_tx.phys | FSL_DMA_EOSIE |
215 			(((fsl_chan->feature & FSL_DMA_IP_MASK)
216 				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
217 }
218 
219 /**
220  * fsl_chan_set_src_loop_size - Set source address hold transfer size
221  * @fsl_chan : Freescale DMA channel
222  * @size     : Address loop size, 0 for disable loop
223  *
224  * Set the source address hold transfer size. The source address
225  * hold (or loop) transfer size limits how far the DMA advances from
226  * the source address (SA) before wrapping: with a loop size of 4,
227  * the DMA reads from SA, SA + 1, SA + 2, SA + 3, then loops back to
228  * SA, SA + 1 ... and so on.
229  */
230 static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
231 {
232 	switch (size) {
233 	case 0:
234 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
235 			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
236 			(~FSL_DMA_MR_SAHE), 32);
237 		break;
238 	case 1:
239 	case 2:
240 	case 4:
241 	case 8:
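		/* Encode the hold size (1/2/4/8 bytes) as log2 into the
		 * source address hold transfer size field, MR bits 15:14.
		 */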
242 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
243 			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
244 			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
245 			32);
246 		break;
247 	}
248 }
249 
250 /**
251  * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
252  * @fsl_chan : Freescale DMA channel
253  * @size     : Address loop size, 0 for disable loop
254  *
255  * Set the destination address hold transfer size. The destination
256  * address hold (or loop) transfer size limits how far the DMA advances
257  * from the destination address (TA) before wrapping: with a loop size
258  * of 4, the DMA writes to TA, TA + 1, TA + 2, TA + 3, then loops back
259  * to TA, TA + 1 ... and so on.
260  */
261 static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
262 {
263 	switch (size) {
264 	case 0:
265 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
266 			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
267 			(~FSL_DMA_MR_DAHE), 32);
268 		break;
269 	case 1:
270 	case 2:
271 	case 4:
272 	case 8:
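		/* Encode the hold size (1/2/4/8 bytes) as log2 into the
		 * destination address hold transfer size field, MR bits 17:16.
		 */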
273 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
274 			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
275 			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
276 			32);
277 		break;
278 	}
279 }
280 
281 /**
282  * fsl_chan_toggle_ext_pause - Toggle channel external pause status
283  * @fsl_chan : Freescale DMA channel
284  * @size     : Pause control size, 0 for disable external pause control.
285  *             The maximum is 1024.
286  *
287  * The Freescale DMA channel can be controlled by the external
288  * signal DREQ#. The pause control size is how many bytes the channel
289  * is allowed to transfer before pausing, after which a new assertion
290  * of DREQ# resumes channel operation.
291  */
292 static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
293 {
294 	if (size > 1024)
295 		return;
296 
297 	if (size) {
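		/* Encode the pause point as log2(size) into the pause
		 * (bandwidth) control field, MR bits 27:24.
		 */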
298 		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
299 			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
300 				| ((__ilog2(size) << 24) & 0x0f000000),
301 			32);
302 		fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
303 	} else
304 		fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
305 }
306 
307 /**
308  * fsl_chan_toggle_ext_start - Toggle channel external start status
309  * @fsl_chan : Freescale DMA channel
310  * @enable   : 0 is disabled, 1 is enabled.
311  *
312  * When external start is enabled, the channel is started by an
313  * external DMA start pin, so dma_start() does not begin the
314  * transfer immediately; the DMA channel waits until the
315  * control pin is asserted.
316  */
317 static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
318 {
319 	if (enable)
320 		fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
321 	else
322 		fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
323 }
324 
325 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
326 {
327 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
328 	struct fsl_desc_sw *desc;
329 	unsigned long flags;
330 	dma_cookie_t cookie;
331 
332 	/* cookie increment and adding to ld_queue must be atomic */
333 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
334 
335 	cookie = fsl_chan->common.cookie;
336 	list_for_each_entry(desc, &tx->tx_list, node) {
337 		cookie++;
338 		if (cookie < 0)
339 			cookie = 1;
340 
341 		desc->async_tx.cookie = cookie;
342 	}
343 
344 	fsl_chan->common.cookie = cookie;
345 	append_ld_queue(fsl_chan, tx_to_fsl_desc(tx));
346 	list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev);
347 
348 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
349 
350 	return cookie;
351 }
352 
353 /**
354  * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
355  * @fsl_chan : Freescale DMA channel
356  *
357  * Return - The allocated descriptor, or NULL on failure.
358  */
359 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
360 					struct fsl_dma_chan *fsl_chan)
361 {
362 	dma_addr_t pdesc;
363 	struct fsl_desc_sw *desc_sw;
364 
365 	desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
366 	if (desc_sw) {
367 		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
368 		dma_async_tx_descriptor_init(&desc_sw->async_tx,
369 						&fsl_chan->common);
370 		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
371 		desc_sw->async_tx.phys = pdesc;
372 	}
373 
374 	return desc_sw;
375 }
376 
377 
378 /**
379  * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
380  * @fsl_chan : Freescale DMA channel
381  *
382  * This function will create a dma pool for descriptor allocation.
383  *
384  * Return - The number of descriptors allocated.
385  */
386 static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
387 {
388 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
389 
390 	/* Has this channel already been allocated? */
391 	if (fsl_chan->desc_pool)
392 		return 1;
393 
394 	/* The descriptor must be aligned to 32 bytes to meet the
395 	 * FSL DMA specification requirement.
396 	 */
397 	fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
398 			fsl_chan->dev, sizeof(struct fsl_desc_sw),
399 			32, 0);
400 	if (!fsl_chan->desc_pool) {
401 		dev_err(fsl_chan->dev, "No memory for channel %d "
402 			"descriptor dma pool.\n", fsl_chan->id);
403 		return 0;
404 	}
405 
406 	return 1;
407 }
408 
409 /**
410  * fsl_dma_free_chan_resources - Free all resources of the channel.
411  * @fsl_chan : Freescale DMA channel
412  */
413 static void fsl_dma_free_chan_resources(struct dma_chan *chan)
414 {
415 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
416 	struct fsl_desc_sw *desc, *_desc;
417 	unsigned long flags;
418 
419 	dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
420 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
421 	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
422 #ifdef FSL_DMA_LD_DEBUG
423 		dev_dbg(fsl_chan->dev,
424 				"LD %p will be released.\n", desc);
425 #endif
426 		list_del(&desc->node);
427 		/* free link descriptor */
428 		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
429 	}
430 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
431 	dma_pool_destroy(fsl_chan->desc_pool);
432 
433 	fsl_chan->desc_pool = NULL;
434 }
435 
436 static struct dma_async_tx_descriptor *
437 fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
438 {
439 	struct fsl_dma_chan *fsl_chan;
440 	struct fsl_desc_sw *new;
441 
442 	if (!chan)
443 		return NULL;
444 
445 	fsl_chan = to_fsl_chan(chan);
446 
447 	new = fsl_dma_alloc_descriptor(fsl_chan);
448 	if (!new) {
449 		dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
450 		return NULL;
451 	}
452 
453 	new->async_tx.cookie = -EBUSY;
454 	new->async_tx.flags = flags;
455 
456 	/* Insert the link descriptor into the LD ring */
457 	list_add_tail(&new->node, &new->async_tx.tx_list);
458 
459 	/* Set End-of-link on the last link descriptor of the new list */
460 	set_ld_eol(fsl_chan, new);
461 
462 	return &new->async_tx;
463 }
464 
465 static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
466 	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
467 	size_t len, unsigned long flags)
468 {
469 	struct fsl_dma_chan *fsl_chan;
470 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
471 	struct list_head *list;
472 	size_t copy;
473 
474 	if (!chan)
475 		return NULL;
476 
477 	if (!len)
478 		return NULL;
479 
480 	fsl_chan = to_fsl_chan(chan);
481 
482 	do {
483 
484 		/* Allocate the link descriptor from DMA pool */
485 		new = fsl_dma_alloc_descriptor(fsl_chan);
486 		if (!new) {
487 			dev_err(fsl_chan->dev,
488 					"No free memory for link descriptor\n");
489 			goto fail;
490 		}
491 #ifdef FSL_DMA_LD_DEBUG
492 		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
493 #endif
494 
495 		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
496 
497 		set_desc_cnt(fsl_chan, &new->hw, copy);
498 		set_desc_src(fsl_chan, &new->hw, dma_src);
499 		set_desc_dest(fsl_chan, &new->hw, dma_dest);
500 
501 		if (!first)
502 			first = new;
503 		else
504 			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
505 
506 		new->async_tx.cookie = 0;
507 		async_tx_ack(&new->async_tx);
508 
509 		prev = new;
510 		len -= copy;
511 		dma_src += copy;
512 		dma_dest += copy;
513 
514 		/* Insert the link descriptor into the LD ring */
515 		list_add_tail(&new->node, &first->async_tx.tx_list);
516 	} while (len);
517 
518 	new->async_tx.flags = flags; /* client is in control of this ack */
519 	new->async_tx.cookie = -EBUSY;
520 
521 	/* Set End-of-link on the last link descriptor of the new list */
522 	set_ld_eol(fsl_chan, new);
523 
524 	return &first->async_tx;
525 
526 fail:
527 	if (!first)
528 		return NULL;
529 
530 	list = &first->async_tx.tx_list;
531 	list_for_each_entry_safe_reverse(new, prev, list, node) {
532 		list_del(&new->node);
533 		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
534 	}
535 
536 	return NULL;
537 }
538 
539 /**
540  * fsl_dma_update_completed_cookie - Update the completed cookie.
541  * @fsl_chan : Freescale DMA channel
542  */
543 static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
544 {
545 	struct fsl_desc_sw *cur_desc, *desc;
546 	dma_addr_t ld_phy;
547 
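	/* The current descriptor address register points at the link
	 * descriptor the channel is working on. If the channel is idle,
	 * that descriptor has completed; otherwise only its predecessor
	 * has, so step the completed cookie back by one.
	 */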
548 	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
549 
550 	if (ld_phy) {
551 		cur_desc = NULL;
552 		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
553 			if (desc->async_tx.phys == ld_phy) {
554 				cur_desc = desc;
555 				break;
556 			}
557 
558 		if (cur_desc && cur_desc->async_tx.cookie) {
559 			if (dma_is_idle(fsl_chan))
560 				fsl_chan->completed_cookie =
561 					cur_desc->async_tx.cookie;
562 			else
563 				fsl_chan->completed_cookie =
564 					cur_desc->async_tx.cookie - 1;
565 		}
566 	}
567 }
568 
569 /**
570  * fsl_chan_ld_cleanup - Clean up link descriptors
571  * @fsl_chan : Freescale DMA channel
572  *
573  * This function cleans up the ld_queue of the DMA channel: completed
574  * link descriptors are removed from the queue, their callbacks are
575  * run, and the descriptors are returned to the DMA pool.
576  */
577 static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
578 {
579 	struct fsl_desc_sw *desc, *_desc;
580 	unsigned long flags;
581 
582 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
583 
584 	dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
585 			fsl_chan->completed_cookie);
586 	list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
587 		dma_async_tx_callback callback;
588 		void *callback_param;
589 
590 		if (dma_async_is_complete(desc->async_tx.cookie,
591 			    fsl_chan->completed_cookie, fsl_chan->common.cookie)
592 				== DMA_IN_PROGRESS)
593 			break;
594 
595 		callback = desc->async_tx.callback;
596 		callback_param = desc->async_tx.callback_param;
597 
598 		/* Remove from ld_queue list */
599 		list_del(&desc->node);
600 
601 		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
602 				desc);
603 		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
604 
605 		/* Run the link descriptor callback function */
606 		if (callback) {
607 			spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
608 			dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
609 					desc);
610 			callback(callback_param);
611 			spin_lock_irqsave(&fsl_chan->desc_lock, flags);
612 		}
613 	}
614 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
615 }
616 
617 /**
618  * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
619  * @fsl_chan : Freescale DMA channel
620  */
621 static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
622 {
623 	struct list_head *ld_node;
624 	dma_addr_t next_dest_addr;
625 	unsigned long flags;
626 
627 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
628 
629 	if (!dma_is_idle(fsl_chan))
630 		goto out_unlock;
631 
632 	dma_halt(fsl_chan);
633 
634 	/* If there are link descriptors in the queue that have not been
635 	 * transferred yet, we need to start them.
636 	 */
637 
638 	/* Find the first untransferred descriptor */
639 	for (ld_node = fsl_chan->ld_queue.next;
640 		(ld_node != &fsl_chan->ld_queue)
641 			&& (dma_async_is_complete(
642 				to_fsl_desc(ld_node)->async_tx.cookie,
643 				fsl_chan->completed_cookie,
644 				fsl_chan->common.cookie) == DMA_SUCCESS);
645 		ld_node = ld_node->next);
646 
647 	if (ld_node != &fsl_chan->ld_queue) {
648 		/* Get the ld start address from ld_queue */
649 		next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
650 		dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%llx\n",
651 				(unsigned long long)next_dest_addr);
652 		set_cdar(fsl_chan, next_dest_addr);
653 		dma_start(fsl_chan);
654 	} else {
655 		set_cdar(fsl_chan, 0);
656 		set_ndar(fsl_chan, 0);
657 	}
658 
659 out_unlock:
660 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
661 }
662 
663 /**
664  * fsl_dma_memcpy_issue_pending - Issue the DMA start command
665  * @fsl_chan : Freescale DMA channel
666  */
667 static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
668 {
669 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
670 
671 #ifdef FSL_DMA_LD_DEBUG
672 	struct fsl_desc_sw *ld;
673 	unsigned long flags;
674 
675 	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
676 	if (list_empty(&fsl_chan->ld_queue)) {
677 		spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
678 		return;
679 	}
680 
681 	dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
682 	list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
683 		int i;
684 		dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
685 				fsl_chan->id, ld->async_tx.phys);
686 		for (i = 0; i < 8; i++)
687 			dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
688 					i, *(((u32 *)&ld->hw) + i));
689 	}
690 	dev_dbg(fsl_chan->dev, "----------------\n");
691 	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
692 #endif
693 
694 	fsl_chan_xfer_ld_queue(fsl_chan);
695 }
696 
697 /**
698  * fsl_dma_is_complete - Determine the DMA status
699  * @fsl_chan : Freescale DMA channel
700  */
701 static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
702 					dma_cookie_t cookie,
703 					dma_cookie_t *done,
704 					dma_cookie_t *used)
705 {
706 	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
707 	dma_cookie_t last_used;
708 	dma_cookie_t last_complete;
709 
710 	fsl_chan_ld_cleanup(fsl_chan);
711 
712 	last_used = chan->cookie;
713 	last_complete = fsl_chan->completed_cookie;
714 
715 	if (done)
716 		*done = last_complete;
717 
718 	if (used)
719 		*used = last_used;
720 
721 	return dma_async_is_complete(cookie, last_complete, last_used);
722 }
723 
724 static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
725 {
726 	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
727 	u32 stat;
728 	int update_cookie = 0;
729 	int xfer_ld_q = 0;
730 
731 	stat = get_sr(fsl_chan);
732 	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
733 						fsl_chan->id, stat);
734 	set_sr(fsl_chan, stat);		/* Clear the event register */
735 
736 	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
737 	if (!stat)
738 		return IRQ_NONE;
739 
740 	if (stat & FSL_DMA_SR_TE)
741 		dev_err(fsl_chan->dev, "Transfer Error!\n");
742 
743 	/* Programming Error
744 	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
745 	 * trigger a PE interrupt.
746 	 */
747 	if (stat & FSL_DMA_SR_PE) {
748 		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
749 		if (get_bcr(fsl_chan) == 0) {
750 			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
751 			 * Now, update the completed cookie, and continue the
752 			 * next uncompleted transfer.
753 			 */
754 			update_cookie = 1;
755 			xfer_ld_q = 1;
756 		}
757 		stat &= ~FSL_DMA_SR_PE;
758 	}
759 
760 	/* If the link descriptor segment transfer finishes,
761 	 * we will recycle the used descriptor.
762 	 */
763 	if (stat & FSL_DMA_SR_EOSI) {
764 		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
765 		dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
766 			(unsigned long long)get_cdar(fsl_chan),
767 			(unsigned long long)get_ndar(fsl_chan));
768 		stat &= ~FSL_DMA_SR_EOSI;
769 		update_cookie = 1;
770 	}
771 
772 	/* For the MPC8349, the EOCDI event needs to update the cookie
773 	 * and start the next transfer if one exists.
774 	 */
775 	if (stat & FSL_DMA_SR_EOCDI) {
776 		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
777 		stat &= ~FSL_DMA_SR_EOCDI;
778 		update_cookie = 1;
779 		xfer_ld_q = 1;
780 	}
781 
782 	/* If the current transfer reached the end of the link list,
783 	 * we should clear the Channel Start bit to prepare for the
784 	 * next transfer.
785 	 */
786 	if (stat & FSL_DMA_SR_EOLNI) {
787 		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
788 		stat &= ~FSL_DMA_SR_EOLNI;
789 		xfer_ld_q = 1;
790 	}
791 
792 	if (update_cookie)
793 		fsl_dma_update_completed_cookie(fsl_chan);
794 	if (xfer_ld_q)
795 		fsl_chan_xfer_ld_queue(fsl_chan);
796 	if (stat)
797 		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
798 					stat);
799 
800 	dev_dbg(fsl_chan->dev, "event: Exit\n");
801 	tasklet_schedule(&fsl_chan->tasklet);
802 	return IRQ_HANDLED;
803 }
804 
805 static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
806 {
807 	struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
808 	u32 gsr;
809 	int ch_nr;
810 
811 	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
812 			: in_le32(fdev->reg_base);
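	/* Each channel owns one byte of the global status register, with
	 * channel 0 in the most significant byte; pick the channel for the
	 * lowest status bit that is set.
	 */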
813 	ch_nr = (32 - ffs(gsr)) / 8;
814 
815 	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
816 			fdev->chan[ch_nr]) : IRQ_NONE;
817 }
818 
819 static void dma_do_tasklet(unsigned long data)
820 {
821 	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
822 	fsl_chan_ld_cleanup(fsl_chan);
823 }
824 
825 static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
826 	struct device_node *node, u32 feature, const char *compatible)
827 {
828 	struct fsl_dma_chan *new_fsl_chan;
829 	int err;
830 
831 	/* alloc channel */
832 	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
833 	if (!new_fsl_chan) {
834 		dev_err(fdev->dev, "No free memory for allocating "
835 				"dma channels!\n");
836 		return -ENOMEM;
837 	}
838 
839 	/* get dma channel register base */
840 	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
841 	if (err) {
842 		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
843 				node->full_name);
844 		goto err_no_reg;
845 	}
846 
847 	new_fsl_chan->feature = feature;
848 
849 	if (!fdev->feature)
850 		fdev->feature = new_fsl_chan->feature;
851 
852 	/* If the DMA device's feature is different from its channels',
853 	 * report the bug.
854 	 */
855 	WARN_ON(fdev->feature != new_fsl_chan->feature);
856 
857 	new_fsl_chan->dev = fdev->dev;
858 	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
859 			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
860 
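	/* Channel register blocks are 0x80 bytes each, starting at offset
	 * 0x100 from the controller base, so the channel index can be
	 * derived from the low bits of the register address.
	 */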
861 	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
862 	if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
863 		dev_err(fdev->dev, "There is no channel %d!\n",
864 				new_fsl_chan->id);
865 		err = -EINVAL;
866 		goto err_no_chan;
867 	}
868 	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
869 	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
870 			(unsigned long)new_fsl_chan);
871 
872 	/* Init the channel */
873 	dma_init(new_fsl_chan);
874 
875 	/* Clear the cdar register */
876 	set_cdar(new_fsl_chan, 0);
877 
878 	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
879 	case FSL_DMA_IP_85XX:
880 		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
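		/* fall through: 85xx channels also get the 83xx callbacks */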
881 	case FSL_DMA_IP_83XX:
882 		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
883 		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
884 		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
885 	}
886 
887 	spin_lock_init(&new_fsl_chan->desc_lock);
888 	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);
889 
890 	new_fsl_chan->common.device = &fdev->common;
891 
892 	/* Add the channel to DMA device channel list */
893 	list_add_tail(&new_fsl_chan->common.device_node,
894 			&fdev->common.channels);
895 	fdev->common.chancnt++;
896 
897 	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
898 	if (new_fsl_chan->irq != NO_IRQ) {
899 		err = request_irq(new_fsl_chan->irq,
900 					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
901 					"fsldma-channel", new_fsl_chan);
902 		if (err) {
903 			dev_err(fdev->dev, "DMA channel %s request_irq error "
904 				"with return %d\n", node->full_name, err);
905 			goto err_no_irq;
906 		}
907 	}
908 
909 	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
910 		 compatible,
911 		 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
912 
913 	return 0;
914 
915 err_no_irq:
916 	list_del(&new_fsl_chan->common.device_node);
917 err_no_chan:
918 	iounmap(new_fsl_chan->reg_base);
919 err_no_reg:
920 	kfree(new_fsl_chan);
921 	return err;
922 }
923 
924 static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
925 {
926 	if (fchan->irq != NO_IRQ)
927 		free_irq(fchan->irq, fchan);
928 	list_del(&fchan->common.device_node);
929 	iounmap(fchan->reg_base);
930 	kfree(fchan);
931 }
932 
933 static int __devinit of_fsl_dma_probe(struct of_device *dev,
934 			const struct of_device_id *match)
935 {
936 	int err;
937 	struct fsl_dma_device *fdev;
938 	struct device_node *child;
939 
940 	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
941 	if (!fdev) {
942 		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
943 		return -ENOMEM;
944 	}
945 	fdev->dev = &dev->dev;
946 	INIT_LIST_HEAD(&fdev->common.channels);
947 
948 	/* get DMA controller register base */
949 	err = of_address_to_resource(dev->node, 0, &fdev->reg);
950 	if (err) {
951 		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
952 				dev->node->full_name);
953 		goto err_no_reg;
954 	}
955 
956 	dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
957 			"controller at 0x%llx...\n",
958 			match->compatible, (unsigned long long)fdev->reg.start);
959 	fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
960 						- fdev->reg.start + 1);
961 
962 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
963 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
964 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
965 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
966 	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
967 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
968 	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
969 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
970 	fdev->common.dev = &dev->dev;
971 
972 	fdev->irq = irq_of_parse_and_map(dev->node, 0);
973 	if (fdev->irq != NO_IRQ) {
974 		err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
975 					"fsldma-device", fdev);
976 		if (err) {
977 			dev_err(&dev->dev, "DMA device request_irq error "
978 				"with return %d\n", err);
979 			goto err;
980 		}
981 	}
982 
983 	dev_set_drvdata(&(dev->dev), fdev);
984 
985 	/* We cannot use of_platform_bus_probe() because there is no
986 	 * of_platform_bus_remove.  Instead, we manually instantiate every DMA
987 	 * channel object.
988 	 */
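	/* "eloplus" channels are the 85xx-style blocks with big-endian
	 * registers; "elo" channels are the 83xx-style blocks with
	 * little-endian registers.
	 */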
989 	for_each_child_of_node(dev->node, child) {
990 		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
991 			fsl_dma_chan_probe(fdev, child,
992 				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
993 				"fsl,eloplus-dma-channel");
994 		if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
995 			fsl_dma_chan_probe(fdev, child,
996 				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
997 				"fsl,elo-dma-channel");
998 	}
999 
1000 	dma_async_device_register(&fdev->common);
1001 	return 0;
1002 
1003 err:
1004 	iounmap(fdev->reg_base);
1005 err_no_reg:
1006 	kfree(fdev);
1007 	return err;
1008 }
1009 
1010 static int of_fsl_dma_remove(struct of_device *of_dev)
1011 {
1012 	struct fsl_dma_device *fdev;
1013 	unsigned int i;
1014 
1015 	fdev = dev_get_drvdata(&of_dev->dev);
1016 
1017 	dma_async_device_unregister(&fdev->common);
1018 
1019 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
1020 		if (fdev->chan[i])
1021 			fsl_dma_chan_remove(fdev->chan[i]);
1022 
1023 	if (fdev->irq != NO_IRQ)
1024 		free_irq(fdev->irq, fdev);
1025 
1026 	iounmap(fdev->reg_base);
1027 
1028 	kfree(fdev);
1029 	dev_set_drvdata(&of_dev->dev, NULL);
1030 
1031 	return 0;
1032 }
1033 
1034 static struct of_device_id of_fsl_dma_ids[] = {
1035 	{ .compatible = "fsl,eloplus-dma", },
1036 	{ .compatible = "fsl,elo-dma", },
1037 	{}
1038 };
1039 
1040 static struct of_platform_driver of_fsl_dma_driver = {
1041 	.name = "fsl-elo-dma",
1042 	.match_table = of_fsl_dma_ids,
1043 	.probe = of_fsl_dma_probe,
1044 	.remove = of_fsl_dma_remove,
1045 };
1046 
1047 static __init int of_fsl_dma_init(void)
1048 {
1049 	int ret;
1050 
1051 	pr_info("Freescale Elo / Elo Plus DMA driver\n");
1052 
1053 	ret = of_register_platform_driver(&of_fsl_dma_driver);
1054 	if (ret)
1055 		pr_err("fsldma: failed to register platform driver\n");
1056 
1057 	return ret;
1058 }
1059 
1060 static void __exit of_fsl_dma_exit(void)
1061 {
1062 	of_unregister_platform_driver(&of_fsl_dma_driver);
1063 }
1064 
1065 subsys_initcall(of_fsl_dma_init);
1066 module_exit(of_fsl_dma_exit);
1067 
1068 MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
1069 MODULE_LICENSE("GPL");
1070