/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from the MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports
 * memory-to-memory data transfers (tested using the dmatest module) and
 * data transfers between memory and peripheral I/O memory
 * by means of slave scatter/gather, with these limitations:
 *  - chunked transfers (described by s/g lists with more than one item)
 *     are refused as long as proper support for scatter/gather is missing;
 *  - transfers on MPC8308 always start from software as this SoC appears
 *     not to have external request lines for peripheral flow control;
 *  - only peripheral devices with a 4-byte FIFO access register are supported;
 *  - the minimal memory <-> I/O memory transfer chunk is 4 bytes; consequently,
 *     source and destination addresses must be 4-byte aligned
 *     and the transfer size must be aligned on a (4 * maxburst) boundary.
 */
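
/*
 * Illustrative usage sketch (not part of this driver): how a client might
 * set up a peripheral transfer within the constraints listed above.  The
 * channel name "rx" and fifo_phys_addr are hypothetical placeholders for
 * values that would come from the client's own device tree binding.
 *
 *	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 16,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * With src_maxburst = 16 the buffer described by the single s/g entry must
 * be 4-byte aligned and a multiple of 4 * 16 = 64 bytes long, otherwise
 * mpc_dma_prep_slave_sg() below rejects the request.
 */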

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
	/* DMA channels(0~63) priority */
};

struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Beginning link channel, mirrors citer_linkch */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
	int				will_access_peripheral;
};

struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
						struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported the following error(s) on channel %u:\n",
						      MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* Try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
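
/*
 * Worked example (illustrative): copying 100 bytes between two 4-byte-aligned
 * buffers falls through to the IS_ALIGNED(src | dst | len, 4) case above,
 * since 100 is not a multiple of 32 or 16.  The TCD then ends up with
 * ssize = dsize = MPC_DMA_TSIZE_4, soff = doff = 4 and nbytes = 100, i.e.
 * a single major iteration moving 25 beats of 4 bytes each.
 */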

static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		/* list_first_entry() never returns NULL, check emptiness first */
		if (list_empty(&mchan->free)) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		mdesc = list_first_entry(&mchan->free,
						struct mpc_dma_desc, node);
		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (!IS_ALIGNED(sg_dma_address(sg), 4))
			goto err_prep;

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);
			tcd->soff = 0;
			tcd->doff = 4;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;
			tcd->soff = 4;
			tcd->doff = 0;
		}

		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;

		len = sg_dma_len(sg);
		tcd->nbytes = tcd_nunits * 4;
		if (!IS_ALIGNED(len, tcd->nbytes))
			goto err_prep;

		iter = len / tcd->nbytes;
		if (iter >= 1 << 15) {
			/* len is too big */
			goto err_prep;
		}
		/* citer_linkch contains the high bits of iter */
		tcd->biter = iter & 0x1ff;
		tcd->biter_linkch = iter >> 9;
		tcd->citer = tcd->biter;
		tcd->citer_linkch = tcd->biter_linkch;

		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}
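
/*
 * Worked example (illustrative): with maxburst = 16 the minor loop above
 * moves nbytes = 16 * 4 = 64 bytes per hardware request.  A 256 KiB buffer
 * then needs iter = 262144 / 64 = 4096 major iterations, stored split as
 * biter = 4096 & 0x1ff = 0 and biter_linkch = 4096 >> 9 = 8 (citer and
 * citer_linkch get the same values).  Any length requiring iter >= 2^15
 * is rejected as too big.
 */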

static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
							unsigned long arg)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma *mdma;
	struct dma_slave_config *cfg;
	unsigned long flags;

	mchan = dma_chan_to_mpc_dma_chan(chan);
	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* Disable channel requests */
		mdma = dma_chan_to_mpc_dma(chan);

		spin_lock_irqsave(&mchan->lock, flags);

		out_8(&mdma->regs->dmacerq, chan->chan_id);
		list_splice_tail_init(&mchan->prepared, &mchan->free);
		list_splice_tail_init(&mchan->queued, &mchan->free);
		list_splice_tail_init(&mchan->active, &mchan->free);

		spin_unlock_irqrestore(&mchan->lock, flags);

		return 0;

	case DMA_SLAVE_CONFIG:
		/*
		 * Software constraints:
		 *  - only transfers between a peripheral device and
		 *     memory are supported;
		 *  - only peripheral devices with a 4-byte FIFO access register
		 *     are supported;
		 *  - the minimal transfer chunk is 4 bytes; consequently,
		 *     source and destination addresses must be 4-byte aligned
		 *     and the transfer size must be aligned on a (4 * maxburst)
		 *     boundary;
		 *  - during the transfer the RAM address is incremented by
		 *     the size of the minimal transfer chunk;
		 *  - the peripheral port's address is constant during the
		 *     transfer.
		 */

		cfg = (void *)arg;

		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
		    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
		    !IS_ALIGNED(cfg->src_addr, 4) ||
		    !IS_ALIGNED(cfg->dst_addr, 4)) {
			return -EINVAL;
		}

		spin_lock_irqsave(&mchan->lock, flags);

		mchan->src_per_paddr = cfg->src_addr;
		mchan->src_tcd_nunits = cfg->src_maxburst;
		mchan->dst_per_paddr = cfg->dst_addr;
		mchan->dst_tcd_nunits = cfg->dst_maxburst;

		/* Apply defaults */
		if (mchan->src_tcd_nunits == 0)
			mchan->src_tcd_nunits = 1;
		if (mchan->dst_tcd_nunits == 0)
			mchan->dst_tcd_nunits = 1;

		spin_unlock_irqrestore(&mchan->lock, flags);

		return 0;

	default:
		/* Unknown command */
		break;
	}

	return -ENXIO;
}

static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	if (mdma->is_mpc8308)
		dma->chancnt = MPC8308_DMACHAN_MAX;
	else
		dma->chancnt = MPC512x_DMACHAN_MAX;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_control = mpc_dma_device_control;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* Enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						of_dma_xlate_by_chan_id, mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}

static struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};

static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");