xref: /openbmc/linux/drivers/dma/mpc512x_dma.c (revision 78a4f036)
1 /*
2  * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
3  * Copyright (C) Semihalf 2009
4  * Copyright (C) Ilya Yanok, Emcraft Systems 2010
5  *
6  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
7  * (defines, structures and comments) was taken from MPC5121 DMA driver
8  * written by Hongjun Chen <hong-jun.chen@freescale.com>.
9  *
10  * Approved as OSADL project by a majority of OSADL members and funded
11  * by OSADL membership fees in 2009;  for details see www.osadl.org.
12  *
13  * This program is free software; you can redistribute it and/or modify it
14  * under the terms of the GNU General Public License as published by the Free
15  * Software Foundation; either version 2 of the License, or (at your option)
16  * any later version.
17  *
18  * This program is distributed in the hope that it will be useful, but WITHOUT
19  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
20  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
21  * more details.
22  *
23  * You should have received a copy of the GNU General Public License along with
24  * this program; if not, write to the Free Software Foundation, Inc., 59
25  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
26  *
27  * The full GNU General Public License is included in this distribution in the
28  * file called COPYING.
29  */
30 
31 /*
32  * This is initial version of MPC5121 DMA driver. Only memory to memory
33  * transfers are supported (tested using dmatest module).
34  */
35 
36 #include <linux/module.h>
37 #include <linux/dmaengine.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/interrupt.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/of_address.h>
43 #include <linux/of_device.h>
44 #include <linux/of_irq.h>
45 #include <linux/of_platform.h>
46 
47 #include <linux/random.h>
48 
49 #include "dmaengine.h"
50 
51 /* Number of DMA Transfer descriptors allocated per channel */
52 #define MPC_DMA_DESCRIPTORS	64
53 
54 /* Macro definitions */
55 #define MPC_DMA_TCD_OFFSET	0x1000
56 
57 /*
58  * Maximum channel counts for individual hardware variants
59  * and the maximum channel count over all supported controllers,
60  * used for data structure size
61  */
62 #define MPC8308_DMACHAN_MAX	16
63 #define MPC512x_DMACHAN_MAX	64
64 #define MPC_DMA_CHANNELS	64
65 
66 /* Arbitration mode of group and channel */
67 #define MPC_DMA_DMACR_EDCG	(1 << 31)
68 #define MPC_DMA_DMACR_ERGA	(1 << 3)
69 #define MPC_DMA_DMACR_ERCA	(1 << 2)
70 
71 /* Error codes */
72 #define MPC_DMA_DMAES_VLD	(1 << 31)
73 #define MPC_DMA_DMAES_GPE	(1 << 15)
74 #define MPC_DMA_DMAES_CPE	(1 << 14)
75 #define MPC_DMA_DMAES_ERRCHN(err) \
76 				(((err) >> 8) & 0x3f)
77 #define MPC_DMA_DMAES_SAE	(1 << 7)
78 #define MPC_DMA_DMAES_SOE	(1 << 6)
79 #define MPC_DMA_DMAES_DAE	(1 << 5)
80 #define MPC_DMA_DMAES_DOE	(1 << 4)
81 #define MPC_DMA_DMAES_NCE	(1 << 3)
82 #define MPC_DMA_DMAES_SGE	(1 << 2)
83 #define MPC_DMA_DMAES_SBE	(1 << 1)
84 #define MPC_DMA_DMAES_DBE	(1 << 0)
85 
86 #define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)
87 
88 #define MPC_DMA_TSIZE_1		0x00
89 #define MPC_DMA_TSIZE_2		0x01
90 #define MPC_DMA_TSIZE_4		0x02
91 #define MPC_DMA_TSIZE_16	0x04
92 #define MPC_DMA_TSIZE_32	0x05
93 
94 /* MPC5121 DMA engine registers */
95 struct __attribute__ ((__packed__)) mpc_dma_regs {
96 	/* 0x00 */
97 	u32 dmacr;		/* DMA control register */
98 	u32 dmaes;		/* DMA error status */
99 	/* 0x08 */
100 	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
101 	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
102 	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
103 	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
104 	/* 0x18 */
105 	u8 dmaserq;		/* DMA set enable request */
106 	u8 dmacerq;		/* DMA clear enable request */
107 	u8 dmaseei;		/* DMA set enable error interrupt */
108 	u8 dmaceei;		/* DMA clear enable error interrupt */
109 	/* 0x1c */
110 	u8 dmacint;		/* DMA clear interrupt request */
111 	u8 dmacerr;		/* DMA clear error */
112 	u8 dmassrt;		/* DMA set start bit */
113 	u8 dmacdne;		/* DMA clear DONE status bit */
114 	/* 0x20 */
115 	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
116 	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
117 	u32 dmaerrh;		/* DMA error high(ch63~32) */
118 	u32 dmaerrl;		/* DMA error low(ch31~0) */
119 	/* 0x30 */
120 	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
121 	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
122 	union {
123 		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
124 		u32 dmagpor;	/* (General purpose register on MPC8308) */
125 	};
126 	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
127 	/* 0x40 ~ 0xff */
128 	u32 reserve0[48];	/* Reserved */
129 	/* 0x100 */
130 	u8 dchpri[MPC_DMA_CHANNELS];
131 	/* DMA channels(0~63) priority */
132 };
133 
134 struct __attribute__ ((__packed__)) mpc_dma_tcd {
135 	/* 0x00 */
136 	u32 saddr;		/* Source address */
137 
138 	u32 smod:5;		/* Source address modulo */
139 	u32 ssize:3;		/* Source data transfer size */
140 	u32 dmod:5;		/* Destination address modulo */
141 	u32 dsize:3;		/* Destination data transfer size */
142 	u32 soff:16;		/* Signed source address offset */
143 
144 	/* 0x08 */
145 	u32 nbytes;		/* Inner "minor" byte count */
146 	u32 slast;		/* Last source address adjustment */
147 	u32 daddr;		/* Destination address */
148 
149 	/* 0x14 */
150 	u32 citer_elink:1;	/* Enable channel-to-channel linking on
151 				 * minor loop complete
152 				 */
153 	u32 citer_linkch:6;	/* Link channel for minor loop complete */
154 	u32 citer:9;		/* Current "major" iteration count */
155 	u32 doff:16;		/* Signed destination address offset */
156 
157 	/* 0x18 */
158 	u32 dlast_sga;		/* Last Destination address adjustment/scatter
159 				 * gather address
160 				 */
161 
162 	/* 0x1c */
163 	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
164 				 * loop complete
165 				 */
166 	u32 biter_linkch:6;
167 	u32 biter:9;		/* Beginning "major" iteration count */
168 	u32 bwc:2;		/* Bandwidth control */
169 	u32 major_linkch:6;	/* Link channel number */
170 	u32 done:1;		/* Channel done */
171 	u32 active:1;		/* Channel active */
172 	u32 major_elink:1;	/* Enable channel-to-channel linking on major
173 				 * loop complete
174 				 */
175 	u32 e_sg:1;		/* Enable scatter/gather processing */
176 	u32 d_req:1;		/* Disable request */
177 	u32 int_half:1;		/* Enable an interrupt when major counter is
178 				 * half complete
179 				 */
180 	u32 int_maj:1;		/* Enable an interrupt when major iteration
181 				 * count completes
182 				 */
183 	u32 start:1;		/* Channel start */
184 };
185 
186 struct mpc_dma_desc {
187 	struct dma_async_tx_descriptor	desc;
188 	struct mpc_dma_tcd		*tcd;
189 	dma_addr_t			tcd_paddr;
190 	int				error;
191 	struct list_head		node;
192 };
193 
194 struct mpc_dma_chan {
195 	struct dma_chan			chan;
196 	struct list_head		free;
197 	struct list_head		prepared;
198 	struct list_head		queued;
199 	struct list_head		active;
200 	struct list_head		completed;
201 	struct mpc_dma_tcd		*tcd;
202 	dma_addr_t			tcd_paddr;
203 
204 	/* Lock for this structure */
205 	spinlock_t			lock;
206 };
207 
208 struct mpc_dma {
209 	struct dma_device		dma;
210 	struct tasklet_struct		tasklet;
211 	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
212 	struct mpc_dma_regs __iomem	*regs;
213 	struct mpc_dma_tcd __iomem	*tcd;
214 	int				irq;
215 	int				irq2;
216 	uint				error_status;
217 	int				is_mpc8308;
218 
219 	/* Lock for error_status field in this structure */
220 	spinlock_t			error_status_lock;
221 };
222 
223 #define DRV_NAME	"mpc512x_dma"
224 
225 /* Convert struct dma_chan to struct mpc_dma_chan */
226 static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
227 {
228 	return container_of(c, struct mpc_dma_chan, chan);
229 }
230 
231 /* Convert struct dma_chan to struct mpc_dma */
232 static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
233 {
234 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
235 	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
236 }
237 
238 /*
239  * Execute all queued DMA descriptors.
240  *
241  * Following requirements must be met while calling mpc_dma_execute():
242  * 	a) mchan->lock is acquired,
243  * 	b) mchan->active list is empty,
244  * 	c) mchan->queued list contains at least one entry.
245  */
246 static void mpc_dma_execute(struct mpc_dma_chan *mchan)
247 {
248 	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
249 	struct mpc_dma_desc *first = NULL;
250 	struct mpc_dma_desc *prev = NULL;
251 	struct mpc_dma_desc *mdesc;
252 	int cid = mchan->chan.chan_id;
253 
254 	/* Move all queued descriptors to active list */
255 	list_splice_tail_init(&mchan->queued, &mchan->active);
256 
257 	/* Chain descriptors into one transaction */
258 	list_for_each_entry(mdesc, &mchan->active, node) {
259 		if (!first)
260 			first = mdesc;
261 
262 		if (!prev) {
263 			prev = mdesc;
264 			continue;
265 		}
266 
267 		prev->tcd->dlast_sga = mdesc->tcd_paddr;
268 		prev->tcd->e_sg = 1;
269 		mdesc->tcd->start = 1;
270 
271 		prev = mdesc;
272 	}
273 
274 	prev->tcd->int_maj = 1;
275 
276 	/* Send first descriptor in chain into hardware */
277 	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
278 
279 	if (first != prev)
280 		mdma->tcd[cid].e_sg = 1;
281 	out_8(&mdma->regs->dmassrt, cid);
282 }
283 
284 /* Handle interrupt on one half of DMA controller (32 channels) */
285 static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
286 {
287 	struct mpc_dma_chan *mchan;
288 	struct mpc_dma_desc *mdesc;
289 	u32 status = is | es;
290 	int ch;
291 
292 	while ((ch = fls(status) - 1) >= 0) {
293 		status &= ~(1 << ch);
294 		mchan = &mdma->channels[ch + off];
295 
296 		spin_lock(&mchan->lock);
297 
298 		out_8(&mdma->regs->dmacint, ch + off);
299 		out_8(&mdma->regs->dmacerr, ch + off);
300 
301 		/* Check error status */
302 		if (es & (1 << ch))
303 			list_for_each_entry(mdesc, &mchan->active, node)
304 				mdesc->error = -EIO;
305 
306 		/* Execute queued descriptors */
307 		list_splice_tail_init(&mchan->active, &mchan->completed);
308 		if (!list_empty(&mchan->queued))
309 			mpc_dma_execute(mchan);
310 
311 		spin_unlock(&mchan->lock);
312 	}
313 }
314 
315 /* Interrupt handler */
316 static irqreturn_t mpc_dma_irq(int irq, void *data)
317 {
318 	struct mpc_dma *mdma = data;
319 	uint es;
320 
321 	/* Save error status register */
322 	es = in_be32(&mdma->regs->dmaes);
323 	spin_lock(&mdma->error_status_lock);
324 	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
325 		mdma->error_status = es;
326 	spin_unlock(&mdma->error_status_lock);
327 
328 	/* Handle interrupt on each channel */
329 	if (mdma->dma.chancnt > 32) {
330 		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
331 					in_be32(&mdma->regs->dmaerrh), 32);
332 	}
333 	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
334 					in_be32(&mdma->regs->dmaerrl), 0);
335 
336 	/* Schedule tasklet */
337 	tasklet_schedule(&mdma->tasklet);
338 
339 	return IRQ_HANDLED;
340 }
341 
342 /* process completed descriptors */
343 static void mpc_dma_process_completed(struct mpc_dma *mdma)
344 {
345 	dma_cookie_t last_cookie = 0;
346 	struct mpc_dma_chan *mchan;
347 	struct mpc_dma_desc *mdesc;
348 	struct dma_async_tx_descriptor *desc;
349 	unsigned long flags;
350 	LIST_HEAD(list);
351 	int i;
352 
353 	for (i = 0; i < mdma->dma.chancnt; i++) {
354 		mchan = &mdma->channels[i];
355 
356 		/* Get all completed descriptors */
357 		spin_lock_irqsave(&mchan->lock, flags);
358 		if (!list_empty(&mchan->completed))
359 			list_splice_tail_init(&mchan->completed, &list);
360 		spin_unlock_irqrestore(&mchan->lock, flags);
361 
362 		if (list_empty(&list))
363 			continue;
364 
365 		/* Execute callbacks and run dependencies */
366 		list_for_each_entry(mdesc, &list, node) {
367 			desc = &mdesc->desc;
368 
369 			if (desc->callback)
370 				desc->callback(desc->callback_param);
371 
372 			last_cookie = desc->cookie;
373 			dma_run_dependencies(desc);
374 		}
375 
376 		/* Free descriptors */
377 		spin_lock_irqsave(&mchan->lock, flags);
378 		list_splice_tail_init(&list, &mchan->free);
379 		mchan->chan.completed_cookie = last_cookie;
380 		spin_unlock_irqrestore(&mchan->lock, flags);
381 	}
382 }
383 
384 /* DMA Tasklet */
385 static void mpc_dma_tasklet(unsigned long data)
386 {
387 	struct mpc_dma *mdma = (void *)data;
388 	unsigned long flags;
389 	uint es;
390 
391 	spin_lock_irqsave(&mdma->error_status_lock, flags);
392 	es = mdma->error_status;
393 	mdma->error_status = 0;
394 	spin_unlock_irqrestore(&mdma->error_status_lock, flags);
395 
396 	/* Print nice error report */
397 	if (es) {
398 		dev_err(mdma->dma.dev,
399 			"Hardware reported following error(s) on channel %u:\n",
400 						      MPC_DMA_DMAES_ERRCHN(es));
401 
402 		if (es & MPC_DMA_DMAES_GPE)
403 			dev_err(mdma->dma.dev, "- Group Priority Error\n");
404 		if (es & MPC_DMA_DMAES_CPE)
405 			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
406 		if (es & MPC_DMA_DMAES_SAE)
407 			dev_err(mdma->dma.dev, "- Source Address Error\n");
408 		if (es & MPC_DMA_DMAES_SOE)
409 			dev_err(mdma->dma.dev, "- Source Offset"
410 						" Configuration Error\n");
411 		if (es & MPC_DMA_DMAES_DAE)
412 			dev_err(mdma->dma.dev, "- Destination Address"
413 								" Error\n");
414 		if (es & MPC_DMA_DMAES_DOE)
415 			dev_err(mdma->dma.dev, "- Destination Offset"
416 						" Configuration Error\n");
417 		if (es & MPC_DMA_DMAES_NCE)
418 			dev_err(mdma->dma.dev, "- NBytes/Citter"
419 						" Configuration Error\n");
420 		if (es & MPC_DMA_DMAES_SGE)
421 			dev_err(mdma->dma.dev, "- Scatter/Gather"
422 						" Configuration Error\n");
423 		if (es & MPC_DMA_DMAES_SBE)
424 			dev_err(mdma->dma.dev, "- Source Bus Error\n");
425 		if (es & MPC_DMA_DMAES_DBE)
426 			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
427 	}
428 
429 	mpc_dma_process_completed(mdma);
430 }
431 
432 /* Submit descriptor to hardware */
433 static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
434 {
435 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
436 	struct mpc_dma_desc *mdesc;
437 	unsigned long flags;
438 	dma_cookie_t cookie;
439 
440 	mdesc = container_of(txd, struct mpc_dma_desc, desc);
441 
442 	spin_lock_irqsave(&mchan->lock, flags);
443 
444 	/* Move descriptor to queue */
445 	list_move_tail(&mdesc->node, &mchan->queued);
446 
447 	/* If channel is idle, execute all queued descriptors */
448 	if (list_empty(&mchan->active))
449 		mpc_dma_execute(mchan);
450 
451 	/* Update cookie */
452 	cookie = dma_cookie_assign(txd);
453 	spin_unlock_irqrestore(&mchan->lock, flags);
454 
455 	return cookie;
456 }
457 
458 /* Alloc channel resources */
459 static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
460 {
461 	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
462 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
463 	struct mpc_dma_desc *mdesc;
464 	struct mpc_dma_tcd *tcd;
465 	dma_addr_t tcd_paddr;
466 	unsigned long flags;
467 	LIST_HEAD(descs);
468 	int i;
469 
470 	/* Alloc DMA memory for Transfer Control Descriptors */
471 	tcd = dma_alloc_coherent(mdma->dma.dev,
472 			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
473 							&tcd_paddr, GFP_KERNEL);
474 	if (!tcd)
475 		return -ENOMEM;
476 
477 	/* Alloc descriptors for this channel */
478 	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
479 		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
480 		if (!mdesc) {
481 			dev_notice(mdma->dma.dev, "Memory allocation error. "
482 					"Allocated only %u descriptors\n", i);
483 			break;
484 		}
485 
486 		dma_async_tx_descriptor_init(&mdesc->desc, chan);
487 		mdesc->desc.flags = DMA_CTRL_ACK;
488 		mdesc->desc.tx_submit = mpc_dma_tx_submit;
489 
490 		mdesc->tcd = &tcd[i];
491 		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));
492 
493 		list_add_tail(&mdesc->node, &descs);
494 	}
495 
496 	/* Return error only if no descriptors were allocated */
497 	if (i == 0) {
498 		dma_free_coherent(mdma->dma.dev,
499 			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
500 								tcd, tcd_paddr);
501 		return -ENOMEM;
502 	}
503 
504 	spin_lock_irqsave(&mchan->lock, flags);
505 	mchan->tcd = tcd;
506 	mchan->tcd_paddr = tcd_paddr;
507 	list_splice_tail_init(&descs, &mchan->free);
508 	spin_unlock_irqrestore(&mchan->lock, flags);
509 
510 	/* Enable Error Interrupt */
511 	out_8(&mdma->regs->dmaseei, chan->chan_id);
512 
513 	return 0;
514 }
515 
516 /* Free channel resources */
517 static void mpc_dma_free_chan_resources(struct dma_chan *chan)
518 {
519 	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
520 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
521 	struct mpc_dma_desc *mdesc, *tmp;
522 	struct mpc_dma_tcd *tcd;
523 	dma_addr_t tcd_paddr;
524 	unsigned long flags;
525 	LIST_HEAD(descs);
526 
527 	spin_lock_irqsave(&mchan->lock, flags);
528 
529 	/* Channel must be idle */
530 	BUG_ON(!list_empty(&mchan->prepared));
531 	BUG_ON(!list_empty(&mchan->queued));
532 	BUG_ON(!list_empty(&mchan->active));
533 	BUG_ON(!list_empty(&mchan->completed));
534 
535 	/* Move data */
536 	list_splice_tail_init(&mchan->free, &descs);
537 	tcd = mchan->tcd;
538 	tcd_paddr = mchan->tcd_paddr;
539 
540 	spin_unlock_irqrestore(&mchan->lock, flags);
541 
542 	/* Free DMA memory used by descriptors */
543 	dma_free_coherent(mdma->dma.dev,
544 			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
545 								tcd, tcd_paddr);
546 
547 	/* Free descriptors */
548 	list_for_each_entry_safe(mdesc, tmp, &descs, node)
549 		kfree(mdesc);
550 
551 	/* Disable Error Interrupt */
552 	out_8(&mdma->regs->dmaceei, chan->chan_id);
553 }
554 
555 /* Send all pending descriptor to hardware */
556 static void mpc_dma_issue_pending(struct dma_chan *chan)
557 {
558 	/*
559 	 * We are posting descriptors to the hardware as soon as
560 	 * they are ready, so this function does nothing.
561 	 */
562 }
563 
564 /* Check request completion status */
565 static enum dma_status
566 mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
567 	       struct dma_tx_state *txstate)
568 {
569 	return dma_cookie_status(chan, cookie, txstate);
570 }
571 
572 /* Prepare descriptor for memory to memory copy */
573 static struct dma_async_tx_descriptor *
574 mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
575 					size_t len, unsigned long flags)
576 {
577 	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
578 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
579 	struct mpc_dma_desc *mdesc = NULL;
580 	struct mpc_dma_tcd *tcd;
581 	unsigned long iflags;
582 
583 	/* Get free descriptor */
584 	spin_lock_irqsave(&mchan->lock, iflags);
585 	if (!list_empty(&mchan->free)) {
586 		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
587 									node);
588 		list_del(&mdesc->node);
589 	}
590 	spin_unlock_irqrestore(&mchan->lock, iflags);
591 
592 	if (!mdesc) {
593 		/* try to free completed descriptors */
594 		mpc_dma_process_completed(mdma);
595 		return NULL;
596 	}
597 
598 	mdesc->error = 0;
599 	tcd = mdesc->tcd;
600 
601 	/* Prepare Transfer Control Descriptor for this transaction */
602 	memset(tcd, 0, sizeof(struct mpc_dma_tcd));
603 
604 	if (IS_ALIGNED(src | dst | len, 32)) {
605 		tcd->ssize = MPC_DMA_TSIZE_32;
606 		tcd->dsize = MPC_DMA_TSIZE_32;
607 		tcd->soff = 32;
608 		tcd->doff = 32;
609 	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
610 		/* MPC8308 doesn't support 16 byte transfers */
611 		tcd->ssize = MPC_DMA_TSIZE_16;
612 		tcd->dsize = MPC_DMA_TSIZE_16;
613 		tcd->soff = 16;
614 		tcd->doff = 16;
615 	} else if (IS_ALIGNED(src | dst | len, 4)) {
616 		tcd->ssize = MPC_DMA_TSIZE_4;
617 		tcd->dsize = MPC_DMA_TSIZE_4;
618 		tcd->soff = 4;
619 		tcd->doff = 4;
620 	} else if (IS_ALIGNED(src | dst | len, 2)) {
621 		tcd->ssize = MPC_DMA_TSIZE_2;
622 		tcd->dsize = MPC_DMA_TSIZE_2;
623 		tcd->soff = 2;
624 		tcd->doff = 2;
625 	} else {
626 		tcd->ssize = MPC_DMA_TSIZE_1;
627 		tcd->dsize = MPC_DMA_TSIZE_1;
628 		tcd->soff = 1;
629 		tcd->doff = 1;
630 	}
631 
632 	tcd->saddr = src;
633 	tcd->daddr = dst;
634 	tcd->nbytes = len;
635 	tcd->biter = 1;
636 	tcd->citer = 1;
637 
638 	/* Place descriptor in prepared list */
639 	spin_lock_irqsave(&mchan->lock, iflags);
640 	list_add_tail(&mdesc->node, &mchan->prepared);
641 	spin_unlock_irqrestore(&mchan->lock, iflags);
642 
643 	return &mdesc->desc;
644 }
645 
646 static int mpc_dma_probe(struct platform_device *op)
647 {
648 	struct device_node *dn = op->dev.of_node;
649 	struct device *dev = &op->dev;
650 	struct dma_device *dma;
651 	struct mpc_dma *mdma;
652 	struct mpc_dma_chan *mchan;
653 	struct resource res;
654 	ulong regs_start, regs_size;
655 	int retval, i;
656 
657 	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
658 	if (!mdma) {
659 		dev_err(dev, "Memory exhausted!\n");
660 		return -ENOMEM;
661 	}
662 
663 	mdma->irq = irq_of_parse_and_map(dn, 0);
664 	if (mdma->irq == NO_IRQ) {
665 		dev_err(dev, "Error mapping IRQ!\n");
666 		return -EINVAL;
667 	}
668 
669 	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
670 		mdma->is_mpc8308 = 1;
671 		mdma->irq2 = irq_of_parse_and_map(dn, 1);
672 		if (mdma->irq2 == NO_IRQ) {
673 			dev_err(dev, "Error mapping IRQ!\n");
674 			return -EINVAL;
675 		}
676 	}
677 
678 	retval = of_address_to_resource(dn, 0, &res);
679 	if (retval) {
680 		dev_err(dev, "Error parsing memory region!\n");
681 		return retval;
682 	}
683 
684 	regs_start = res.start;
685 	regs_size = resource_size(&res);
686 
687 	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
688 		dev_err(dev, "Error requesting memory region!\n");
689 		return -EBUSY;
690 	}
691 
692 	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
693 	if (!mdma->regs) {
694 		dev_err(dev, "Error mapping memory region!\n");
695 		return -ENOMEM;
696 	}
697 
698 	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
699 							+ MPC_DMA_TCD_OFFSET);
700 
701 	retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
702 									mdma);
703 	if (retval) {
704 		dev_err(dev, "Error requesting IRQ!\n");
705 		return -EINVAL;
706 	}
707 
708 	if (mdma->is_mpc8308) {
709 		retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
710 				DRV_NAME, mdma);
711 		if (retval) {
712 			dev_err(dev, "Error requesting IRQ2!\n");
713 			return -EINVAL;
714 		}
715 	}
716 
717 	spin_lock_init(&mdma->error_status_lock);
718 
719 	dma = &mdma->dma;
720 	dma->dev = dev;
721 	if (mdma->is_mpc8308)
722 		dma->chancnt = MPC8308_DMACHAN_MAX;
723 	else
724 		dma->chancnt = MPC512x_DMACHAN_MAX;
725 	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
726 	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
727 	dma->device_issue_pending = mpc_dma_issue_pending;
728 	dma->device_tx_status = mpc_dma_tx_status;
729 	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
730 
731 	INIT_LIST_HEAD(&dma->channels);
732 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
733 
734 	for (i = 0; i < dma->chancnt; i++) {
735 		mchan = &mdma->channels[i];
736 
737 		mchan->chan.device = dma;
738 		dma_cookie_init(&mchan->chan);
739 
740 		INIT_LIST_HEAD(&mchan->free);
741 		INIT_LIST_HEAD(&mchan->prepared);
742 		INIT_LIST_HEAD(&mchan->queued);
743 		INIT_LIST_HEAD(&mchan->active);
744 		INIT_LIST_HEAD(&mchan->completed);
745 
746 		spin_lock_init(&mchan->lock);
747 		list_add_tail(&mchan->chan.device_node, &dma->channels);
748 	}
749 
750 	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
751 
752 	/*
753 	 * Configure DMA Engine:
754 	 * - Dynamic clock,
755 	 * - Round-robin group arbitration,
756 	 * - Round-robin channel arbitration.
757 	 */
758 	if (mdma->is_mpc8308) {
759 		/* MPC8308 has 16 channels and lacks some registers */
760 		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
761 
762 		/* enable snooping */
763 		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
764 		/* Disable error interrupts */
765 		out_be32(&mdma->regs->dmaeeil, 0);
766 
767 		/* Clear interrupts status */
768 		out_be32(&mdma->regs->dmaintl, 0xFFFF);
769 		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
770 	} else {
771 		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
772 					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
773 
774 		/* Disable hardware DMA requests */
775 		out_be32(&mdma->regs->dmaerqh, 0);
776 		out_be32(&mdma->regs->dmaerql, 0);
777 
778 		/* Disable error interrupts */
779 		out_be32(&mdma->regs->dmaeeih, 0);
780 		out_be32(&mdma->regs->dmaeeil, 0);
781 
782 		/* Clear interrupts status */
783 		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
784 		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
785 		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
786 		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
787 
788 		/* Route interrupts to IPIC */
789 		out_be32(&mdma->regs->dmaihsa, 0);
790 		out_be32(&mdma->regs->dmailsa, 0);
791 	}
792 
793 	/* Register DMA engine */
794 	dev_set_drvdata(dev, mdma);
795 	retval = dma_async_device_register(dma);
796 	if (retval) {
797 		devm_free_irq(dev, mdma->irq, mdma);
798 		irq_dispose_mapping(mdma->irq);
799 	}
800 
801 	return retval;
802 }
803 
804 static int mpc_dma_remove(struct platform_device *op)
805 {
806 	struct device *dev = &op->dev;
807 	struct mpc_dma *mdma = dev_get_drvdata(dev);
808 
809 	dma_async_device_unregister(&mdma->dma);
810 	devm_free_irq(dev, mdma->irq, mdma);
811 	irq_dispose_mapping(mdma->irq);
812 
813 	return 0;
814 }
815 
816 static struct of_device_id mpc_dma_match[] = {
817 	{ .compatible = "fsl,mpc5121-dma", },
818 	{},
819 };
820 
821 static struct platform_driver mpc_dma_driver = {
822 	.probe		= mpc_dma_probe,
823 	.remove		= mpc_dma_remove,
824 	.driver = {
825 		.name = DRV_NAME,
826 		.owner = THIS_MODULE,
827 		.of_match_table	= mpc_dma_match,
828 	},
829 };
830 
831 module_platform_driver(mpc_dma_driver);
832 
833 MODULE_LICENSE("GPL");
834 MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
835