xref: /openbmc/linux/drivers/dma/sh/shdmac.c (revision 4981c4dc194efb18f0e9a02f1b43e926f2f0d2bb)
1 /*
2  * Renesas SuperH DMA Engine support
3  *
4  * base is drivers/dma/flsdma.c
5  *
6  * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
7  * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
8  * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
9  * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
10  *
11  * This is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * - The SuperH DMAC has no hardware DMA chaining mode.
17  * - The maximum DMA transfer size is 16 MB.
18  *
19  */
20 
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/of.h>
24 #include <linux/of_device.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/dmaengine.h>
28 #include <linux/delay.h>
29 #include <linux/platform_device.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/sh_dma.h>
32 #include <linux/notifier.h>
33 #include <linux/kdebug.h>
34 #include <linux/spinlock.h>
35 #include <linux/rculist.h>
36 
37 #include "../dmaengine.h"
38 #include "shdma.h"
39 
40 #define SH_DMAE_DRV_NAME "sh-dma-engine"
41 
42 /* Default MEMCPY transfer size = 2^2 = 4 bytes */
43 #define LOG2_DEFAULT_XFER_SIZE	2
44 #define SH_DMA_SLAVE_NUMBER 256
45 #define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
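
/*
 * SH_DMA_TCR_MAX bounds a single hardware transfer: sh_dmae_chan_probe()
 * sets max_xfer_len to SH_DMA_TCR_MAX + 1 bytes (16 MB) and
 * sh_dmae_desc_setup() clamps every descriptor length to it.
 */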
46 
47 /*
48  * Used for write-side mutual exclusion for the global device list,
49  * read-side synchronization by way of RCU, and per-controller data.
50  */
51 static DEFINE_SPINLOCK(sh_dmae_lock);
52 static LIST_HEAD(sh_dmae_devices);
53 
54 /*
55  * Different DMAC implementations provide different ways to clear DMA channels:
56  * (1) none - no CHCLR registers are available
57  * (2) one CHCLR register per channel - 0 has to be written to it to clear
58  *     channel buffers
59  * (3) one CHCLR register shared by several channels - 1 has to be written
60  *     to the bit corresponding to the specific channel to reset it
61  */
62 static void channel_clear(struct sh_dmae_chan *sh_dc)
63 {
64 	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
65 	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
66 		sh_dc->shdma_chan.id;
67 	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
68 
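	/*
	 * Worked example (hypothetical values): with chclr_bitwise set and
	 * chclr_bit == 5 this writes 0x20 (bit 5) to a CHCLR register shared
	 * by several channels; on controllers with one CHCLR per channel it
	 * writes 0 to that channel's own register, as described above.
	 */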
69 	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
70 }
71 
72 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
73 {
74 	__raw_writel(data, sh_dc->base + reg);
75 }
76 
77 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
78 {
79 	return __raw_readl(sh_dc->base + reg);
80 }
81 
82 static u16 dmaor_read(struct sh_dmae_device *shdev)
83 {
84 	void __iomem *addr = shdev->chan_reg + DMAOR;
85 
86 	if (shdev->pdata->dmaor_is_32bit)
87 		return __raw_readl(addr);
88 	else
89 		return __raw_readw(addr);
90 }
91 
92 static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
93 {
94 	void __iomem *addr = shdev->chan_reg + DMAOR;
95 
96 	if (shdev->pdata->dmaor_is_32bit)
97 		__raw_writel(data, addr);
98 	else
99 		__raw_writew(data, addr);
100 }
101 
102 static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
103 {
104 	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
105 
106 	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
107 }
108 
109 static u32 chcr_read(struct sh_dmae_chan *sh_dc)
110 {
111 	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
112 
113 	return __raw_readl(sh_dc->base + shdev->chcr_offset);
114 }
115 
116 /*
117  * Reset DMA controller
118  *
119  * The SH7780 has two DMAOR registers
120  */
121 static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
122 {
123 	unsigned short dmaor;
124 	unsigned long flags;
125 
126 	spin_lock_irqsave(&sh_dmae_lock, flags);
127 
128 	dmaor = dmaor_read(shdev);
129 	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
130 
131 	spin_unlock_irqrestore(&sh_dmae_lock, flags);
132 }
133 
134 static int sh_dmae_rst(struct sh_dmae_device *shdev)
135 {
136 	unsigned short dmaor;
137 	unsigned long flags;
138 
139 	spin_lock_irqsave(&sh_dmae_lock, flags);
140 
141 	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
142 
143 	if (shdev->pdata->chclr_present) {
144 		int i;
145 		for (i = 0; i < shdev->pdata->channel_num; i++) {
146 			struct sh_dmae_chan *sh_chan = shdev->chan[i];
147 			if (sh_chan)
148 				channel_clear(sh_chan);
149 		}
150 	}
151 
152 	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
153 
154 	dmaor = dmaor_read(shdev);
155 
156 	spin_unlock_irqrestore(&sh_dmae_lock, flags);
157 
158 	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
159 		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
160 		return -EIO;
161 	}
162 	if (shdev->pdata->dmaor_init & ~dmaor)
163 		dev_warn(shdev->shdma_dev.dma_dev.dev,
164 			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
165 			 dmaor, shdev->pdata->dmaor_init);
166 	return 0;
167 }
168 
169 static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
170 {
171 	u32 chcr = chcr_read(sh_chan);
172 
173 	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
174 		return true; /* working */
175 
176 	return false; /* waiting */
177 }
178 
179 static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
180 {
181 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
182 	const struct sh_dmae_pdata *pdata = shdev->pdata;
183 	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
184 		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
185 
186 	if (cnt >= pdata->ts_shift_num)
187 		cnt = 0;
188 
189 	return pdata->ts_shift[cnt];
190 }
191 
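/*
 * Worked example for the two TS helpers (mask and shift values are per-SoC
 * and come from platform data, the numbers here are hypothetical): with
 * ts_low_mask = 0x18 and ts_low_shift = 3, a CHCR value whose bits 4:3 are
 * 0b10 gives cnt = 2 in calc_xmit_shift() above, i.e. a transfer unit of
 * 1 << ts_shift[2] bytes; log2size_to_chcr() below performs the inverse
 * mapping from a log2 transfer size back to the CHCR TS bits.
 */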
192 static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
193 {
194 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
195 	const struct sh_dmae_pdata *pdata = shdev->pdata;
196 	int i;
197 
198 	for (i = 0; i < pdata->ts_shift_num; i++)
199 		if (pdata->ts_shift[i] == l2size)
200 			break;
201 
202 	if (i == pdata->ts_shift_num)
203 		i = 0;
204 
205 	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
206 		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
207 }
208 
209 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
210 {
211 	sh_dmae_writel(sh_chan, hw->sar, SAR);
212 	sh_dmae_writel(sh_chan, hw->dar, DAR);
213 	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
214 }
215 
216 static void dmae_start(struct sh_dmae_chan *sh_chan)
217 {
218 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
219 	u32 chcr = chcr_read(sh_chan);
220 
221 	if (shdev->pdata->needs_tend_set)
222 		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
223 
224 	chcr |= CHCR_DE | shdev->chcr_ie_bit;
225 	chcr_write(sh_chan, chcr & ~CHCR_TE);
226 }
227 
228 static void dmae_init(struct sh_dmae_chan *sh_chan)
229 {
230 	/*
231 	 * Default configuration for dual address memory-memory transfer.
232 	 * 0x400 represents auto-request.
233 	 */
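	/*
	 * Note that CHCR_DE is deliberately not part of this value:
	 * dmae_start() sets it, together with the interrupt-enable bit, once
	 * a transfer is actually started.
	 */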
234 	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
235 						   LOG2_DEFAULT_XFER_SIZE);
236 	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
237 	chcr_write(sh_chan, chcr);
238 }
239 
240 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
241 {
242 	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
243 	if (dmae_is_busy(sh_chan))
244 		return -EBUSY;
245 
246 	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
247 	chcr_write(sh_chan, val);
248 
249 	return 0;
250 }
251 
252 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
253 {
254 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
255 	const struct sh_dmae_pdata *pdata = shdev->pdata;
256 	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
257 	void __iomem *addr = shdev->dmars;
258 	unsigned int shift = chan_pdata->dmars_bit;
259 
260 	if (dmae_is_busy(sh_chan))
261 		return -EBUSY;
262 
263 	if (pdata->no_dmars)
264 		return 0;
265 
266 	/* In case of a missing DMARS resource, use the first memory window */
267 	if (!addr)
268 		addr = shdev->chan_reg;
269 	addr += chan_pdata->dmars;
270 
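	/*
	 * A 16-bit DMARS register carries the MID/RID settings of two
	 * channels, one per byte: dmars_bit selects this channel's byte
	 * (0 or 8), so (0xff00 >> shift) preserves the neighbouring byte
	 * while the new MID/RID value is merged in.
	 */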
271 	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
272 		     addr);
273 
274 	return 0;
275 }
276 
277 static void sh_dmae_start_xfer(struct shdma_chan *schan,
278 			       struct shdma_desc *sdesc)
279 {
280 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
281 						    shdma_chan);
282 	struct sh_dmae_desc *sh_desc = container_of(sdesc,
283 					struct sh_dmae_desc, shdma_desc);
284 	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
285 		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
286 		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
287 	/* Program the transfer registers from the descriptor */
288 	dmae_set_reg(sh_chan, &sh_desc->hw);
289 	dmae_start(sh_chan);
290 }
291 
292 static bool sh_dmae_channel_busy(struct shdma_chan *schan)
293 {
294 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
295 						    shdma_chan);
296 	return dmae_is_busy(sh_chan);
297 }
298 
299 static void sh_dmae_setup_xfer(struct shdma_chan *schan,
300 			       int slave_id)
301 {
302 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
303 						    shdma_chan);
304 
305 	if (slave_id >= 0) {
306 		const struct sh_dmae_slave_config *cfg =
307 			sh_chan->config;
308 
309 		dmae_set_dmars(sh_chan, cfg->mid_rid);
310 		dmae_set_chcr(sh_chan, cfg->chcr);
311 	} else {
312 		dmae_init(sh_chan);
313 	}
314 }
315 
316 /*
317  * Find a slave channel configuration in the controller list, either by a slave
318  * ID in the non-DT case, or by a MID/RID value in the DT case
319  */
320 static const struct sh_dmae_slave_config *dmae_find_slave(
321 	struct sh_dmae_chan *sh_chan, int match)
322 {
323 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
324 	const struct sh_dmae_pdata *pdata = shdev->pdata;
325 	const struct sh_dmae_slave_config *cfg;
326 	int i;
327 
328 	if (!sh_chan->shdma_chan.dev->of_node) {
329 		if (match >= SH_DMA_SLAVE_NUMBER)
330 			return NULL;
331 
332 		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
333 			if (cfg->slave_id == match)
334 				return cfg;
335 	} else {
336 		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
337 			if (cfg->mid_rid == match) {
338 				sh_chan->shdma_chan.slave_id = i;
339 				return cfg;
340 			}
341 	}
342 
343 	return NULL;
344 }
345 
346 static int sh_dmae_set_slave(struct shdma_chan *schan,
347 			     int slave_id, dma_addr_t slave_addr, bool try)
348 {
349 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
350 						    shdma_chan);
351 	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
352 	if (!cfg)
353 		return -ENXIO;
354 
355 	if (!try) {
356 		sh_chan->config = cfg;
357 		sh_chan->slave_addr = slave_addr ? : cfg->addr;
358 	}
359 
360 	return 0;
361 }
362 
363 static void dmae_halt(struct sh_dmae_chan *sh_chan)
364 {
365 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
366 	u32 chcr = chcr_read(sh_chan);
367 
368 	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
369 	chcr_write(sh_chan, chcr);
370 }
371 
372 static int sh_dmae_desc_setup(struct shdma_chan *schan,
373 			      struct shdma_desc *sdesc,
374 			      dma_addr_t src, dma_addr_t dst, size_t *len)
375 {
376 	struct sh_dmae_desc *sh_desc = container_of(sdesc,
377 					struct sh_dmae_desc, shdma_desc);
378 
379 	if (*len > schan->max_xfer_len)
380 		*len = schan->max_xfer_len;
381 
382 	sh_desc->hw.sar = src;
383 	sh_desc->hw.dar = dst;
384 	sh_desc->hw.tcr = *len;
385 
386 	return 0;
387 }
388 
389 static void sh_dmae_halt(struct shdma_chan *schan)
390 {
391 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
392 						    shdma_chan);
393 	dmae_halt(sh_chan);
394 }
395 
396 static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
397 {
398 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
399 						    shdma_chan);
400 
401 	if (!(chcr_read(sh_chan) & CHCR_TE))
402 		return false;
403 
404 	/* DMA stop */
405 	dmae_halt(sh_chan);
406 
407 	return true;
408 }
409 
410 static size_t sh_dmae_get_partial(struct shdma_chan *schan,
411 				  struct shdma_desc *sdesc)
412 {
413 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
414 						    shdma_chan);
415 	struct sh_dmae_desc *sh_desc = container_of(sdesc,
416 					struct sh_dmae_desc, shdma_desc);
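	/*
	 * The TCR register counts in transfer units (dmae_set_reg() wrote
	 * hw.tcr >> xmit_shift), so shift it back up to bytes before
	 * comparing it with the requested length in hw.tcr.
	 */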
417 	return sh_desc->hw.tcr -
418 		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
419 }
420 
421 /* Called from error IRQ or NMI */
422 static bool sh_dmae_reset(struct sh_dmae_device *shdev)
423 {
424 	bool ret;
425 
426 	/* halt the dma controller */
427 	sh_dmae_ctl_stop(shdev);
428 
429 	/* We cannot detect which channel caused the error, so reset them all */
430 	ret = shdma_reset(&shdev->shdma_dev);
431 
432 	sh_dmae_rst(shdev);
433 
434 	return ret;
435 }
436 
437 static irqreturn_t sh_dmae_err(int irq, void *data)
438 {
439 	struct sh_dmae_device *shdev = data;
440 
441 	if (!(dmaor_read(shdev) & DMAOR_AE))
442 		return IRQ_NONE;
443 
444 	sh_dmae_reset(shdev);
445 	return IRQ_HANDLED;
446 }
447 
448 static bool sh_dmae_desc_completed(struct shdma_chan *schan,
449 				   struct shdma_desc *sdesc)
450 {
451 	struct sh_dmae_chan *sh_chan = container_of(schan,
452 					struct sh_dmae_chan, shdma_chan);
453 	struct sh_dmae_desc *sh_desc = container_of(sdesc,
454 					struct sh_dmae_desc, shdma_desc);
455 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
456 	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
457 
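	/*
	 * The engine advances SAR/DAR as it transfers: a descriptor is
	 * complete once the relevant address register has moved past the
	 * descriptor's start address by its full length - the destination
	 * side for DEV_TO_MEM transfers, the source side otherwise.
	 */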
458 	return	(sdesc->direction == DMA_DEV_TO_MEM &&
459 		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
460 		(sdesc->direction != DMA_DEV_TO_MEM &&
461 		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
462 }
463 
464 static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
465 {
466 	/* Fast path out if NMIF is not asserted for this controller */
467 	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
468 		return false;
469 
470 	return sh_dmae_reset(shdev);
471 }
472 
473 static int sh_dmae_nmi_handler(struct notifier_block *self,
474 			       unsigned long cmd, void *data)
475 {
476 	struct sh_dmae_device *shdev;
477 	int ret = NOTIFY_DONE;
478 	bool triggered;
479 
480 	/*
481 	 * Only concern ourselves with NMI events.
482 	 *
483 	 * Normally we would check the die chain value, but as this needs
484 	 * to be architecture independent, check for NMI context instead.
485 	 */
486 	if (!in_nmi())
487 		return NOTIFY_DONE;
488 
489 	rcu_read_lock();
490 	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
491 		/*
492 		 * Only stop if one of the controllers has NMIF asserted;
493 		 * we do not want to interfere with regular address error
494 		 * handling or NMI events that don't concern the DMACs.
495 		 */
496 		triggered = sh_dmae_nmi_notify(shdev);
497 		if (triggered)
498 			ret = NOTIFY_OK;
499 	}
500 	rcu_read_unlock();
501 
502 	return ret;
503 }
504 
505 static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
506 	.notifier_call	= sh_dmae_nmi_handler,
507 
508 	/* Run before NMI debug handler and KGDB */
509 	.priority	= 1,
510 };
511 
512 static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
513 					int irq, unsigned long flags)
514 {
515 	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
516 	struct shdma_dev *sdev = &shdev->shdma_dev;
517 	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
518 	struct sh_dmae_chan *sh_chan;
519 	struct shdma_chan *schan;
520 	int err;
521 
522 	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
523 			       GFP_KERNEL);
524 	if (!sh_chan) {
525 		dev_err(sdev->dma_dev.dev,
526 			"No free memory for allocating dma channels!\n");
527 		return -ENOMEM;
528 	}
529 
530 	schan = &sh_chan->shdma_chan;
531 	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
532 
533 	shdma_chan_probe(sdev, schan, id);
534 
535 	sh_chan->base = shdev->chan_reg + chan_pdata->offset;
536 
537 	/* set up channel irq */
538 	if (pdev->id >= 0)
539 		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
540 			 "sh-dmae%d.%d", pdev->id, id);
541 	else
542 		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
543 			 "sh-dma%d", id);
544 
545 	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
546 	if (err) {
547 		dev_err(sdev->dma_dev.dev,
548 			"DMA channel %d request_irq error %d\n",
549 			id, err);
550 		goto err_no_irq;
551 	}
552 
553 	shdev->chan[id] = sh_chan;
554 	return 0;
555 
556 err_no_irq:
557 	/* remove from dmaengine device node */
558 	shdma_chan_remove(schan);
559 	return err;
560 }
561 
562 static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
563 {
564 	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
565 	struct shdma_chan *schan;
566 	int i;
567 
568 	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
569 		BUG_ON(!schan);
570 
571 		shdma_chan_remove(schan);
572 	}
573 	dma_dev->chancnt = 0;
574 }
575 
576 static void sh_dmae_shutdown(struct platform_device *pdev)
577 {
578 	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
579 	sh_dmae_ctl_stop(shdev);
580 }
581 
582 static int sh_dmae_runtime_suspend(struct device *dev)
583 {
584 	return 0;
585 }
586 
587 static int sh_dmae_runtime_resume(struct device *dev)
588 {
589 	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
590 
591 	return sh_dmae_rst(shdev);
592 }
593 
594 #ifdef CONFIG_PM
595 static int sh_dmae_suspend(struct device *dev)
596 {
597 	return 0;
598 }
599 
600 static int sh_dmae_resume(struct device *dev)
601 {
602 	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
603 	int i, ret;
604 
605 	ret = sh_dmae_rst(shdev);
606 	if (ret < 0)
607 		dev_err(dev, "Failed to reset!\n");
608 
609 	for (i = 0; i < shdev->pdata->channel_num; i++) {
610 		struct sh_dmae_chan *sh_chan = shdev->chan[i];
611 
612 		if (!sh_chan->shdma_chan.desc_num)
613 			continue;
614 
615 		if (sh_chan->shdma_chan.slave_id >= 0) {
616 			const struct sh_dmae_slave_config *cfg = sh_chan->config;
617 			dmae_set_dmars(sh_chan, cfg->mid_rid);
618 			dmae_set_chcr(sh_chan, cfg->chcr);
619 		} else {
620 			dmae_init(sh_chan);
621 		}
622 	}
623 
624 	return 0;
625 }
626 #else
627 #define sh_dmae_suspend NULL
628 #define sh_dmae_resume NULL
629 #endif
630 
631 const struct dev_pm_ops sh_dmae_pm = {
632 	.suspend		= sh_dmae_suspend,
633 	.resume			= sh_dmae_resume,
634 	.runtime_suspend	= sh_dmae_runtime_suspend,
635 	.runtime_resume		= sh_dmae_runtime_resume,
636 };
637 
638 static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
639 {
640 	struct sh_dmae_chan *sh_chan = container_of(schan,
641 					struct sh_dmae_chan, shdma_chan);
642 
643 	/*
644 	 * Implicit BUG_ON(!sh_chan->config)
645 	 * This is an exclusive slave DMA operation, may only be called after a
646 	 * successful slave configuration.
647 	 */
648 	return sh_chan->slave_addr;
649 }
650 
651 static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
652 {
653 	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
654 }
655 
656 static const struct shdma_ops sh_dmae_shdma_ops = {
657 	.desc_completed = sh_dmae_desc_completed,
658 	.halt_channel = sh_dmae_halt,
659 	.channel_busy = sh_dmae_channel_busy,
660 	.slave_addr = sh_dmae_slave_addr,
661 	.desc_setup = sh_dmae_desc_setup,
662 	.set_slave = sh_dmae_set_slave,
663 	.setup_xfer = sh_dmae_setup_xfer,
664 	.start_xfer = sh_dmae_start_xfer,
665 	.embedded_desc = sh_dmae_embedded_desc,
666 	.chan_irq = sh_dmae_chan_irq,
667 	.get_partial = sh_dmae_get_partial,
668 };
669 
670 static const struct of_device_id sh_dmae_of_match[] = {
671 	{}
672 };
673 MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
674 
675 static int sh_dmae_probe(struct platform_device *pdev)
676 {
677 	const struct sh_dmae_pdata *pdata;
678 	unsigned long irqflags = IRQF_DISABLED,
679 		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
680 	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
681 	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
682 	struct sh_dmae_device *shdev;
683 	struct dma_device *dma_dev;
684 	struct resource *chan, *dmars, *errirq_res, *chanirq_res;
685 
686 	if (pdev->dev.of_node)
687 		pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
688 	else
689 		pdata = pdev->dev.platform_data;
690 
691 	/* get platform data */
692 	if (!pdata || !pdata->channel_num)
693 		return -ENODEV;
694 
695 	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
696 	/* DMARS area is optional */
697 	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
698 	/*
699 	 * IRQ resources:
700 	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
701 	 *    the error IRQ, in which case it is the only IRQ in this resource:
702 	 *    start == end. If it is the only IRQ resource, all channels also
703 	 *    use the same IRQ.
704 	 * 2. DMA channel IRQ resources can be specified one per resource or in
705 	 *    ranges (start != end)
706 	 * 3. iff all events (channels and, optionally, error) on this
707 	 *    controller use the same IRQ, only one IRQ resource can be
708 	 *    specified, otherwise there must be one IRQ per channel, even if
709 	 *    some of them are equal
710 	 * 4. if all IRQs on this controller are equal or if some specific IRQs
711 	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
712 	 *    requested with the IRQF_SHARED flag
713 	 */
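	/*
	 * Hypothetical example: a controller with 6 channels may provide one
	 * IRQ resource for the error interrupt plus a second resource
	 * spanning 6 consecutive channel IRQs (start != end); alternatively
	 * a single IRQ resource can cover the error interrupt and all
	 * channels, in which case every handler is requested with
	 * IRQF_SHARED.
	 */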
714 	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
715 	if (!chan || !errirq_res)
716 		return -ENODEV;
717 
718 	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
719 			     GFP_KERNEL);
720 	if (!shdev) {
721 		dev_err(&pdev->dev, "Not enough memory\n");
722 		return -ENOMEM;
723 	}
724 
725 	dma_dev = &shdev->shdma_dev.dma_dev;
726 
727 	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
728 	if (IS_ERR(shdev->chan_reg))
729 		return PTR_ERR(shdev->chan_reg);
730 	if (dmars) {
731 		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
732 		if (IS_ERR(shdev->dmars))
733 			return PTR_ERR(shdev->dmars);
734 	}
735 
736 	if (!pdata->slave_only)
737 		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
738 	if (pdata->slave && pdata->slave_num)
739 		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
740 
741 	/* The default transfer size of 4 bytes requires 4-byte alignment */
742 	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
743 
744 	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
745 	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
746 	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
747 			      pdata->channel_num);
748 	if (err < 0)
749 		goto eshdma;
750 
751 	/* platform data */
752 	shdev->pdata = pdata;
753 
754 	if (pdata->chcr_offset)
755 		shdev->chcr_offset = pdata->chcr_offset;
756 	else
757 		shdev->chcr_offset = CHCR;
758 
759 	if (pdata->chcr_ie_bit)
760 		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
761 	else
762 		shdev->chcr_ie_bit = CHCR_IE;
763 
764 	platform_set_drvdata(pdev, shdev);
765 
766 	pm_runtime_enable(&pdev->dev);
767 	err = pm_runtime_get_sync(&pdev->dev);
768 	if (err < 0)
769 		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
770 
771 	spin_lock_irq(&sh_dmae_lock);
772 	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
773 	spin_unlock_irq(&sh_dmae_lock);
774 
775 	/* reset dma controller - only needed as a test */
776 	err = sh_dmae_rst(shdev);
777 	if (err)
778 		goto rst_err;
779 
780 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
781 	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
782 
783 	if (!chanirq_res)
784 		chanirq_res = errirq_res;
785 	else
786 		irqres++;
787 
788 	if (chanirq_res == errirq_res ||
789 	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
790 		irqflags = IRQF_SHARED;
791 
792 	errirq = errirq_res->start;
793 
794 	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
795 			       "DMAC Address Error", shdev);
796 	if (err) {
797 		dev_err(&pdev->dev,
798 			"DMA failed requesting irq #%d, error %d\n",
799 			errirq, err);
800 		goto eirq_err;
801 	}
802 
803 #else
804 	chanirq_res = errirq_res;
805 #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
806 
807 	if (chanirq_res->start == chanirq_res->end &&
808 	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
809 		/* Special case - all multiplexed */
810 		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
811 			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
812 				chan_irq[irq_cnt] = chanirq_res->start;
813 				chan_flag[irq_cnt] = IRQF_SHARED;
814 			} else {
815 				irq_cap = 1;
816 				break;
817 			}
818 		}
819 	} else {
820 		do {
821 			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
822 				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
823 					irq_cap = 1;
824 					break;
825 				}
826 
827 				if ((errirq_res->flags & IORESOURCE_BITS) ==
828 				    IORESOURCE_IRQ_SHAREABLE)
829 					chan_flag[irq_cnt] = IRQF_SHARED;
830 				else
831 					chan_flag[irq_cnt] = IRQF_DISABLED;
832 				dev_dbg(&pdev->dev,
833 					"Found IRQ %d for channel %d\n",
834 					i, irq_cnt);
835 				chan_irq[irq_cnt++] = i;
836 			}
837 
838 			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
839 				break;
840 
841 			chanirq_res = platform_get_resource(pdev,
842 						IORESOURCE_IRQ, ++irqres);
843 		} while (irq_cnt < pdata->channel_num && chanirq_res);
844 	}
845 
846 	/* Create DMA Channel */
847 	for (i = 0; i < irq_cnt; i++) {
848 		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
849 		if (err)
850 			goto chan_probe_err;
851 	}
852 
853 	if (irq_cap)
854 		dev_notice(&pdev->dev, "Attempting to register %d DMA "
855 			   "channels when a maximum of %d are supported.\n",
856 			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);
857 
858 	pm_runtime_put(&pdev->dev);
859 
860 	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
861 	if (err < 0)
862 		goto edmadevreg;
863 
864 	return err;
865 
866 edmadevreg:
867 	pm_runtime_get(&pdev->dev);
868 
869 chan_probe_err:
870 	sh_dmae_chan_remove(shdev);
871 
872 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
873 eirq_err:
874 #endif
875 rst_err:
876 	spin_lock_irq(&sh_dmae_lock);
877 	list_del_rcu(&shdev->node);
878 	spin_unlock_irq(&sh_dmae_lock);
879 
880 	pm_runtime_put(&pdev->dev);
881 	pm_runtime_disable(&pdev->dev);
882 
883 	platform_set_drvdata(pdev, NULL);
884 	shdma_cleanup(&shdev->shdma_dev);
885 eshdma:
886 	synchronize_rcu();
887 
888 	return err;
889 }
890 
891 static int sh_dmae_remove(struct platform_device *pdev)
892 {
893 	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
894 	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
895 	struct resource *res;
896 	int errirq = platform_get_irq(pdev, 0);
897 
898 	dma_async_device_unregister(dma_dev);
899 
900 	if (errirq > 0)
901 		free_irq(errirq, shdev);
902 
903 	spin_lock_irq(&sh_dmae_lock);
904 	list_del_rcu(&shdev->node);
905 	spin_unlock_irq(&sh_dmae_lock);
906 
907 	pm_runtime_disable(&pdev->dev);
908 
909 	sh_dmae_chan_remove(shdev);
910 	shdma_cleanup(&shdev->shdma_dev);
911 
912 	platform_set_drvdata(pdev, NULL);
913 
914 	synchronize_rcu();
915 
916 	return 0;
917 }
918 
919 static struct platform_driver sh_dmae_driver = {
920 	.driver 	= {
921 		.owner	= THIS_MODULE,
922 		.pm	= &sh_dmae_pm,
923 		.name	= SH_DMAE_DRV_NAME,
924 		.of_match_table = sh_dmae_of_match,
925 	},
926 	.remove		= sh_dmae_remove,
927 	.shutdown	= sh_dmae_shutdown,
928 };
929 
930 static int __init sh_dmae_init(void)
931 {
932 	/* Wire up NMI handling */
933 	int err = register_die_notifier(&sh_dmae_nmi_notifier);
934 	if (err)
935 		return err;
936 
937 	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
938 }
939 module_init(sh_dmae_init);
940 
941 static void __exit sh_dmae_exit(void)
942 {
943 	platform_driver_unregister(&sh_dmae_driver);
944 
945 	unregister_die_notifier(&sh_dmae_nmi_notifier);
946 }
947 module_exit(sh_dmae_exit);
948 
949 MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
950 MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
951 MODULE_LICENSE("GPL");
952 MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
953