/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA size is 16 MB.
 */

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "shdma.h"

/* DMA registers */
#define SAR	0x00
#define DAR	0x04
#define TCR	0x08
#define CHCR	0x0C
#define DMAOR	0x40

#define TEND	0x18 /* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}
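
/*
 * The CHCR register location differs between DMAC variants: its offset is
 * taken from platform data at probe time (shdev->chcr_offset, CHCR by
 * default).
 */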
static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}
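
/*
 * The CHCR TS (transfer size) field is split into low and high parts at
 * DMAC-specific positions described by platform data. calc_xmit_shift()
 * decodes the TS field of a CHCR value into log2(transfer size in bytes);
 * log2size_to_chcr() is the inverse encoding.
 */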
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
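
/*
 * Program one hardware descriptor. Note that TCR counts transfer units, not
 * bytes, hence the conversion by xmit_shift.
 */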
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}
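
/*
 * Set the 8-bit MID/RID value for a slave channel: read-modify-write the
 * 16-bit DMARS word, touching only the byte selected by dmars_bit.
 */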
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}

/*
 * Find a slave channel configuration from the controller list by either a
 * slave ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}
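
/*
 * Called by the shdma-base core to bind a channel to a slave. With try == true
 * only verify that a matching slave configuration exists; don't apply it yet.
 */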
static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}
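
/*
 * Partial transfer size on early termination: the programmed byte count minus
 * what is still pending in TCR, converted from transfer units back to bytes.
 */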
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so we have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}
#endif
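
/*
 * A descriptor has completed once the hardware address pointer has advanced
 * past its end: check DAR for DMA_DEV_TO_MEM, SAR for all other directions.
 */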
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
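
	/* Register the channel with the shdma-base core */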
	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}
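
/* Re-initialize DMAOR and restore each configured channel's DMARS and CHCR */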
static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

static const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->slave_addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}
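
/* Callbacks used by the shdma-base core to drive this DMAC */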
static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

static const struct of_device_id sh_dmae_of_match[] = {
	{.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
	{}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static int sh_dmae_probe(struct platform_device *pdev)
{
	const struct sh_dmae_pdata *pdata;
	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int chan_irq[SH_DMAE_MAX_CHANNELS];
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
	unsigned long irqflags = 0;
	int errirq;
#endif
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
	else
		pdata = dev_get_platdata(&pdev->dev);

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;
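
	/* CHCR offset and interrupt-enable bit are variant-specific, with defaults */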
	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARM */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
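		/*
		 * Walk all remaining IRQ resources, expanding ranges into
		 * per-channel IRQs (see case 2 above).
		 */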
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = 0;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev,
			   "Attempting to register %d DMA channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	synchronize_rcu();

	return 0;
}

static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
	.shutdown	= sh_dmae_shutdown,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);