/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "../dmaengine.h"
#include "shdma.h"

/* DMA registers */
#define SAR	0x00
#define DAR	0x04
#define TCR	0x08
#define CHCR	0x0C
#define DMAOR	0x40

#define TEND	0x18 /* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER	256
#define SH_DMA_TCR_MAX		(16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit,
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
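
/*
 * The two helpers above translate between CHCR's transfer-size (TS) field
 * and log2(transfer size in bytes): the TS bits may be split into low and
 * high groups (ts_low_mask/ts_low_shift and ts_high_mask/ts_high_shift in
 * the platform data), and the reassembled index selects an entry of
 * ts_shift[]. Unknown encodings fall back to entry 0.
 */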

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
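
/*
 * Note on the DMARS update above: a 16-bit DMARS register carries the
 * MID/RID values of two channels, one per byte, with dmars_bit selecting
 * the byte (shift 0 or 8). The read-modify-write with the (0xff00 >> shift)
 * mask keeps the other channel's byte intact while installing the new value.
 */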

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}

/*
 * Find a slave channel configuration from the controller list by either a
 * slave ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}

static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}

static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}
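
/*
 * Note on sh_dmae_get_partial() above: the TCR register counts remaining
 * transfer units, so shifting it left by xmit_shift converts it back to
 * bytes; subtracting that from the programmed length (hw.tcr is kept in
 * bytes) gives the number of bytes already transferred when a transfer is
 * aborted early.
 */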

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so we have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}
#endif

static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}
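
/*
 * Completion check above: while a channel runs, the address register on the
 * incrementing memory side (DAR for DEV_TO_MEM, SAR otherwise) advances, so
 * a descriptor is considered complete once that register equals the
 * descriptor's end address (start + length).
 */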

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};
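
/*
 * Note: max_xfer_len set in the channel probe below is SH_DMA_TCR_MAX + 1
 * bytes (16 MiB), which is where the "MAX DMA size is 16MB" limit from the
 * file header comes from: one hardware descriptor cannot cover more.
 */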

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}
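
/*
 * System resume below reprograms every channel that still has descriptors
 * allocated: slave channels get their DMARS/CHCR configuration back, the
 * others are returned to the default memory-to-memory setup.
 */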

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->slave_addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

static const struct of_device_id sh_dmae_of_match[] = {
	{.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
	{}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static int sh_dmae_probe(struct platform_device *pdev)
{
	const struct sh_dmae_pdata *pdata;
	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int chan_irq[SH_DMAE_MAX_CHANNELS];
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
	unsigned long irqflags = 0;
	int errirq;
#endif
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
	else
		pdata = dev_get_platdata(&pdev->dev);

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 * (an illustrative, made-up resource layout follows below)
	 */
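
	/*
	 * For illustration only: a hypothetical platform device (names and
	 * numbers are invented, not taken from any real board file) with a
	 * dedicated error IRQ and a range resource covering two channel IRQs
	 * could be described like this:
	 *
	 *	static struct resource dmac_resources[] = {
	 *		DEFINE_RES_MEM(0xfe008020, 0x270),	// channel registers
	 *		DEFINE_RES_IRQ(34),			// error IRQ (rule 1)
	 *		DEFINE_RES_NAMED(35, 2, NULL,
	 *				 IORESOURCE_IRQ),	// channels 0..1 (rule 2)
	 *	};
	 *
	 * With a single IRQ resource instead, every channel would share that
	 * one interrupt, per rule 3.
	 */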

	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = 0;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	synchronize_rcu();

	return 0;
}

static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
	.shutdown	= sh_dmae_shutdown,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);