/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "../dmaengine.h"
#include "shdma.h"

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER	256
#define SH_DMA_TCR_MAX		(16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

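/*
 * Register access helpers: DMAOR can be 16- or 32-bit wide depending on the
 * SoC (pdata->dmaor_is_32bit), and CHCR may sit at a non-default offset
 * (shdev->chcr_offset), so all accesses go through these wrappers.
 */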
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

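/*
 * The CHCR TS (transfer size) field can be split into a low and a high part
 * at SoC-specific positions, described by ts_low_mask/ts_low_shift and
 * ts_high_mask/ts_high_shift in the platform data. ts_shift[] maps the
 * combined encoding to log2(bytes per transfer unit): the default encoding
 * LOG2_DEFAULT_XFER_SIZE selects 2^2 = 4-byte units.
 */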
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

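/*
 * TCR holds the transfer count in transfer units, not in bytes, so the byte
 * length in hw->tcr has to be scaled down by the channel's current
 * transfer-size shift before it is written to the register.
 */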
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

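/*
 * DMARS registers route peripheral DMA requests to channels: each 16-bit
 * DMARS word holds two 8-bit MID/RID fields, selected by dmars_bit (0 or 8),
 * which is why the read-modify-write below preserves the other channel's
 * byte.
 */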
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}

/*
 * Find a slave channel configuration from the controller list by either a
 * slave ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}

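/*
 * With try == true only verify that a matching slave configuration exists,
 * without binding the channel to it; the configuration and the slave address
 * are only stored on a real (non-try) request.
 */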
static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}

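/*
 * Residue of an aborted transfer: hw.tcr caches the requested length in
 * bytes, while the TCR register counts down the remaining transfer units,
 * so the register value has to be scaled back to bytes before subtracting.
 */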
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}

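/*
 * A descriptor has completed once the respective address register has
 * advanced past the descriptor's end address: DAR for device-to-memory
 * transfers, SAR otherwise.
 */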
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

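/*
 * Runtime PM: nothing has to be done on suspend, but after a runtime resume
 * the controller may come out of a powered-down domain with DMAOR cleared,
 * so it is re-initialized here.
 */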
static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->slave_addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

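/*
 * Controller-specific callbacks wired into the shared shdma-base core, which
 * implements the generic dmaengine logic on top of them.
 */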
static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

static const struct of_device_id sh_dmae_of_match[] = {
	{}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static int sh_dmae_probe(struct platform_device *pdev)
{
	const struct sh_dmae_pdata *pdata;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
	else
		pdata = pdev->dev.platform_data;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

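	/*
	 * Hold an active runtime PM reference for the rest of probe(): the
	 * controller reset and channel setup below need the device powered.
	 * The reference is dropped again once the channels are registered.
	 */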
	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);
	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	/*
	 * The error IRQ was requested with devm_request_irq() and is freed
	 * automatically on device teardown, no explicit free_irq() here -
	 * that would lead to a double free.
	 */

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();

	return 0;
}

static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
	.shutdown	= sh_dmae_shutdown,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);