/*
 * arch/sh/drivers/dma/dma-sh.c
 *
 * SuperH On-chip DMAC Support
 *
 * Copyright (C) 2000 Takashi YOSHII
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2005 Andriy Skulysh
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <mach-dreamcast/mach/dma.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/dma-sh.h>

#if defined(DMAE1_IRQ)
#define NR_DMAE		2
#else
#define NR_DMAE		1
#endif

static const char *dmae_name[] = {
	"DMAC Address Error0", "DMAC Address Error1"
};

static inline unsigned int get_dmte_irq(unsigned int chan)
{
	unsigned int irq = 0;

	if (chan < ARRAY_SIZE(dmte_irq_map))
		irq = dmte_irq_map[chan];

#if defined(CONFIG_SH_DMA_IRQ_MULTI)
	if (irq > DMTE6_IRQ)
		return DMTE6_IRQ;
	return DMTE0_IRQ;
#else
	return irq;
#endif
}

/*
 * We determine the correct shift size based on the CHCR transmit size
 * for the given channel, since we know that the transfer will take:
 *
 *	info->count >> ts_shift[transmit_size]
 *
 * iterations to complete.
 */
static unsigned int ts_shift[] = TS_SHIFT;

static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
	u32 chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);

	return ts_shift[cnt];
}
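
/*
 * Worked example (the TS encoding here is hypothetical; the real mapping
 * comes from the CPU-specific TS_SHIFT table): if the channel's CHCR is
 * programmed for 32-byte transfer units and that encoding maps to a shift
 * of 5, a 64 KiB transfer programs TCR with 0x10000 >> 5 == 0x800, i.e.
 * 2048 transfer units of 32 bytes each.
 */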

/*
 * The transfer end interrupt must read the CHCR register to end the
 * hardware interrupt active condition.
 * It also needs to wake any waiting process, which should handle
 * setting up the next transfer.
 */
static irqreturn_t dma_tei(int irq, void *dev_id)
{
	struct dma_channel *chan = dev_id;
	u32 chcr;

	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);

	if (!(chcr & CHCR_TE))
		return IRQ_NONE;

	chcr &= ~(CHCR_IE | CHCR_DE);
	__raw_writel(chcr, dma_base_addr[chan->chan] + CHCR);

	wake_up(&chan->wait_queue);

	return IRQ_HANDLED;
}

static int sh_dmac_request_dma(struct dma_channel *chan)
{
	if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
		return 0;

	return request_irq(get_dmte_irq(chan->chan), dma_tei,
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
			   IRQF_SHARED,
#else
			   IRQF_DISABLED,
#endif
			   chan->dev_id, chan);
}

static void sh_dmac_free_dma(struct dma_channel *chan)
{
	free_irq(get_dmte_irq(chan->chan), chan);
}

static int
sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
{
	if (!chcr)
		chcr = RS_DUAL | CHCR_IE;

	if (chcr & CHCR_IE) {
		chcr &= ~CHCR_IE;
		chan->flags |= DMA_TEI_CAPABLE;
	} else {
		chan->flags &= ~DMA_TEI_CAPABLE;
	}

	__raw_writel(chcr, dma_base_addr[chan->chan] + CHCR);

	chan->flags |= DMA_CONFIGURED;
	return 0;
}

static void sh_dmac_enable_dma(struct dma_channel *chan)
{
	int irq;
	u32 chcr;

	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
	chcr |= CHCR_DE;

	if (chan->flags & DMA_TEI_CAPABLE)
		chcr |= CHCR_IE;

	__raw_writel(chcr, dma_base_addr[chan->chan] + CHCR);

	if (chan->flags & DMA_TEI_CAPABLE) {
		irq = get_dmte_irq(chan->chan);
		enable_irq(irq);
	}
}

static void sh_dmac_disable_dma(struct dma_channel *chan)
{
	int irq;
	u32 chcr;

	if (chan->flags & DMA_TEI_CAPABLE) {
		irq = get_dmte_irq(chan->chan);
		disable_irq(irq);
	}

	chcr = __raw_readl(dma_base_addr[chan->chan] + CHCR);
	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	__raw_writel(chcr, dma_base_addr[chan->chan] + CHCR);
}

static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
	/*
	 * If we haven't pre-configured the channel with special flags, use
	 * the defaults.
	 */
	if (unlikely(!(chan->flags & DMA_CONFIGURED)))
		sh_dmac_configure_channel(chan, 0);

	sh_dmac_disable_dma(chan);

	/*
	 * Single-address mode usage note!
	 *
	 * It's important that we don't accidentally write any value to
	 * SAR/DAR (this includes 0) that hasn't been directly specified by
	 * the user if we're in single-address mode.
	 *
	 * In this case, only one address can be defined, anything else will
	 * result in a DMA address error interrupt (at least on the SH-4),
	 * which will subsequently halt the transfer.
	 *
	 * Channel 2 on the Dreamcast is a special case, as this is used for
	 * cascading to the PVR2 DMAC. In this case, we still need to write
	 * SAR and DAR, regardless of value, in order for cascading to work.
	 */
	if (chan->sar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		__raw_writel(chan->sar, dma_base_addr[chan->chan] + SAR);
	if (chan->dar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		__raw_writel(chan->dar, dma_base_addr[chan->chan] + DAR);

	__raw_writel(chan->count >> calc_xmit_shift(chan),
		     dma_base_addr[chan->chan] + TCR);

	sh_dmac_enable_dma(chan);

	return 0;
}
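
/*
 * Rough usage sketch, for orientation only: clients normally reach these
 * ops through the generic wrapper layer in arch/sh/drivers/dma/dma-api.c
 * rather than calling them directly. The channel number, addresses, and
 * length below are hypothetical placeholders:
 *
 *	if (request_dma(chan, "mydev") == 0) {
 *		dma_configure_channel(chan, RS_DUAL | CHCR_IE);
 *		dma_write(chan, src_phys, dst_phys, len);
 *		dma_wait_for_completion(chan);
 *		free_dma(chan);
 *	}
 *
 * dma_write() reaches sh_dmac_xfer_dma() via the .xfer op, and, for
 * TEI-capable channels, dma_wait_for_completion() sleeps on
 * chan->wait_queue until dma_tei() wakes it.
 */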

static int sh_dmac_get_dma_residue(struct dma_channel *chan)
{
	if (!(__raw_readl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE))
		return 0;

	return __raw_readl(dma_base_addr[chan->chan] + TCR)
		<< calc_xmit_shift(chan);
}
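
/*
 * Residue example (numbers hypothetical): TCR counts remaining transfer
 * units rather than bytes, so the residue is scaled back up by the CHCR
 * transmit size. With a 32-byte unit (shift of 5) and TCR reading 0x100,
 * sh_dmac_get_dma_residue() reports 0x100 << 5 == 8 KiB outstanding.
 */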

static inline int dmaor_reset(int no)
{
	unsigned long dmaor = dmaor_read_reg(no);

	/* Try to clear the error flags first, in case they are set */
	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(no, dmaor);

	dmaor |= DMAOR_INIT;
	dmaor_write_reg(no, dmaor);

	/* See if we got an error again */
	if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) {
		printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}

	return 0;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t dma_err(int irq, void *dummy)
{
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
	int cnt = 0;

	switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
	case DMTE6_IRQ:
		cnt++;
		/* fall through, the second DMAC's DMAOR is at index 1 */
#endif
	case DMTE0_IRQ:
		if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
			disable_irq(irq);
			/* DMA multi and error IRQ */
			return IRQ_HANDLED;
		}
		/* fall through, no error flag set */
	default:
		return IRQ_NONE;
	}
#else
	dmaor_reset(0);
#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
    defined(CONFIG_CPU_SUBTYPE_SH7780) || \
    defined(CONFIG_CPU_SUBTYPE_SH7785)
	dmaor_reset(1);
#endif
	disable_irq(irq);

	return IRQ_HANDLED;
#endif
}
#endif

static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};

static struct dma_info sh_dmac_info = {
	.name		= "sh_dmac",
	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};

#ifdef CONFIG_CPU_SH4
static unsigned int get_dma_error_irq(int n)
{
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
	return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6);
#else
	return (n == 0) ? DMAE0_IRQ :
#if defined(DMAE1_IRQ)
			  DMAE1_IRQ;
#else
			  -1;
#endif
#endif
}
#endif

static int __init sh_dmac_init(void)
{
	struct dma_info *info = &sh_dmac_info;
	int i;

#ifdef CONFIG_CPU_SH4
	int n;

	for (n = 0; n < NR_DMAE; n++) {
		i = request_irq(get_dma_error_irq(n), dma_err,
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
				IRQF_SHARED,
#else
				IRQF_DISABLED,
#endif
				dmae_name[n], (void *)dmae_name[n]);
		if (unlikely(i < 0)) {
			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
			return i;
		}
	}
#endif /* CONFIG_CPU_SH4 */

	/*
	 * Initialize DMAOR, and clean up any error flags that may have
	 * been set.
	 */
	i = dmaor_reset(0);
	if (unlikely(i != 0))
		return i;
#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
    defined(CONFIG_CPU_SUBTYPE_SH7780) || \
    defined(CONFIG_CPU_SUBTYPE_SH7785)
	i = dmaor_reset(1);
	if (unlikely(i != 0))
		return i;
#endif

	return register_dmac(info);
}

static void __exit sh_dmac_exit(void)
{
#ifdef CONFIG_CPU_SH4
	int n;

	for (n = 0; n < NR_DMAE; n++)
		free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
#endif /* CONFIG_CPU_SH4 */
	unregister_dmac(&sh_dmac_info);
}

subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL");