/*
 *
 * BRIEF MODULE DESCRIPTION
 *	A DMA channel allocator for Au1x00. API is modeled loosely off of
 *	linux/kernel/dma.c.
 *
 * Copyright 2000, 2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1000_dma.h>

#if defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1500) || \
    defined(CONFIG_SOC_AU1100)
/*
 * A note on resource allocation:
 *
 * All drivers needing DMA channels should allocate and release them
 * through the public routines `request_au1000_dma()' and
 * `free_au1000_dma()'.
 *
 * In order to avoid problems, all processes should allocate resources in
 * the same sequence and release them in the reverse order.
 *
 * So, when allocating DMAs and IRQs, first allocate the DMA, then the IRQ.
 * When releasing them, first release the IRQ, then release the DMA. The
 * main reason for this order is that, if you are requesting the DMA buffer
 * done interrupt, you won't know the irq number until the DMA channel is
 * returned from request_au1000_dma().
 */
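/*
 * A minimal sketch of that ordering through the routines below, assuming a
 * hypothetical driver with a DMA-done handler "mydev_dma_done" and private
 * data "mydev"; device id 0 corresponds to UART0 TX in dma_dev_table below.
 * request_au1000_dma() reserves the channel first and only then requests
 * the IRQ; free_au1000_dma() frees the IRQ before releasing the channel,
 * so the recommended order falls out naturally:
 *
 *	int ch = request_au1000_dma(0, "mydev", mydev_dma_done, 0, mydev);
 *	if (ch < 0)
 *		return ch;	// no free channel, or request_irq() failed
 *	...
 *	free_au1000_dma(ch);	// free_irq() first, then the channel
 */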

DEFINE_SPINLOCK(au1000_dma_spin_lock);

struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
	{ .dev_id = -1, },
	{ .dev_id = -1, },
	{ .dev_id = -1, },
	{ .dev_id = -1, },
	{ .dev_id = -1, },
	{ .dev_id = -1, },
	{ .dev_id = -1, },
	{ .dev_id = -1, }
};
EXPORT_SYMBOL(au1000_dma_table);

/* Device FIFO addresses and default DMA modes */
static const struct dma_dev {
	unsigned int fifo_addr;
	unsigned int dma_mode;
} dma_dev_table[DMA_NUM_DEV] = {
	{ UART0_ADDR + UART_TX, 0 },
	{ UART0_ADDR + UART_RX, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ AC97C_DATA, DMA_DW16 },		/* coherent */
	{ AC97C_DATA, DMA_DR | DMA_DW16 },	/* coherent */
	{ UART3_ADDR + UART_TX, DMA_DW8 | DMA_NC },
	{ UART3_ADDR + UART_RX, DMA_DR | DMA_DW8 | DMA_NC },
	{ USBD_EP0RD, DMA_DR | DMA_DW8 | DMA_NC },
	{ USBD_EP0WR, DMA_DW8 | DMA_NC },
	{ USBD_EP2WR, DMA_DW8 | DMA_NC },
	{ USBD_EP3WR, DMA_DW8 | DMA_NC },
	{ USBD_EP4RD, DMA_DR | DMA_DW8 | DMA_NC },
	{ USBD_EP5RD, DMA_DR | DMA_DW8 | DMA_NC },
	{ I2S_DATA, DMA_DW32 | DMA_NC },
	{ I2S_DATA, DMA_DR | DMA_DW32 | DMA_NC }
};

int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
			 int length, int *eof, void *data)
{
	int i, len = 0;
	struct dma_chan *chan;

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) {
		chan = get_dma_chan(i);
		if (chan != NULL)
			len += sprintf(buf + len, "%2d: %s\n",
				       i, chan->dev_str);
	}

	if (fpos >= len) {
		*start = buf;
		*eof = 1;
		return 0;
	}
	*start = buf + fpos;
	len -= fpos;
	if (len > length)
		return length;
	*eof = 1;
	return len;
}

/* Device FIFO addresses and default DMA modes - 2nd bank */
static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
	{ SD0_XMIT_FIFO, DMA_DS | DMA_DW8 },		/* coherent */
	{ SD0_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 },	/* coherent */
	{ SD1_XMIT_FIFO, DMA_DS | DMA_DW8 },		/* coherent */
	{ SD1_RECV_FIFO, DMA_DS | DMA_DR | DMA_DW8 }	/* coherent */
};

void dump_au1000_dma_channel(unsigned int dmanr)
{
	struct dma_chan *chan;

	if (dmanr >= NUM_AU1000_DMA_CHANNELS)
		return;
	chan = &au1000_dma_table[dmanr];

	printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr);
	printk(KERN_INFO "  mode = 0x%08x\n",
	       au_readl(chan->io + DMA_MODE_SET));
	printk(KERN_INFO "  addr = 0x%08x\n",
	       au_readl(chan->io + DMA_PERIPHERAL_ADDR));
	printk(KERN_INFO "  start0 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER0_START));
	printk(KERN_INFO "  start1 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER1_START));
	printk(KERN_INFO "  count0 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER0_COUNT));
	printk(KERN_INFO "  count1 = 0x%08x\n",
	       au_readl(chan->io + DMA_BUFFER1_COUNT));
}
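/*
 * The au1000_dma_read_proc() handler above follows the legacy procfs
 * read_proc convention; the actual hook-up lives outside this file.  A
 * minimal registration sketch, assuming the legacy create_proc_read_entry()
 * interface and a hypothetical "au1000_dma" entry name:
 *
 *	create_proc_read_entry("au1000_dma", 0, NULL,
 *			       au1000_dma_read_proc, NULL);
 */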
/*
 * Finds a free channel, and binds the requested device to it.
 * Returns the allocated channel number, or negative on error.
 * Requests the DMA done IRQ if irqhandler != NULL.
 */
int request_au1000_dma(int dev_id, const char *dev_str,
		       irq_handler_t irqhandler,
		       unsigned long irqflags,
		       void *irq_dev_id)
{
	struct dma_chan *chan;
	const struct dma_dev *dev;
	int i, ret;

#if defined(CONFIG_SOC_AU1100)
	if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2))
		return -EINVAL;
#else
	if (dev_id < 0 || dev_id >= DMA_NUM_DEV)
		return -EINVAL;
#endif

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
		if (au1000_dma_table[i].dev_id < 0)
			break;

	if (i == NUM_AU1000_DMA_CHANNELS)
		return -ENODEV;

	chan = &au1000_dma_table[i];

	if (dev_id >= DMA_NUM_DEV) {
		dev_id -= DMA_NUM_DEV;
		dev = &dma_dev_table_bank2[dev_id];
	} else
		dev = &dma_dev_table[dev_id];

	if (irqhandler) {
		chan->irq = AU1000_DMA_INT_BASE + i;
		chan->irq_dev = irq_dev_id;
		ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
				  chan->irq_dev);
		if (ret) {
			chan->irq = 0;
			chan->irq_dev = NULL;
			return ret;
		}
	} else {
		chan->irq = 0;
		chan->irq_dev = NULL;
	}

	/* fill it in */
	chan->io = DMA_CHANNEL_BASE + i * DMA_CHANNEL_LEN;
	chan->dev_id = dev_id;
	chan->dev_str = dev_str;
	chan->fifo_addr = dev->fifo_addr;
	chan->mode = dev->dma_mode;

	/* initialize the channel before returning */
	init_dma(i);

	return i;
}
EXPORT_SYMBOL(request_au1000_dma);

void free_au1000_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan) {
		printk(KERN_ERR "Error trying to free DMA%d\n", dmanr);
		return;
	}

	disable_dma(dmanr);
	if (chan->irq)
		free_irq(chan->irq, chan->irq_dev);

	chan->irq = 0;
	chan->irq_dev = NULL;
	chan->dev_id = -1;
}
EXPORT_SYMBOL(free_au1000_dma);

#endif /* AU1000 AU1500 AU1100 */
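/*
 * End-to-end usage sketch of the interface exported above, assuming the
 * per-channel helpers declared in <asm/mach-au1x00/au1000_dma.h>
 * (set_dma_addr0(), set_dma_count0(), enable_dma_buffer0(), start_dma())
 * and a hypothetical driver providing a device id "MYDEV_DMA_ID", a
 * physical buffer "buf_phys" of "len" bytes, and private data "mydev":
 *
 *	static irqreturn_t mydev_dma_done(int irq, void *dev_id)
 *	{
 *		// buffer done: refill buffer 0 or wake the waiter here
 *		return IRQ_HANDLED;
 *	}
 *
 *	ch = request_au1000_dma(MYDEV_DMA_ID, "mydev",
 *				mydev_dma_done, 0, mydev);
 *	if (ch < 0)
 *		goto out;
 *	set_dma_addr0(ch, buf_phys);
 *	set_dma_count0(ch, len);
 *	enable_dma_buffer0(ch);
 *	start_dma(ch);
 *	...
 *	free_au1000_dma(ch);
 */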