xref: /openbmc/linux/arch/mips/alchemy/common/dma.c (revision 55fd7e02)
/*
 *
 * BRIEF MODULE DESCRIPTION
 *      A DMA channel allocator for Au1x00. API is modeled loosely off of
 *      linux/kernel/dma.c.
 *
 * Copyright 2000, 2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1000_dma.h>

/*
 * A note on resource allocation:
 *
 * All drivers needing DMA channels should allocate and release them
 * through the public routines request_au1000_dma() and free_au1000_dma().
 *
 * To avoid problems, all drivers should allocate resources in the same
 * sequence and release them in the reverse order.
 *
 * So, when allocating DMAs and IRQs, first allocate the DMA channel, then
 * the IRQ.  When releasing them, first release the IRQ, then release the
 * DMA channel.  The main reason for this order is that, if you want the
 * DMA buffer-done interrupt, you do not know the IRQ number until the
 * channel has been returned by request_au1000_dma() (see the usage sketch
 * below).
 */
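
/*
 * Usage sketch (illustrative only): the ordering above falls out of this
 * file's interface, since request_au1000_dma() picks a free channel first
 * and only then requests the buffer-done IRQ (when a handler is passed),
 * while free_au1000_dma() frees the IRQ before releasing the channel.
 * The handler, name and private pointer below are made-up examples; dev_id
 * is an index into dma_dev_table[] below (or DMA_NUM_DEV + n for the
 * bank-2 table on Au1100).
 *
 *	static irqreturn_t foo_dma_done(int irq, void *dev_id)
 *	{
 *		// switch or refill buffers here
 *		return IRQ_HANDLED;
 *	}
 *
 *	// probe: allocate the channel (and, via the handler, its IRQ)
 *	int ch = request_au1000_dma(dev_id, "foo-tx", foo_dma_done, 0, foo);
 *	if (ch < 0)
 *		return ch;
 *
 *	// remove: disables the channel, frees the IRQ, then releases the
 *	// channel itself
 *	free_au1000_dma(ch);
 */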

/* DMA Channel register block spacing */
#define DMA_CHANNEL_LEN		0x00000100

DEFINE_SPINLOCK(au1000_dma_spin_lock);

struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,},
	{.dev_id = -1,}
};
EXPORT_SYMBOL(au1000_dma_table);

/* Device FIFO addresses and default DMA modes */
static const struct dma_dev {
	unsigned int fifo_addr;
	unsigned int dma_mode;
} dma_dev_table[DMA_NUM_DEV] = {
	{ AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 },		/* UART0_TX */
	{ AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR },	/* UART0_RX */
	{ 0, 0 },	/* DMA_REQ0 */
	{ 0, 0 },	/* DMA_REQ1 */
	{ AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 },		/* AC97 TX c */
	{ AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR },	/* AC97 RX c */
	{ AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC },	/* UART3_TX */
	{ AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */
	{ AU1000_USB_UDC_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */
	/* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */
	{ AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC},	/* I2S TX */
	{ AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */
};

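/*
 * Legacy read_proc-style helper: prints one "<channel>: <owner>" line for
 * each currently allocated DMA channel.
 */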
int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
			 int length, int *eof, void *data)
{
	int i, len = 0;
	struct dma_chan *chan;

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) {
		chan = get_dma_chan(i);
		if (chan != NULL)
			len += sprintf(buf + len, "%2d: %s\n",
				       i, chan->dev_str);
	}

	if (fpos >= len) {
		*start = buf;
		*eof = 1;
		return 0;
	}
	*start = buf + fpos;
	len -= fpos;
	if (len > length)
		return length;
	*eof = 1;
	return len;
}

/* Device FIFO addresses and default DMA modes - 2nd bank */
static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
	{ AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 },		/* coherent */
	{ AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR },	/* coherent */
	{ AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 },		/* coherent */
	{ AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }	/* coherent */
};
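
/*
 * The bank-2 entries above are the Au1100 SD controller FIFOs.  They are
 * reachable only on the Au1100, by passing dev_id = DMA_NUM_DEV + n to
 * request_au1000_dma(), which subtracts DMA_NUM_DEV and indexes this
 * table; see the range check in request_au1000_dma() below.
 */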

void dump_au1000_dma_channel(unsigned int dmanr)
{
	struct dma_chan *chan;

	if (dmanr >= NUM_AU1000_DMA_CHANNELS)
		return;
	chan = &au1000_dma_table[dmanr];

	printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr);
	printk(KERN_INFO "  mode = 0x%08x\n",
	       __raw_readl(chan->io + DMA_MODE_SET));
	printk(KERN_INFO "  addr = 0x%08x\n",
	       __raw_readl(chan->io + DMA_PERIPHERAL_ADDR));
	printk(KERN_INFO "  start0 = 0x%08x\n",
	       __raw_readl(chan->io + DMA_BUFFER0_START));
	printk(KERN_INFO "  start1 = 0x%08x\n",
	       __raw_readl(chan->io + DMA_BUFFER1_START));
	printk(KERN_INFO "  count0 = 0x%08x\n",
	       __raw_readl(chan->io + DMA_BUFFER0_COUNT));
	printk(KERN_INFO "  count1 = 0x%08x\n",
	       __raw_readl(chan->io + DMA_BUFFER1_COUNT));
}

/*
 * Finds a free channel and binds the requested device to it.
 * Returns the allocated channel number, or a negative errno on failure.
 * Also requests the DMA done IRQ if irqhandler != NULL.
 */
int request_au1000_dma(int dev_id, const char *dev_str,
		       irq_handler_t irqhandler,
		       unsigned long irqflags,
		       void *irq_dev_id)
{
	struct dma_chan *chan;
	const struct dma_dev *dev;
	int i, ret;

	if (alchemy_get_cputype() == ALCHEMY_CPU_AU1100) {
		if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2))
			return -EINVAL;
	} else {
		if (dev_id < 0 || dev_id >= DMA_NUM_DEV)
			return -EINVAL;
	}

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
		if (au1000_dma_table[i].dev_id < 0)
			break;

	if (i == NUM_AU1000_DMA_CHANNELS)
		return -ENODEV;

	chan = &au1000_dma_table[i];

	if (dev_id >= DMA_NUM_DEV) {
		dev_id -= DMA_NUM_DEV;
		dev = &dma_dev_table_bank2[dev_id];
	} else
		dev = &dma_dev_table[dev_id];

	if (irqhandler) {
		chan->irq_dev = irq_dev_id;
		ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
				  chan->irq_dev);
		if (ret) {
			chan->irq_dev = NULL;
			return ret;
		}
	} else {
		chan->irq_dev = NULL;
	}

	/* fill it in */
	chan->io = (void __iomem *)(KSEG1ADDR(AU1000_DMA_PHYS_ADDR) +
			i * DMA_CHANNEL_LEN);
	chan->dev_id = dev_id;
	chan->dev_str = dev_str;
	chan->fifo_addr = dev->fifo_addr;
	chan->mode = dev->dma_mode;

	/* initialize the channel before returning */
	init_dma(i);

	return i;
}
EXPORT_SYMBOL(request_au1000_dma);

void free_au1000_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);

	if (!chan) {
		printk(KERN_ERR "Error trying to free DMA%d\n", dmanr);
		return;
	}

	disable_dma(dmanr);
	if (chan->irq_dev)
		free_irq(chan->irq, chan->irq_dev);

	chan->irq_dev = NULL;
	chan->dev_id = -1;
}
EXPORT_SYMBOL(free_au1000_dma);

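/*
 * Fill in the per-channel interrupt numbers.  This runs at arch_initcall
 * time, i.e. before normal driver initcalls, so chan->irq is valid by the
 * time a driver calls request_au1000_dma().
 */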
static int __init au1000_dma_init(void)
{
	int base, i;

	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1000:
		base = AU1000_DMA_INT_BASE;
		break;
	case ALCHEMY_CPU_AU1500:
		base = AU1500_DMA_INT_BASE;
		break;
	case ALCHEMY_CPU_AU1100:
		base = AU1100_DMA_INT_BASE;
		break;
	default:
		goto out;
	}

	for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
		au1000_dma_table[i].irq = base + i;

	printk(KERN_INFO "Alchemy DMA initialized\n");

out:
	return 0;
}
arch_initcall(au1000_dma_init);