/*
 * arch/sh/drivers/dma/dma-sh.c
 *
 * SuperH On-chip DMAC Support
 *
 * Copyright (C) 2000 Takashi YOSHII
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2005 Andriy Skulysh
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <mach-dreamcast/mach/dma.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/dma-sh.h>

#if defined(DMAE1_IRQ)
#define NR_DMAE		2
#else
#define NR_DMAE		1
#endif

static const char *dmae_name[] = {
	"DMAC Address Error0", "DMAC Address Error1"
};

static inline unsigned int get_dmte_irq(unsigned int chan)
{
	unsigned int irq = 0;
	if (chan < ARRAY_SIZE(dmte_irq_map))
		irq = dmte_irq_map[chan];

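	/*
	 * With CONFIG_SH_DMA_IRQ_MULTI the per-channel transfer-end
	 * interrupts are folded onto two shared vectors: channels whose
	 * vector lies above DMTE6 report on DMTE6, the rest on DMTE0.
	 */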
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
	if (irq > DMTE6_IRQ)
		return DMTE6_IRQ;
	return DMTE0_IRQ;
#else
	return irq;
#endif
}

/*
 * We determine the correct shift size based on the CHCR transmit size
 * for the given channel, since we know that it will take:
 *
 *	info->count >> ts_shift[transmit_size]
 *
 * iterations to complete the transfer.
 */
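/*
 * Worked example (illustrative only; the actual ts_shift[] values are
 * CPU dependent): if CHCR.TS selects 32-bit longword transfers and
 * ts_shift[] maps that to 2, a 64-byte request is programmed into TCR
 * as 64 >> 2 = 16 transfer counts.
 */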
static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
	u32 chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);

	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}

/*
 * The transfer-end interrupt handler must read the CHCR register to
 * clear the hardware interrupt's active condition.
 * Beyond that, it needs to wake any waiting process, which should
 * handle setting up the next transfer.
 */
static irqreturn_t dma_tei(int irq, void *dev_id)
{
	struct dma_channel *chan = dev_id;
	u32 chcr;

	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);

	if (!(chcr & CHCR_TE))
		return IRQ_NONE;

	chcr &= ~(CHCR_IE | CHCR_DE);
	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));

	wake_up(&chan->wait_queue);

	return IRQ_HANDLED;
}

static int sh_dmac_request_dma(struct dma_channel *chan)
{
	if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
		return 0;

	return request_irq(get_dmte_irq(chan->chan), dma_tei,
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
				IRQF_SHARED,
#else
				IRQF_DISABLED,
#endif
				chan->dev_id, chan);
}

static void sh_dmac_free_dma(struct dma_channel *chan)
{
	free_irq(get_dmte_irq(chan->chan), chan);
}

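/*
 * Program CHCR for the channel. Note that CHCR_IE is deliberately
 * stripped here and only re-asserted by sh_dmac_enable_dma(); the
 * request for a transfer-end interrupt is remembered through the
 * DMA_TEI_CAPABLE flag instead, so the interrupt is only enabled
 * while a transfer is actually in flight.
 */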
static int
sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
{
	if (!chcr)
		chcr = RS_DUAL | CHCR_IE;

	if (chcr & CHCR_IE) {
		chcr &= ~CHCR_IE;
		chan->flags |= DMA_TEI_CAPABLE;
	} else {
		chan->flags &= ~DMA_TEI_CAPABLE;
	}

	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));

	chan->flags |= DMA_CONFIGURED;
	return 0;
}

static void sh_dmac_enable_dma(struct dma_channel *chan)
{
	int irq;
	u32 chcr;

	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
	chcr |= CHCR_DE;

	if (chan->flags & DMA_TEI_CAPABLE)
		chcr |= CHCR_IE;

	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));

	if (chan->flags & DMA_TEI_CAPABLE) {
		irq = get_dmte_irq(chan->chan);
		enable_irq(irq);
	}
}

static void sh_dmac_disable_dma(struct dma_channel *chan)
{
	int irq;
	u32 chcr;

	if (chan->flags & DMA_TEI_CAPABLE) {
		irq = get_dmte_irq(chan->chan);
		disable_irq(irq);
	}

	chcr = ctrl_inl(dma_base_addr[chan->chan] + CHCR);
	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	ctrl_outl(chcr, (dma_base_addr[chan->chan] + CHCR));
}

static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
	/*
	 * If we haven't pre-configured the channel with special flags, use
	 * the defaults.
	 */
	if (unlikely(!(chan->flags & DMA_CONFIGURED)))
		sh_dmac_configure_channel(chan, 0);

	sh_dmac_disable_dma(chan);

	/*
	 * Single-address mode usage note!
	 *
	 * It's important that we don't accidentally write any value to
	 * SAR/DAR (this includes 0) that hasn't been directly specified
	 * by the user if we're in single-address mode.
	 *
	 * In this case, only one address can be defined; anything else
	 * will result in a DMA address error interrupt (at least on the
	 * SH-4), which will subsequently halt the transfer.
	 *
	 * Channel 2 on the Dreamcast is a special case, as it is used for
	 * cascading to the PVR2 DMAC. In this case, we still need to write
	 * SAR and DAR, regardless of value, in order for cascading to work.
	 */
	if (chan->sar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		ctrl_outl(chan->sar, (dma_base_addr[chan->chan] + SAR));
	if (chan->dar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		ctrl_outl(chan->dar, (dma_base_addr[chan->chan] + DAR));

	ctrl_outl(chan->count >> calc_xmit_shift(chan),
		(dma_base_addr[chan->chan] + TCR));

	sh_dmac_enable_dma(chan);

	return 0;
}

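/*
 * The residue is whatever remains in TCR, scaled back from transfer
 * counts to bytes with the same shift that was used to program the
 * count. A channel that is no longer enabled reports zero.
 */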
static int sh_dmac_get_dma_residue(struct dma_channel *chan)
{
	if (!(ctrl_inl(dma_base_addr[chan->chan] + CHCR) & CHCR_DE))
		return 0;

	return ctrl_inl(dma_base_addr[chan->chan] + TCR)
		 << calc_xmit_shift(chan);
}

static inline int dmaor_reset(int no)
{
	unsigned long dmaor = dmaor_read_reg(no);

	/* Try to clear the error flags first, in case they are set */
	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(no, dmaor);

	dmaor |= DMAOR_INIT;
	dmaor_write_reg(no, dmaor);

	/* See if we got an error again */
	if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) {
		printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}

	return 0;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t dma_err(int irq, void *dummy)
{
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
	int cnt = 0;
	switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
	case DMTE6_IRQ:
		cnt++;
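		/* fall through: DMTE6 belongs to the second DMAC */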
#endif
	case DMTE0_IRQ:
		if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
			disable_irq(irq);
			/* DMA multi and error IRQ */
			return IRQ_HANDLED;
		}
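		/* fall through */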
	default:
		return IRQ_NONE;
	}
#else
	dmaor_reset(0);
#if defined(CONFIG_CPU_SUBTYPE_SH7723)	|| \
		defined(CONFIG_CPU_SUBTYPE_SH7780)	|| \
		defined(CONFIG_CPU_SUBTYPE_SH7785)
	dmaor_reset(1);
#endif
	disable_irq(irq);

	return IRQ_HANDLED;
#endif
}
#endif

static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};
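
/*
 * These ops are not called directly by clients; they are reached
 * through the generic wrappers in arch/sh/drivers/dma/dma-api.c. A
 * minimal dual-address transfer might look like the sketch below
 * (illustrative only; assumes the request_dma()/dma_write()/
 * dma_wait_for_completion()/free_dma() helpers from <asm/dma.h>):
 *
 *	if (request_dma(chan, "mydev") == 0) {
 *		dma_write(chan, src_phys, dst_phys, len);
 *		dma_wait_for_completion(chan);
 *		free_dma(chan);
 *	}
 */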

static struct dma_info sh_dmac_info = {
	.name		= "sh_dmac",
	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};

#ifdef CONFIG_CPU_SH4
static unsigned int get_dma_error_irq(int n)
{
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
	return (n == 0) ? get_dmte_irq(0) : get_dmte_irq(6);
#else
	return (n == 0) ? DMAE0_IRQ :
#if defined(DMAE1_IRQ)
				DMAE1_IRQ;
#else
				-1;
#endif
#endif
}
#endif

static int __init sh_dmac_init(void)
{
	struct dma_info *info = &sh_dmac_info;
	int i;

#ifdef CONFIG_CPU_SH4
	int n;

	for (n = 0; n < NR_DMAE; n++) {
		i = request_irq(get_dma_error_irq(n), dma_err,
#if defined(CONFIG_SH_DMA_IRQ_MULTI)
				IRQF_SHARED,
#else
				IRQF_DISABLED,
#endif
				dmae_name[n], (void *)dmae_name[n]);
		if (unlikely(i < 0)) {
			printk(KERN_ERR "%s: request_irq failed\n", dmae_name[n]);
			return i;
		}
	}
#endif /* CONFIG_CPU_SH4 */

	/*
	 * Initialize DMAOR, and clean up any error flags that may have
	 * been set.
	 */
	i = dmaor_reset(0);
	if (unlikely(i != 0))
		return i;
#if defined(CONFIG_CPU_SUBTYPE_SH7723)	|| \
		defined(CONFIG_CPU_SUBTYPE_SH7780)	|| \
		defined(CONFIG_CPU_SUBTYPE_SH7785)
	i = dmaor_reset(1);
	if (unlikely(i != 0))
		return i;
#endif

	return register_dmac(info);
}

static void __exit sh_dmac_exit(void)
{
#ifdef CONFIG_CPU_SH4
	int n;

	for (n = 0; n < NR_DMAE; n++) {
		free_irq(get_dma_error_irq(n), (void *)dmae_name[n]);
	}
#endif /* CONFIG_CPU_SH4 */
	unregister_dmac(&sh_dmac_info);
}

subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL");