/*
 *  Routines for GF1 DMA control
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <asm/dma.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/gus.h>

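/*
 * Stop the current GF1 DRAM DMA transfer by clearing the DRAM DMA control
 * register; the read-back presumably flushes the write and acknowledges
 * the pending DMA interrupt on the chip.
 */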
static void snd_gf1_dma_ack(struct snd_gus_card * gus)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	snd_gf1_write8(gus, SNDRV_GF1_GB_DRAM_DMA_CONTROL, 0x00);
	snd_gf1_look8(gus, SNDRV_GF1_GB_DRAM_DMA_CONTROL);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}

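/*
 * Program a single DRAM DMA transfer: translate the on-board address for
 * 16-bit DMA channels, set up the ISA DMA controller and write the DMA
 * address and command to the GF1 registers.
 */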
static void snd_gf1_dma_program(struct snd_gus_card * gus,
				unsigned int addr,
				unsigned long buf_addr,
				unsigned int count,
				unsigned int cmd)
{
	unsigned long flags;
	unsigned int address;
	unsigned char dma_cmd;
	unsigned int address_high;

	snd_printdd("dma_transfer: addr=0x%x, buf=0x%lx, count=0x%x\n",
		    addr, buf_addr, count);

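	/*
	 * A 16-bit DMA channel (>3) transfers words, so the GUS DRAM address
	 * has to be translated: halved in enhanced mode, or halved within the
	 * 256KB bank (keeping the bank bits) otherwise.
	 */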
	if (gus->gf1.dma1 > 3) {
		if (gus->gf1.enh_mode) {
			address = addr >> 1;
		} else {
			if (addr & 0x1f) {
				snd_printd("snd_gf1_dma_transfer: unaligned address (0x%x)?\n", addr);
				return;
			}
			address = (addr & 0x000c0000) | ((addr & 0x0003ffff) >> 1);
		}
	} else {
		address = addr;
	}

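	/* Build the DMA command; 16-bit data or a 16-bit DMA channel needs an
	 * even transfer count, so round it up.
	 */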
	dma_cmd = SNDRV_GF1_DMA_ENABLE | (unsigned short) cmd;
#if 0
	dma_cmd |= 0x08;
#endif
	if (dma_cmd & SNDRV_GF1_DMA_16BIT) {
		count++;
		count &= ~1;	/* align */
	}
	if (gus->gf1.dma1 > 3) {
		dma_cmd |= SNDRV_GF1_DMA_WIDTH16;
		count++;
		count &= ~1;	/* align */
	}
	snd_gf1_dma_ack(gus);
	snd_dma_program(gus->gf1.dma1, buf_addr, count,
			dma_cmd & SNDRV_GF1_DMA_READ ? DMA_MODE_READ : DMA_MODE_WRITE);
#if 0
	snd_printk(KERN_DEBUG "address = 0x%x, count = 0x%x, dma_cmd = 0x%x\n",
		   address << 1, count, dma_cmd);
#endif
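	/* Write the translated DRAM address and the DMA command to the GF1
	 * registers; enhanced mode also needs the high address bits.
	 */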
	spin_lock_irqsave(&gus->reg_lock, flags);
	if (gus->gf1.enh_mode) {
		address_high = ((address >> 16) & 0x000000f0) | (address & 0x0000000f);
		snd_gf1_write16(gus, SNDRV_GF1_GW_DRAM_DMA_LOW, (unsigned short) (address >> 4));
		snd_gf1_write8(gus, SNDRV_GF1_GB_DRAM_DMA_HIGH, (unsigned char) address_high);
	} else
		snd_gf1_write16(gus, SNDRV_GF1_GW_DRAM_DMA_LOW, (unsigned short) (address >> 4));
	snd_gf1_write8(gus, SNDRV_GF1_GB_DRAM_DMA_CONTROL, dma_cmd);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}

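/*
 * Pick the next queued DMA block; the caller must hold gus->dma_lock.
 */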
static struct snd_gf1_dma_block *snd_gf1_dma_next_block(struct snd_gus_card * gus)
{
	struct snd_gf1_dma_block *block;

	/* PCM blocks have higher priority than synthesizer blocks */
	if (gus->gf1.dma_data_pcm) {
		block = gus->gf1.dma_data_pcm;
		if (gus->gf1.dma_data_pcm_last == block) {
			gus->gf1.dma_data_pcm =
			gus->gf1.dma_data_pcm_last = NULL;
		} else {
			gus->gf1.dma_data_pcm = block->next;
		}
	} else if (gus->gf1.dma_data_synth) {
		block = gus->gf1.dma_data_synth;
		if (gus->gf1.dma_data_synth_last == block) {
			gus->gf1.dma_data_synth =
			gus->gf1.dma_data_synth_last = NULL;
		} else {
			gus->gf1.dma_data_synth = block->next;
		}
	} else {
		block = NULL;
	}
	if (block) {
		gus->gf1.dma_ack = block->ack;
		gus->gf1.dma_private_data = block->private_data;
	}
	return block;
}

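/*
 * DMA completion handler: acknowledge the finished transfer, call the
 * owner's ack callback and start the next queued block, if any.
 */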
static void snd_gf1_dma_interrupt(struct snd_gus_card * gus)
{
	struct snd_gf1_dma_block *block;

	snd_gf1_dma_ack(gus);
	if (gus->gf1.dma_ack)
		gus->gf1.dma_ack(gus, gus->gf1.dma_private_data);
	spin_lock(&gus->dma_lock);
	if (gus->gf1.dma_data_pcm == NULL &&
	    gus->gf1.dma_data_synth == NULL) {
		gus->gf1.dma_ack = NULL;
		gus->gf1.dma_flags &= ~SNDRV_GF1_DMA_TRIGGER;
		spin_unlock(&gus->dma_lock);
		return;
	}
	block = snd_gf1_dma_next_block(gus);
	spin_unlock(&gus->dma_lock);
	snd_gf1_dma_program(gus, block->addr, block->buf_addr, block->count,
			    (unsigned short) block->cmd);
#if 0
	snd_printd(KERN_DEBUG "program dma (IRQ) - "
		   "addr = 0x%x, buffer = 0x%lx, count = 0x%x, cmd = 0x%x\n",
		   block->addr, block->buf_addr, block->count, block->cmd);
#endif
	kfree(block);
}

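/*
 * Attach a DMA user; the first caller installs the DMA write interrupt
 * handler and resets both transfer queues.
 */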
int snd_gf1_dma_init(struct snd_gus_card * gus)
{
	mutex_lock(&gus->dma_mutex);
	gus->gf1.dma_shared++;
	if (gus->gf1.dma_shared > 1) {
		mutex_unlock(&gus->dma_mutex);
		return 0;
	}
	gus->gf1.interrupt_handler_dma_write = snd_gf1_dma_interrupt;
	gus->gf1.dma_data_pcm =
	gus->gf1.dma_data_pcm_last =
	gus->gf1.dma_data_synth =
	gus->gf1.dma_data_synth_last = NULL;
	mutex_unlock(&gus->dma_mutex);
	return 0;
}

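/*
 * Detach a DMA user; the last caller disables the DMA channel, restores
 * the default interrupt handler and frees any still-queued blocks.
 */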
int snd_gf1_dma_done(struct snd_gus_card * gus)
{
	struct snd_gf1_dma_block *block;

	mutex_lock(&gus->dma_mutex);
	gus->gf1.dma_shared--;
	if (!gus->gf1.dma_shared) {
		snd_dma_disable(gus->gf1.dma1);
		snd_gf1_set_default_handlers(gus, SNDRV_GF1_HANDLER_DMA_WRITE);
		snd_gf1_dma_ack(gus);
		while ((block = gus->gf1.dma_data_pcm)) {
			gus->gf1.dma_data_pcm = block->next;
			kfree(block);
		}
		while ((block = gus->gf1.dma_data_synth)) {
			gus->gf1.dma_data_synth = block->next;
			kfree(block);
		}
		gus->gf1.dma_data_pcm_last =
		gus->gf1.dma_data_synth_last = NULL;
	}
	mutex_unlock(&gus->dma_mutex);
	return 0;
}

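/*
 * Queue a copy of the given block on the PCM or synth queue and start the
 * transfer immediately when no DMA is currently in progress.
 */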
int snd_gf1_dma_transfer_block(struct snd_gus_card * gus,
			       struct snd_gf1_dma_block * __block,
			       int atomic,
			       int synth)
{
	unsigned long flags;
	struct snd_gf1_dma_block *block;

	block = kmalloc(sizeof(*block), atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (block == NULL) {
		snd_printk(KERN_ERR "gf1: DMA transfer failure; not enough memory\n");
		return -ENOMEM;
	}
	*block = *__block;
	block->next = NULL;

	snd_printdd("addr = 0x%x, buffer = 0x%lx, count = 0x%x, cmd = 0x%x\n",
		    block->addr, (long) block->buffer, block->count,
		    block->cmd);

	snd_printdd("gus->gf1.dma_data_pcm_last = 0x%lx\n",
		    (long)gus->gf1.dma_data_pcm_last);
	snd_printdd("gus->gf1.dma_data_pcm = 0x%lx\n",
		    (long)gus->gf1.dma_data_pcm);

	spin_lock_irqsave(&gus->dma_lock, flags);
	if (synth) {
		if (gus->gf1.dma_data_synth_last) {
			gus->gf1.dma_data_synth_last->next = block;
			gus->gf1.dma_data_synth_last = block;
		} else {
			gus->gf1.dma_data_synth =
			gus->gf1.dma_data_synth_last = block;
		}
	} else {
		if (gus->gf1.dma_data_pcm_last) {
			gus->gf1.dma_data_pcm_last->next = block;
			gus->gf1.dma_data_pcm_last = block;
		} else {
			gus->gf1.dma_data_pcm =
			gus->gf1.dma_data_pcm_last = block;
		}
	}
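	/* If no transfer is in flight, take the first queued block (usually
	 * the one just added) and program it right away.
	 */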
	if (!(gus->gf1.dma_flags & SNDRV_GF1_DMA_TRIGGER)) {
		gus->gf1.dma_flags |= SNDRV_GF1_DMA_TRIGGER;
		block = snd_gf1_dma_next_block(gus);
		spin_unlock_irqrestore(&gus->dma_lock, flags);
		if (block == NULL)
			return 0;
		snd_gf1_dma_program(gus, block->addr, block->buf_addr, block->count,
				    (unsigned short) block->cmd);
		kfree(block);
		return 0;
	}
	spin_unlock_irqrestore(&gus->dma_lock, flags);
	return 0;
}
251