xref: /openbmc/linux/sound/soc/samsung/idma.c (revision 9c1f8594)
/*
 * sound/soc/samsung/idma.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * I2S0's Internal DMA driver
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "i2s.h"
#include "idma.h"
#include "dma.h"
#include "i2s-regs.h"

#define ST_RUNNING		(1<<0)
#define ST_OPENED		(1<<1)

static const struct snd_pcm_hardware idma_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		    SNDRV_PCM_INFO_BLOCK_TRANSFER |
		    SNDRV_PCM_INFO_MMAP |
		    SNDRV_PCM_INFO_MMAP_VALID |
		    SNDRV_PCM_INFO_PAUSE |
		    SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16_LE |
		    SNDRV_PCM_FMTBIT_U16_LE |
		    SNDRV_PCM_FMTBIT_S24_LE |
		    SNDRV_PCM_FMTBIT_U24_LE |
		    SNDRV_PCM_FMTBIT_U8 |
		    SNDRV_PCM_FMTBIT_S8,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = MAX_IDMA_BUFFER,
	.period_bytes_min = 128,
	.period_bytes_max = MAX_IDMA_PERIOD,
	.periods_min = 1,
	.periods_max = 2,
};

struct idma_ctrl {
	spinlock_t	lock;
	int		state;
	dma_addr_t	start;
	dma_addr_t	pos;
	dma_addr_t	end;
	dma_addr_t	period;
	dma_addr_t	periodsz;
	void		*token;
	void		(*cb)(void *dt, int bytes_xfer);
};

static struct idma_info {
	spinlock_t	lock;
	void		 __iomem  *regs;
	dma_addr_t	lp_tx_addr;
} idma;

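/*
 * Current transmit position: I2STRNCNT counts the 32-bit words already
 * transferred, so scale it back into a bus address within the buffer.
 */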
static void idma_getpos(dma_addr_t *src)
{
	*src = idma.lp_tx_addr +
		(readl(idma.regs + I2STRNCNT) & 0xffffff) * 4;
}

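/*
 * Program the internal DMA for playback: place the level-0 interrupt
 * address one period into the buffer, point the engine at the start of
 * the low-power TX buffer, latch the transfer size and unmask the
 * level-0 interrupt.
 */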
static int idma_enqueue(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 val;

	spin_lock(&prtd->lock);
	prtd->token = (void *) substream;
	spin_unlock(&prtd->lock);

	/* Internal DMA Level0 Interrupt Address */
	val = idma.lp_tx_addr + prtd->periodsz;
	writel(val, idma.regs + I2SLVL0ADDR);

	/* Start address 0 of the I2S internal DMA operation. */
	val = idma.lp_tx_addr;
	writel(val, idma.regs + I2SSTR0);

	/*
	 * Transfer block size for I2S internal DMA.
	 * The transfer size must be decided before the DMA operation starts.
	 */
	val = readl(idma.regs + I2SSIZE);
	val &= ~(I2SSIZE_TRNMSK << I2SSIZE_SHIFT);
	val |= (((runtime->dma_bytes >> 2) &
			I2SSIZE_TRNMSK) << I2SSIZE_SHIFT);
	writel(val, idma.regs + I2SSIZE);

	val = readl(idma.regs + I2SAHB);
	val |= AHB_INTENLVL0;
	writel(val, idma.regs + I2SAHB);

	return 0;
}

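/* Register the callback the IRQ handler invokes when a period completes. */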
static void idma_setcallbk(struct snd_pcm_substream *substream,
				void (*cb)(void *, int))
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	spin_lock(&prtd->lock);
	prtd->cb = cb;
	spin_unlock(&prtd->lock);
}

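/* Start or stop the internal DMA engine by toggling the AHB control bits. */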
static void idma_control(int op)
{
	u32 val;

	spin_lock(&idma.lock);
	val = readl(idma.regs + I2SAHB);

	switch (op) {
	case LPAM_DMA_START:
		val |= (AHB_INTENLVL0 | AHB_DMAEN);
		break;
	case LPAM_DMA_STOP:
		val &= ~(AHB_INTENLVL0 | AHB_DMAEN);
		break;
	default:
		spin_unlock(&idma.lock);
		return;
	}

	writel(val, idma.regs + I2SAHB);
	spin_unlock(&idma.lock);
}

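/* Period callback: tell the PCM core another period has been consumed. */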
static void idma_done(void *id, int bytes_xfer)
{
	struct snd_pcm_substream *substream = id;
	struct idma_ctrl *prtd = substream->runtime->private_data;

	if (prtd && (prtd->state & ST_RUNNING))
		snd_pcm_period_elapsed(substream);
}

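/*
 * Switch the TX path over to the internal DMA, record the buffer and
 * period geometry requested by userspace and hook up the period callback.
 */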
static int idma_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 mod = readl(idma.regs + I2SMOD);
	u32 ahb = readl(idma.regs + I2SAHB);

	ahb |= (AHB_DMARLD | AHB_INTMASK);
	mod |= MOD_TXS_IDMA;
	writel(ahb, idma.regs + I2SAHB);
	writel(mod, idma.regs + I2SMOD);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	runtime->dma_bytes = params_buffer_bytes(params);

	prtd->start = prtd->pos = runtime->dma_addr;
	prtd->period = params_periods(params);
	prtd->periodsz = params_period_bytes(params);
	prtd->end = runtime->dma_addr + runtime->dma_bytes;

	idma_setcallbk(substream, idma_done);

	return 0;
}

static int idma_hw_free(struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);

	return 0;
}

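/*
 * Rewind to the start of the buffer, stop any transfer in flight and
 * re-program the engine for the next run.
 */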
static int idma_prepare(struct snd_pcm_substream *substream)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	prtd->pos = prtd->start;

	/* flush the DMA channel */
	idma_control(LPAM_DMA_STOP);
	idma_enqueue(substream);

	return 0;
}

static int idma_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;
	int ret = 0;

	spin_lock(&prtd->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		prtd->state |= ST_RUNNING;
		idma_control(LPAM_DMA_START);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		prtd->state &= ~ST_RUNNING;
		idma_control(LPAM_DMA_STOP);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock(&prtd->lock);

	return ret;
}

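/* Report the current playback position as a frame offset into the buffer. */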
static snd_pcm_uframes_t
	idma_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;
	dma_addr_t src;
	unsigned long res;

	spin_lock(&prtd->lock);

	idma_getpos(&src);
	res = src - prtd->start;

	spin_unlock(&prtd->lock);

	return bytes_to_frames(substream->runtime, res);
}

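/*
 * The DMA buffer is ioremapped device memory, so map it into userspace
 * non-cached, the same way snd_pcm_lib_mmap_iomem() does.
 */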
static int idma_mmap(struct snd_pcm_substream *substream,
	struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;
	int ret;

	/* From snd_pcm_lib_mmap_iomem */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_IO;
	size = vma->vm_end - vma->vm_start;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = io_remap_pfn_range(vma, vma->vm_start,
			(runtime->dma_addr + offset) >> PAGE_SHIFT,
			size, vma->vm_page_prot);

	return ret;
}

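/*
 * Level-0 interrupt handler: acknowledge the interrupt, advance the
 * level-0 address by one period (wrapping at the end of the buffer) and
 * notify the PCM core through the registered callback.
 */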
static irqreturn_t iis_irq(int irqno, void *dev_id)
{
	struct idma_ctrl *prtd = (struct idma_ctrl *)dev_id;
	u32 iiscon, iisahb, val, addr;

	iisahb  = readl(idma.regs + I2SAHB);
	iiscon  = readl(idma.regs + I2SCON);

	val = (iisahb & AHB_LVL0INT) ? AHB_CLRLVL0INT : 0;

	if (val) {
		iisahb |= val;
		writel(iisahb, idma.regs + I2SAHB);

		addr = readl(idma.regs + I2SLVL0ADDR) - idma.lp_tx_addr;
		addr += prtd->periodsz;
		addr %= (prtd->end - prtd->start);
		addr += idma.lp_tx_addr;

		writel(addr, idma.regs + I2SLVL0ADDR);

		if (prtd->cb)
			prtd->cb(prtd->token, prtd->period);
	}

	return IRQ_HANDLED;
}

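/*
 * Allocate the per-stream control structure and claim the I2S0 interrupt
 * used for level-0 period notifications.
 */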
static int idma_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd;
	int ret;

	snd_soc_set_runtime_hwparams(substream, &idma_hardware);

	prtd = kzalloc(sizeof(struct idma_ctrl), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	ret = request_irq(IRQ_I2S0, iis_irq, 0, "i2s", prtd);
	if (ret < 0) {
		pr_err("failed to claim i2s irq, ret = %d\n", ret);
		kfree(prtd);
		return ret;
	}

	spin_lock_init(&prtd->lock);

	runtime->private_data = prtd;

	return 0;
}

static int idma_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;

	if (!prtd) {
		pr_err("idma_close called with prtd == NULL\n");
		return 0;
	}

	free_irq(IRQ_I2S0, prtd);
	kfree(prtd);

	return 0;
}

static struct snd_pcm_ops idma_ops = {
	.open		= idma_open,
	.close		= idma_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.trigger	= idma_trigger,
	.pointer	= idma_pointer,
	.mmap		= idma_mmap,
	.hw_params	= idma_hw_params,
	.hw_free	= idma_hw_free,
	.prepare	= idma_prepare,
};

static void idma_free(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (!substream)
		return;

	buf = &substream->dma_buffer;
	if (!buf->area)
		return;

	iounmap(buf->area);

	buf->area = NULL;
	buf->addr = 0;
}

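/*
 * The internal DMA transmits from a fixed low-power buffer, so rather
 * than allocating memory this ioremaps that region and presents it to
 * the PCM core as the stream's DMA buffer.
 */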
static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;

	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;

	/* Assign PCM buffer pointers */
	buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;
	buf->addr = idma.lp_tx_addr;
	buf->bytes = idma_hardware.buffer_bytes_max;
	buf->area = (unsigned char *)ioremap(buf->addr, buf->bytes);
	if (!buf->area)
		return -ENOMEM;

	return 0;
}

static u64 idma_mask = DMA_BIT_MASK(32);

static int idma_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_soc_dai *dai = rtd->cpu_dai;
	struct snd_pcm *pcm = rtd->pcm;
	int ret = 0;

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &idma_mask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	if (dai->driver->playback.channels_min)
		ret = preallocate_idma_buffer(pcm,
				SNDRV_PCM_STREAM_PLAYBACK);

	return ret;
}

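/*
 * Hook for the Samsung I2S CPU DAI driver to hand over the ioremapped
 * I2S0 register base and the bus address of the low-power TX buffer.
 */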
void idma_reg_addr_init(void *regs, dma_addr_t addr)
{
	spin_lock_init(&idma.lock);
	idma.regs = regs;
	idma.lp_tx_addr = addr;
}

static struct snd_soc_platform_driver asoc_idma_platform = {
	.ops = &idma_ops,
	.pcm_new = idma_new,
	.pcm_free = idma_free,
};

static int __devinit asoc_idma_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &asoc_idma_platform);
}

static int __devexit asoc_idma_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}

static struct platform_driver asoc_idma_driver = {
	.driver = {
		.name = "samsung-idma",
		.owner = THIS_MODULE,
	},

	.probe = asoc_idma_platform_probe,
	.remove = __devexit_p(asoc_idma_platform_remove),
};

static int __init asoc_idma_init(void)
{
	return platform_driver_register(&asoc_idma_driver);
}
module_init(asoc_idma_init);

static void __exit asoc_idma_exit(void)
{
	platform_driver_unregister(&asoc_idma_driver);
}
module_exit(asoc_idma_exit);

MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("Samsung ASoC IDMA Driver");
MODULE_LICENSE("GPL");