xref: /openbmc/linux/sound/soc/uniphier/aio-dma.c (revision addee42a)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // Socionext UniPhier AIO DMA driver.
4 //
5 // Copyright (c) 2016-2018 Socionext Inc.
6 //
7 // This program is free software; you can redistribute it and/or
8 // modify it under the terms of the GNU General Public License
9 // as published by the Free Software Foundation; version 2
10 // of the License.
11 //
12 // This program is distributed in the hope that it will be useful,
13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 // GNU General Public License for more details.
16 //
17 // You should have received a copy of the GNU General Public License
18 // along with this program; if not, see <http://www.gnu.org/licenses/>.
19 
20 #include <linux/dma-mapping.h>
21 #include <linux/errno.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <sound/core.h>
25 #include <sound/pcm.h>
26 #include <sound/soc.h>
27 
28 #include "aio.h"
29 
30 static struct snd_pcm_hardware uniphier_aiodma_hw = {
31 	.info = SNDRV_PCM_INFO_MMAP |
32 		SNDRV_PCM_INFO_MMAP_VALID |
33 		SNDRV_PCM_INFO_INTERLEAVED,
34 	.period_bytes_min = 256,
35 	.period_bytes_max = 4096,
36 	.periods_min      = 4,
37 	.periods_max      = 1024,
38 	.buffer_bytes_max = 128 * 1024,
39 };
40 
41 static void aiodma_pcm_irq(struct uniphier_aio_sub *sub)
42 {
43 	struct snd_pcm_runtime *runtime = sub->substream->runtime;
44 	int bytes = runtime->period_size *
45 		runtime->channels * samples_to_bytes(runtime, 1);
46 	int ret;
47 
48 	spin_lock(&sub->lock);
49 	ret = aiodma_rb_set_threshold(sub, runtime->dma_bytes,
50 				      sub->threshold + bytes);
51 	if (!ret)
52 		sub->threshold += bytes;
53 
54 	aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes);
55 	aiodma_rb_clear_irq(sub);
56 	spin_unlock(&sub->lock);
57 
58 	snd_pcm_period_elapsed(sub->substream);
59 }
60 
61 static void aiodma_compr_irq(struct uniphier_aio_sub *sub)
62 {
63 	struct snd_compr_runtime *runtime = sub->cstream->runtime;
64 	int bytes = runtime->fragment_size;
65 	int ret;
66 
67 	spin_lock(&sub->lock);
68 	ret = aiodma_rb_set_threshold(sub, sub->compr_bytes,
69 				      sub->threshold + bytes);
70 	if (!ret)
71 		sub->threshold += bytes;
72 
73 	aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
74 	aiodma_rb_clear_irq(sub);
75 	spin_unlock(&sub->lock);
76 
77 	snd_compr_fragment_elapsed(sub->cstream);
78 }
79 
80 static irqreturn_t aiodma_irq(int irq, void *p)
81 {
82 	struct platform_device *pdev = p;
83 	struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
84 	irqreturn_t ret = IRQ_NONE;
85 	int i, j;
86 
87 	for (i = 0; i < chip->num_aios; i++) {
88 		struct uniphier_aio *aio = &chip->aios[i];
89 
90 		for (j = 0; j < ARRAY_SIZE(aio->sub); j++) {
91 			struct uniphier_aio_sub *sub = &aio->sub[j];
92 
93 			/* Skip channel that does not trigger */
94 			if (!sub->running || !aiodma_rb_is_irq(sub))
95 				continue;
96 
97 			if (sub->substream)
98 				aiodma_pcm_irq(sub);
99 			if (sub->cstream)
100 				aiodma_compr_irq(sub);
101 
102 			ret = IRQ_HANDLED;
103 		}
104 	}
105 
106 	return ret;
107 }
108 
109 static int uniphier_aiodma_open(struct snd_pcm_substream *substream)
110 {
111 	struct snd_pcm_runtime *runtime = substream->runtime;
112 
113 	snd_soc_set_runtime_hwparams(substream, &uniphier_aiodma_hw);
114 
115 	return snd_pcm_hw_constraint_step(runtime, 0,
116 		SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256);
117 }
118 
119 static int uniphier_aiodma_hw_params(struct snd_pcm_substream *substream,
120 				     struct snd_pcm_hw_params *params)
121 {
122 	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
123 	substream->runtime->dma_bytes = params_buffer_bytes(params);
124 
125 	return 0;
126 }
127 
128 static int uniphier_aiodma_hw_free(struct snd_pcm_substream *substream)
129 {
130 	snd_pcm_set_runtime_buffer(substream, NULL);
131 	substream->runtime->dma_bytes = 0;
132 
133 	return 0;
134 }
135 
136 static int uniphier_aiodma_prepare(struct snd_pcm_substream *substream)
137 {
138 	struct snd_pcm_runtime *runtime = substream->runtime;
139 	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
140 	struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
141 	struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
142 	int bytes = runtime->period_size *
143 		runtime->channels * samples_to_bytes(runtime, 1);
144 	unsigned long flags;
145 	int ret;
146 
147 	ret = aiodma_ch_set_param(sub);
148 	if (ret)
149 		return ret;
150 
151 	spin_lock_irqsave(&sub->lock, flags);
152 	ret = aiodma_rb_set_buffer(sub, runtime->dma_addr,
153 				   runtime->dma_addr + runtime->dma_bytes,
154 				   bytes);
155 	spin_unlock_irqrestore(&sub->lock, flags);
156 	if (ret)
157 		return ret;
158 
159 	return 0;
160 }
161 
162 static int uniphier_aiodma_trigger(struct snd_pcm_substream *substream, int cmd)
163 {
164 	struct snd_pcm_runtime *runtime = substream->runtime;
165 	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
166 	struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
167 	struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
168 	struct device *dev = &aio->chip->pdev->dev;
169 	int bytes = runtime->period_size *
170 		runtime->channels * samples_to_bytes(runtime, 1);
171 	unsigned long flags;
172 
173 	spin_lock_irqsave(&sub->lock, flags);
174 	switch (cmd) {
175 	case SNDRV_PCM_TRIGGER_START:
176 		aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes,
177 			       bytes);
178 		aiodma_ch_set_enable(sub, 1);
179 		sub->running = 1;
180 
181 		break;
182 	case SNDRV_PCM_TRIGGER_STOP:
183 		sub->running = 0;
184 		aiodma_ch_set_enable(sub, 0);
185 
186 		break;
187 	default:
188 		dev_warn(dev, "Unknown trigger(%d) ignored\n", cmd);
189 		break;
190 	}
191 	spin_unlock_irqrestore(&sub->lock, flags);
192 
193 	return 0;
194 }
195 
196 static snd_pcm_uframes_t uniphier_aiodma_pointer(
197 					struct snd_pcm_substream *substream)
198 {
199 	struct snd_pcm_runtime *runtime = substream->runtime;
200 	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
201 	struct uniphier_aio *aio = uniphier_priv(rtd->cpu_dai);
202 	struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
203 	int bytes = runtime->period_size *
204 		runtime->channels * samples_to_bytes(runtime, 1);
205 	unsigned long flags;
206 	snd_pcm_uframes_t pos;
207 
208 	spin_lock_irqsave(&sub->lock, flags);
209 	aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes);
210 
211 	if (sub->swm->dir == PORT_DIR_OUTPUT)
212 		pos = bytes_to_frames(runtime, sub->rd_offs);
213 	else
214 		pos = bytes_to_frames(runtime, sub->wr_offs);
215 	spin_unlock_irqrestore(&sub->lock, flags);
216 
217 	return pos;
218 }
219 
220 static int uniphier_aiodma_mmap(struct snd_pcm_substream *substream,
221 				struct vm_area_struct *vma)
222 {
223 	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
224 
225 	return remap_pfn_range(vma, vma->vm_start,
226 			       substream->dma_buffer.addr >> PAGE_SHIFT,
227 			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
228 }
229 
/* PCM stream operations exposed to ALSA through the component driver. */
static const struct snd_pcm_ops uniphier_aiodma_ops = {
	.open      = uniphier_aiodma_open,
	.ioctl     = snd_pcm_lib_ioctl,
	.hw_params = uniphier_aiodma_hw_params,
	.hw_free   = uniphier_aiodma_hw_free,
	.prepare   = uniphier_aiodma_prepare,
	.trigger   = uniphier_aiodma_trigger,
	.pointer   = uniphier_aiodma_pointer,
	.mmap      = uniphier_aiodma_mmap,
};
240 
241 static int uniphier_aiodma_new(struct snd_soc_pcm_runtime *rtd)
242 {
243 	struct device *dev = rtd->card->snd_card->dev;
244 	struct snd_pcm *pcm = rtd->pcm;
245 	int ret;
246 
247 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(33));
248 	if (ret)
249 		return ret;
250 
251 	return snd_pcm_lib_preallocate_pages_for_all(pcm,
252 		SNDRV_DMA_TYPE_DEV, dev,
253 		uniphier_aiodma_hw.buffer_bytes_max,
254 		uniphier_aiodma_hw.buffer_bytes_max);
255 }
256 
/* Component .pcm_free callback: release the preallocated DMA buffers. */
static void uniphier_aiodma_free(struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}
261 
/* Platform component: ties the PCM and compress ops to the card. */
static const struct snd_soc_component_driver uniphier_soc_platform = {
	.pcm_new   = uniphier_aiodma_new,
	.pcm_free  = uniphier_aiodma_free,
	.ops       = &uniphier_aiodma_ops,
	.compr_ops = &uniphier_aio_compr_ops,
};
268 
/* MMIO regmap: 32-bit registers at 4-byte stride, no register caching. */
static const struct regmap_config aiodma_regmap_config = {
	.reg_bits      = 32,
	.reg_stride    = 4,
	.val_bits      = 32,
	.max_register  = 0x7fffc,
	.cache_type    = REGCACHE_NONE,
};
276 
277 /**
278  * uniphier_aiodma_soc_register_platform - register the AIO DMA
279  * @pdev: the platform device
280  *
281  * Register and setup the DMA of AIO to transfer the sound data to device.
282  * This function need to call once at driver startup and need NOT to call
283  * unregister function.
284  *
285  * Return: Zero if successful, otherwise a negative value on error.
286  */
287 int uniphier_aiodma_soc_register_platform(struct platform_device *pdev)
288 {
289 	struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
290 	struct device *dev = &pdev->dev;
291 	struct resource *res;
292 	void __iomem *preg;
293 	int irq, ret;
294 
295 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
296 	preg = devm_ioremap_resource(dev, res);
297 	if (IS_ERR(preg))
298 		return PTR_ERR(preg);
299 
300 	chip->regmap = devm_regmap_init_mmio(dev, preg,
301 					     &aiodma_regmap_config);
302 	if (IS_ERR(chip->regmap))
303 		return PTR_ERR(chip->regmap);
304 
305 	irq = platform_get_irq(pdev, 0);
306 	if (irq < 0) {
307 		dev_err(dev, "Could not get irq.\n");
308 		return irq;
309 	}
310 
311 	ret = devm_request_irq(dev, irq, aiodma_irq,
312 			       IRQF_SHARED, dev_name(dev), pdev);
313 	if (ret)
314 		return ret;
315 
316 	return devm_snd_soc_register_component(dev, &uniphier_soc_platform,
317 					       NULL, 0);
318 }
319 EXPORT_SYMBOL_GPL(uniphier_aiodma_soc_register_platform);
320