xref: /openbmc/linux/sound/soc/meson/axg-fifo.c (revision 2985bed6)
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
//
// Copyright (c) 2018 BayLibre, SAS.
// Author: Jerome Brunet <jbrunet@baylibre.com>

#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

#include "axg-fifo.h"

/*
 * This file implements the platform operations common to the playback and
 * capture frontend DAIs. The logic behind these two types of fifo is very
 * similar, but some differences exist.
 * These differences are handled in the respective DAI drivers.
 */

static struct snd_pcm_hardware axg_fifo_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE),

	.formats = AXG_FIFO_FORMATS,
	.rate_min = 5512,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = AXG_FIFO_CH_MAX,
	.period_bytes_min = AXG_FIFO_BURST,
	.period_bytes_max = UINT_MAX,
	.periods_min = 2,
	.periods_max = UINT_MAX,

	/* No real justification for this */
	.buffer_bytes_max = 1 * 1024 * 1024,
};

static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
{
	struct snd_soc_pcm_runtime *rtd = ss->private_data;

	return rtd->cpu_dai;
}

static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return snd_soc_dai_get_drvdata(dai);
}

static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return dai->dev;
}

static void __dma_enable(struct axg_fifo *fifo, bool enable)
{
	regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
			   enable ? CTRL0_DMA_EN : 0);
}

int axg_fifo_pcm_trigger(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss, int cmd)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		__dma_enable(fifo, true);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		__dma_enable(fifo, false);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_trigger);

snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_soc_component *component,
				       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	unsigned int addr;

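	/*
	 * STATUS2 is set up in axg_fifo_pcm_open() to report the current
	 * memory pointer, so subtracting the buffer base address gives the
	 * position in bytes within the DMA buffer.
	 */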
	regmap_read(fifo->map, FIFO_STATUS2, &addr);

	return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_pointer);

int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
			   struct snd_pcm_substream *ss,
			   struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = ss->runtime;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int burst_num, period, threshold;
	dma_addr_t end_ptr;

	period = params_period_bytes(params);

	/* Setup dma memory pointers */
	end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
	regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);
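	/*
	 * Note that the finish address is the start of the last burst of
	 * the buffer (hence the AXG_FIFO_BURST subtracted above), not one
	 * byte past its end.
	 */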

	/* Setup interrupt periodicity */
	burst_num = period / AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);

	/*
	 * Start the fifo request on the smallest of the following:
	 * - Half the fifo size
	 * - Half the period size
	 */
	threshold = min(period / 2, fifo->depth / 2);

	/*
	 * With the threshold in bytes, register value is:
	 * V = (threshold / burst) - 1
	 */
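	/*
	 * Worked example, assuming an 8 byte burst: with the default 256 byte
	 * fifo depth and a 4096 byte period, threshold = min(2048, 128) = 128
	 * bytes, i.e. 16 bursts, so 15 ends up in the threshold field.
	 */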
	threshold /= AXG_FIFO_BURST;
	regmap_field_write(fifo->field_threshold,
			   threshold ? threshold - 1 : 0);

	/* Enable block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT));

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_params);

int g12a_fifo_pcm_hw_params(struct snd_soc_component *component,
			    struct snd_pcm_substream *ss,
			    struct snd_pcm_hw_params *params)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	int ret;

	ret = axg_fifo_pcm_hw_params(component, ss, params);
	if (ret)
		return ret;

	/* Set the initial memory address of the DMA */
	regmap_write(fifo->map, FIFO_INIT_ADDR, runtime->dma_addr);
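	/*
	 * The init address register is specific to g12a and later SoCs,
	 * which is why this step is done here rather than in the common
	 * axg_fifo_pcm_hw_params().
	 */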

	return 0;
}
EXPORT_SYMBOL_GPL(g12a_fifo_pcm_hw_params);

int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	/* Disable the block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);

static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   CTRL1_INT_CLR(mask));

	/* The clear bits must also be cleared */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   0);
}

static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int status;

	regmap_read(fifo->map, FIFO_STATUS1, &status);

	status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
	if (status & FIFO_INT_COUNT_REPEAT)
		snd_pcm_period_elapsed(ss);
	else
		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
			status);

	/* Ack irqs */
	axg_fifo_ack_irq(fifo, status);

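	/* IRQ_HANDLED if any status bit was set, IRQ_NONE otherwise */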
	return IRQ_RETVAL(status);
}

int axg_fifo_pcm_open(struct snd_soc_component *component,
		      struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct device *dev = axg_fifo_dev(ss);
	int ret;

	snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);

	/*
	 * Make sure the buffer and period sizes are multiples of the FIFO
	 * burst
	 */
	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 AXG_FIFO_BURST);
	if (ret)
		return ret;

	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 AXG_FIFO_BURST);
	if (ret)
		return ret;

	ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
			  dev_name(dev), ss);
	if (ret)
		return ret;

	/* Enable pclk to access registers and clock the fifo ip */
	ret = clk_prepare_enable(fifo->pclk);
	if (ret)
		goto free_irq;

	/* Setup status2 so it reports the memory pointer */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_STATUS2_SEL_MASK,
			   CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));

	/* Make sure the dma is initially disabled */
	__dma_enable(fifo, false);

	/* Disable irqs until params are ready */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_MASK), 0);

	/* Clear any pending interrupt */
	axg_fifo_ack_irq(fifo, FIFO_INT_MASK);

	/* Take the memory arbiter out of reset */
	ret = reset_control_deassert(fifo->arb);
	if (ret)
		goto free_clk;

	return 0;

free_clk:
	clk_disable_unprepare(fifo->pclk);
free_irq:
	free_irq(fifo->irq, ss);
	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_open);

int axg_fifo_pcm_close(struct snd_soc_component *component,
		       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	int ret;

	/* Put the memory arbiter back in reset */
	ret = reset_control_assert(fifo->arb);

	/* Disable the fifo ip and register access */
	clk_disable_unprepare(fifo->pclk);

	/* Remove the IRQ */
	free_irq(fifo->irq, ss);

	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_close);

int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
	struct snd_card *card = rtd->card->snd_card;
	size_t size = axg_fifo_hw.buffer_bytes_max;

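	/*
	 * Preallocate the full buffer_bytes_max up front; the same value is
	 * passed as both the preallocation and maximum buffer size.
	 */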
	snd_pcm_set_managed_buffer(rtd->pcm->streams[type].substream,
				   SNDRV_DMA_TYPE_DEV, card->dev,
				   size, size);
	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);

static const struct regmap_config axg_fifo_regmap_cfg = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= FIFO_CTRL2,
};

int axg_fifo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct axg_fifo_match_data *data;
	struct axg_fifo *fifo;
	void __iomem *regs;
	int ret;

	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "failed to match device\n");
		return -ENODEV;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;
	platform_set_drvdata(pdev, fifo);

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
	if (IS_ERR(fifo->map)) {
		dev_err(dev, "failed to init regmap: %ld\n",
			PTR_ERR(fifo->map));
		return PTR_ERR(fifo->map);
	}

	fifo->pclk = devm_clk_get(dev, NULL);
	if (IS_ERR(fifo->pclk)) {
		if (PTR_ERR(fifo->pclk) != -EPROBE_DEFER)
			dev_err(dev, "failed to get pclk: %ld\n",
				PTR_ERR(fifo->pclk));
		return PTR_ERR(fifo->pclk);
	}

	fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(fifo->arb)) {
		if (PTR_ERR(fifo->arb) != -EPROBE_DEFER)
			dev_err(dev, "failed to get arb reset: %ld\n",
				PTR_ERR(fifo->arb));
		return PTR_ERR(fifo->arb);
	}

	fifo->irq = of_irq_get(dev->of_node, 0);
	if (fifo->irq <= 0) {
		dev_err(dev, "failed to get irq: %d\n", fifo->irq);
		return fifo->irq;
	}

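	/*
	 * The location of the request threshold field differs between SoC
	 * generations, so its register description is taken from the
	 * per-SoC match data.
	 */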
	fifo->field_threshold =
		devm_regmap_field_alloc(dev, fifo->map, data->field_threshold);
	if (IS_ERR(fifo->field_threshold))
		return PTR_ERR(fifo->field_threshold);

	ret = of_property_read_u32(dev->of_node, "amlogic,fifo-depth",
				   &fifo->depth);
	if (ret) {
		/* Error out for anything but a missing property */
		if (ret != -EINVAL)
			return ret;
		/*
		 * If the property is missing, it might be because of an old
		 * DT. In that case, assume the smallest known fifo depth.
		 */
		fifo->depth = 256;
		dev_warn(dev, "fifo depth not found, assume %u bytes\n",
			 fifo->depth);
	}

	return devm_snd_soc_register_component(dev, data->component_drv,
					       data->dai_drv, 1);
}
EXPORT_SYMBOL_GPL(axg_fifo_probe);

MODULE_DESCRIPTION("Amlogic AXG/G12A fifo driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");