// SPDX-License-Identifier: (GPL-2.0 OR MIT)
//
// Copyright (c) 2018 BayLibre, SAS.
// Author: Jerome Brunet <jbrunet@baylibre.com>

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

#include "axg-fifo.h"

/*
 * This file implements the platform operations common to the playback and
 * capture frontend DAIs. The logic behind these two types of fifo is very
 * similar, but some differences exist.
 * These differences are handled in the respective DAI drivers.
 */

static struct snd_pcm_hardware axg_fifo_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE |
		 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
	.formats = AXG_FIFO_FORMATS,
	.rate_min = 5512,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = AXG_FIFO_CH_MAX,
	.period_bytes_min = AXG_FIFO_BURST,
	.period_bytes_max = UINT_MAX,
	.periods_min = 2,
	.periods_max = UINT_MAX,

	/* No real justification for this */
	.buffer_bytes_max = 1 * 1024 * 1024,
};

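/*
 * Helpers to get the frontend cpu dai of the link, the fifo driver data and
 * the fifo device from a PCM substream. The fifo pointer is the platform
 * driver data set in axg_fifo_probe().
 */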
static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
{
	struct snd_soc_pcm_runtime *rtd = ss->private_data;

	return asoc_rtd_to_cpu(rtd, 0);
}

static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return snd_soc_dai_get_drvdata(dai);
}

static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return dai->dev;
}

static void __dma_enable(struct axg_fifo *fifo, bool enable)
{
	regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
			   enable ? CTRL0_DMA_EN : 0);
}

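/*
 * Start/stop only toggles the DMA enable bit of the fifo; everything else is
 * set up in the open() and hw_params() callbacks.
 */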
int axg_fifo_pcm_trigger(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss, int cmd)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		__dma_enable(fifo, true);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		__dma_enable(fifo, false);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_trigger);

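/*
 * STATUS2 is configured at open() time to report the current DDR pointer of
 * the DMA, so the hardware position is simply its offset from the start of
 * the DMA buffer, converted to frames.
 */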
snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_soc_component *component,
				       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	unsigned int addr;

	regmap_read(fifo->map, FIFO_STATUS2, &addr);

	return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_pointer);

int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
			   struct snd_pcm_substream *ss,
			   struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = ss->runtime;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int burst_num, period, threshold, irq_en;
	dma_addr_t end_ptr;

	period = params_period_bytes(params);

	/* Setup dma memory pointers */
	end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
	regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);

	/* Setup interrupt periodicity */
	burst_num = period / AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);

	/*
	 * Start the fifo request on the smallest of the following:
	 * - Half the fifo size
	 * - Half the period size
	 */
	threshold = min(period / 2, fifo->depth / 2);

	/*
	 * With the threshold in bytes, register value is:
	 * V = (threshold / burst) - 1
	 */
	threshold /= AXG_FIFO_BURST;
	regmap_field_write(fifo->field_threshold,
			   threshold ? threshold - 1 : 0);
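
	/*
	 * Worked example, assuming AXG_FIFO_BURST is 8 bytes: with a 4096
	 * byte period and a 512 byte deep fifo, the request threshold is
	 * min(2048, 256) = 256 bytes, so the register is written with
	 * 256 / 8 - 1 = 31.
	 */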

	/* Enable irq if necessary */
	irq_en = runtime->no_period_wakeup ? 0 : FIFO_INT_COUNT_REPEAT;
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN,
			   FIELD_PREP(CTRL0_INT_EN, irq_en));

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_params);

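/*
 * The G12A fifo is programmed like the AXG one, but it additionally needs
 * the initial DMA address to be written to FIFO_INIT_ADDR.
 */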
int g12a_fifo_pcm_hw_params(struct snd_soc_component *component,
			    struct snd_pcm_substream *ss,
			    struct snd_pcm_hw_params *params)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	int ret;

	ret = axg_fifo_pcm_hw_params(component, ss, params);
	if (ret)
		return ret;

	/* Set the initial memory address of the DMA */
	regmap_write(fifo->map, FIFO_INIT_ADDR, runtime->dma_addr);

	return 0;
}
EXPORT_SYMBOL_GPL(g12a_fifo_pcm_hw_params);

int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
			 struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	/* Disable irqs */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);

static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR,
			   FIELD_PREP(CTRL1_INT_CLR, mask));

	/* The clear bits must themselves be cleared afterwards */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR,
			   FIELD_PREP(CTRL1_INT_CLR, 0));
}

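/*
 * Threaded interrupt handler: acknowledge the pending interrupt sources and
 * signal a period elapsed when the block count interrupt has fired. Any
 * other source is unexpected and only reported as a debug message.
 */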
static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int status;

	regmap_read(fifo->map, FIFO_STATUS1, &status);
	status = FIELD_GET(STATUS1_INT_STS, status);
	axg_fifo_ack_irq(fifo, status);

	if (status & ~FIFO_INT_COUNT_REPEAT)
		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
			status);

	if (status & FIFO_INT_COUNT_REPEAT) {
		snd_pcm_period_elapsed(ss);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

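/*
 * open() applies the hardware constraints, requests the fifo interrupt,
 * enables the peripheral clock and brings the fifo to a known state: DMA
 * and interrupts disabled, pending interrupts cleared and the memory
 * arbiter taken out of reset.
 */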
int axg_fifo_pcm_open(struct snd_soc_component *component,
		      struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct device *dev = axg_fifo_dev(ss);
	int ret;

	snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);

	/*
	 * Make sure the buffer and period size are multiples of the FIFO
	 * burst
	 */
	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 AXG_FIFO_BURST);
	if (ret)
		return ret;

	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 AXG_FIFO_BURST);
	if (ret)
		return ret;

	/* Use the threaded irq handler only with non-atomic links */
	ret = request_threaded_irq(fifo->irq, NULL,
				   axg_fifo_pcm_irq_block,
				   IRQF_ONESHOT, dev_name(dev), ss);
	if (ret)
		return ret;

	/* Enable pclk to access registers and clock the fifo ip */
	ret = clk_prepare_enable(fifo->pclk);
	if (ret)
		goto free_irq;

	/* Setup status2 so it reports the memory pointer */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_STATUS2_SEL,
			   FIELD_PREP(CTRL1_STATUS2_SEL, STATUS2_SEL_DDR_READ));

	/* Make sure the dma is initially disabled */
	__dma_enable(fifo, false);

	/* Disable irqs until params are ready */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN, 0);

	/* Clear any pending interrupt */
	axg_fifo_ack_irq(fifo, FIFO_INT_MASK);

	/* Take the memory arbiter out of reset */
	ret = reset_control_deassert(fifo->arb);
	if (ret)
		goto free_clk;

	return 0;

free_clk:
	clk_disable_unprepare(fifo->pclk);
free_irq:
	free_irq(fifo->irq, ss);
	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_open);

int axg_fifo_pcm_close(struct snd_soc_component *component,
		       struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	int ret;

	/* Put the memory arbiter back in reset */
	ret = reset_control_assert(fifo->arb);

	/* Disable fifo ip and register access */
	clk_disable_unprepare(fifo->pclk);

	/* remove IRQ */
	free_irq(fifo->irq, ss);

	return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_close);

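/*
 * The DMA buffer of the stream is managed by the ALSA core; it is
 * preallocated at the maximum size supported by the fifo
 * (buffer_bytes_max).
 */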
int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
	struct snd_card *card = rtd->card->snd_card;
	size_t size = axg_fifo_hw.buffer_bytes_max;

	snd_pcm_set_managed_buffer(rtd->pcm->streams[type].substream,
				   SNDRV_DMA_TYPE_DEV, card->dev,
				   size, size);
	return 0;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);

static const struct regmap_config axg_fifo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = FIFO_CTRL2,
};

int axg_fifo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct axg_fifo_match_data *data;
	struct axg_fifo *fifo;
	void __iomem *regs;
	int ret;

	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "failed to match device\n");
		return -ENODEV;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;
	platform_set_drvdata(pdev, fifo);

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
	if (IS_ERR(fifo->map)) {
		dev_err(dev, "failed to init regmap: %ld\n",
			PTR_ERR(fifo->map));
		return PTR_ERR(fifo->map);
	}

	fifo->pclk = devm_clk_get(dev, NULL);
	if (IS_ERR(fifo->pclk))
		return dev_err_probe(dev, PTR_ERR(fifo->pclk), "failed to get pclk\n");

	fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(fifo->arb))
		return dev_err_probe(dev, PTR_ERR(fifo->arb), "failed to get arb reset\n");

	fifo->irq = of_irq_get(dev->of_node, 0);
	if (fifo->irq <= 0) {
		dev_err(dev, "failed to get irq: %d\n", fifo->irq);
		return fifo->irq;
	}

	fifo->field_threshold =
		devm_regmap_field_alloc(dev, fifo->map, data->field_threshold);
	if (IS_ERR(fifo->field_threshold))
		return PTR_ERR(fifo->field_threshold);

	ret = of_property_read_u32(dev->of_node, "amlogic,fifo-depth",
				   &fifo->depth);
	if (ret) {
		/* Error out for anything but a missing property */
		if (ret != -EINVAL)
			return ret;
		/*
		 * If the property is missing, it might be because of an old
		 * DT. In that case, assume the smallest known fifo depth.
		 */
		fifo->depth = 256;
		dev_warn(dev, "fifo depth not found, assume %u bytes\n",
			 fifo->depth);
	}

	return devm_snd_soc_register_component(dev, data->component_drv,
					       data->dai_drv, 1);
}
EXPORT_SYMBOL_GPL(axg_fifo_probe);

MODULE_DESCRIPTION("Amlogic AXG/G12A fifo driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");