1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * mtk-afe-fe-dais.c  --  Mediatek afe fe dai operator
4  *
5  * Copyright (c) 2016 MediaTek Inc.
6  * Author: Garlic Tseng <garlic.tseng@mediatek.com>
7  */
8 
9 #include <linux/io.h>
10 #include <linux/module.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/regmap.h>
13 #include <sound/soc.h>
14 #include "mtk-afe-platform-driver.h"
15 #include <sound/pcm_params.h>
16 #include "mtk-afe-fe-dai.h"
17 #include "mtk-base-afe.h"
18 
19 #define AFE_BASE_END_OFFSET 8
20 
/*
 * Write @val into the @mask-wide field located @shift bits up in @reg.
 * A negative @reg means the control does not exist on this chip, so the
 * access is skipped; a negative @shift is a driver bug (WARNed once).
 */
static int mtk_regmap_update_bits(struct regmap *map, int reg,
			   unsigned int mask,
			   unsigned int val, int shift)
{
	unsigned int shifted_mask, shifted_val;

	if (reg < 0 || WARN_ON_ONCE(shift < 0))
		return 0;

	shifted_mask = mask << shift;
	shifted_val = val << shift;

	return regmap_update_bits(map, reg, shifted_mask, shifted_val);
}
29 
/* Write @val to @reg, skipping registers that do not exist (reg < 0). */
static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
{
	if (reg >= 0)
		return regmap_write(map, reg, val);

	return 0;
}
36 
37 int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
38 		       struct snd_soc_dai *dai)
39 {
40 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
41 	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
42 	struct snd_pcm_runtime *runtime = substream->runtime;
43 	int memif_num = rtd->cpu_dai->id;
44 	struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
45 	const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
46 	int ret;
47 
48 	memif->substream = substream;
49 
50 	snd_pcm_hw_constraint_step(substream->runtime, 0,
51 				   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
52 	/* enable agent */
53 	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
54 			       1, 0, memif->data->agent_disable_shift);
55 
56 	snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);
57 
58 	/*
59 	 * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
60 	 * smaller than period_size due to AFE's internal buffer.
61 	 * This easily leads to overrun when avail_min is period_size.
62 	 * One more period can hold the possible unread buffer.
63 	 */
64 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
65 		int periods_max = mtk_afe_hardware->periods_max;
66 
67 		ret = snd_pcm_hw_constraint_minmax(runtime,
68 						   SNDRV_PCM_HW_PARAM_PERIODS,
69 						   3, periods_max);
70 		if (ret < 0) {
71 			dev_err(afe->dev, "hw_constraint_minmax failed\n");
72 			return ret;
73 		}
74 	}
75 
76 	ret = snd_pcm_hw_constraint_integer(runtime,
77 					    SNDRV_PCM_HW_PARAM_PERIODS);
78 	if (ret < 0)
79 		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
80 
81 	/* dynamic allocate irq to memif */
82 	if (memif->irq_usage < 0) {
83 		int irq_id = mtk_dynamic_irq_acquire(afe);
84 
85 		if (irq_id != afe->irqs_size) {
86 			/* link */
87 			memif->irq_usage = irq_id;
88 		} else {
89 			dev_err(afe->dev, "%s() error: no more asys irq\n",
90 				__func__);
91 			ret = -EBUSY;
92 		}
93 	}
94 	return ret;
95 }
96 EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);
97 
98 void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
99 			 struct snd_soc_dai *dai)
100 {
101 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
102 	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
103 	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
104 	int irq_id;
105 
106 	irq_id = memif->irq_usage;
107 
108 	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
109 			       1, 1, memif->data->agent_disable_shift);
110 
111 	if (!memif->const_irq) {
112 		mtk_dynamic_irq_release(afe, irq_id);
113 		memif->irq_usage = -1;
114 		memif->substream = NULL;
115 	}
116 }
117 EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);
118 
/*
 * FE DAI hw_params: allocate the DMA buffer, claim DRAM bandwidth (when
 * the platform provides a hook), then program the memif with the
 * negotiated buffer address, channel count, rate and sample format.
 */
int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	int id = rtd->cpu_dai->id;
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	int ret;
	unsigned int channels = params_channels(params);
	unsigned int rate = params_rate(params);
	snd_pcm_format_t format = params_format(params);

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	/* optional hook: request DRAM bandwidth for the audio DMA */
	if (afe->request_dram_resource)
		afe->request_dram_resource(afe->dev);

	dev_dbg(afe->dev, "%s(), %s, ch %d, rate %d, fmt %d, dma_addr %pad, dma_area %p, dma_bytes 0x%zx\n",
		__func__, memif->data->name,
		channels, rate, format,
		&substream->runtime->dma_addr,
		substream->runtime->dma_area,
		substream->runtime->dma_bytes);

	/* clear stale samples; memset_io since the area may be device memory */
	memset_io(substream->runtime->dma_area, 0,
		  substream->runtime->dma_bytes);

	/* set addr */
	ret = mtk_memif_set_addr(afe, id,
				 substream->runtime->dma_area,
				 substream->runtime->dma_addr,
				 substream->runtime->dma_bytes);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set addr, ret %d\n",
			__func__, id, ret);
		return ret;
	}

	/* set channel */
	ret = mtk_memif_set_channel(afe, id, channels);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set channel %d, ret %d\n",
			__func__, id, channels, ret);
		return ret;
	}

	/* set rate */
	ret = mtk_memif_set_rate_substream(substream, id, rate);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set rate %d, ret %d\n",
			__func__, id, rate, ret);
		return ret;
	}

	/* set format */
	ret = mtk_memif_set_format(afe, id, format);
	if (ret) {
		dev_err(afe->dev, "%s(), error, id %d, set format %d, ret %d\n",
			__func__, id, format, ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);
187 
/*
 * FE DAI hw_free: release the DRAM bandwidth claimed in hw_params (if
 * the platform provided a hook) and free the DMA buffer pages.
 */
int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);

	if (afe->release_dram_resource)
		afe->release_dram_resource(afe->dev);

	return snd_pcm_lib_free_pages(substream);
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);
199 
200 int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
201 		       struct snd_soc_dai *dai)
202 {
203 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
204 	struct snd_pcm_runtime * const runtime = substream->runtime;
205 	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
206 	int id = rtd->cpu_dai->id;
207 	struct mtk_base_afe_memif *memif = &afe->memif[id];
208 	struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
209 	const struct mtk_base_irq_data *irq_data = irqs->irq_data;
210 	unsigned int counter = runtime->period_size;
211 	int fs;
212 	int ret;
213 
214 	dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);
215 
216 	switch (cmd) {
217 	case SNDRV_PCM_TRIGGER_START:
218 	case SNDRV_PCM_TRIGGER_RESUME:
219 		ret = mtk_memif_set_enable(afe, id);
220 		if (ret) {
221 			dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
222 				__func__, id, ret);
223 			return ret;
224 		}
225 
226 		/* set irq counter */
227 		mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
228 				       irq_data->irq_cnt_maskbit, counter,
229 				       irq_data->irq_cnt_shift);
230 
231 		/* set irq fs */
232 		fs = afe->irq_fs(substream, runtime->rate);
233 
234 		if (fs < 0)
235 			return -EINVAL;
236 
237 		mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
238 				       irq_data->irq_fs_maskbit, fs,
239 				       irq_data->irq_fs_shift);
240 
241 		/* enable interrupt */
242 		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
243 				       1, 1, irq_data->irq_en_shift);
244 
245 		return 0;
246 	case SNDRV_PCM_TRIGGER_STOP:
247 	case SNDRV_PCM_TRIGGER_SUSPEND:
248 		ret = mtk_memif_set_disable(afe, id);
249 		if (ret) {
250 			dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
251 				__func__, id, ret);
252 		}
253 
254 		/* disable interrupt */
255 		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
256 				       1, 0, irq_data->irq_en_shift);
257 		/* and clear pending IRQ */
258 		mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
259 				 1 << irq_data->irq_clr_shift);
260 		return ret;
261 	default:
262 		return -EINVAL;
263 	}
264 }
265 EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);
266 
267 int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
268 		       struct snd_soc_dai *dai)
269 {
270 	struct snd_soc_pcm_runtime *rtd  = substream->private_data;
271 	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
272 	int id = rtd->cpu_dai->id;
273 	int pbuf_size;
274 
275 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
276 		if (afe->get_memif_pbuf_size) {
277 			pbuf_size = afe->get_memif_pbuf_size(substream);
278 			mtk_memif_set_pbuf_size(afe, id, pbuf_size);
279 		}
280 	}
281 	return 0;
282 }
283 EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);
284 
/* Default FE DAI callbacks shared by the Mediatek AFE platform drivers */
const struct snd_soc_dai_ops mtk_afe_fe_ops = {
	.startup	= mtk_afe_fe_startup,
	.shutdown	= mtk_afe_fe_shutdown,
	.hw_params	= mtk_afe_fe_hw_params,
	.hw_free	= mtk_afe_fe_hw_free,
	.prepare	= mtk_afe_fe_prepare,
	.trigger	= mtk_afe_fe_trigger,
};
EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);
294 
295 static DEFINE_MUTEX(irqs_lock);
296 int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
297 {
298 	int i;
299 
300 	mutex_lock(&afe->irq_alloc_lock);
301 	for (i = 0; i < afe->irqs_size; ++i) {
302 		if (afe->irqs[i].irq_occupyed == 0) {
303 			afe->irqs[i].irq_occupyed = 1;
304 			mutex_unlock(&afe->irq_alloc_lock);
305 			return i;
306 		}
307 	}
308 	mutex_unlock(&afe->irq_alloc_lock);
309 	return afe->irqs_size;
310 }
311 EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);
312 
313 int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
314 {
315 	mutex_lock(&afe->irq_alloc_lock);
316 	if (irq_id >= 0 && irq_id < afe->irqs_size) {
317 		afe->irqs[irq_id].irq_occupyed = 0;
318 		mutex_unlock(&afe->irq_alloc_lock);
319 		return 0;
320 	}
321 	mutex_unlock(&afe->irq_alloc_lock);
322 	return -EINVAL;
323 }
324 EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);
325 
326 int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
327 {
328 	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
329 	struct device *dev = afe->dev;
330 	struct regmap *regmap = afe->regmap;
331 	int i;
332 
333 	if (pm_runtime_status_suspended(dev) || afe->suspended)
334 		return 0;
335 
336 	if (!afe->reg_back_up)
337 		afe->reg_back_up =
338 			devm_kcalloc(dev, afe->reg_back_up_list_num,
339 				     sizeof(unsigned int), GFP_KERNEL);
340 
341 	for (i = 0; i < afe->reg_back_up_list_num; i++)
342 		regmap_read(regmap, afe->reg_back_up_list[i],
343 			    &afe->reg_back_up[i]);
344 
345 	afe->suspended = true;
346 	afe->runtime_suspend(dev);
347 	return 0;
348 }
349 EXPORT_SYMBOL_GPL(mtk_afe_dai_suspend);
350 
351 int mtk_afe_dai_resume(struct snd_soc_dai *dai)
352 {
353 	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
354 	struct device *dev = afe->dev;
355 	struct regmap *regmap = afe->regmap;
356 	int i = 0;
357 
358 	if (pm_runtime_status_suspended(dev) || !afe->suspended)
359 		return 0;
360 
361 	afe->runtime_resume(dev);
362 
363 	if (!afe->reg_back_up)
364 		dev_dbg(dev, "%s no reg_backup\n", __func__);
365 
366 	for (i = 0; i < afe->reg_back_up_list_num; i++)
367 		mtk_regmap_write(regmap, afe->reg_back_up_list[i],
368 				 afe->reg_back_up[i]);
369 
370 	afe->suspended = false;
371 	return 0;
372 }
373 EXPORT_SYMBOL_GPL(mtk_afe_dai_resume);
374 
375 int mtk_memif_set_enable(struct mtk_base_afe *afe, int id)
376 {
377 	struct mtk_base_afe_memif *memif = &afe->memif[id];
378 
379 	if (memif->data->enable_shift < 0) {
380 		dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
381 			 __func__, id);
382 		return 0;
383 	}
384 	return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
385 				      1, 1, memif->data->enable_shift);
386 }
387 EXPORT_SYMBOL_GPL(mtk_memif_set_enable);
388 
389 int mtk_memif_set_disable(struct mtk_base_afe *afe, int id)
390 {
391 	struct mtk_base_afe_memif *memif = &afe->memif[id];
392 
393 	if (memif->data->enable_shift < 0) {
394 		dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
395 			 __func__, id);
396 		return 0;
397 	}
398 	return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
399 				      1, 0, memif->data->enable_shift);
400 }
401 EXPORT_SYMBOL_GPL(mtk_memif_set_disable);
402 
/*
 * Program a memif's DMA buffer window: 32-bit base/end registers plus,
 * where the chip provides them, separate upper-32-bit registers and a
 * single "bit 33" MSB flag for buffers above the 4 GiB boundary.
 * Always returns 0.
 */
int mtk_memif_set_addr(struct mtk_base_afe *afe, int id,
		       unsigned char *dma_area,
		       dma_addr_t dma_addr,
		       size_t dma_bytes)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	int msb_at_bit33 = upper_32_bits(dma_addr) ? 1 : 0;
	unsigned int phys_buf_addr = lower_32_bits(dma_addr);
	unsigned int phys_buf_addr_upper_32 = upper_32_bits(dma_addr);

	/* cache the buffer description for later pointer/copy handling */
	memif->dma_area = dma_area;
	memif->dma_addr = dma_addr;
	memif->dma_bytes = dma_bytes;

	/* start */
	mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
			 phys_buf_addr);
	/* end */
	if (memif->data->reg_ofs_end)
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_end,
				 phys_buf_addr + dma_bytes - 1);
	else
		/* older layouts: end register sits at base + 8 */
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_base +
				 AFE_BASE_END_OFFSET,
				 phys_buf_addr + dma_bytes - 1);

	/* set start, end, upper 32 bits */
	if (memif->data->reg_ofs_base_msb) {
		mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base_msb,
				 phys_buf_addr_upper_32);
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_end_msb,
				 phys_buf_addr_upper_32);
	}

	/* set MSB to 33-bit */
	if (memif->data->msb_reg >= 0)
		mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
				       1, msb_at_bit33, memif->data->msb_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_memif_set_addr);
448 
449 int mtk_memif_set_channel(struct mtk_base_afe *afe,
450 			  int id, unsigned int channel)
451 {
452 	struct mtk_base_afe_memif *memif = &afe->memif[id];
453 	unsigned int mono;
454 
455 	if (memif->data->mono_shift < 0)
456 		return 0;
457 
458 	if (memif->data->quad_ch_mask) {
459 		unsigned int quad_ch = (channel == 4) ? 1 : 0;
460 
461 		mtk_regmap_update_bits(afe->regmap, memif->data->quad_ch_reg,
462 				       memif->data->quad_ch_mask,
463 				       quad_ch, memif->data->quad_ch_shift);
464 	}
465 
466 	if (memif->data->mono_invert)
467 		mono = (channel == 1) ? 0 : 1;
468 	else
469 		mono = (channel == 1) ? 1 : 0;
470 
471 	return mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
472 				      1, mono, memif->data->mono_shift);
473 }
474 EXPORT_SYMBOL_GPL(mtk_memif_set_channel);
475 
476 static int mtk_memif_set_rate_fs(struct mtk_base_afe *afe,
477 				 int id, int fs)
478 {
479 	struct mtk_base_afe_memif *memif = &afe->memif[id];
480 
481 	if (memif->data->fs_shift >= 0)
482 		mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
483 				       memif->data->fs_maskbit,
484 				       fs, memif->data->fs_shift);
485 
486 	return 0;
487 }
488 
489 int mtk_memif_set_rate(struct mtk_base_afe *afe,
490 		       int id, unsigned int rate)
491 {
492 	int fs = 0;
493 
494 	if (!afe->get_dai_fs) {
495 		dev_err(afe->dev, "%s(), error, afe->get_dai_fs == NULL\n",
496 			__func__);
497 		return -EINVAL;
498 	}
499 
500 	fs = afe->get_dai_fs(afe, id, rate);
501 
502 	if (fs < 0)
503 		return -EINVAL;
504 
505 	return mtk_memif_set_rate_fs(afe, id, fs);
506 }
507 EXPORT_SYMBOL_GPL(mtk_memif_set_rate);
508 
509 int mtk_memif_set_rate_substream(struct snd_pcm_substream *substream,
510 				 int id, unsigned int rate)
511 {
512 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
513 	struct snd_soc_component *component =
514 		snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
515 	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
516 
517 	int fs = 0;
518 
519 	if (!afe->memif_fs) {
520 		dev_err(afe->dev, "%s(), error, afe->memif_fs == NULL\n",
521 			__func__);
522 		return -EINVAL;
523 	}
524 
525 	fs = afe->memif_fs(substream, rate);
526 
527 	if (fs < 0)
528 		return -EINVAL;
529 
530 	return mtk_memif_set_rate_fs(afe, id, fs);
531 }
532 EXPORT_SYMBOL_GPL(mtk_memif_set_rate_substream);
533 
534 int mtk_memif_set_format(struct mtk_base_afe *afe,
535 			 int id, snd_pcm_format_t format)
536 {
537 	struct mtk_base_afe_memif *memif = &afe->memif[id];
538 	int hd_audio = 0;
539 	int hd_align = 0;
540 
541 	/* set hd mode */
542 	switch (format) {
543 	case SNDRV_PCM_FORMAT_S16_LE:
544 	case SNDRV_PCM_FORMAT_U16_LE:
545 		hd_audio = 0;
546 		break;
547 	case SNDRV_PCM_FORMAT_S32_LE:
548 	case SNDRV_PCM_FORMAT_U32_LE:
549 		hd_audio = 1;
550 		hd_align = 1;
551 		break;
552 	case SNDRV_PCM_FORMAT_S24_LE:
553 	case SNDRV_PCM_FORMAT_U24_LE:
554 		hd_audio = 1;
555 		break;
556 	default:
557 		dev_err(afe->dev, "%s() error: unsupported format %d\n",
558 			__func__, format);
559 		break;
560 	}
561 
562 	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
563 			       1, hd_audio, memif->data->hd_shift);
564 
565 	mtk_regmap_update_bits(afe->regmap, memif->data->hd_align_reg,
566 			       1, hd_align, memif->data->hd_align_mshift);
567 
568 	return 0;
569 }
570 EXPORT_SYMBOL_GPL(mtk_memif_set_format);
571 
572 int mtk_memif_set_pbuf_size(struct mtk_base_afe *afe,
573 			    int id, int pbuf_size)
574 {
575 	const struct mtk_base_memif_data *memif_data = afe->memif[id].data;
576 
577 	if (memif_data->pbuf_mask == 0 || memif_data->minlen_mask == 0)
578 		return 0;
579 
580 	mtk_regmap_update_bits(afe->regmap, memif_data->pbuf_reg,
581 			       memif_data->pbuf_mask,
582 			       pbuf_size, memif_data->pbuf_shift);
583 
584 	mtk_regmap_update_bits(afe->regmap, memif_data->minlen_reg,
585 			       memif_data->minlen_mask,
586 			       pbuf_size, memif_data->minlen_shift);
587 	return 0;
588 }
589 EXPORT_SYMBOL_GPL(mtk_memif_set_pbuf_size);
590 
591 MODULE_DESCRIPTION("Mediatek simple fe dai operator");
592 MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
593 MODULE_LICENSE("GPL v2");
594