// SPDX-License-Identifier: GPL-2.0
/*
 * mtk-afe-fe-dais.c  --  MediaTek AFE FE DAI operator
 *
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Garlic Tseng <garlic.tseng@mediatek.com>
 */

#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/soc.h>
#include "mtk-afe-platform-driver.h"
#include "mtk-afe-fe-dai.h"
#include "mtk-base-afe.h"

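/*
 * Offset (in bytes) from a memif's buffer base-address register to its
 * end-address register, used when the platform data does not provide a
 * dedicated reg_ofs_end.
 */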
#define AFE_BASE_END_OFFSET 8

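/*
 * regmap helpers that tolerate "not present on this SoC" register
 * descriptions: a negative register index means the memif/irq has no such
 * control, so the access is silently skipped.
 */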
static int mtk_regmap_update_bits(struct regmap *map, int reg,
				  unsigned int mask,
				  unsigned int val, int shift)
{
	if (reg < 0 || WARN_ON_ONCE(shift < 0))
		return 0;
	return regmap_update_bits(map, reg, mask << shift, val << shift);
}

static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
{
	if (reg < 0)
		return 0;
	return regmap_write(map, reg, val);
}

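/*
 * DAI startup: bind the memif to the substream, clear its agent-disable
 * bit, apply the platform's hardware constraints and reserve a dynamic
 * IRQ for the memif if it does not already own one.
 */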
int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int memif_num = rtd->cpu_dai->id;
	struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
	const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
	int ret;

	memif->substream = substream;

	snd_pcm_hw_constraint_step(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
	/* enable agent */
	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1, 0, memif->data->agent_disable_shift);

	snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);

	/*
	 * Capture cannot use a ping-pong buffer since hw_ptr at IRQ time may
	 * be smaller than period_size due to the AFE's internal buffer.
	 * This easily leads to an overrun when avail_min is period_size.
	 * One extra period can hold the possibly unread data.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
		int periods_max = mtk_afe_hardware->periods_max;

		ret = snd_pcm_hw_constraint_minmax(runtime,
						   SNDRV_PCM_HW_PARAM_PERIODS,
						   3, periods_max);
		if (ret < 0) {
			dev_err(afe->dev, "hw_constraint_minmax failed\n");
			return ret;
		}
	}

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");

	/* dynamically allocate an IRQ to the memif */
	if (memif->irq_usage < 0) {
		int irq_id = mtk_dynamic_irq_acquire(afe);

		if (irq_id != afe->irqs_size) {
			/* link */
			memif->irq_usage = irq_id;
		} else {
			dev_err(afe->dev, "%s() error: no more asys irq\n",
				__func__);
			ret = -EBUSY;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);

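/*
 * DAI shutdown: re-assert the memif's agent-disable bit and return a
 * dynamically acquired IRQ to the pool; fixed (const_irq) IRQs are left
 * untouched.
 */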
void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int irq_id;

	irq_id = memif->irq_usage;

	mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
			       1, 1, memif->data->agent_disable_shift);

	if (!memif->const_irq) {
		mtk_dynamic_irq_release(afe, irq_id);
		memif->irq_usage = -1;
		memif->substream = NULL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);

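/*
 * hw_params: allocate the DMA buffer and program its physical start/end
 * addresses, the 33rd address bit, the channel (mono) mode and the
 * sample-rate field into the memif registers.
 */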
int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int msb_at_bit33 = 0;
	int ret, fs = 0;

	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
	memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
	memif->buffer_size = substream->runtime->dma_bytes;

	/* start */
	mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
			 memif->phys_buf_addr);
	/* end */
	mtk_regmap_write(afe->regmap,
			 memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
			 memif->phys_buf_addr + memif->buffer_size - 1);

	/* set MSB to 33-bit */
	mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
			       1, msb_at_bit33, memif->data->msb_shift);

	/* set channel */
	if (memif->data->mono_shift >= 0) {
		unsigned int mono = (params_channels(params) == 1) ? 1 : 0;

		mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
				       1, mono, memif->data->mono_shift);
	}

	/* set rate */
	if (memif->data->fs_shift < 0)
		return 0;

	fs = afe->memif_fs(substream, params_rate(params));

	if (fs < 0)
		return -EINVAL;

	mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
			       memif->data->fs_maskbit, fs,
			       memif->data->fs_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);

int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	return snd_pcm_lib_free_pages(substream);
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);

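/*
 * Trigger: on START/RESUME enable the memif, program the IRQ period
 * counter and IRQ rate, then unmask the interrupt; on STOP/SUSPEND do the
 * reverse and clear any pending IRQ status.
 */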
int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_pcm_runtime * const runtime = substream->runtime;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
	const struct mtk_base_irq_data *irq_data = irqs->irq_data;
	unsigned int counter = runtime->period_size;
	int fs;

	dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		mtk_regmap_update_bits(afe->regmap,
				       memif->data->enable_reg,
				       1, 1, memif->data->enable_shift);

		/* set irq counter */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
				       irq_data->irq_cnt_maskbit, counter,
				       irq_data->irq_cnt_shift);

		/* set irq fs */
		fs = afe->irq_fs(substream, runtime->rate);

		if (fs < 0)
			return -EINVAL;

		mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
				       irq_data->irq_fs_maskbit, fs,
				       irq_data->irq_fs_shift);

		/* enable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1, 1, irq_data->irq_en_shift);

		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				       1, 0, memif->data->enable_shift);
		/* disable interrupt */
		mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
				       1, 0, irq_data->irq_en_shift);
		/* and clear pending IRQ */
		mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
				 1 << irq_data->irq_clr_shift);
		return 0;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);

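/*
 * Prepare: select normal (16-bit) or HD (24/32-bit) memif mode, plus the
 * HD-align variant for 32-bit samples, based on the runtime format.
 */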
int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
	int hd_audio = 0;
	int hd_align = 0;

	/* set hd mode */
	switch (substream->runtime->format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		hd_audio = 0;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		hd_audio = 1;
		hd_align = 1;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		hd_audio = 1;
		break;
	default:
		dev_err(afe->dev, "%s() error: unsupported format %d\n",
			__func__, substream->runtime->format);
		break;
	}

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
			       1, hd_audio, memif->data->hd_shift);

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_align_reg,
			       1, hd_align, memif->data->hd_align_mshift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);

const struct snd_soc_dai_ops mtk_afe_fe_ops = {
	.startup	= mtk_afe_fe_startup,
	.shutdown	= mtk_afe_fe_shutdown,
	.hw_params	= mtk_afe_fe_hw_params,
	.hw_free	= mtk_afe_fe_hw_free,
	.prepare	= mtk_afe_fe_prepare,
	.trigger	= mtk_afe_fe_trigger,
};
EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);

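/*
 * Simple IRQ allocator: slots in afe->irqs[] are marked occupied under
 * afe->irq_alloc_lock.  Acquire returns the first free index, or
 * afe->irqs_size when the pool is exhausted.
 */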
int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
{
	int i;

	mutex_lock(&afe->irq_alloc_lock);
	for (i = 0; i < afe->irqs_size; ++i) {
		if (afe->irqs[i].irq_occupyed == 0) {
			afe->irqs[i].irq_occupyed = 1;
			mutex_unlock(&afe->irq_alloc_lock);
			return i;
		}
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return afe->irqs_size;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);

int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
{
	mutex_lock(&afe->irq_alloc_lock);
	if (irq_id >= 0 && irq_id < afe->irqs_size) {
		afe->irqs[irq_id].irq_occupyed = 0;
		mutex_unlock(&afe->irq_alloc_lock);
		return 0;
	}
	mutex_unlock(&afe->irq_alloc_lock);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);

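/*
 * Suspend: snapshot the registers listed in reg_back_up_list, then run the
 * platform's runtime-suspend callback.  Skipped when the device is already
 * runtime-suspended or a snapshot has already been taken.
 */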
int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || afe->suspended)
		return 0;

	if (!afe->reg_back_up)
		afe->reg_back_up =
			devm_kcalloc(dev, afe->reg_back_up_list_num,
				     sizeof(unsigned int), GFP_KERNEL);

	/* only back up registers if the allocation succeeded */
	if (afe->reg_back_up) {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			regmap_read(regmap, afe->reg_back_up_list[i],
				    &afe->reg_back_up[i]);
	}

	afe->suspended = true;
	afe->runtime_suspend(dev);
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_suspend);

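/*
 * Resume: power the AFE back up via the platform's runtime-resume callback
 * and restore the register snapshot taken at suspend time, if one exists.
 */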
int mtk_afe_dai_resume(struct snd_soc_dai *dai)
{
	struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
	struct device *dev = afe->dev;
	struct regmap *regmap = afe->regmap;
	int i;

	if (pm_runtime_status_suspended(dev) || !afe->suspended)
		return 0;

	afe->runtime_resume(dev);

	if (!afe->reg_back_up) {
		dev_dbg(dev, "%s no reg_backup\n", __func__);
	} else {
		for (i = 0; i < afe->reg_back_up_list_num; i++)
			mtk_regmap_write(regmap, afe->reg_back_up_list[i],
					 afe->reg_back_up[i]);
	}

	afe->suspended = false;
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_afe_dai_resume);

int mtk_memif_set_enable(struct mtk_base_afe *afe, int id)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];

	if (memif->data->enable_shift < 0) {
		dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
			 __func__, id);
		return 0;
	}
	return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				      1, 1, memif->data->enable_shift);
}
EXPORT_SYMBOL_GPL(mtk_memif_set_enable);

int mtk_memif_set_disable(struct mtk_base_afe *afe, int id)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];

	if (memif->data->enable_shift < 0) {
		dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
			 __func__, id);
		return 0;
	}
	return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
				      1, 0, memif->data->enable_shift);
}
EXPORT_SYMBOL_GPL(mtk_memif_set_disable);

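/*
 * Program the DMA buffer location for a memif: 32-bit start/end addresses,
 * optional upper-32-bit address registers where the platform data provides
 * them, and the "MSB at bit 33" flag derived from the upper address bits.
 */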
int mtk_memif_set_addr(struct mtk_base_afe *afe, int id,
		       unsigned char *dma_area,
		       dma_addr_t dma_addr,
		       size_t dma_bytes)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	int msb_at_bit33 = upper_32_bits(dma_addr) ? 1 : 0;
	unsigned int phys_buf_addr = lower_32_bits(dma_addr);
	unsigned int phys_buf_addr_upper_32 = upper_32_bits(dma_addr);

	memif->dma_area = dma_area;
	memif->dma_addr = dma_addr;
	memif->dma_bytes = dma_bytes;

	/* start */
	mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
			 phys_buf_addr);
	/* end */
	if (memif->data->reg_ofs_end)
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_end,
				 phys_buf_addr + dma_bytes - 1);
	else
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_base +
				 AFE_BASE_END_OFFSET,
				 phys_buf_addr + dma_bytes - 1);

	/* set start, end, upper 32 bits */
	if (memif->data->reg_ofs_base_msb) {
		mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base_msb,
				 phys_buf_addr_upper_32);
		mtk_regmap_write(afe->regmap,
				 memif->data->reg_ofs_end_msb,
				 phys_buf_addr_upper_32);
	}

	/* set MSB to 33-bit */
	if (memif->data->msb_reg >= 0)
		mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
				       1, msb_at_bit33, memif->data->msb_shift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_memif_set_addr);

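/*
 * Select mono/stereo (and 4-channel where supported) mode for the memif;
 * some SoCs invert the meaning of the mono bit.
 */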
int mtk_memif_set_channel(struct mtk_base_afe *afe,
			  int id, unsigned int channel)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	unsigned int mono;

	if (memif->data->mono_shift < 0)
		return 0;

	if (memif->data->quad_ch_mask) {
		unsigned int quad_ch = (channel == 4) ? 1 : 0;

		mtk_regmap_update_bits(afe->regmap, memif->data->quad_ch_reg,
				       memif->data->quad_ch_mask,
				       quad_ch, memif->data->quad_ch_shift);
	}

	if (memif->data->mono_invert)
		mono = (channel == 1) ? 0 : 1;
	else
		mono = (channel == 1) ? 1 : 0;

	return mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
				      1, mono, memif->data->mono_shift);
}
EXPORT_SYMBOL_GPL(mtk_memif_set_channel);

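/*
 * Write an already-encoded fs value into the memif rate field; callers
 * translate a rate in Hz to the SoC-specific encoding first.
 */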
static int mtk_memif_set_rate_fs(struct mtk_base_afe *afe,
				 int id, int fs)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];

	if (memif->data->fs_shift >= 0)
		mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
				       memif->data->fs_maskbit,
				       fs, memif->data->fs_shift);

	return 0;
}

int mtk_memif_set_rate(struct mtk_base_afe *afe,
		       int id, unsigned int rate)
{
	int fs = 0;

	if (!afe->get_dai_fs) {
		dev_err(afe->dev, "%s(), error, afe->get_dai_fs == NULL\n",
			__func__);
		return -EINVAL;
	}

	fs = afe->get_dai_fs(afe, id, rate);

	if (fs < 0)
		return -EINVAL;

	return mtk_memif_set_rate_fs(afe, id, fs);
}
EXPORT_SYMBOL_GPL(mtk_memif_set_rate);

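/*
 * Same as mtk_memif_set_rate(), but resolves the AFE instance from the
 * substream's AFE platform component and uses the memif_fs() callback to
 * encode the rate.
 */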
int mtk_memif_set_rate_substream(struct snd_pcm_substream *substream,
				 int id, unsigned int rate)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_component *component =
		snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
	struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
	int fs = 0;

	if (!afe->memif_fs) {
		dev_err(afe->dev, "%s(), error, afe->memif_fs == NULL\n",
			__func__);
		return -EINVAL;
	}

	fs = afe->memif_fs(substream, rate);

	if (fs < 0)
		return -EINVAL;

	return mtk_memif_set_rate_fs(afe, id, fs);
}
EXPORT_SYMBOL_GPL(mtk_memif_set_rate_substream);

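/*
 * Set normal vs. HD memif mode from a PCM format: 16-bit formats use the
 * normal mode, 24/32-bit formats use HD mode, and 32-bit formats
 * additionally enable the HD-align variant.
 */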
int mtk_memif_set_format(struct mtk_base_afe *afe,
			 int id, snd_pcm_format_t format)
{
	struct mtk_base_afe_memif *memif = &afe->memif[id];
	int hd_audio = 0;
	int hd_align = 0;

	/* set hd mode */
	switch (format) {
	case SNDRV_PCM_FORMAT_S16_LE:
	case SNDRV_PCM_FORMAT_U16_LE:
		hd_audio = 0;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
	case SNDRV_PCM_FORMAT_U32_LE:
		hd_audio = 1;
		hd_align = 1;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
	case SNDRV_PCM_FORMAT_U24_LE:
		hd_audio = 1;
		break;
	default:
		dev_err(afe->dev, "%s() error: unsupported format %d\n",
			__func__, format);
		break;
	}

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
			       1, hd_audio, memif->data->hd_shift);

	mtk_regmap_update_bits(afe->regmap, memif->data->hd_align_reg,
			       1, hd_align, memif->data->hd_align_mshift);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_memif_set_format);

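/*
 * Configure the memif's pbuf_size and minlen bitfields, skipping memifs
 * whose platform data does not describe those fields.
 */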
int mtk_memif_set_pbuf_size(struct mtk_base_afe *afe,
			    int id, int pbuf_size)
{
	const struct mtk_base_memif_data *memif_data = afe->memif[id].data;

	if (memif_data->pbuf_mask == 0 || memif_data->minlen_mask == 0)
		return 0;

	mtk_regmap_update_bits(afe->regmap, memif_data->pbuf_reg,
			       memif_data->pbuf_mask,
			       pbuf_size, memif_data->pbuf_shift);

	mtk_regmap_update_bits(afe->regmap, memif_data->minlen_reg,
			       memif_data->minlen_mask,
			       pbuf_size, memif_data->minlen_shift);
	return 0;
}
EXPORT_SYMBOL_GPL(mtk_memif_set_pbuf_size);

MODULE_DESCRIPTION("MediaTek simple FE DAI operator");
MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
MODULE_LICENSE("GPL v2");