1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * skl-message.c - HDA DSP interface for FW registration, Pipe and Module
4 * configurations
5 *
6 * Copyright (C) 2015 Intel Corp
7 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
8 * Jeeja KP <jeeja.kp@intel.com>
9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 */
11
12 #include <linux/slab.h>
13 #include <linux/pci.h>
14 #include <sound/core.h>
15 #include <sound/pcm.h>
16 #include <uapi/sound/skl-tplg-interface.h>
17 #include "skl-sst-dsp.h"
18 #include "cnl-sst-dsp.h"
19 #include "skl-sst-ipc.h"
20 #include "skl.h"
21 #include "../common/sst-dsp.h"
22 #include "../common/sst-dsp-priv.h"
23 #include "skl-topology.h"
24
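/* DMA buffer alloc/free helpers used by the DSP code loader ops below. */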
25 static int skl_alloc_dma_buf(struct device *dev,
26 struct snd_dma_buffer *dmab, size_t size)
27 {
28 return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, dmab);
29 }
30
31 static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
32 {
33 snd_dma_free_pages(dmab);
34 return 0;
35 }
36
37 #define SKL_ASTATE_PARAM_ID 4
38
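/*
 * Send the A-state table to the firmware as a large config set with
 * SKL_ASTATE_PARAM_ID; 'cnt' is the number of skl_astate_param entries
 * carried in 'data'.
 */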
39 void skl_dsp_set_astate_cfg(struct skl_dev *skl, u32 cnt, void *data)
40 {
41 struct skl_ipc_large_config_msg msg = {0};
42
43 msg.large_param_id = SKL_ASTATE_PARAM_ID;
44 msg.param_data_size = (cnt * sizeof(struct skl_astate_param) +
45 sizeof(cnt));
46
47 skl_ipc_set_large_config(&skl->ipc, &msg, data);
48 }
49
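/*
 * Enable or disable the SPIB (software position in buffer) capability for
 * the host playback stream with the given stream_tag and program its value.
 */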
50 static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
51 int stream_tag, int enable)
52 {
53 struct hdac_bus *bus = dev_get_drvdata(dev);
54 struct hdac_stream *stream = snd_hdac_get_stream(bus,
55 SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
56
57 if (!stream)
58 return -EINVAL;
59
60 /* enable/disable SPIB for this hdac stream */
61 snd_hdac_stream_spbcap_enable(bus, enable, stream->index);
62
63 /* set the spib value */
64 snd_hdac_stream_set_spib(bus, stream, size);
65
66 return 0;
67 }
68
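/*
 * Prepare a host DMA stream for DSP code loading: assign a host stream,
 * program the DMA format and buffer, set up SPIB and return the stream tag
 * to be used for the transfer.
 */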
69 static int skl_dsp_prepare(struct device *dev, unsigned int format,
70 unsigned int size, struct snd_dma_buffer *dmab)
71 {
72 struct hdac_bus *bus = dev_get_drvdata(dev);
73 struct hdac_ext_stream *estream;
74 struct hdac_stream *stream;
75 struct snd_pcm_substream substream;
76 int ret;
77
78 if (!bus)
79 return -ENODEV;
80
81 memset(&substream, 0, sizeof(substream));
82 substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
83
84 estream = snd_hdac_ext_stream_assign(bus, &substream,
85 HDAC_EXT_STREAM_TYPE_HOST);
86 if (!estream)
87 return -ENODEV;
88
89 stream = hdac_stream(estream);
90
91 /* assign decoupled host DMA channel */
92 ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
93 if (ret < 0)
94 return ret;
95
96 skl_dsp_setup_spib(dev, size, stream->stream_tag, true);
97
98 return stream->stream_tag;
99 }
100
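/* Start or stop the code-loading DMA on the given host playback stream. */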
101 static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
102 {
103 struct hdac_bus *bus = dev_get_drvdata(dev);
104 struct hdac_stream *stream;
105
106 if (!bus)
107 return -ENODEV;
108
109 stream = snd_hdac_get_stream(bus,
110 SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
111 if (!stream)
112 return -EINVAL;
113
114 snd_hdac_dsp_trigger(stream, start);
115
116 return 0;
117 }
118
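/*
 * Undo skl_dsp_prepare(): disable SPIB, release the host stream and clean up
 * the DMA used for the code-loading transfer.
 */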
119 static int skl_dsp_cleanup(struct device *dev,
120 struct snd_dma_buffer *dmab, int stream_tag)
121 {
122 struct hdac_bus *bus = dev_get_drvdata(dev);
123 struct hdac_stream *stream;
124 struct hdac_ext_stream *estream;
125
126 if (!bus)
127 return -ENODEV;
128
129 stream = snd_hdac_get_stream(bus,
130 SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
131 if (!stream)
132 return -EINVAL;
133
134 estream = stream_to_hdac_ext_stream(stream);
135 skl_dsp_setup_spib(dev, 0, stream_tag, false);
136 snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);
137
138 snd_hdac_dsp_cleanup(stream, dmab);
139
140 return 0;
141 }
142
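/* Loader ops for SKL/KBL: only the DMA buffer alloc/free helpers are used. */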
143 static struct skl_dsp_loader_ops skl_get_loader_ops(void)
144 {
145 struct skl_dsp_loader_ops loader_ops;
146
147 memset(&loader_ops, 0, sizeof(struct skl_dsp_loader_ops));
148
149 loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
150 loader_ops.free_dma_buf = skl_free_dma_buf;
151
152 return loader_ops;
153 };
154
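/*
 * Loader ops for APL/GML and CNL/CML platforms: code loading goes through a
 * host HDA stream, so prepare/trigger/cleanup callbacks are provided in
 * addition to the DMA buffer helpers.
 */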
155 static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
156 {
157 struct skl_dsp_loader_ops loader_ops;
158
159 memset(&loader_ops, 0, sizeof(loader_ops));
160
161 loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
162 loader_ops.free_dma_buf = skl_free_dma_buf;
163 loader_ops.prepare = skl_dsp_prepare;
164 loader_ops.trigger = skl_dsp_trigger;
165 loader_ops.cleanup = skl_dsp_cleanup;
166
167 return loader_ops;
168 };
169
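/* Per-platform DSP ops, selected by PCI device ID via skl_get_dsp_ops(). */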
170 static const struct skl_dsp_ops dsp_ops[] = {
171 {
172 .id = PCI_DEVICE_ID_INTEL_HDA_SKL_LP,
173 .num_cores = 2,
174 .loader_ops = skl_get_loader_ops,
175 .init = skl_sst_dsp_init,
176 .init_fw = skl_sst_init_fw,
177 .cleanup = skl_sst_dsp_cleanup
178 },
179 {
180 .id = PCI_DEVICE_ID_INTEL_HDA_KBL_LP,
181 .num_cores = 2,
182 .loader_ops = skl_get_loader_ops,
183 .init = skl_sst_dsp_init,
184 .init_fw = skl_sst_init_fw,
185 .cleanup = skl_sst_dsp_cleanup
186 },
187 {
188 .id = PCI_DEVICE_ID_INTEL_HDA_APL,
189 .num_cores = 2,
190 .loader_ops = bxt_get_loader_ops,
191 .init = bxt_sst_dsp_init,
192 .init_fw = bxt_sst_init_fw,
193 .cleanup = bxt_sst_dsp_cleanup
194 },
195 {
196 .id = PCI_DEVICE_ID_INTEL_HDA_GML,
197 .num_cores = 2,
198 .loader_ops = bxt_get_loader_ops,
199 .init = bxt_sst_dsp_init,
200 .init_fw = bxt_sst_init_fw,
201 .cleanup = bxt_sst_dsp_cleanup
202 },
203 {
204 .id = PCI_DEVICE_ID_INTEL_HDA_CNL_LP,
205 .num_cores = 4,
206 .loader_ops = bxt_get_loader_ops,
207 .init = cnl_sst_dsp_init,
208 .init_fw = cnl_sst_init_fw,
209 .cleanup = cnl_sst_dsp_cleanup
210 },
211 {
212 .id = PCI_DEVICE_ID_INTEL_HDA_CNL_H,
213 .num_cores = 4,
214 .loader_ops = bxt_get_loader_ops,
215 .init = cnl_sst_dsp_init,
216 .init_fw = cnl_sst_init_fw,
217 .cleanup = cnl_sst_dsp_cleanup
218 },
219 {
220 .id = PCI_DEVICE_ID_INTEL_HDA_CML_LP,
221 .num_cores = 4,
222 .loader_ops = bxt_get_loader_ops,
223 .init = cnl_sst_dsp_init,
224 .init_fw = cnl_sst_init_fw,
225 .cleanup = cnl_sst_dsp_cleanup
226 },
227 {
228 .id = PCI_DEVICE_ID_INTEL_HDA_CML_H,
229 .num_cores = 4,
230 .loader_ops = bxt_get_loader_ops,
231 .init = cnl_sst_dsp_init,
232 .init_fw = cnl_sst_init_fw,
233 .cleanup = cnl_sst_dsp_cleanup
234 },
235 };
236
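/* Look up the dsp_ops entry matching the given PCI device ID. */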
237 const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
238 {
239 int i;
240
241 for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
242 if (dsp_ops[i].id == pci_id)
243 return &dsp_ops[i];
244 }
245
246 return NULL;
247 }
248
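/*
 * Initialize the DSP: enable the processing pipe capability (PPCAP), map the
 * ADSP MMIO BAR, run the platform init callback and allocate the per-core
 * state and usage-count arrays.
 */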
249 int skl_init_dsp(struct skl_dev *skl)
250 {
251 void __iomem *mmio_base;
252 struct hdac_bus *bus = skl_to_bus(skl);
253 struct skl_dsp_loader_ops loader_ops;
254 int irq = bus->irq;
255 const struct skl_dsp_ops *ops;
256 struct skl_dsp_cores *cores;
257 int ret;
258
259 /* enable ppcap interrupt */
260 snd_hdac_ext_bus_ppcap_enable(bus, true);
261 snd_hdac_ext_bus_ppcap_int_enable(bus, true);
262
263 /* read the BAR of the ADSP MMIO */
264 mmio_base = pci_ioremap_bar(skl->pci, 4);
265 if (mmio_base == NULL) {
266 dev_err(bus->dev, "ioremap error\n");
267 return -ENXIO;
268 }
269
270 ops = skl_get_dsp_ops(skl->pci->device);
271 if (!ops) {
272 ret = -EIO;
273 goto unmap_mmio;
274 }
275
276 loader_ops = ops->loader_ops();
277 ret = ops->init(bus->dev, mmio_base, irq,
278 skl->fw_name, loader_ops,
279 &skl);
280
281 if (ret < 0)
282 goto unmap_mmio;
283
284 skl->dsp_ops = ops;
285 cores = &skl->cores;
286 cores->count = ops->num_cores;
287
288 cores->state = kcalloc(cores->count, sizeof(*cores->state), GFP_KERNEL);
289 if (!cores->state) {
290 ret = -ENOMEM;
291 goto unmap_mmio;
292 }
293
294 cores->usage_count = kcalloc(cores->count, sizeof(*cores->usage_count),
295 GFP_KERNEL);
296 if (!cores->usage_count) {
297 ret = -ENOMEM;
298 goto free_core_state;
299 }
300
301 dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
302
303 return 0;
304
305 free_core_state:
306 kfree(cores->state);
307
308 unmap_mmio:
309 iounmap(mmio_base);
310
311 return ret;
312 }
313
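/*
 * Tear down the DSP: disable the PPCAP interrupt, run the platform cleanup
 * callback and free the per-core bookkeeping and the MMIO mapping.
 */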
314 int skl_free_dsp(struct skl_dev *skl)
315 {
316 struct hdac_bus *bus = skl_to_bus(skl);
317
318 /* disable ppcap interrupt */
319 snd_hdac_ext_bus_ppcap_int_enable(bus, false);
320
321 skl->dsp_ops->cleanup(bus->dev, skl);
322
323 kfree(skl->cores.state);
324 kfree(skl->cores.usage_count);
325
326 if (skl->dsp->addr.lpe)
327 iounmap(skl->dsp->addr.lpe);
328
329 return 0;
330 }
331
332 /*
333 * In the case of "suspend_active", i.e. the Audio IP being active
334 * during system suspend, immediately execute any pending D0i3 work
335 * before suspending. This is needed for the IP to work in low power
336 * mode during system suspend. In the case of normal suspend, cancel
337 * any pending D0i3 work.
338 */
339 int skl_suspend_late_dsp(struct skl_dev *skl)
340 {
341 struct delayed_work *dwork;
342
343 if (!skl)
344 return 0;
345
346 dwork = &skl->d0i3.work;
347
348 if (dwork->work.func) {
349 if (skl->supend_active)
350 flush_delayed_work(dwork);
351 else
352 cancel_delayed_work_sync(dwork);
353 }
354
355 return 0;
356 }
357
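/* Put the DSP to sleep and disable the processing pipe capability. */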
358 int skl_suspend_dsp(struct skl_dev *skl)
359 {
360 struct hdac_bus *bus = skl_to_bus(skl);
361 int ret;
362
363 /* if ppcap is not supported return 0 */
364 if (!bus->ppcap)
365 return 0;
366
367 ret = skl_dsp_sleep(skl->dsp);
368 if (ret < 0)
369 return ret;
370
371 /* disable ppcap interrupt */
372 snd_hdac_ext_bus_ppcap_int_enable(bus, false);
373 snd_hdac_ext_bus_ppcap_enable(bus, false);
374
375 return 0;
376 }
377
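/*
 * Re-enable the processing pipe capability and, unless the first firmware
 * boot is still pending, wake the DSP and restore the A-state table.
 */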
378 int skl_resume_dsp(struct skl_dev *skl)
379 {
380 struct hdac_bus *bus = skl_to_bus(skl);
381 int ret;
382
383 /* if ppcap is not supported return 0 */
384 if (!bus->ppcap)
385 return 0;
386
387 /* enable ppcap interrupt */
388 snd_hdac_ext_bus_ppcap_enable(bus, true);
389 snd_hdac_ext_bus_ppcap_int_enable(bus, true);
390
391 /* check if DSP 1st boot is done */
392 if (skl->is_first_boot)
393 return 0;
394
395 /*
396 * Disable dynamic clock and power gating during firmware
397 * and library download
398 */
399 skl->enable_miscbdcge(skl->dev, false);
400 skl->clock_power_gating(skl->dev, false);
401
402 ret = skl_dsp_wake(skl->dsp);
403 skl->enable_miscbdcge(skl->dev, true);
404 skl->clock_power_gating(skl->dev, true);
405 if (ret < 0)
406 return ret;
407
408 if (skl->cfg.astate_cfg != NULL) {
409 skl_dsp_set_astate_cfg(skl, skl->cfg.astate_cfg->count,
410 skl->cfg.astate_cfg);
411 }
412 return ret;
413 }
414
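/* Map a PCM sample width in bits to the corresponding SKL_DEPTH_* value. */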
415 enum skl_bitdepth skl_get_bit_depth(int params)
416 {
417 switch (params) {
418 case 8:
419 return SKL_DEPTH_8BIT;
420
421 case 16:
422 return SKL_DEPTH_16BIT;
423
424 case 24:
425 return SKL_DEPTH_24BIT;
426
427 case 32:
428 return SKL_DEPTH_32BIT;
429
430 default:
431 return SKL_DEPTH_INVALID;
432
433 }
434 }
435
436 /*
437 * Each module in the DSP expects a base module configuration: PCM format
438 * information, which we calculate in the driver, and resource values, which
439 * are read from the widget information passed through the topology binary.
440 * This is sent when we create a module with the INIT_INSTANCE IPC msg
441 */
442 static void skl_set_base_module_format(struct skl_dev *skl,
443 struct skl_module_cfg *mconfig,
444 struct skl_base_cfg *base_cfg)
445 {
446 struct skl_module *module = mconfig->module;
447 struct skl_module_res *res = &module->resources[mconfig->res_idx];
448 struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
449 struct skl_module_fmt *format = &fmt->inputs[0].fmt;
450
451 base_cfg->audio_fmt.number_of_channels = format->channels;
452
453 base_cfg->audio_fmt.s_freq = format->s_freq;
454 base_cfg->audio_fmt.bit_depth = format->bit_depth;
455 base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
456 base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
457 base_cfg->audio_fmt.sample_type = format->sample_type;
458
459 dev_dbg(skl->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
460 format->bit_depth, format->valid_bit_depth,
461 format->ch_cfg);
462
463 base_cfg->audio_fmt.channel_map = format->ch_map;
464
465 base_cfg->audio_fmt.interleaving = format->interleaving_style;
466
467 base_cfg->cpc = res->cpc;
468 base_cfg->ibs = res->ibs;
469 base_cfg->obs = res->obs;
470 base_cfg->is_pages = res->is_pages;
471 }
472
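/*
 * Copy a single pin's PCM parameters from the topology format into the IPC
 * audio data format structure.
 */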
473 static void fill_pin_params(struct skl_audio_data_format *pin_fmt,
474 struct skl_module_fmt *format)
475 {
476 pin_fmt->number_of_channels = format->channels;
477 pin_fmt->s_freq = format->s_freq;
478 pin_fmt->bit_depth = format->bit_depth;
479 pin_fmt->valid_bit_depth = format->valid_bit_depth;
480 pin_fmt->ch_cfg = format->ch_cfg;
481 pin_fmt->sample_type = format->sample_type;
482 pin_fmt->channel_map = format->ch_map;
483 pin_fmt->interleaving = format->interleaving_style;
484 }
485
486 /*
487 * Any module configuration begins with a base module configuration but
488 * can be followed by a generic extension containing the audio format for
489 * all of the module's pins that are in use.
490 */
491 static void skl_set_base_ext_module_format(struct skl_dev *skl,
492 struct skl_module_cfg *mconfig,
493 struct skl_base_cfg_ext *base_cfg_ext)
494 {
495 struct skl_module *module = mconfig->module;
496 struct skl_module_pin_resources *pin_res;
497 struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
498 struct skl_module_res *res = &module->resources[mconfig->res_idx];
499 struct skl_module_fmt *format;
500 struct skl_pin_format *pin_fmt;
501 char *params;
502 int i;
503
504 base_cfg_ext->nr_input_pins = res->nr_input_pins;
505 base_cfg_ext->nr_output_pins = res->nr_output_pins;
506 base_cfg_ext->priv_param_length =
507 mconfig->formats_config[SKL_PARAM_INIT].caps_size;
508
509 for (i = 0; i < res->nr_input_pins; i++) {
510 pin_res = &res->input[i];
511 pin_fmt = &base_cfg_ext->pins_fmt[i];
512
513 pin_fmt->pin_idx = pin_res->pin_index;
514 pin_fmt->buf_size = pin_res->buf_size;
515
516 format = &fmt->inputs[pin_res->pin_index].fmt;
517 fill_pin_params(&pin_fmt->audio_fmt, format);
518 }
519
520 for (i = 0; i < res->nr_output_pins; i++) {
521 pin_res = &res->output[i];
522 pin_fmt = &base_cfg_ext->pins_fmt[res->nr_input_pins + i];
523
524 pin_fmt->pin_idx = pin_res->pin_index;
525 pin_fmt->buf_size = pin_res->buf_size;
526
527 format = &fmt->outputs[pin_res->pin_index].fmt;
528 fill_pin_params(&pin_fmt->audio_fmt, format);
529 }
530
531 if (!base_cfg_ext->priv_param_length)
532 return;
533
534 params = (char *)base_cfg_ext + sizeof(struct skl_base_cfg_ext);
535 params += (base_cfg_ext->nr_input_pins + base_cfg_ext->nr_output_pins) *
536 sizeof(struct skl_pin_format);
537
538 memcpy(params, mconfig->formats_config[SKL_PARAM_INIT].caps,
539 mconfig->formats_config[SKL_PARAM_INIT].caps_size);
540 }
541
542 /*
543 * Copies copier capabilities into copier module and updates copier module
544 * config size.
545 */
546 static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
547 struct skl_cpr_cfg *cpr_mconfig)
548 {
549 if (mconfig->formats_config[SKL_PARAM_INIT].caps_size == 0)
550 return;
551
552 memcpy(&cpr_mconfig->gtw_cfg.config_data,
553 mconfig->formats_config[SKL_PARAM_INIT].caps,
554 mconfig->formats_config[SKL_PARAM_INIT].caps_size);
555
556 cpr_mconfig->gtw_cfg.config_length =
557 (mconfig->formats_config[SKL_PARAM_INIT].caps_size) / 4;
558 }
559
560 #define SKL_NON_GATEWAY_CPR_NODE_ID 0xFFFFFFFF
561 /*
562 * Calculate the gateway settings required for the copier module: the type
563 * of gateway and the index of the gateway to use
564 */
565 static u32 skl_get_node_id(struct skl_dev *skl,
566 struct skl_module_cfg *mconfig)
567 {
568 union skl_connector_node_id node_id = {0};
569 union skl_ssp_dma_node ssp_node = {0};
570 struct skl_pipe_params *params = mconfig->pipe->p_params;
571
572 switch (mconfig->dev_type) {
573 case SKL_DEVICE_BT:
574 node_id.node.dma_type =
575 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
576 SKL_DMA_I2S_LINK_OUTPUT_CLASS :
577 SKL_DMA_I2S_LINK_INPUT_CLASS;
578 node_id.node.vindex = params->host_dma_id +
579 (mconfig->vbus_id << 3);
580 break;
581
582 case SKL_DEVICE_I2S:
583 node_id.node.dma_type =
584 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
585 SKL_DMA_I2S_LINK_OUTPUT_CLASS :
586 SKL_DMA_I2S_LINK_INPUT_CLASS;
587 ssp_node.dma_node.time_slot_index = mconfig->time_slot;
588 ssp_node.dma_node.i2s_instance = mconfig->vbus_id;
589 node_id.node.vindex = ssp_node.val;
590 break;
591
592 case SKL_DEVICE_DMIC:
593 node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
594 node_id.node.vindex = mconfig->vbus_id +
595 (mconfig->time_slot);
596 break;
597
598 case SKL_DEVICE_HDALINK:
599 node_id.node.dma_type =
600 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
601 SKL_DMA_HDA_LINK_OUTPUT_CLASS :
602 SKL_DMA_HDA_LINK_INPUT_CLASS;
603 node_id.node.vindex = params->link_dma_id;
604 break;
605
606 case SKL_DEVICE_HDAHOST:
607 node_id.node.dma_type =
608 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
609 SKL_DMA_HDA_HOST_OUTPUT_CLASS :
610 SKL_DMA_HDA_HOST_INPUT_CLASS;
611 node_id.node.vindex = params->host_dma_id;
612 break;
613
614 default:
615 node_id.val = 0xFFFFFFFF;
616 break;
617 }
618
619 return node_id.val;
620 }
621
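/*
 * Fill the copier gateway configuration: the connector node id, the DMA
 * buffer size (taken from the topology resources or derived from ibs/obs)
 * and any private gateway data from the topology.
 */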
622 static void skl_setup_cpr_gateway_cfg(struct skl_dev *skl,
623 struct skl_module_cfg *mconfig,
624 struct skl_cpr_cfg *cpr_mconfig)
625 {
626 u32 dma_io_buf;
627 struct skl_module_res *res;
628 int res_idx = mconfig->res_idx;
629
630 cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(skl, mconfig);
631
632 if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
633 cpr_mconfig->cpr_feature_mask = 0;
634 return;
635 }
636
637 if (skl->nr_modules) {
638 res = &mconfig->module->resources[mconfig->res_idx];
639 cpr_mconfig->gtw_cfg.dma_buffer_size = res->dma_buffer_size;
640 goto skip_buf_size_calc;
641 } else {
642 res = &mconfig->module->resources[res_idx];
643 }
644
645 switch (mconfig->hw_conn_type) {
646 case SKL_CONN_SOURCE:
647 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
648 dma_io_buf = res->ibs;
649 else
650 dma_io_buf = res->obs;
651 break;
652
653 case SKL_CONN_SINK:
654 if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
655 dma_io_buf = res->obs;
656 else
657 dma_io_buf = res->ibs;
658 break;
659
660 default:
661 dev_warn(skl->dev, "wrong connection type: %d\n",
662 mconfig->hw_conn_type);
663 return;
664 }
665
666 cpr_mconfig->gtw_cfg.dma_buffer_size =
667 mconfig->dma_buffer_size * dma_io_buf;
668
669 /* fallback to 2ms default value */
670 if (!cpr_mconfig->gtw_cfg.dma_buffer_size) {
671 if (mconfig->hw_conn_type == SKL_CONN_SOURCE)
672 cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->obs;
673 else
674 cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->ibs;
675 }
676
677 skip_buf_size_calc:
678 cpr_mconfig->cpr_feature_mask = 0;
679 cpr_mconfig->gtw_cfg.config_length = 0;
680
681 skl_copy_copier_caps(mconfig, cpr_mconfig);
682 }
683
684 #define DMA_CONTROL_ID 5
685 #define DMA_I2S_BLOB_SIZE 21
686
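/*
 * Send an I2S DMA control blob (taken from the NHLT table) to the firmware
 * for the given gateway node id, using the DMA_CONTROL_ID large config
 * parameter.
 */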
687 int skl_dsp_set_dma_control(struct skl_dev *skl, u32 *caps,
688 u32 caps_size, u32 node_id)
689 {
690 struct skl_dma_control *dma_ctrl;
691 struct skl_ipc_large_config_msg msg = {0};
692 int err = 0;
693
694
695 /*
696 * if blob size zero, then return
697 */
698 if (caps_size == 0)
699 return 0;
700
701 msg.large_param_id = DMA_CONTROL_ID;
702 msg.param_data_size = sizeof(struct skl_dma_control) + caps_size;
703
704 dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
705 if (dma_ctrl == NULL)
706 return -ENOMEM;
707
708 dma_ctrl->node_id = node_id;
709
710 /*
711 * NHLT blob may contain additional configs along with i2s blob.
712 * firmware expects only the i2s blob size as the config_length.
713 * So fix to i2s blob size.
714 * size in dwords.
715 */
716 dma_ctrl->config_length = DMA_I2S_BLOB_SIZE;
717
718 memcpy(dma_ctrl->config_data, caps, caps_size);
719
720 err = skl_ipc_set_large_config(&skl->ipc, &msg, (u32 *)dma_ctrl);
721
722 kfree(dma_ctrl);
723 return err;
724 }
725 EXPORT_SYMBOL_GPL(skl_dsp_set_dma_control);
726
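/* Fill the copier output audio format from the module's first output pin. */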
727 static void skl_setup_out_format(struct skl_dev *skl,
728 struct skl_module_cfg *mconfig,
729 struct skl_audio_data_format *out_fmt)
730 {
731 struct skl_module *module = mconfig->module;
732 struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
733 struct skl_module_fmt *format = &fmt->outputs[0].fmt;
734
735 out_fmt->number_of_channels = (u8)format->channels;
736 out_fmt->s_freq = format->s_freq;
737 out_fmt->bit_depth = format->bit_depth;
738 out_fmt->valid_bit_depth = format->valid_bit_depth;
739 out_fmt->ch_cfg = format->ch_cfg;
740
741 out_fmt->channel_map = format->ch_map;
742 out_fmt->interleaving = format->interleaving_style;
743 out_fmt->sample_type = format->sample_type;
744
745 dev_dbg(skl->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
746 out_fmt->number_of_channels, format->s_freq, format->bit_depth);
747 }
748
749 /*
750 * DSP needs SRC module for frequency conversion, SRC takes base module
751 * configuration and the target frequency as extra parameter passed as src
752 * config
753 */
754 static void skl_set_src_format(struct skl_dev *skl,
755 struct skl_module_cfg *mconfig,
756 struct skl_src_module_cfg *src_mconfig)
757 {
758 struct skl_module *module = mconfig->module;
759 struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
760 struct skl_module_fmt *fmt = &iface->outputs[0].fmt;
761
762 skl_set_base_module_format(skl, mconfig,
763 (struct skl_base_cfg *)src_mconfig);
764
765 src_mconfig->src_cfg = fmt->s_freq;
766 }
767
768 /*
769 * DSP needs the updown module to do channel conversion. The updown module
770 * takes the base module configuration and the channel configuration.
771 * It also takes coefficients; for now the defaults are applied here
772 */
773 static void skl_set_updown_mixer_format(struct skl_dev *skl,
774 struct skl_module_cfg *mconfig,
775 struct skl_up_down_mixer_cfg *mixer_mconfig)
776 {
777 struct skl_module *module = mconfig->module;
778 struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
779 struct skl_module_fmt *fmt = &iface->outputs[0].fmt;
780
781 skl_set_base_module_format(skl, mconfig,
782 (struct skl_base_cfg *)mixer_mconfig);
783 mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
784 mixer_mconfig->ch_map = fmt->ch_map;
785 }
786
787 /*
788 * 'copier' is a DSP-internal module which copies data from host DMA (HDA
789 * host DMA) or link (HDA link, SSP, PDM)
790 * Here we calculate the copier module parameters, like PCM format, output
791 * format and gateway settings
792 * copier_module_config is sent as the input buffer with the INIT_INSTANCE IPC msg
793 */
794 static void skl_set_copier_format(struct skl_dev *skl,
795 struct skl_module_cfg *mconfig,
796 struct skl_cpr_cfg *cpr_mconfig)
797 {
798 struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
799 struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;
800
801 skl_set_base_module_format(skl, mconfig, base_cfg);
802
803 skl_setup_out_format(skl, mconfig, out_fmt);
804 skl_setup_cpr_gateway_cfg(skl, mconfig, cpr_mconfig);
805 }
806
807 /*
808 * Mic select module allows selecting one or many input channels, thus
809 * acting as a demux.
810 *
811 * Mic select module takes the base module configuration and the out-format
812 * configuration
813 */
814 static void skl_set_base_outfmt_format(struct skl_dev *skl,
815 struct skl_module_cfg *mconfig,
816 struct skl_base_outfmt_cfg *base_outfmt_mcfg)
817 {
818 struct skl_audio_data_format *out_fmt = &base_outfmt_mcfg->out_fmt;
819 struct skl_base_cfg *base_cfg =
820 (struct skl_base_cfg *)base_outfmt_mcfg;
821
822 skl_set_base_module_format(skl, mconfig, base_cfg);
823 skl_setup_out_format(skl, mconfig, out_fmt);
824 }
825
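/* Return the size of the INIT_INSTANCE payload for the given module type. */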
826 static u16 skl_get_module_param_size(struct skl_dev *skl,
827 struct skl_module_cfg *mconfig)
828 {
829 struct skl_module_res *res;
830 struct skl_module *module = mconfig->module;
831 u16 param_size;
832
833 switch (mconfig->m_type) {
834 case SKL_MODULE_TYPE_COPIER:
835 param_size = sizeof(struct skl_cpr_cfg);
836 param_size += mconfig->formats_config[SKL_PARAM_INIT].caps_size;
837 return param_size;
838
839 case SKL_MODULE_TYPE_SRCINT:
840 return sizeof(struct skl_src_module_cfg);
841
842 case SKL_MODULE_TYPE_UPDWMIX:
843 return sizeof(struct skl_up_down_mixer_cfg);
844
845 case SKL_MODULE_TYPE_BASE_OUTFMT:
846 case SKL_MODULE_TYPE_MIC_SELECT:
847 return sizeof(struct skl_base_outfmt_cfg);
848
849 case SKL_MODULE_TYPE_MIXER:
850 case SKL_MODULE_TYPE_KPB:
851 return sizeof(struct skl_base_cfg);
852
853 case SKL_MODULE_TYPE_ALGO:
854 default:
855 res = &module->resources[mconfig->res_idx];
856
857 param_size = sizeof(struct skl_base_cfg) + sizeof(struct skl_base_cfg_ext);
858 param_size += (res->nr_input_pins + res->nr_output_pins) *
859 sizeof(struct skl_pin_format);
860 param_size += mconfig->formats_config[SKL_PARAM_INIT].caps_size;
861
862 return param_size;
863 }
864
865 return 0;
866 }
867
868 /*
869 * DSP firmware supports various modules like copier, SRC, updown etc.
870 * These modules require various parameters to be calculated and sent for
871 * the module initialization to the DSP. By default a generic module needs
872 * only the base module format configuration
873 */
874
875 static int skl_set_module_format(struct skl_dev *skl,
876 struct skl_module_cfg *module_config,
877 u16 *module_config_size,
878 void **param_data)
879 {
880 u16 param_size;
881
882 param_size = skl_get_module_param_size(skl, module_config);
883
884 *param_data = kzalloc(param_size, GFP_KERNEL);
885 if (NULL == *param_data)
886 return -ENOMEM;
887
888 *module_config_size = param_size;
889
890 switch (module_config->m_type) {
891 case SKL_MODULE_TYPE_COPIER:
892 skl_set_copier_format(skl, module_config, *param_data);
893 break;
894
895 case SKL_MODULE_TYPE_SRCINT:
896 skl_set_src_format(skl, module_config, *param_data);
897 break;
898
899 case SKL_MODULE_TYPE_UPDWMIX:
900 skl_set_updown_mixer_format(skl, module_config, *param_data);
901 break;
902
903 case SKL_MODULE_TYPE_BASE_OUTFMT:
904 case SKL_MODULE_TYPE_MIC_SELECT:
905 skl_set_base_outfmt_format(skl, module_config, *param_data);
906 break;
907
908 case SKL_MODULE_TYPE_MIXER:
909 case SKL_MODULE_TYPE_KPB:
910 skl_set_base_module_format(skl, module_config, *param_data);
911 break;
912
913 case SKL_MODULE_TYPE_ALGO:
914 default:
915 skl_set_base_module_format(skl, module_config, *param_data);
916 skl_set_base_ext_module_format(skl, module_config,
917 *param_data +
918 sizeof(struct skl_base_cfg));
919 break;
920 }
921
922 dev_dbg(skl->dev, "Module type=%d id=%d config size: %d bytes\n",
923 module_config->m_type, module_config->id.module_id,
924 param_size);
925 print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
926 *param_data, param_size, false);
927 return 0;
928 }
929
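/* Find the pin queue index already associated with the given module instance id. */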
930 static int skl_get_queue_index(struct skl_module_pin *mpin,
931 struct skl_module_inst_id id, int max)
932 {
933 int i;
934
935 for (i = 0; i < max; i++) {
936 if (mpin[i].id.module_id == id.module_id &&
937 mpin[i].id.instance_id == id.instance_id)
938 return i;
939 }
940
941 return -EINVAL;
942 }
943
944 /*
945 * Allocates a queue for each module.
946 * If dynamic, the pin_index is allocated from 0 to max_pin.
947 * If static, the pin_index is fixed based on the module_id and instance id
948 */
949 static int skl_alloc_queue(struct skl_module_pin *mpin,
950 struct skl_module_cfg *tgt_cfg, int max)
951 {
952 int i;
953 struct skl_module_inst_id id = tgt_cfg->id;
954 /*
955 * if the pin is dynamic, find the first free pin;
956 * otherwise find the pin matching the module and instance id, as the
957 * topology will ensure a unique pin is assigned to it, so there is no
958 * need to allocate/free
959 */
960 for (i = 0; i < max; i++) {
961 if (mpin[i].is_dynamic) {
962 if (!mpin[i].in_use &&
963 mpin[i].pin_state == SKL_PIN_UNBIND) {
964
965 mpin[i].in_use = true;
966 mpin[i].id.module_id = id.module_id;
967 mpin[i].id.instance_id = id.instance_id;
968 mpin[i].id.pvt_id = id.pvt_id;
969 mpin[i].tgt_mcfg = tgt_cfg;
970 return i;
971 }
972 } else {
973 if (mpin[i].id.module_id == id.module_id &&
974 mpin[i].id.instance_id == id.instance_id &&
975 mpin[i].pin_state == SKL_PIN_UNBIND) {
976
977 mpin[i].tgt_mcfg = tgt_cfg;
978 return i;
979 }
980 }
981 }
982
983 return -EINVAL;
984 }
985
986 static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
987 {
988 if (mpin[q_index].is_dynamic) {
989 mpin[q_index].in_use = false;
990 mpin[q_index].id.module_id = 0;
991 mpin[q_index].id.instance_id = 0;
992 mpin[q_index].id.pvt_id = 0;
993 }
994 mpin[q_index].pin_state = SKL_PIN_UNBIND;
995 mpin[q_index].tgt_mcfg = NULL;
996 }
997
998 /* Module state is moved back to INIT_DONE if all the output pin states are UNBIND */
999
1000 static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
1001 struct skl_module_cfg *mcfg)
1002 {
1003 int i;
1004 bool found = false;
1005
1006 for (i = 0; i < max; i++) {
1007 if (mpin[i].pin_state == SKL_PIN_UNBIND)
1008 continue;
1009 found = true;
1010 break;
1011 }
1012
1013 if (!found)
1014 mcfg->m_state = SKL_MODULE_INIT_DONE;
1015 return;
1016 }
1017
1018 /*
1019 * A module needs to be instantiated in the DSP. A module is present in a
1020 * collection of modules referred to as a PIPE.
1021 * We first calculate the module format, based on the module type, and then
1022 * invoke the DSP by sending the IPC INIT_INSTANCE using the ipc helper
1023 */
1024 int skl_init_module(struct skl_dev *skl,
1025 struct skl_module_cfg *mconfig)
1026 {
1027 u16 module_config_size = 0;
1028 void *param_data = NULL;
1029 int ret;
1030 struct skl_ipc_init_instance_msg msg;
1031
1032 dev_dbg(skl->dev, "%s: module_id = %d instance=%d\n", __func__,
1033 mconfig->id.module_id, mconfig->id.pvt_id);
1034
1035 if (mconfig->pipe->state != SKL_PIPE_CREATED) {
1036 dev_err(skl->dev, "Pipe not created state= %d pipe_id= %d\n",
1037 mconfig->pipe->state, mconfig->pipe->ppl_id);
1038 return -EIO;
1039 }
1040
1041 ret = skl_set_module_format(skl, mconfig,
1042 &module_config_size, ¶m_data);
1043 if (ret < 0) {
1044 dev_err(skl->dev, "Failed to set module format ret=%d\n", ret);
1045 return ret;
1046 }
1047
1048 msg.module_id = mconfig->id.module_id;
1049 msg.instance_id = mconfig->id.pvt_id;
1050 msg.ppl_instance_id = mconfig->pipe->ppl_id;
1051 msg.param_data_size = module_config_size;
1052 msg.core_id = mconfig->core_id;
1053 msg.domain = mconfig->domain;
1054
1055 ret = skl_ipc_init_instance(&skl->ipc, &msg, param_data);
1056 if (ret < 0) {
1057 dev_err(skl->dev, "Failed to init instance ret=%d\n", ret);
1058 kfree(param_data);
1059 return ret;
1060 }
1061 mconfig->m_state = SKL_MODULE_INIT_DONE;
1062 kfree(param_data);
1063 return ret;
1064 }
1065
1066 static void skl_dump_bind_info(struct skl_dev *skl, struct skl_module_cfg
1067 *src_module, struct skl_module_cfg *dst_module)
1068 {
1069 dev_dbg(skl->dev, "%s: src module_id = %d src_instance=%d\n",
1070 __func__, src_module->id.module_id, src_module->id.pvt_id);
1071 dev_dbg(skl->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
1072 dst_module->id.module_id, dst_module->id.pvt_id);
1073
1074 dev_dbg(skl->dev, "src_module state = %d dst module state = %d\n",
1075 src_module->m_state, dst_module->m_state);
1076 }
1077
1078 /*
1079 * On module freeup, we need to unbind the module from the modules
1080 * it is already bound to.
1081 * Find the allocated pins and unbind them using the bind_unbind IPC
1082 */
1083 int skl_unbind_modules(struct skl_dev *skl,
1084 struct skl_module_cfg *src_mcfg,
1085 struct skl_module_cfg *dst_mcfg)
1086 {
1087 int ret;
1088 struct skl_ipc_bind_unbind_msg msg;
1089 struct skl_module_inst_id src_id = src_mcfg->id;
1090 struct skl_module_inst_id dst_id = dst_mcfg->id;
1091 int in_max = dst_mcfg->module->max_input_pins;
1092 int out_max = src_mcfg->module->max_output_pins;
1093 int src_index, dst_index, src_pin_state, dst_pin_state;
1094
1095 skl_dump_bind_info(skl, src_mcfg, dst_mcfg);
1096
1097 /* get src queue index */
1098 src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
1099 if (src_index < 0)
1100 return 0;
1101
1102 msg.src_queue = src_index;
1103
1104 /* get dst queue index */
1105 dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
1106 if (dst_index < 0)
1107 return 0;
1108
1109 msg.dst_queue = dst_index;
1110
1111 src_pin_state = src_mcfg->m_out_pin[src_index].pin_state;
1112 dst_pin_state = dst_mcfg->m_in_pin[dst_index].pin_state;
1113
1114 if (src_pin_state != SKL_PIN_BIND_DONE ||
1115 dst_pin_state != SKL_PIN_BIND_DONE)
1116 return 0;
1117
1118 msg.module_id = src_mcfg->id.module_id;
1119 msg.instance_id = src_mcfg->id.pvt_id;
1120 msg.dst_module_id = dst_mcfg->id.module_id;
1121 msg.dst_instance_id = dst_mcfg->id.pvt_id;
1122 msg.bind = false;
1123
1124 ret = skl_ipc_bind_unbind(&skl->ipc, &msg);
1125 if (!ret) {
1126 /* free queue only if unbind is success */
1127 skl_free_queue(src_mcfg->m_out_pin, src_index);
1128 skl_free_queue(dst_mcfg->m_in_pin, dst_index);
1129
1130 /*
1131 * check only the src module bind state, since bind is
1132 * always from src -> sink
1133 */
1134 skl_clear_module_state(src_mcfg->m_out_pin, out_max, src_mcfg);
1135 }
1136
1137 return ret;
1138 }
1139
1140 #define CPR_SINK_FMT_PARAM_ID 2
1141
1142 /*
1143 * Once a module is instantiated it needs to be 'bound' to other modules in
1144 * the pipeline. For binding we need to find the module pins which are bound
1145 * together.
1146 * This function finds the pins and then sends the bind_unbind IPC message to
1147 * the DSP using the IPC helper
1148 */
1149 int skl_bind_modules(struct skl_dev *skl,
1150 struct skl_module_cfg *src_mcfg,
1151 struct skl_module_cfg *dst_mcfg)
1152 {
1153 int ret = 0;
1154 struct skl_ipc_bind_unbind_msg msg;
1155 int in_max = dst_mcfg->module->max_input_pins;
1156 int out_max = src_mcfg->module->max_output_pins;
1157 int src_index, dst_index;
1158 struct skl_module_fmt *format;
1159 struct skl_cpr_pin_fmt pin_fmt;
1160 struct skl_module *module;
1161 struct skl_module_iface *fmt;
1162
1163 skl_dump_bind_info(skl, src_mcfg, dst_mcfg);
1164
1165 if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
1166 dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
1167 return 0;
1168
1169 src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_mcfg, out_max);
1170 if (src_index < 0)
1171 return -EINVAL;
1172
1173 msg.src_queue = src_index;
1174 dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_mcfg, in_max);
1175 if (dst_index < 0) {
1176 skl_free_queue(src_mcfg->m_out_pin, src_index);
1177 return -EINVAL;
1178 }
1179
1180 /*
1181 * Copier module requires the separate large_config_set_ipc to
1182 * configure the pins other than 0
1183 */
1184 if (src_mcfg->m_type == SKL_MODULE_TYPE_COPIER && src_index > 0) {
1185 pin_fmt.sink_id = src_index;
1186 module = src_mcfg->module;
1187 fmt = &module->formats[src_mcfg->fmt_idx];
1188
1189 /* Input fmt is same as that of src module input cfg */
1190 format = &fmt->inputs[0].fmt;
1191 fill_pin_params(&(pin_fmt.src_fmt), format);
1192
1193 format = &fmt->outputs[src_index].fmt;
1194 fill_pin_params(&(pin_fmt.dst_fmt), format);
1195 ret = skl_set_module_params(skl, (void *)&pin_fmt,
1196 sizeof(struct skl_cpr_pin_fmt),
1197 CPR_SINK_FMT_PARAM_ID, src_mcfg);
1198
1199 if (ret < 0)
1200 goto out;
1201 }
1202
1203 msg.dst_queue = dst_index;
1204
1205 dev_dbg(skl->dev, "src queue = %d dst queue =%d\n",
1206 msg.src_queue, msg.dst_queue);
1207
1208 msg.module_id = src_mcfg->id.module_id;
1209 msg.instance_id = src_mcfg->id.pvt_id;
1210 msg.dst_module_id = dst_mcfg->id.module_id;
1211 msg.dst_instance_id = dst_mcfg->id.pvt_id;
1212 msg.bind = true;
1213
1214 ret = skl_ipc_bind_unbind(&skl->ipc, &msg);
1215
1216 if (!ret) {
1217 src_mcfg->m_state = SKL_MODULE_BIND_DONE;
1218 src_mcfg->m_out_pin[src_index].pin_state = SKL_PIN_BIND_DONE;
1219 dst_mcfg->m_in_pin[dst_index].pin_state = SKL_PIN_BIND_DONE;
1220 return ret;
1221 }
1222 out:
1223 /* error case: if IPC fails, clear the queue index */
1224 skl_free_queue(src_mcfg->m_out_pin, src_index);
1225 skl_free_queue(dst_mcfg->m_in_pin, dst_index);
1226
1227 return ret;
1228 }
1229
1230 static int skl_set_pipe_state(struct skl_dev *skl, struct skl_pipe *pipe,
1231 enum skl_ipc_pipeline_state state)
1232 {
1233 dev_dbg(skl->dev, "%s: pipe_state = %d\n", __func__, state);
1234
1235 return skl_ipc_set_pipeline_state(&skl->ipc, pipe->ppl_id, state);
1236 }
1237
1238 /*
1239 * A pipeline is a collection of modules. Before a module is instantiated, a
1240 * pipeline needs to be created for it.
1241 * This function creates the pipeline by sending the create pipeline IPC
1242 * message to the FW
1243 */
1244 int skl_create_pipeline(struct skl_dev *skl, struct skl_pipe *pipe)
1245 {
1246 int ret;
1247
1248 dev_dbg(skl->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
1249
1250 ret = skl_ipc_create_pipeline(&skl->ipc, pipe->memory_pages,
1251 pipe->pipe_priority, pipe->ppl_id,
1252 pipe->lp_mode);
1253 if (ret < 0) {
1254 dev_err(skl->dev, "Failed to create pipeline\n");
1255 return ret;
1256 }
1257
1258 pipe->state = SKL_PIPE_CREATED;
1259
1260 return 0;
1261 }
1262
1263 /*
1264 * A pipeline needs to be deleted on cleanup. If a pipeline is running,
1265 * then pause it first. Before actual deletion, pipeline should enter
1266 * reset state. Finish the procedure by sending delete pipeline IPC.
1267 * DSP will stop the DMA engines and release resources
1268 */
1269 int skl_delete_pipe(struct skl_dev *skl, struct skl_pipe *pipe)
1270 {
1271 int ret;
1272
1273 dev_dbg(skl->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
1274
1275 /* If pipe was not created in FW, do not try to delete it */
1276 if (pipe->state < SKL_PIPE_CREATED)
1277 return 0;
1278
1279 /* If the pipe is started, stop it in the FW first. */
1280 if (pipe->state >= SKL_PIPE_STARTED) {
1281 ret = skl_set_pipe_state(skl, pipe, PPL_PAUSED);
1282 if (ret < 0) {
1283 dev_err(skl->dev, "Failed to stop pipeline\n");
1284 return ret;
1285 }
1286
1287 pipe->state = SKL_PIPE_PAUSED;
1288 }
1289
1290 /* reset pipe state before deletion */
1291 ret = skl_set_pipe_state(skl, pipe, PPL_RESET);
1292 if (ret < 0) {
1293 dev_err(skl->dev, "Failed to reset pipe ret=%d\n", ret);
1294 return ret;
1295 }
1296
1297 pipe->state = SKL_PIPE_RESET;
1298
1299 ret = skl_ipc_delete_pipeline(&skl->ipc, pipe->ppl_id);
1300 if (ret < 0) {
1301 dev_err(skl->dev, "Failed to delete pipeline\n");
1302 return ret;
1303 }
1304
1305 pipe->state = SKL_PIPE_INVALID;
1306
1307 return ret;
1308 }
1309
1310 /*
1311 * A pipeline is also a scheduling entity in the DSP which can be run and
1312 * stopped. For processing data the pipe needs to be run by sending the set
1313 * pipe state IPC to the DSP
1314 */
1315 int skl_run_pipe(struct skl_dev *skl, struct skl_pipe *pipe)
1316 {
1317 int ret;
1318
1319 dev_dbg(skl->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
1320
1321 /* If pipe was not created in FW, do not try to pause or delete */
1322 if (pipe->state < SKL_PIPE_CREATED)
1323 return 0;
1324
1325 /* Pipe has to be paused before it is started */
1326 ret = skl_set_pipe_state(skl, pipe, PPL_PAUSED);
1327 if (ret < 0) {
1328 dev_err(skl->dev, "Failed to pause pipe\n");
1329 return ret;
1330 }
1331
1332 pipe->state = SKL_PIPE_PAUSED;
1333
1334 ret = skl_set_pipe_state(skl, pipe, PPL_RUNNING);
1335 if (ret < 0) {
1336 dev_err(skl->dev, "Failed to start pipe\n");
1337 return ret;
1338 }
1339
1340 pipe->state = SKL_PIPE_STARTED;
1341
1342 return 0;
1343 }
1344
1345 /*
1346 * Stop the pipeline by sending the set pipe state IPC.
1347 * The DSP doesn't implement stop, so we always send the pause message
1348 */
1349 int skl_stop_pipe(struct skl_dev *skl, struct skl_pipe *pipe)
1350 {
1351 int ret;
1352
1353 dev_dbg(skl->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);
1354
1355 /* If pipe was not created in FW, do not try to pause or delete */
1356 if (pipe->state < SKL_PIPE_PAUSED)
1357 return 0;
1358
1359 ret = skl_set_pipe_state(skl, pipe, PPL_PAUSED);
1360 if (ret < 0) {
1361 dev_dbg(skl->dev, "Failed to stop pipe\n");
1362 return ret;
1363 }
1364
1365 pipe->state = SKL_PIPE_PAUSED;
1366
1367 return 0;
1368 }
1369
1370 /*
1371 * Reset the pipeline by sending the set pipe state IPC; this will reset the
1372 * DMA from the DSP side
1373 */
1374 int skl_reset_pipe(struct skl_dev *skl, struct skl_pipe *pipe)
1375 {
1376 int ret;
1377
1378 /* If pipe was not created in FW, do not try to pause or delete */
1379 if (pipe->state < SKL_PIPE_PAUSED)
1380 return 0;
1381
1382 ret = skl_set_pipe_state(skl, pipe, PPL_RESET);
1383 if (ret < 0) {
1384 dev_dbg(skl->dev, "Failed to reset pipe ret=%d\n", ret);
1385 return ret;
1386 }
1387
1388 pipe->state = SKL_PIPE_RESET;
1389
1390 return 0;
1391 }
1392
1393 /* Algo parameter set helper function */
1394 int skl_set_module_params(struct skl_dev *skl, u32 *params, int size,
1395 u32 param_id, struct skl_module_cfg *mcfg)
1396 {
1397 struct skl_ipc_large_config_msg msg;
1398
1399 msg.module_id = mcfg->id.module_id;
1400 msg.instance_id = mcfg->id.pvt_id;
1401 msg.param_data_size = size;
1402 msg.large_param_id = param_id;
1403
1404 return skl_ipc_set_large_config(&skl->ipc, &msg, params);
1405 }
1406
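/* Algo parameter get helper function */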
1407 int skl_get_module_params(struct skl_dev *skl, u32 *params, int size,
1408 u32 param_id, struct skl_module_cfg *mcfg)
1409 {
1410 struct skl_ipc_large_config_msg msg;
1411 size_t bytes = size;
1412
1413 msg.module_id = mcfg->id.module_id;
1414 msg.instance_id = mcfg->id.pvt_id;
1415 msg.param_data_size = size;
1416 msg.large_param_id = param_id;
1417
1418 return skl_ipc_get_large_config(&skl->ipc, &msg, ¶ms, &bytes);
1419 }
1420