/*
 *  skl-message.c - HDA DSP interface for FW registration, Pipe and Module
 *  configurations
 *
 *  Copyright (C) 2015 Intel Corp
 *  Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	    Jeeja KP <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/pci.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "skl-sst-dsp.h"
#include "cnl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "skl-topology.h"
#include "skl-tplg-interface.h"

static int skl_alloc_dma_buf(struct device *dev,
		struct snd_dma_buffer *dmab, size_t size)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
}

static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	bus->io_ops->dma_free_pages(bus, dmab);

	return 0;
}

#define NOTIFICATION_PARAM_ID 3
#define NOTIFICATION_MASK 0xf

/* disable notification for underruns/overruns from firmware module */
void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable)
{
	struct notification_mask mask;
	struct skl_ipc_large_config_msg msg = {0};

	mask.notify = NOTIFICATION_MASK;
	mask.enable = enable;

	msg.large_param_id = NOTIFICATION_PARAM_ID;
	msg.param_data_size = sizeof(mask);

	skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)&mask);
}

static int skl_dsp_setup_spib(struct device *dev, unsigned int size,
				int stream_tag, int enable)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct hdac_stream *stream = snd_hdac_get_stream(bus,
			SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	struct hdac_ext_stream *estream;

	if (!stream)
		return -EINVAL;

	estream = stream_to_hdac_ext_stream(stream);
	/* enable/disable SPIB for this hdac stream */
	snd_hdac_ext_stream_spbcap_enable(ebus, enable, stream->index);

	/* set the spib value */
	snd_hdac_ext_stream_set_spib(ebus, estream, size);

	return 0;
}
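
/*
 * The loader ops below implement the code-loader DMA path used for
 * firmware/library download: skl_dsp_prepare() assigns a host DMA stream
 * and programs SPIB for it, skl_dsp_trigger() starts/stops that stream and
 * skl_dsp_cleanup() releases it again. A rough caller sequence (an
 * illustrative sketch only, not copied from an actual caller) would be:
 *
 *	tag = ops.prepare(dev, format, size, &dmab);
 *	ops.trigger(dev, true, tag);
 *	... wait for the DSP to consume the buffer ...
 *	ops.trigger(dev, false, tag);
 *	ops.cleanup(dev, &dmab, tag);
 */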
static int skl_dsp_prepare(struct device *dev, unsigned int format,
			unsigned int size, struct snd_dma_buffer *dmab)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct hdac_ext_stream *estream;
	struct hdac_stream *stream;
	struct snd_pcm_substream substream;
	int ret;

	if (!bus)
		return -ENODEV;

	memset(&substream, 0, sizeof(substream));
	substream.stream = SNDRV_PCM_STREAM_PLAYBACK;

	estream = snd_hdac_ext_stream_assign(ebus, &substream,
					HDAC_EXT_STREAM_TYPE_HOST);
	if (!estream)
		return -ENODEV;

	stream = hdac_stream(estream);

	/* assign decoupled host DMA channel */
	ret = snd_hdac_dsp_prepare(stream, format, size, dmab);
	if (ret < 0)
		return ret;

	skl_dsp_setup_spib(dev, size, stream->stream_tag, true);

	return stream->stream_tag;
}

static int skl_dsp_trigger(struct device *dev, bool start, int stream_tag)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_stream *stream;
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	stream = snd_hdac_get_stream(bus,
		SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	if (!stream)
		return -EINVAL;

	snd_hdac_dsp_trigger(stream, start);

	return 0;
}

static int skl_dsp_cleanup(struct device *dev,
		struct snd_dma_buffer *dmab, int stream_tag)
{
	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
	struct hdac_stream *stream;
	struct hdac_ext_stream *estream;
	struct hdac_bus *bus = ebus_to_hbus(ebus);

	if (!bus)
		return -ENODEV;

	stream = snd_hdac_get_stream(bus,
		SNDRV_PCM_STREAM_PLAYBACK, stream_tag);
	if (!stream)
		return -EINVAL;

	estream = stream_to_hdac_ext_stream(stream);
	skl_dsp_setup_spib(dev, 0, stream_tag, false);
	snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);

	snd_hdac_dsp_cleanup(stream, dmab);

	return 0;
}

static struct skl_dsp_loader_ops skl_get_loader_ops(void)
{
	struct skl_dsp_loader_ops loader_ops;

	memset(&loader_ops, 0, sizeof(struct skl_dsp_loader_ops));

	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
	loader_ops.free_dma_buf = skl_free_dma_buf;

	return loader_ops;
};

static struct skl_dsp_loader_ops bxt_get_loader_ops(void)
{
	struct skl_dsp_loader_ops loader_ops;

	memset(&loader_ops, 0, sizeof(loader_ops));

	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
	loader_ops.free_dma_buf = skl_free_dma_buf;
	loader_ops.prepare = skl_dsp_prepare;
	loader_ops.trigger = skl_dsp_trigger;
	loader_ops.cleanup = skl_dsp_cleanup;

	return loader_ops;
};

static const struct skl_dsp_ops dsp_ops[] = {
	{
		.id = 0x9d70,
		.num_cores = 2,
		.loader_ops = skl_get_loader_ops,
		.init = skl_sst_dsp_init,
		.init_fw = skl_sst_init_fw,
		.cleanup = skl_sst_dsp_cleanup
	},
	{
		.id = 0x9d71,
		.num_cores = 2,
		.loader_ops = skl_get_loader_ops,
		.init = kbl_sst_dsp_init,
		.init_fw = skl_sst_init_fw,
		.cleanup = skl_sst_dsp_cleanup
	},
	{
		.id = 0x5a98,
		.num_cores = 2,
		.loader_ops = bxt_get_loader_ops,
		.init = bxt_sst_dsp_init,
		.init_fw = bxt_sst_init_fw,
		.cleanup = bxt_sst_dsp_cleanup
	},
	{
		.id = 0x3198,
		.num_cores = 2,
		.loader_ops = bxt_get_loader_ops,
		.init = bxt_sst_dsp_init,
		.init_fw = bxt_sst_init_fw,
		.cleanup = bxt_sst_dsp_cleanup
	},
	{
		.id = 0x9dc8,
		.num_cores = 4,
		.loader_ops = bxt_get_loader_ops,
		.init = cnl_sst_dsp_init,
		.init_fw = cnl_sst_init_fw,
		.cleanup = cnl_sst_dsp_cleanup
	},
};

const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dsp_ops); i++) {
		if (dsp_ops[i].id == pci_id)
			return &dsp_ops[i];
	}

	return NULL;
}
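
/*
 * skl_init_dsp() below brings the DSP interface up at probe time: it
 * enables the processing-pipe (PP) capability and its interrupt, maps the
 * ADSP MMIO BAR, looks up the per-platform ops by PCI device id via
 * skl_get_dsp_ops(), calls ops->init() to register the firmware interface
 * and then allocates the per-core state/usage_count arrays.
 */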
int skl_init_dsp(struct skl *skl)
{
	void __iomem *mmio_base;
	struct hdac_ext_bus *ebus = &skl->ebus;
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_dsp_loader_ops loader_ops;
	int irq = bus->irq;
	const struct skl_dsp_ops *ops;
	struct skl_dsp_cores *cores;
	int ret;

	/* enable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);

	/* read the BAR of the ADSP MMIO */
	mmio_base = pci_ioremap_bar(skl->pci, 4);
	if (mmio_base == NULL) {
		dev_err(bus->dev, "ioremap error\n");
		return -ENXIO;
	}

	ops = skl_get_dsp_ops(skl->pci->device);
	if (!ops) {
		ret = -EIO;
		goto unmap_mmio;
	}

	loader_ops = ops->loader_ops();
	ret = ops->init(bus->dev, mmio_base, irq,
				skl->fw_name, loader_ops,
				&skl->skl_sst);

	if (ret < 0)
		goto unmap_mmio;

	skl->skl_sst->dsp_ops = ops;
	cores = &skl->skl_sst->cores;
	cores->count = ops->num_cores;

	cores->state = kcalloc(cores->count, sizeof(*cores->state), GFP_KERNEL);
	if (!cores->state) {
		ret = -ENOMEM;
		goto unmap_mmio;
	}

	cores->usage_count = kcalloc(cores->count, sizeof(*cores->usage_count),
				     GFP_KERNEL);
	if (!cores->usage_count) {
		ret = -ENOMEM;
		goto free_core_state;
	}

	dev_dbg(bus->dev, "dsp registration status=%d\n", ret);

	return 0;

free_core_state:
	kfree(cores->state);

unmap_mmio:
	iounmap(mmio_base);

	return ret;
}

int skl_free_dsp(struct skl *skl)
{
	struct hdac_ext_bus *ebus = &skl->ebus;
	struct hdac_bus *bus = ebus_to_hbus(ebus);
	struct skl_sst *ctx = skl->skl_sst;

	/* disable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);

	ctx->dsp_ops->cleanup(bus->dev, ctx);

	kfree(ctx->cores.state);
	kfree(ctx->cores.usage_count);

	if (ctx->dsp->addr.lpe)
		iounmap(ctx->dsp->addr.lpe);

	return 0;
}

/*
 * In the case of "suspend_active", i.e. the Audio IP being active
 * during system suspend, immediately execute any pending D0i3 work
 * before suspending. This is needed for the IP to work in low power
 * mode during system suspend. In the case of normal suspend, cancel
 * any pending D0i3 work.
 */
int skl_suspend_late_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	struct delayed_work *dwork;

	if (!ctx)
		return 0;

	dwork = &ctx->d0i3.work;

	if (dwork->work.func) {
		if (skl->supend_active)
			flush_delayed_work(dwork);
		else
			cancel_delayed_work_sync(dwork);
	}

	return 0;
}

int skl_suspend_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	/* if ppcap is not supported return 0 */
	if (!skl->ebus.bus.ppcap)
		return 0;

	ret = skl_dsp_sleep(ctx->dsp);
	if (ret < 0)
		return ret;

	/* disable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);

	return 0;
}

int skl_resume_dsp(struct skl *skl)
{
	struct skl_sst *ctx = skl->skl_sst;
	int ret;

	/* if ppcap is not supported return 0 */
	if (!skl->ebus.bus.ppcap)
		return 0;

	/* enable ppcap interrupt */
	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);

	/* check if DSP 1st boot is done */
	if (skl->skl_sst->is_first_boot == true)
		return 0;

	ret = skl_dsp_wake(ctx->dsp);
	if (ret < 0)
		return ret;

	skl_dsp_enable_notification(skl->skl_sst, false);
	return ret;
}

enum skl_bitdepth skl_get_bit_depth(int params)
{
	switch (params) {
	case 8:
		return SKL_DEPTH_8BIT;

	case 16:
		return SKL_DEPTH_16BIT;

	case 24:
		return SKL_DEPTH_24BIT;

	case 32:
		return SKL_DEPTH_32BIT;

	default:
		return SKL_DEPTH_INVALID;

	}
}

/*
 * Each module in the DSP expects a base module configuration, which
 * consists of PCM format information, which we calculate in the driver,
 * and resource values which are read from the widget information passed
 * through the topology binary.
 * This is sent when we create a module with the INIT_INSTANCE IPC msg.
 */
static void skl_set_base_module_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_base_cfg *base_cfg)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_res *res = &module->resources[mconfig->res_idx];
	struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *format = &fmt->inputs[0].fmt;

	base_cfg->audio_fmt.number_of_channels = format->channels;

	base_cfg->audio_fmt.s_freq = format->s_freq;
	base_cfg->audio_fmt.bit_depth = format->bit_depth;
	base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
	base_cfg->audio_fmt.ch_cfg = format->ch_cfg;

	dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
			format->bit_depth, format->valid_bit_depth,
			format->ch_cfg);

	base_cfg->audio_fmt.channel_map = format->ch_map;

	base_cfg->audio_fmt.interleaving = format->interleaving_style;

	base_cfg->cps = res->cps;
	base_cfg->ibs = res->ibs;
	base_cfg->obs = res->obs;
	base_cfg->is_pages = res->is_pages;
}
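
/*
 * Note: the module-specific config structures used below (copier, SRC,
 * up/down mixer, algo, base-outfmt) are laid out with a struct skl_base_cfg
 * as their leading member, which is why the format helpers can cast the
 * larger config to (struct skl_base_cfg *) and fill the common part via
 * skl_set_base_module_format() first.
 */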

/*
 * Copies copier capabilities into copier module and updates copier module
 * config size.
 */
static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
				struct skl_cpr_cfg *cpr_mconfig)
{
	if (mconfig->formats_config.caps_size == 0)
		return;

	memcpy(cpr_mconfig->gtw_cfg.config_data,
			mconfig->formats_config.caps,
			mconfig->formats_config.caps_size);

	cpr_mconfig->gtw_cfg.config_length =
			(mconfig->formats_config.caps_size) / 4;
}
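
/*
 * A copier's gateway is identified by a connector node id: dma_type selects
 * the link class (HDA host/link, I2S, DMIC) and vindex selects the DMA
 * channel, link instance or time slot within that class. Copiers that have
 * no gateway at all use SKL_NON_GATEWAY_CPR_NODE_ID.
 */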
#define SKL_NON_GATEWAY_CPR_NODE_ID 0xFFFFFFFF
/*
 * Calculate the gateway settings required for the copier module: the type
 * of gateway and the index of the gateway to use
 */
static u32 skl_get_node_id(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	union skl_connector_node_id node_id = {0};
	union skl_ssp_dma_node ssp_node = {0};
	struct skl_pipe_params *params = mconfig->pipe->p_params;

	switch (mconfig->dev_type) {
	case SKL_DEVICE_BT:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
			SKL_DMA_I2S_LINK_INPUT_CLASS;
		node_id.node.vindex = params->host_dma_id +
					(mconfig->vbus_id << 3);
		break;

	case SKL_DEVICE_I2S:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
			SKL_DMA_I2S_LINK_INPUT_CLASS;
		ssp_node.dma_node.time_slot_index = mconfig->time_slot;
		ssp_node.dma_node.i2s_instance = mconfig->vbus_id;
		node_id.node.vindex = ssp_node.val;
		break;

	case SKL_DEVICE_DMIC:
		node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
		node_id.node.vindex = mconfig->vbus_id +
					(mconfig->time_slot);
		break;

	case SKL_DEVICE_HDALINK:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_HDA_LINK_OUTPUT_CLASS :
			SKL_DMA_HDA_LINK_INPUT_CLASS;
		node_id.node.vindex = params->link_dma_id;
		break;

	case SKL_DEVICE_HDAHOST:
		node_id.node.dma_type =
			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
			SKL_DMA_HDA_HOST_OUTPUT_CLASS :
			SKL_DMA_HDA_HOST_INPUT_CLASS;
		node_id.node.vindex = params->host_dma_id;
		break;

	default:
		node_id.val = 0xFFFFFFFF;
		break;
	}

	return node_id.val;
}

static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_cpr_cfg *cpr_mconfig)
{
	u32 dma_io_buf;
	struct skl_module_res *res;
	int res_idx = mconfig->res_idx;
	struct skl *skl = get_skl_ctx(ctx->dev);

	cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);

	if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
		cpr_mconfig->cpr_feature_mask = 0;
		return;
	}

	if (skl->nr_modules) {
		res = &mconfig->module->resources[mconfig->res_idx];
		cpr_mconfig->gtw_cfg.dma_buffer_size = res->dma_buffer_size;
		goto skip_buf_size_calc;
	} else {
		res = &mconfig->module->resources[res_idx];
	}

	switch (mconfig->hw_conn_type) {
	case SKL_CONN_SOURCE:
		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			dma_io_buf = res->ibs;
		else
			dma_io_buf = res->obs;
		break;

	case SKL_CONN_SINK:
		if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
			dma_io_buf = res->obs;
		else
			dma_io_buf = res->ibs;
		break;

	default:
		dev_warn(ctx->dev, "wrong connection type: %d\n",
				mconfig->hw_conn_type);
		return;
	}

	cpr_mconfig->gtw_cfg.dma_buffer_size =
				mconfig->dma_buffer_size * dma_io_buf;

	/* fallback to 2ms default value */
	if (!cpr_mconfig->gtw_cfg.dma_buffer_size) {
		if (mconfig->hw_conn_type == SKL_CONN_SOURCE)
			cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->obs;
		else
			cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * res->ibs;
	}

skip_buf_size_calc:
	cpr_mconfig->cpr_feature_mask = 0;
	cpr_mconfig->gtw_cfg.config_length = 0;

	skl_copy_copier_caps(mconfig, cpr_mconfig);
}

#define DMA_CONTROL_ID 5

int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
{
	struct skl_dma_control *dma_ctrl;
	struct skl_ipc_large_config_msg msg = {0};
	int err = 0;

	/*
	 * if the blob size is zero, then return
	 */
	if (mconfig->formats_config.caps_size == 0)
		return 0;

	msg.large_param_id = DMA_CONTROL_ID;
	msg.param_data_size = sizeof(struct skl_dma_control) +
				mconfig->formats_config.caps_size;

	dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
	if (dma_ctrl == NULL)
		return -ENOMEM;

	dma_ctrl->node_id = skl_get_node_id(ctx, mconfig);

	/* size in dwords */
	dma_ctrl->config_length = mconfig->formats_config.caps_size / 4;

	memcpy(dma_ctrl->config_data, mconfig->formats_config.caps,
				mconfig->formats_config.caps_size);

	err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);

	kfree(dma_ctrl);
	return err;
}
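
/*
 * skl_setup_out_format() fills the output format from output pin 0 of the
 * module; for copiers, pins other than 0 are configured later from
 * skl_bind_modules() with a CPR_SINK_FMT_PARAM_ID large-config set.
 */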
static void skl_setup_out_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_audio_data_format *out_fmt)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *fmt = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *format = &fmt->outputs[0].fmt;

	out_fmt->number_of_channels = (u8)format->channels;
	out_fmt->s_freq = format->s_freq;
	out_fmt->bit_depth = format->bit_depth;
	out_fmt->valid_bit_depth = format->valid_bit_depth;
	out_fmt->ch_cfg = format->ch_cfg;

	out_fmt->channel_map = format->ch_map;
	out_fmt->interleaving = format->interleaving_style;
	out_fmt->sample_type = format->sample_type;

	dev_dbg(ctx->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
		out_fmt->number_of_channels, format->s_freq, format->bit_depth);
}

/*
 * The DSP needs the SRC module for frequency conversion. SRC takes the
 * base module configuration and the target frequency as an extra
 * parameter, passed as the src config.
 */
static void skl_set_src_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_src_module_cfg *src_mconfig)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *fmt = &iface->outputs[0].fmt;

	skl_set_base_module_format(ctx, mconfig,
		(struct skl_base_cfg *)src_mconfig);

	src_mconfig->src_cfg = fmt->s_freq;
}

/*
 * The DSP needs the updown module to do channel conversion. The updown
 * module takes the base module configuration and the channel
 * configuration.
 * It also takes coefficients; for now the F/W defaults are applied here.
 */
static void skl_set_updown_mixer_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_up_down_mixer_cfg *mixer_mconfig)
{
	struct skl_module *module = mconfig->module;
	struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
	struct skl_module_fmt *fmt = &iface->outputs[0].fmt;
	int i = 0;

	skl_set_base_module_format(ctx, mconfig,
		(struct skl_base_cfg *)mixer_mconfig);
	mixer_mconfig->out_ch_cfg = fmt->ch_cfg;

	/* Select F/W default coefficient */
	mixer_mconfig->coeff_sel = 0x0;

	/* User coeff, don't care since we are selecting F/W defaults */
	for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
		mixer_mconfig->coeff[i] = 0xDEADBEEF;
}

/*
 * 'copier' is a DSP internal module which copies data from Host DMA (HDA
 * host DMA) or link (HDA link, SSP, PDM).
 * Here we calculate the copier module parameters, like PCM format, output
 * format and gateway settings.
 * copier_module_config is sent as the input buffer with the INIT_INSTANCE
 * IPC msg.
 */
static void skl_set_copier_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_cpr_cfg *cpr_mconfig)
{
	struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;

	skl_set_base_module_format(ctx, mconfig, base_cfg);

	skl_setup_out_format(ctx, mconfig, out_fmt);
	skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
}

/*
 * Algo modules are DSP pre-processing modules. An algo module takes the
 * base module configuration and params.
 */

static void skl_set_algo_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_algo_cfg *algo_mcfg)
{
	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)algo_mcfg;

	skl_set_base_module_format(ctx, mconfig, base_cfg);

	if (mconfig->formats_config.caps_size == 0)
		return;

	memcpy(algo_mcfg->params,
			mconfig->formats_config.caps,
			mconfig->formats_config.caps_size);

}

/*
 * Mic select module allows selecting one or many input channels, thus
 * acting as a demux.
 *
 * Mic select module takes the base module configuration and the
 * out-format configuration.
 */
static void skl_set_base_outfmt_format(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig,
			struct skl_base_outfmt_cfg *base_outfmt_mcfg)
{
	struct skl_audio_data_format *out_fmt = &base_outfmt_mcfg->out_fmt;
	struct skl_base_cfg *base_cfg =
				(struct skl_base_cfg *)base_outfmt_mcfg;

	skl_set_base_module_format(ctx, mconfig, base_cfg);
	skl_setup_out_format(ctx, mconfig, out_fmt);
}

static u16 skl_get_module_param_size(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	u16 param_size;

	switch (mconfig->m_type) {
	case SKL_MODULE_TYPE_COPIER:
		param_size = sizeof(struct skl_cpr_cfg);
		param_size += mconfig->formats_config.caps_size;
		return param_size;

	case SKL_MODULE_TYPE_SRCINT:
		return sizeof(struct skl_src_module_cfg);

	case SKL_MODULE_TYPE_UPDWMIX:
		return sizeof(struct skl_up_down_mixer_cfg);

	case SKL_MODULE_TYPE_ALGO:
		param_size = sizeof(struct skl_base_cfg);
		param_size += mconfig->formats_config.caps_size;
		return param_size;

	case SKL_MODULE_TYPE_BASE_OUTFMT:
	case SKL_MODULE_TYPE_MIC_SELECT:
	case SKL_MODULE_TYPE_KPB:
		return sizeof(struct skl_base_outfmt_cfg);

	default:
		/*
		 * return only base cfg when no specific module type is
		 * specified
		 */
		return sizeof(struct skl_base_cfg);
	}

	return 0;
}

/*
 * DSP firmware supports various modules like copier, SRC, updown etc.
 * These modules require various parameters to be calculated and sent to
 * the DSP for module initialization. By default a generic module needs
 * only the base module format configuration.
 */

static int skl_set_module_format(struct skl_sst *ctx,
			struct skl_module_cfg *module_config,
			u16 *module_config_size,
			void **param_data)
{
	u16 param_size;

	param_size = skl_get_module_param_size(ctx, module_config);

	*param_data = kzalloc(param_size, GFP_KERNEL);
	if (NULL == *param_data)
		return -ENOMEM;

	*module_config_size = param_size;

	switch (module_config->m_type) {
	case SKL_MODULE_TYPE_COPIER:
		skl_set_copier_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_SRCINT:
		skl_set_src_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_UPDWMIX:
		skl_set_updown_mixer_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_ALGO:
		skl_set_algo_format(ctx, module_config, *param_data);
		break;

	case SKL_MODULE_TYPE_BASE_OUTFMT:
	case SKL_MODULE_TYPE_MIC_SELECT:
	case SKL_MODULE_TYPE_KPB:
		skl_set_base_outfmt_format(ctx, module_config, *param_data);
		break;

	default:
		skl_set_base_module_format(ctx, module_config, *param_data);
		break;

	}

	dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
			module_config->id.module_id, param_size);
	print_hex_dump_debug("Module params:", DUMP_PREFIX_OFFSET, 8, 4,
			*param_data, param_size, false);
	return 0;
}

static int skl_get_queue_index(struct skl_module_pin *mpin,
				struct skl_module_inst_id id, int max)
{
	int i;

	for (i = 0; i < max; i++) {
		if (mpin[i].id.module_id == id.module_id &&
			mpin[i].id.instance_id == id.instance_id)
			return i;
	}

	return -EINVAL;
}
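
/*
 * Each module tracks its input and output pins in the m_in_pin/m_out_pin
 * arrays; the index found or allocated by the helpers below is the "queue"
 * number that the bind/unbind IPC (src_queue/dst_queue) refers to.
 */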

/*
 * Allocates a queue for each module.
 * If dynamic, the pin_index is allocated from 0 to max_pin.
 * If static, the pin_index is fixed based on module_id and instance id.
 */
static int skl_alloc_queue(struct skl_module_pin *mpin,
			struct skl_module_cfg *tgt_cfg, int max)
{
	int i;
	struct skl_module_inst_id id = tgt_cfg->id;
	/*
	 * if the pin is dynamic, find the first free pin; otherwise find
	 * the pin matching the module and instance id, as the topology
	 * will ensure a unique pin is assigned to it, so there is no need
	 * to allocate/free
	 */
	for (i = 0; i < max; i++) {
		if (mpin[i].is_dynamic) {
			if (!mpin[i].in_use &&
				mpin[i].pin_state == SKL_PIN_UNBIND) {

				mpin[i].in_use = true;
				mpin[i].id.module_id = id.module_id;
				mpin[i].id.instance_id = id.instance_id;
				mpin[i].id.pvt_id = id.pvt_id;
				mpin[i].tgt_mcfg = tgt_cfg;
				return i;
			}
		} else {
			if (mpin[i].id.module_id == id.module_id &&
				mpin[i].id.instance_id == id.instance_id &&
				mpin[i].pin_state == SKL_PIN_UNBIND) {

				mpin[i].tgt_mcfg = tgt_cfg;
				return i;
			}
		}
	}

	return -EINVAL;
}

static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
{
	if (mpin[q_index].is_dynamic) {
		mpin[q_index].in_use = false;
		mpin[q_index].id.module_id = 0;
		mpin[q_index].id.instance_id = 0;
		mpin[q_index].id.pvt_id = 0;
	}
	mpin[q_index].pin_state = SKL_PIN_UNBIND;
	mpin[q_index].tgt_mcfg = NULL;
}

/* Module state will be set to uninit, if all the out pin states are UNBIND */

static void skl_clear_module_state(struct skl_module_pin *mpin, int max,
						struct skl_module_cfg *mcfg)
{
	int i;
	bool found = false;

	for (i = 0; i < max; i++) {
		if (mpin[i].pin_state == SKL_PIN_UNBIND)
			continue;
		found = true;
		break;
	}

	if (!found)
		mcfg->m_state = SKL_MODULE_INIT_DONE;
	return;
}
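
/*
 * Module state summary (as used below): skl_init_module() moves a module to
 * SKL_MODULE_INIT_DONE once the INIT_INSTANCE IPC succeeds, a successful
 * bind in skl_bind_modules() moves it to SKL_MODULE_BIND_DONE, and once all
 * of its output pins are unbound skl_clear_module_state() drops it back to
 * SKL_MODULE_INIT_DONE.
 */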

/*
 * A module needs to be instantiated in the DSP. A module is present in a
 * collection of modules referred to as a PIPE.
 * We first calculate the module format, based on the module type, and then
 * invoke the DSP by sending the INIT_INSTANCE IPC using the IPC helper.
 */
int skl_init_module(struct skl_sst *ctx,
			struct skl_module_cfg *mconfig)
{
	u16 module_config_size = 0;
	void *param_data = NULL;
	int ret;
	struct skl_ipc_init_instance_msg msg;

	dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
		 mconfig->id.module_id, mconfig->id.pvt_id);

	if (mconfig->pipe->state != SKL_PIPE_CREATED) {
		dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
				 mconfig->pipe->state, mconfig->pipe->ppl_id);
		return -EIO;
	}

	ret = skl_set_module_format(ctx, mconfig,
			&module_config_size, &param_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
		return ret;
	}

	msg.module_id = mconfig->id.module_id;
	msg.instance_id = mconfig->id.pvt_id;
	msg.ppl_instance_id = mconfig->pipe->ppl_id;
	msg.param_data_size = module_config_size;
	msg.core_id = mconfig->core_id;
	msg.domain = mconfig->domain;

	ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
		kfree(param_data);
		return ret;
	}
	mconfig->m_state = SKL_MODULE_INIT_DONE;
	kfree(param_data);
	return ret;
}

static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
	*src_module, struct skl_module_cfg *dst_module)
{
	dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
		__func__, src_module->id.module_id, src_module->id.pvt_id);
	dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
		 dst_module->id.module_id, dst_module->id.pvt_id);

	dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
		src_module->m_state, dst_module->m_state);
}

/*
 * On module freeup, we need to unbind the module from the modules it is
 * already bound to.
 * Find the allocated pins and unbind them using the bind_unbind IPC.
 */
int skl_unbind_modules(struct skl_sst *ctx,
			struct skl_module_cfg *src_mcfg,
			struct skl_module_cfg *dst_mcfg)
{
	int ret;
	struct skl_ipc_bind_unbind_msg msg;
	struct skl_module_inst_id src_id = src_mcfg->id;
	struct skl_module_inst_id dst_id = dst_mcfg->id;
	int in_max = dst_mcfg->module->max_input_pins;
	int out_max = src_mcfg->module->max_output_pins;
	int src_index, dst_index, src_pin_state, dst_pin_state;

	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

	/* get src queue index */
	src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
	if (src_index < 0)
		return 0;

	msg.src_queue = src_index;

	/* get dst queue index */
	dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
	if (dst_index < 0)
		return 0;

	msg.dst_queue = dst_index;

	src_pin_state = src_mcfg->m_out_pin[src_index].pin_state;
	dst_pin_state = dst_mcfg->m_in_pin[dst_index].pin_state;

	if (src_pin_state != SKL_PIN_BIND_DONE ||
		dst_pin_state != SKL_PIN_BIND_DONE)
		return 0;

	msg.module_id = src_mcfg->id.module_id;
	msg.instance_id = src_mcfg->id.pvt_id;
	msg.dst_module_id = dst_mcfg->id.module_id;
	msg.dst_instance_id = dst_mcfg->id.pvt_id;
	msg.bind = false;

	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
	if (!ret) {
		/* free queue only if unbind is successful */
		skl_free_queue(src_mcfg->m_out_pin, src_index);
		skl_free_queue(dst_mcfg->m_in_pin, dst_index);

		/*
		 * check only the src module bind state, as bind is
		 * always from src -> sink
		 */
		skl_clear_module_state(src_mcfg->m_out_pin, out_max, src_mcfg);
	}

	return ret;
}

static void fill_pin_params(struct skl_audio_data_format *pin_fmt,
				struct skl_module_fmt *format)
{
	pin_fmt->number_of_channels = format->channels;
	pin_fmt->s_freq = format->s_freq;
	pin_fmt->bit_depth = format->bit_depth;
	pin_fmt->valid_bit_depth = format->valid_bit_depth;
	pin_fmt->ch_cfg = format->ch_cfg;
	pin_fmt->sample_type = format->sample_type;
	pin_fmt->channel_map = format->ch_map;
	pin_fmt->interleaving = format->interleaving_style;
}

#define CPR_SINK_FMT_PARAM_ID 2

/*
 * Once a module is instantiated it needs to be 'bound' with other modules
 * in the pipeline. For binding we need to find the module pins which are
 * bound together.
 * This function finds the pins and then sends the bind_unbind IPC message
 * to the DSP using the IPC helper.
 */
int skl_bind_modules(struct skl_sst *ctx,
			struct skl_module_cfg *src_mcfg,
			struct skl_module_cfg *dst_mcfg)
{
	int ret = 0;
	struct skl_ipc_bind_unbind_msg msg;
	int in_max = dst_mcfg->module->max_input_pins;
	int out_max = src_mcfg->module->max_output_pins;
	int src_index, dst_index;
	struct skl_module_fmt *format;
	struct skl_cpr_pin_fmt pin_fmt;
	struct skl_module *module;
	struct skl_module_iface *fmt;

	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);

	if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
		dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
		return 0;

	src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_mcfg, out_max);
	if (src_index < 0)
		return -EINVAL;

	msg.src_queue = src_index;
	dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_mcfg, in_max);
	if (dst_index < 0) {
		skl_free_queue(src_mcfg->m_out_pin, src_index);
		return -EINVAL;
	}

	/*
	 * Copier module requires the separate large_config_set_ipc to
	 * configure the pins other than 0
	 */
	if (src_mcfg->m_type == SKL_MODULE_TYPE_COPIER && src_index > 0) {
		pin_fmt.sink_id = src_index;
		module = src_mcfg->module;
		fmt = &module->formats[src_mcfg->fmt_idx];

		/* Input fmt is same as that of src module input cfg */
		format = &fmt->inputs[0].fmt;
		fill_pin_params(&(pin_fmt.src_fmt), format);

		format = &fmt->outputs[src_index].fmt;
		fill_pin_params(&(pin_fmt.dst_fmt), format);
		ret = skl_set_module_params(ctx, (void *)&pin_fmt,
					sizeof(struct skl_cpr_pin_fmt),
					CPR_SINK_FMT_PARAM_ID, src_mcfg);

		if (ret < 0)
			goto out;
	}

	msg.dst_queue = dst_index;

	dev_dbg(ctx->dev, "src queue = %d dst queue = %d\n",
			 msg.src_queue, msg.dst_queue);

	msg.module_id = src_mcfg->id.module_id;
	msg.instance_id = src_mcfg->id.pvt_id;
	msg.dst_module_id = dst_mcfg->id.module_id;
	msg.dst_instance_id = dst_mcfg->id.pvt_id;
	msg.bind = true;

	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);

	if (!ret) {
		src_mcfg->m_state = SKL_MODULE_BIND_DONE;
		src_mcfg->m_out_pin[src_index].pin_state = SKL_PIN_BIND_DONE;
		dst_mcfg->m_in_pin[dst_index].pin_state = SKL_PIN_BIND_DONE;
		return ret;
	}
out:
	/* error case: if IPC fails, clear the queue index */
	skl_free_queue(src_mcfg->m_out_pin, src_index);
	skl_free_queue(dst_mcfg->m_in_pin, dst_index);

	return ret;
}

static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
	enum skl_ipc_pipeline_state state)
{
	dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);

	return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
}
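
/*
 * Pipeline state summary (as driven by the helpers below): a pipe is
 * created in FW (SKL_PIPE_CREATED), paused before it is started or deleted
 * (SKL_PIPE_PAUSED), run (SKL_PIPE_STARTED), optionally reset
 * (SKL_PIPE_RESET) and finally deleted (SKL_PIPE_INVALID). The firmware has
 * no explicit "stop", so stopping is always done by pausing.
 */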

/*
 * A pipeline is a collection of modules. Before a module is instantiated,
 * a pipeline needs to be created for it.
 * This function creates a pipeline by sending the create pipeline IPC
 * message to the FW.
 */
int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);

	ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
				pipe->pipe_priority, pipe->ppl_id,
				pipe->lp_mode);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to create pipeline\n");
		return ret;
	}

	pipe->state = SKL_PIPE_CREATED;

	return 0;
}

/*
 * A pipeline needs to be deleted on cleanup. If a pipeline is running,
 * then pause the pipeline first and then delete it.
 * The pipe delete is done by sending the delete pipeline IPC. The DSP will
 * stop the DMA engines and release the resources.
 */
int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

	/* If pipe is started, do stop the pipe in FW. */
	if (pipe->state >= SKL_PIPE_STARTED) {
		ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to stop pipeline\n");
			return ret;
		}

		pipe->state = SKL_PIPE_PAUSED;
	}

	/* If pipe was not created in FW, do not try to delete it */
	if (pipe->state < SKL_PIPE_CREATED)
		return 0;

	ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to delete pipeline\n");
		return ret;
	}

	pipe->state = SKL_PIPE_INVALID;

	return ret;
}

/*
 * A pipeline is also a scheduling entity in the DSP which can be run or
 * stopped.
 * For processing data the pipe needs to be run by sending the set pipe
 * state IPC to the DSP.
 */
int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_CREATED)
		return 0;

	/* Pipe has to be paused before it is started */
	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to pause pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_PAUSED;

	ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to start pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_STARTED;

	return 0;
}

/*
 * Stop the pipeline by sending the set pipe state IPC.
 * The DSP doesn't implement stop, so we always send the pause message.
 */
int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_PAUSED)
		return 0;

	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
	if (ret < 0) {
		dev_dbg(ctx->dev, "Failed to stop pipe\n");
		return ret;
	}

	pipe->state = SKL_PIPE_PAUSED;

	return 0;
}

/*
 * Reset the pipeline by sending the set pipe state IPC; this will reset
 * the DMA from the DSP side.
 */
int skl_reset_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
{
	int ret;

	/* If pipe was not created in FW, do not try to pause or delete */
	if (pipe->state < SKL_PIPE_PAUSED)
		return 0;

	ret = skl_set_pipe_state(ctx, pipe, PPL_RESET);
	if (ret < 0) {
		dev_dbg(ctx->dev, "Failed to reset pipe ret=%d\n", ret);
		return ret;
	}

	pipe->state = SKL_PIPE_RESET;

	return 0;
}

/* Algo parameter set helper function */
int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
				u32 param_id, struct skl_module_cfg *mcfg)
{
	struct skl_ipc_large_config_msg msg;

	msg.module_id = mcfg->id.module_id;
	msg.instance_id = mcfg->id.pvt_id;
	msg.param_data_size = size;
	msg.large_param_id = param_id;

	return skl_ipc_set_large_config(&ctx->ipc, &msg, params);
}

int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
			  u32 param_id, struct skl_module_cfg *mcfg)
{
	struct skl_ipc_large_config_msg msg;

	msg.module_id = mcfg->id.module_id;
	msg.instance_id = mcfg->id.pvt_id;
	msg.param_data_size = size;
	msg.large_param_id = param_id;

	return skl_ipc_get_large_config(&ctx->ipc, &msg, params);
}