// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-sst.c - HDA DSP library functions for SKL platform
 *
 * Copyright (C) 2014-15, Intel Corporation.
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	   Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/uuid.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "skl-sst-ipc.h"

#define SKL_BASEFW_TIMEOUT	300
#define SKL_INIT_TIMEOUT	1000

/* Intel HD Audio SRAM Window 0 */
#define SKL_ADSP_SRAM0_BASE	0x8000

/* Firmware status window */
#define SKL_ADSP_FW_STATUS	SKL_ADSP_SRAM0_BASE
#define SKL_ADSP_ERROR_CODE	(SKL_ADSP_FW_STATUS + 0x4)

#define SKL_NUM_MODULES		1

static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
{
	u32 cur_sts;

	cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;

	return (cur_sts == status);
}

static int skl_transfer_firmware(struct sst_dsp *ctx,
		const void *basefw, u32 base_fw_size)
{
	int ret = 0;

	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size,
						true);
	if (ret < 0)
		return ret;

	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_FW_STATUS,
			SKL_FW_STS_MASK,
			SKL_FW_RFW_START,
			SKL_BASEFW_TIMEOUT,
			"Firmware boot");

	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}

#define SKL_ADSP_FW_BIN_HDR_OFFSET 0x284

static int skl_load_base_firmware(struct sst_dsp *ctx)
{
	int ret = 0, i;
	struct skl_sst *skl = ctx->thread_context;
	struct firmware stripped_fw;
	u32 reg;

	skl->boot_complete = false;
	init_waitqueue_head(&skl->boot_wait);

	if (ctx->fw == NULL) {
		ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
		if (ret < 0) {
			dev_err(ctx->dev, "Request firmware failed %d\n", ret);
			return -EIO;
		}
	}

	/* parse uuids on first boot */
	if (skl->is_first_boot) {
		ret = snd_skl_parse_uuids(ctx, ctx->fw, SKL_ADSP_FW_BIN_HDR_OFFSET, 0);
		if (ret < 0) {
			dev_err(ctx->dev, "UUID parsing err: %d\n", ret);
			release_firmware(ctx->fw);
			skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
			return ret;
		}
	}

	/* check for extended manifest */
	stripped_fw.data = ctx->fw->data;
	stripped_fw.size = ctx->fw->size;

	skl_dsp_strip_extended_manifest(&stripped_fw);

	ret = skl_dsp_boot(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "Boot dsp core failed ret: %d\n", ret);
		goto skl_load_base_firmware_failed;
	}

	ret = skl_cldma_prepare(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "CL dma prepare failed: %d\n", ret);
		goto skl_load_base_firmware_failed;
	}

	/* enable Interrupt */
	skl_ipc_int_enable(ctx);
	skl_ipc_op_int_enable(ctx);

	/* check ROM Status */
	for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
		if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
			dev_dbg(ctx->dev,
				"ROM loaded, we can continue with FW loading\n");
			break;
		}
		mdelay(1);
	}
	if (!i) {
		reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
		dev_err(ctx->dev,
			"Timeout waiting for ROM init done, reg: 0x%x\n", reg);
		ret = -EIO;
		goto transfer_firmware_failed;
	}
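
	/*
	 * ROM is up: stream the stripped base firmware over the code loader
	 * DMA, then wait for boot_complete (signalled once the firmware
	 * reports it is ready) before declaring the download successful.
	 */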
	ret = skl_transfer_firmware(ctx, stripped_fw.data, stripped_fw.size);
	if (ret < 0) {
		dev_err(ctx->dev, "Transfer firmware failed: %d\n", ret);
		goto transfer_firmware_failed;
	} else {
		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
		if (ret == 0) {
			dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
			ret = -EIO;
			goto transfer_firmware_failed;
		}

		dev_dbg(ctx->dev, "Download firmware successful: %d\n", ret);
		skl->fw_loaded = true;
	}
	return 0;
transfer_firmware_failed:
	ctx->cl_dev.ops.cl_cleanup_controller(ctx);
skl_load_base_firmware_failed:
	skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
	release_firmware(ctx->fw);
	ctx->fw = NULL;
	return ret;
}

static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_sst *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	/* If core0 is being turned on, we need to load the FW */
	if (core_id == SKL_DSP_CORE0_ID) {
		ret = skl_load_base_firmware(ctx);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to load firmware\n");
			return ret;
		}

		/* load libs as they are also lost on D3 */
		if (skl->lib_count > 1) {
			ret = ctx->fw_ops.load_library(ctx, skl->lib_info,
						skl->lib_count);
			if (ret < 0) {
				dev_err(ctx->dev, "reload libs failed: %d\n",
						ret);
				return ret;
			}

		}
	}

	/*
	 * If any core other than core 0 is being moved to D0, enable the
	 * core and send the set dx IPC for the core.
	 */
	if (core_id != SKL_DSP_CORE0_ID) {
		ret = skl_dsp_enable_core(ctx, core_mask);
		if (ret < 0)
			return ret;

		dx.core_mask = core_mask;
		dx.dx_mask = core_mask;

		ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
					SKL_BASE_FW_MODULE_ID, &dx);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to set dsp to D0: core id = %d\n",
					core_id);
			skl_dsp_disable_core(ctx, core_mask);
		}
	}

	skl->cores.state[core_id] = SKL_DSP_RUNNING;

	return 0;
}

static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_sst *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	dx.core_mask = core_mask;
	dx.dx_mask = SKL_IPC_D3_MASK;

	ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
	if (ret < 0)
		dev_err(ctx->dev, "set Dx core %d fail: %d\n", core_id, ret);

	if (core_id == SKL_DSP_CORE0_ID) {
		/* disable Interrupt */
		ctx->cl_dev.ops.cl_cleanup_controller(ctx);
		skl_cldma_int_disable(ctx);
		skl_ipc_op_int_disable(ctx);
		skl_ipc_int_disable(ctx);
	}

	ret = skl_dsp_disable_core(ctx, core_mask);
	if (ret < 0)
		return ret;

	skl->cores.state[core_id] = SKL_DSP_RESET;
	return ret;
}

static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
{
	return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
}

/*
 * since get/set_module are called from DAPM context,
 * we don't need lock for usage count
 */
static int skl_get_module(struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return ++module->usage_cnt;
	}

	return -EINVAL;
}
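
/*
 * Drop a module reference; returns the remaining usage count, or
 * -EINVAL if the module is not in the module list.
 */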
static int skl_put_module(struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return --module->usage_cnt;
	}

	return -EINVAL;
}

static struct skl_module_table *skl_fill_module_table(struct sst_dsp *ctx,
						char *mod_name, int mod_id)
{
	const struct firmware *fw;
	struct skl_module_table *skl_module;
	unsigned int size;
	int ret;

	ret = request_firmware(&fw, mod_name, ctx->dev);
	if (ret < 0) {
		dev_err(ctx->dev, "Request Module %s failed: %d\n",
				mod_name, ret);
		return NULL;
	}

	skl_module = devm_kzalloc(ctx->dev, sizeof(*skl_module), GFP_KERNEL);
	if (skl_module == NULL) {
		release_firmware(fw);
		return NULL;
	}

	size = sizeof(*skl_module->mod_info);
	skl_module->mod_info = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
	if (skl_module->mod_info == NULL) {
		release_firmware(fw);
		return NULL;
	}

	skl_module->mod_info->mod_id = mod_id;
	skl_module->mod_info->fw = fw;
	list_add(&skl_module->list, &ctx->module_list);

	return skl_module;
}

/* get a module from its unique ID */
static struct skl_module_table *skl_module_get_from_id(
			struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	if (list_empty(&ctx->module_list)) {
		dev_err(ctx->dev, "Module list is empty\n");
		return NULL;
	}

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return module;
	}

	return NULL;
}

static int skl_transfer_module(struct sst_dsp *ctx, const void *data,
			u32 size, u16 mod_id, u8 table_id, bool is_module)
{
	int ret, bytes_left, curr_pos;
	struct skl_sst *skl = ctx->thread_context;

	skl->mod_load_complete = false;

	bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, data, size, false);
	if (bytes_left < 0)
		return bytes_left;

	/* check is_module flag to load module or library */
	if (is_module)
		ret = skl_ipc_load_modules(&skl->ipc, SKL_NUM_MODULES, &mod_id);
	else
		ret = skl_sst_ipc_load_library(&skl->ipc, 0, table_id, false);

	if (ret < 0) {
		dev_err(ctx->dev, "Failed to Load %s with err %d\n",
				is_module ? "module" : "lib", ret);
		goto out;
	}

	/*
	 * if bytes_left > 0 then wait for BDL complete interrupt and
	 * copy the next chunk till bytes_left is 0. if bytes_left is
	 * zero, then wait for load module IPC reply
	 */
	while (bytes_left > 0) {
		curr_pos = size - bytes_left;

		ret = skl_cldma_wait_interruptible(ctx);
		if (ret < 0)
			goto out;

		bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx,
							data + curr_pos,
							bytes_left, false);
	}

	ret = wait_event_timeout(skl->mod_load_wait, skl->mod_load_complete,
				msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
	if (ret == 0 || !skl->mod_load_status) {
		dev_err(ctx->dev, "Module Load failed\n");
		ret = -EIO;
	}

out:
	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}
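
/*
 * Download each library image listed in linfo over the code loader DMA;
 * the library index is passed as the table_id to skl_sst_ipc_load_library().
 */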
static int
skl_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count)
{
	struct skl_sst *skl = ctx->thread_context;
	struct firmware stripped_fw;
	int ret, i;

	/* library indices start from 1 to N. 0 represents base FW */
	for (i = 1; i < lib_count; i++) {
		ret = skl_prepare_lib_load(skl, &skl->lib_info[i], &stripped_fw,
					SKL_ADSP_FW_BIN_HDR_OFFSET, i);
		if (ret < 0)
			goto load_library_failed;
		ret = skl_transfer_module(ctx, stripped_fw.data,
				stripped_fw.size, 0, i, false);
		if (ret < 0)
			goto load_library_failed;
	}

	return 0;

load_library_failed:
	skl_release_library(linfo, lib_count);
	return ret;
}

static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid)
{
	struct skl_module_table *module_entry = NULL;
	int ret = 0;
	char mod_name[64]; /* guid str = 32 chars + 4 hyphens */

	snprintf(mod_name, sizeof(mod_name), "%s%pUL%s",
				"intel/dsp_fw_", guid, ".bin");

	module_entry = skl_module_get_from_id(ctx, mod_id);
	if (module_entry == NULL) {
		module_entry = skl_fill_module_table(ctx, mod_name, mod_id);
		if (module_entry == NULL) {
			dev_err(ctx->dev, "Failed to Load module\n");
			return -EINVAL;
		}
	}

	if (!module_entry->usage_cnt) {
		ret = skl_transfer_module(ctx, module_entry->mod_info->fw->data,
				module_entry->mod_info->fw->size,
				mod_id, 0, true);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to Load module\n");
			return ret;
		}
	}

	ret = skl_get_module(ctx, mod_id);

	return ret;
}

static int skl_unload_module(struct sst_dsp *ctx, u16 mod_id)
{
	int usage_cnt;
	struct skl_sst *skl = ctx->thread_context;
	int ret = 0;

	usage_cnt = skl_put_module(ctx, mod_id);
	if (usage_cnt < 0) {
		dev_err(ctx->dev, "Module bad usage cnt: %d\n", usage_cnt);
		return -EIO;
	}

	/* if module is used by others return, no need to unload */
	if (usage_cnt > 0)
		return 0;

	ret = skl_ipc_unload_modules(&skl->ipc,
			SKL_NUM_MODULES, &mod_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to UnLoad module\n");
		skl_get_module(ctx, mod_id);
		return ret;
	}

	return ret;
}

void skl_clear_module_cnt(struct sst_dsp *ctx)
{
	struct skl_module_table *module;

	if (list_empty(&ctx->module_list))
		return;

	list_for_each_entry(module, &ctx->module_list, list) {
		module->usage_cnt = 0;
	}
}
EXPORT_SYMBOL_GPL(skl_clear_module_cnt);

static void skl_clear_module_table(struct sst_dsp *ctx)
{
	struct skl_module_table *module, *tmp;

	if (list_empty(&ctx->module_list))
		return;

	list_for_each_entry_safe(module, tmp, &ctx->module_list, list) {
		list_del(&module->list);
		release_firmware(module->mod_info->fw);
	}
}

static const struct skl_dsp_fw_ops skl_fw_ops = {
	.set_state_D0 = skl_set_dsp_D0,
	.set_state_D3 = skl_set_dsp_D3,
	.load_fw = skl_load_base_firmware,
	.get_fw_errcode = skl_get_errorcode,
	.load_library = skl_load_library,
	.load_mod = skl_load_module,
	.unload_mod = skl_unload_module,
};

static struct sst_ops skl_ops = {
	.irq_handler = skl_dsp_sst_interrupt,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.ram_read = sst_memcpy_fromio_32,
	.ram_write = sst_memcpy_toio_32,
	.free = skl_dsp_free,
};

static struct sst_dsp_device skl_dev = {
	.thread = skl_dsp_irq_thread_handler,
	.ops = &skl_ops,
};
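
/*
 * skl_sst_dsp_init - create the SKL DSP context, program the SHIM/SRAM
 * window addresses, set up the IPC mailbox and acquire the DSP interrupt.
 * The base firmware itself is downloaded later via skl_sst_init_fw().
 */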
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
		const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
		struct skl_sst **dsp)
{
	struct skl_sst *skl;
	struct sst_dsp *sst;
	int ret;

	ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev);
	if (ret < 0) {
		dev_err(dev, "%s: no device\n", __func__);
		return ret;
	}

	skl = *dsp;
	sst = skl->dsp;
	sst->addr.lpe = mmio_base;
	sst->addr.shim = mmio_base;
	sst->addr.sram0_base = SKL_ADSP_SRAM0_BASE;
	sst->addr.sram1_base = SKL_ADSP_SRAM1_BASE;
	sst->addr.w0_stat_sz = SKL_ADSP_W0_STAT_SZ;
	sst->addr.w0_up_sz = SKL_ADSP_W0_UP_SZ;

	sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
			SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);

	ret = skl_ipc_init(dev, skl);
	if (ret) {
		skl_dsp_free(sst);
		return ret;
	}

	sst->fw_ops = skl_fw_ops;

	return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_init);

int skl_sst_init_fw(struct device *dev, struct skl_sst *ctx)
{
	int ret;
	struct sst_dsp *sst = ctx->dsp;

	ret = sst->fw_ops.load_fw(sst);
	if (ret < 0) {
		dev_err(dev, "Load base fw failed: %d\n", ret);
		return ret;
	}

	skl_dsp_init_core_state(sst);

	if (ctx->lib_count > 1) {
		ret = sst->fw_ops.load_library(sst, ctx->lib_info,
						ctx->lib_count);
		if (ret < 0) {
			dev_err(dev, "Load Library failed: %x\n", ret);
			return ret;
		}
	}
	ctx->is_first_boot = false;

	return 0;
}
EXPORT_SYMBOL_GPL(skl_sst_init_fw);

void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
	if (ctx->dsp->fw)
		release_firmware(ctx->dsp->fw);
	skl_clear_module_table(ctx->dsp);
	skl_freeup_uuid_list(ctx);
	skl_ipc_free(&ctx->ipc);
	ctx->dsp->ops->free(ctx->dsp);
	if (ctx->boot_complete) {
		ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
		skl_cldma_int_disable(ctx->dsp);
	}
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Skylake IPC driver");