// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//          Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//          Rander Wang <rander.wang@intel.com>
//          Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for generic Intel audio DSP HDA IP
 */

#include <linux/module.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include "../sof-audio.h"
#include "../ops.h"
#include "hda.h"
#include "hda-ipc.h"

static bool hda_enable_trace_D0I3_S0;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
MODULE_PARM_DESC(enable_trace_D0I3_S0,
                 "SOF HDA enable trace when the DSP is in D0I3 in S0");
#endif

/*
 * DSP Core control.
 */

int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
{
        u32 adspcs;
        u32 reset;
        int ret;

        /* set reset bits for cores */
        reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
        snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
                                         HDA_DSP_REG_ADSPCS,
                                         reset, reset);

        /* poll with timeout to check if operation successful */
        ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
                                            HDA_DSP_REG_ADSPCS, adspcs,
                                            ((adspcs & reset) == reset),
                                            HDA_DSP_REG_POLL_INTERVAL_US,
                                            HDA_DSP_RESET_TIMEOUT_US);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
                        __func__);
                return ret;
        }

        /* has core entered reset ? */
        adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
                                  HDA_DSP_REG_ADSPCS);
        if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
            HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
                dev_err(sdev->dev,
                        "error: reset enter failed: core_mask %x adspcs 0x%x\n",
                        core_mask, adspcs);
                ret = -EIO;
        }

        return ret;
}

int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
{
        unsigned int crst;
        u32 adspcs;
        int ret;

        /* clear reset bits for cores */
        snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
                                         HDA_DSP_REG_ADSPCS,
                                         HDA_DSP_ADSPCS_CRST_MASK(core_mask),
                                         0);

        /* poll with timeout to check if operation successful */
        crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
        ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
                                            HDA_DSP_REG_ADSPCS, adspcs,
                                            !(adspcs & crst),
                                            HDA_DSP_REG_POLL_INTERVAL_US,
                                            HDA_DSP_RESET_TIMEOUT_US);

        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
                        __func__);
                return ret;
        }

        /* has core left reset ? */
        adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
                                  HDA_DSP_REG_ADSPCS);
        if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
                dev_err(sdev->dev,
                        "error: reset leave failed: core_mask %x adspcs 0x%x\n",
                        core_mask, adspcs);
                ret = -EIO;
        }

        return ret;
}

int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
{
        /* stall core */
        snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
                                         HDA_DSP_REG_ADSPCS,
                                         HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
                                         HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

        /* set reset state */
        return hda_dsp_core_reset_enter(sdev, core_mask);
}

int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
{
        int ret;

        /* leave reset state */
        ret = hda_dsp_core_reset_leave(sdev, core_mask);
        if (ret < 0)
                return ret;

        /* run core */
        dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
        snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
                                         HDA_DSP_REG_ADSPCS,
                                         HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
                                         0);

        /* is core now running ? */
        if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
                hda_dsp_core_stall_reset(sdev, core_mask);
                dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
                        core_mask);
                ret = -EIO;
        }

        return ret;
}

/*
 * Power Management.
 */

int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
{
        unsigned int cpa;
        u32 adspcs;
        int ret;

        /* update bits */
        snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
                                HDA_DSP_ADSPCS_SPA_MASK(core_mask),
                                HDA_DSP_ADSPCS_SPA_MASK(core_mask));

        /* poll with timeout to check if operation successful */
        cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
        ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
                                            HDA_DSP_REG_ADSPCS, adspcs,
                                            (adspcs & cpa) == cpa,
                                            HDA_DSP_REG_POLL_INTERVAL_US,
                                            HDA_DSP_RESET_TIMEOUT_US);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
                        __func__);
                return ret;
        }

        /* did core power up ? */
        adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
                                  HDA_DSP_REG_ADSPCS);
        if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
            HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
                dev_err(sdev->dev,
                        "error: power up core failed core_mask %x adspcs 0x%x\n",
                        core_mask, adspcs);
                ret = -EIO;
        }

        return ret;
}

int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
{
        u32 adspcs;
        int ret;

        /* update bits */
        snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
                                         HDA_DSP_REG_ADSPCS,
                                         HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);

        ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
                                            HDA_DSP_REG_ADSPCS, adspcs,
                                            !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
                                            HDA_DSP_REG_POLL_INTERVAL_US,
                                            HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
        if (ret < 0)
                dev_err(sdev->dev,
                        "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
                        __func__);

        return ret;
}

bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
                             unsigned int core_mask)
{
        int val;
        bool is_enable;

        val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);

#define MASK_IS_EQUAL(v, m, field) ({	\
	u32 _m = field(m);		\
	((v) & _m) == _m;		\
})

        is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
                MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
                !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
                !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

#undef MASK_IS_EQUAL

        dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
                is_enable, core_mask);

        return is_enable;
}

int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
{
        struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
        const struct sof_intel_dsp_desc *chip = hda->desc;
        int ret;

        /* restrict core_mask to host managed cores mask */
        core_mask &= chip->host_managed_cores_mask;

        /* return if core_mask is not valid or cores are already enabled */
        if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
                return 0;

        /* power up */
        ret = hda_dsp_core_power_up(sdev, core_mask);
        if (ret < 0) {
                dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
                        core_mask);
                return ret;
        }

        return hda_dsp_core_run(sdev, core_mask);
}

int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
                                  unsigned int core_mask)
{
        struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
        const struct sof_intel_dsp_desc *chip = hda->desc;
        int ret;

        /* restrict core_mask to host managed cores mask */
        core_mask &= chip->host_managed_cores_mask;

        /* return if core_mask is not valid */
        if (!core_mask)
                return 0;

        /* place core in reset prior to power down */
        ret = hda_dsp_core_stall_reset(sdev, core_mask);
        if (ret < 0) {
                dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
                        core_mask);
                return ret;
        }

        /* power down core */
        ret = hda_dsp_core_power_down(sdev, core_mask);
        if (ret < 0) {
                dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
                        core_mask, ret);
                return ret;
        }

        /* make sure we are in OFF state */
        if (hda_dsp_core_is_enabled(sdev, core_mask)) {
                dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
                        core_mask, ret);
                ret = -EIO;
        }

        return ret;
}

void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
{
        struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
        const struct sof_intel_dsp_desc *chip = hda->desc;

        /* enable IPC DONE and BUSY interrupts */
        snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
                                HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
                                HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);

        /* enable IPC interrupt */
        snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
                                HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
}

void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
{
        struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
        const struct sof_intel_dsp_desc *chip = hda->desc;

        /* disable IPC interrupt */
        snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
                                HDA_DSP_ADSPIC_IPC, 0);

        /* disable IPC BUSY and DONE interrupt */
        snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
                                HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
}

static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
{
        struct hdac_bus *bus = sof_to_bus(sdev);
        int retry = HDA_DSP_REG_POLL_RETRY_COUNT;

        while (snd_hdac_chip_readb(bus, VS_D0I3C) & SOF_HDA_VS_D0I3C_CIP) {
                if (!retry--)
                        return -ETIMEDOUT;
                usleep_range(10, 15);
        }

        return 0;
}

static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
{
        struct sof_ipc_pm_gate pm_gate;
        struct sof_ipc_reply reply;

        memset(&pm_gate, 0, sizeof(pm_gate));

        /* configure pm_gate ipc message */
        pm_gate.hdr.size = sizeof(pm_gate);
        pm_gate.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE;
        pm_gate.flags = flags;

        /* send pm_gate ipc to dsp */
        return sof_ipc_tx_message_no_pm(sdev->ipc, pm_gate.hdr.cmd,
                                        &pm_gate, sizeof(pm_gate), &reply,
                                        sizeof(reply));
}

static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
        struct hdac_bus *bus = sof_to_bus(sdev);
        int ret;

        /* Write to D0I3C after Command-In-Progress bit is cleared */
        ret = hda_dsp_wait_d0i3c_done(sdev);
        if (ret < 0) {
                dev_err(bus->dev, "CIP timeout before D0I3C update!\n");
                return ret;
        }

        /* Update D0I3C register */
        snd_hdac_chip_updateb(bus, VS_D0I3C, SOF_HDA_VS_D0I3C_I3, value);

        /* Wait for cmd in progress to be cleared before exiting the function */
        ret = hda_dsp_wait_d0i3c_done(sdev);
        if (ret < 0) {
                dev_err(bus->dev, "CIP timeout after D0I3C update!\n");
                return ret;
        }

        dev_vdbg(bus->dev, "D0I3C updated, register = 0x%x\n",
                 snd_hdac_chip_readb(bus, VS_D0I3C));

        return 0;
}

static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
                                const struct sof_dsp_power_state *target_state)
{
        u32 flags = 0;
        int ret;
        u8 value = 0;

        /*
         * Sanity check for illegal state transitions
         * The only allowed transitions are:
         * 1. D3 -> D0I0
         * 2. D0I0 -> D0I3
         * 3. D0I3 -> D0I0
         */
        switch (sdev->dsp_power_state.state) {
        case SOF_DSP_PM_D0:
                /* Follow the sequence below for D0 substate transitions */
                break;
        case SOF_DSP_PM_D3:
                /* Follow regular flow for D3 -> D0 transition */
                return 0;
        default:
                dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
                        sdev->dsp_power_state.state, target_state->state);
                return -EINVAL;
        }

        /* Set flags and register value for D0 target substate */
        if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
                value = SOF_HDA_VS_D0I3C_I3;

                /*
                 * Trace DMA needs to be disabled when the DSP enters
                 * D0I3 for S0Ix suspend, but it can be kept enabled
                 * when the DSP enters D0I3 while the system is in S0
                 * for debug purposes.
                 */
                if (!sdev->dtrace_is_supported ||
                    !hda_enable_trace_D0I3_S0 ||
                    sdev->system_suspend_target != SOF_SUSPEND_NONE)
                        flags = HDA_PM_NO_DMA_TRACE;
        } else {
                /* prevent power gating in D0I0 */
                flags = HDA_PM_PPG;
        }

        /* update D0I3C register */
        ret = hda_dsp_update_d0i3c_register(sdev, value);
        if (ret < 0)
                return ret;

        /*
         * Notify the DSP of the state change.
         * If this IPC fails, revert the D0I3C register update in order
         * to prevent partial state change.
         */
        ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: PM_GATE ipc error %d\n", ret);
                goto revert;
        }

        return ret;

revert:
        /* fall back to the previous register value */
        value = value ? 0 : SOF_HDA_VS_D0I3C_I3;

        /*
         * This can fail, but we return the IPC error to signal that
         * the state change failed.
         */
        hda_dsp_update_d0i3c_register(sdev, value);

        return ret;
}

/* helper to log DSP state */
static void hda_dsp_state_log(struct snd_sof_dev *sdev)
{
        switch (sdev->dsp_power_state.state) {
        case SOF_DSP_PM_D0:
                switch (sdev->dsp_power_state.substate) {
                case SOF_HDA_DSP_PM_D0I0:
                        dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
                        break;
                case SOF_HDA_DSP_PM_D0I3:
                        dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
                        break;
                default:
                        dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
                                sdev->dsp_power_state.substate);
                        break;
                }
                break;
        case SOF_DSP_PM_D1:
                dev_dbg(sdev->dev, "Current DSP power state: D1\n");
                break;
        case SOF_DSP_PM_D2:
                dev_dbg(sdev->dev, "Current DSP power state: D2\n");
                break;
        case SOF_DSP_PM_D3_HOT:
                dev_dbg(sdev->dev, "Current DSP power state: D3_HOT\n");
                break;
        case SOF_DSP_PM_D3:
                dev_dbg(sdev->dev, "Current DSP power state: D3\n");
                break;
        case SOF_DSP_PM_D3_COLD:
                dev_dbg(sdev->dev, "Current DSP power state: D3_COLD\n");
                break;
        default:
                dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
                        sdev->dsp_power_state.state);
                break;
        }
}

/*
 * All DSP power state transitions are initiated by the driver.
 * If the requested state change fails, the error is simply returned.
 * Further state transitions are attempted only when the set_power_state() op
 * is called again either because of a new IPC sent to the DSP or
 * during system suspend/resume.
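 *
 * Note that sdev->dsp_power_state is only updated after a transition
 * has succeeded (see hda_dsp_set_power_state() below).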
 */
int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
                            const struct sof_dsp_power_state *target_state)
{
        int ret = 0;

        /*
         * When the DSP is already in D0I3 and the target state is D0I3,
         * it could be the case that the DSP is in D0I3 during S0
         * and the system is suspending to S0Ix. Therefore,
         * hda_dsp_set_D0_state() must be called to disable trace DMA
         * by sending the PM_GATE IPC to the FW.
         */
        if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
            sdev->system_suspend_target == SOF_SUSPEND_S0IX)
                goto set_state;

        /*
         * For all other cases, return without doing anything if
         * the DSP is already in the target state.
         */
        if (target_state->state == sdev->dsp_power_state.state &&
            target_state->substate == sdev->dsp_power_state.substate)
                return 0;

set_state:
        switch (target_state->state) {
        case SOF_DSP_PM_D0:
                ret = hda_dsp_set_D0_state(sdev, target_state);
                break;
        case SOF_DSP_PM_D3:
                /* The only allowed transition is: D0I0 -> D3 */
                if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
                    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
                        break;

                dev_err(sdev->dev,
                        "error: transition from %d to %d not allowed\n",
                        sdev->dsp_power_state.state, target_state->state);
                return -EINVAL;
        default:
                dev_err(sdev->dev, "error: target state unsupported %d\n",
                        target_state->state);
                return -EINVAL;
        }
        if (ret < 0) {
                dev_err(sdev->dev,
                        "failed to set requested target DSP state %d substate %d\n",
                        target_state->state, target_state->substate);
                return ret;
        }

        sdev->dsp_power_state = *target_state;
        hda_dsp_state_log(sdev);
        return ret;
}

/*
 * Audio DSP states may transition as below:
 *
 *                                           Opportunistic D0I3 in S0
 *      Runtime      +---------------------+ Delayed D0i3 work timeout
 *      suspend      |                     +--------------------+
 *      +------------+      D0I0(active)   |                    |
 *      |            |                     <---------------+    |
 *      |   +-------->                     |   New IPC     |    |
 *      |   |Runtime +--^--+---------^--+--+ (via mailbox) |    |
 *      |   |resume  |  |         |  |                     |    |
 *      |   |        |  |         |  |                     |    |
 *      |   |  System|  |         |  |                     |    |
 *      |   |  resume|  | S3/S0IX |  |                     |    |
 *      |   |        |  | suspend |  | S0IX                |    |
 *      |   |        |  |         |  |suspend              |    |
 *      |   |        |  |         |  |                     |    |
 *      |   |        |  |         |  |                     |    |
 *   +--v---+--------+--v---------+--+   |                 |    |
 *   |                               |   |      +----------+--v-+---+
 *   |        D3 (suspended)         |   +------>                   |
 *   |                               |          |       D0I3        |
 *   |                               |          |                   |
 *   +-------------------------------+          +-------------------+
 *                                      System resume
 *
 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
 *               ignored the suspend trigger. Otherwise the DSP
 *               is in D3.
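 *
 * Delayed D0i3 work: hda_dsp_d0i3_work() below moves the DSP from D0I0
 * to D0I3 only when every active stream is D0I3-compatible; otherwise
 * the DSP remains in D0I0.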
609 */ 610 611 static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend) 612 { 613 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 614 const struct sof_intel_dsp_desc *chip = hda->desc; 615 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) 616 struct hdac_bus *bus = sof_to_bus(sdev); 617 #endif 618 int ret; 619 620 hda_sdw_int_enable(sdev, false); 621 622 /* disable IPC interrupts */ 623 hda_dsp_ipc_int_disable(sdev); 624 625 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) 626 if (runtime_suspend) 627 hda_codec_jack_wake_enable(sdev, true); 628 629 /* power down all hda link */ 630 snd_hdac_ext_bus_link_power_down_all(bus); 631 #endif 632 633 /* power down DSP */ 634 ret = snd_sof_dsp_core_power_down(sdev, chip->host_managed_cores_mask); 635 if (ret < 0) { 636 dev_err(sdev->dev, 637 "error: failed to power down core during suspend\n"); 638 return ret; 639 } 640 641 /* disable ppcap interrupt */ 642 hda_dsp_ctrl_ppcap_enable(sdev, false); 643 hda_dsp_ctrl_ppcap_int_enable(sdev, false); 644 645 /* disable hda bus irq and streams */ 646 hda_dsp_ctrl_stop_chip(sdev); 647 648 /* disable LP retention mode */ 649 snd_sof_pci_update_bits(sdev, PCI_PGCTL, 650 PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK); 651 652 /* reset controller */ 653 ret = hda_dsp_ctrl_link_reset(sdev, true); 654 if (ret < 0) { 655 dev_err(sdev->dev, 656 "error: failed to reset controller during suspend\n"); 657 return ret; 658 } 659 660 /* display codec can powered off after link reset */ 661 hda_codec_i915_display_power(sdev, false); 662 663 return 0; 664 } 665 666 static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume) 667 { 668 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) 669 struct hdac_bus *bus = sof_to_bus(sdev); 670 struct hdac_ext_link *hlink = NULL; 671 #endif 672 int ret; 673 674 /* display codec must be powered before link reset */ 675 hda_codec_i915_display_power(sdev, true); 676 677 /* 678 * clear TCSEL to clear playback on some HD Audio 679 * codecs. PCI TCSEL is defined in the Intel manuals. 
680 */ 681 snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0); 682 683 /* reset and start hda controller */ 684 ret = hda_dsp_ctrl_init_chip(sdev, true); 685 if (ret < 0) { 686 dev_err(sdev->dev, 687 "error: failed to start controller after resume\n"); 688 return ret; 689 } 690 691 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) 692 /* check jack status */ 693 if (runtime_resume) { 694 hda_codec_jack_wake_enable(sdev, false); 695 if (sdev->system_suspend_target == SOF_SUSPEND_NONE) 696 hda_codec_jack_check(sdev); 697 } 698 699 /* turn off the links that were off before suspend */ 700 list_for_each_entry(hlink, &bus->hlink_list, list) { 701 if (!hlink->ref_count) 702 snd_hdac_ext_bus_link_power_down(hlink); 703 } 704 705 /* check dma status and clean up CORB/RIRB buffers */ 706 if (!bus->cmd_dma_state) 707 snd_hdac_bus_stop_cmd_io(bus); 708 #endif 709 710 /* enable ppcap interrupt */ 711 hda_dsp_ctrl_ppcap_enable(sdev, true); 712 hda_dsp_ctrl_ppcap_int_enable(sdev, true); 713 714 return 0; 715 } 716 717 int hda_dsp_resume(struct snd_sof_dev *sdev) 718 { 719 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; 720 struct pci_dev *pci = to_pci_dev(sdev->dev); 721 const struct sof_dsp_power_state target_state = { 722 .state = SOF_DSP_PM_D0, 723 .substate = SOF_HDA_DSP_PM_D0I0, 724 }; 725 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) 726 struct hdac_bus *bus = sof_to_bus(sdev); 727 struct hdac_ext_link *hlink = NULL; 728 #endif 729 int ret; 730 731 /* resume from D0I3 */ 732 if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) { 733 hda_codec_i915_display_power(sdev, true); 734 735 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) 736 /* power up links that were active before suspend */ 737 list_for_each_entry(hlink, &bus->hlink_list, list) { 738 if (hlink->ref_count) { 739 ret = snd_hdac_ext_bus_link_power_up(hlink); 740 if (ret < 0) { 741 dev_dbg(sdev->dev, 742 "error %d in %s: failed to power up links", 743 ret, __func__); 744 return ret; 745 } 746 } 747 } 748 749 /* set up CORB/RIRB buffers if was on before suspend */ 750 if (bus->cmd_dma_state) 751 snd_hdac_bus_init_cmd_io(bus); 752 #endif 753 754 /* Set DSP power state */ 755 ret = snd_sof_dsp_set_power_state(sdev, &target_state); 756 if (ret < 0) { 757 dev_err(sdev->dev, "error: setting dsp state %d substate %d\n", 758 target_state.state, target_state.substate); 759 return ret; 760 } 761 762 /* restore L1SEN bit */ 763 if (hda->l1_support_changed) 764 snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, 765 HDA_VS_INTEL_EM2, 766 HDA_VS_INTEL_EM2_L1SEN, 0); 767 768 /* restore and disable the system wakeup */ 769 pci_restore_state(pci); 770 disable_irq_wake(pci->irq); 771 return 0; 772 } 773 774 /* init hda controller. DSP cores will be powered up during fw boot */ 775 ret = hda_resume(sdev, false); 776 if (ret < 0) 777 return ret; 778 779 return snd_sof_dsp_set_power_state(sdev, &target_state); 780 } 781 782 int hda_dsp_runtime_resume(struct snd_sof_dev *sdev) 783 { 784 const struct sof_dsp_power_state target_state = { 785 .state = SOF_DSP_PM_D0, 786 }; 787 int ret; 788 789 /* init hda controller. 
        ret = hda_resume(sdev, true);
        if (ret < 0)
                return ret;

        return snd_sof_dsp_set_power_state(sdev, &target_state);
}

int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
        struct hdac_bus *hbus = sof_to_bus(sdev);

        if (hbus->codec_powered) {
                dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
                        (unsigned int)hbus->codec_powered);
                return -EBUSY;
        }

        return 0;
}

int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
        struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
        const struct sof_dsp_power_state target_state = {
                .state = SOF_DSP_PM_D3,
        };
        int ret;

        /* cancel any attempt for DSP D0I3 */
        cancel_delayed_work_sync(&hda->d0i3_work);

        /* stop hda controller and power dsp off */
        ret = hda_suspend(sdev, true);
        if (ret < 0)
                return ret;

        return snd_sof_dsp_set_power_state(sdev, &target_state);
}

int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
        struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
        struct hdac_bus *bus = sof_to_bus(sdev);
        struct pci_dev *pci = to_pci_dev(sdev->dev);
        const struct sof_dsp_power_state target_dsp_state = {
                .state = target_state,
                .substate = target_state == SOF_DSP_PM_D0 ?
                                SOF_HDA_DSP_PM_D0I3 : 0,
        };
        int ret;

        /* cancel any attempt for DSP D0I3 */
        cancel_delayed_work_sync(&hda->d0i3_work);

        if (target_state == SOF_DSP_PM_D0) {
                /* we can't keep a wakeref to display driver at suspend */
                hda_codec_i915_display_power(sdev, false);

                /* Set DSP power state */
                ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
                if (ret < 0) {
                        dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
                                target_dsp_state.state,
                                target_dsp_state.substate);
                        return ret;
                }

                /* enable L1SEN to make sure the system can enter S0Ix */
                hda->l1_support_changed =
                        snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
                                                HDA_VS_INTEL_EM2,
                                                HDA_VS_INTEL_EM2_L1SEN,
                                                HDA_VS_INTEL_EM2_L1SEN);

#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
                /* stop the CORB/RIRB DMA if it is on */
                if (bus->cmd_dma_state)
                        snd_hdac_bus_stop_cmd_io(bus);

                /* no link can be powered in s0ix state */
                ret = snd_hdac_ext_bus_link_power_down_all(bus);
                if (ret < 0) {
                        dev_dbg(sdev->dev,
                                "error %d in %s: failed to power down links\n",
                                ret, __func__);
                        return ret;
                }
#endif

                /* enable the system waking up via IPC IRQ */
                enable_irq_wake(pci->irq);
                pci_save_state(pci);
                return 0;
        }

        /* stop hda controller and power dsp off */
        ret = hda_suspend(sdev, false);
        if (ret < 0) {
                dev_err(bus->dev, "error: suspending dsp\n");
                return ret;
        }

        return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}

int hda_dsp_shutdown(struct snd_sof_dev *sdev)
{
        sdev->system_suspend_target = SOF_SUSPEND_S3;
        return snd_sof_suspend(sdev->dev);
}

int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
        struct hdac_bus *bus = sof_to_bus(sdev);
        struct snd_soc_pcm_runtime *rtd;
        struct hdac_ext_stream *stream;
        struct hdac_ext_link *link;
        struct hdac_stream *s;
        const char *name;
        int stream_tag;

        /* set internal flag for BE */
        list_for_each_entry(s, &bus->stream_list, list) {
                stream = stream_to_hdac_ext_stream(s);

                /*
                 * clear stream. This should already be taken care of for
                 * running streams when the SUSPEND trigger is called. But
                 * paused streams do not get suspended, so this needs to be
                 * done explicitly during suspend.
                 */
                if (stream->link_substream) {
                        rtd = asoc_substream_to_rtd(stream->link_substream);
                        name = asoc_rtd_to_codec(rtd, 0)->component->name;
                        link = snd_hdac_ext_bus_get_link(bus, name);
                        if (!link)
                                return -EINVAL;

                        stream->link_prepared = 0;

                        if (hdac_stream(stream)->direction ==
                            SNDRV_PCM_STREAM_CAPTURE)
                                continue;

                        stream_tag = hdac_stream(stream)->stream_tag;
                        snd_hdac_ext_link_clear_stream_id(link, stream_tag);
                }
        }
#endif
        return 0;
}

void hda_dsp_d0i3_work(struct work_struct *work)
{
        struct sof_intel_hda_dev *hdev = container_of(work,
                                                      struct sof_intel_hda_dev,
                                                      d0i3_work.work);
        struct hdac_bus *bus = &hdev->hbus.core;
        struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
        struct sof_dsp_power_state target_state = {
                .state = SOF_DSP_PM_D0,
                .substate = SOF_HDA_DSP_PM_D0I3,
        };
        int ret;

        /* DSP can enter D0I3 iff only D0I3-compatible streams are active */
        if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
                /* remain in D0I0 */
                return;

        /* This can fail but the error cannot be propagated */
        ret = snd_sof_dsp_set_power_state(sdev, &target_state);
        if (ret < 0)
                dev_err_ratelimited(sdev->dev,
                                    "error: failed to set DSP state %d substate %d\n",
                                    target_state.state, target_state.substate);
}