1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 //
3 // This file is provided under a dual BSD/GPLv2 license. When using or
4 // redistributing this file, you may do so under either license.
5 //
6 // Copyright(c) 2018 Intel Corporation. All rights reserved.
7 //
8 // Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
9 // Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
10 // Rander Wang <rander.wang@intel.com>
11 // Keyon Jie <yang.jie@linux.intel.com>
12 //
13
14 /*
15 * Hardware interface for generic Intel audio DSP HDA IP
16 */
17
18 #include <linux/module.h>
19 #include <sound/hdaudio_ext.h>
20 #include <sound/hda_register.h>
21 #include <sound/hda-mlink.h>
22 #include <trace/events/sof_intel.h>
23 #include "../sof-audio.h"
24 #include "../ops.h"
25 #include "hda.h"
26 #include "hda-ipc.h"
27
/*
 * Debug-only module parameter: when true, trace DMA is kept enabled while
 * the DSP sits in D0I3 during S0 (see hda_dsp_set_D0_state()). Read-only
 * at runtime (0444) and only registered when CONFIG_SND_SOC_SOF_DEBUG is
 * enabled; otherwise the flag stays false.
 */
static bool hda_enable_trace_D0I3_S0;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
MODULE_PARM_DESC(enable_trace_D0I3_S0,
		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
#endif
34
35 /*
36 * DSP Core control.
37 */
38
/*
 * hda_dsp_core_reset_enter - place the selected DSP cores in reset
 * @sdev: SOF device context
 * @core_mask: bitmask of cores to put in reset
 *
 * Asserts the CRST bits in ADSPCS for @core_mask, polls until the hardware
 * reports the reset as taken, then re-reads ADSPCS as a paranoia check.
 *
 * Return: 0 on success, negative error code on timeout or if the cores
 * did not enter reset (-EIO).
 */
static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	u32 reset;
	int ret;

	/* set reset bits for cores */
	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 reset, reset);

	/* poll with timeout to check if operation successful */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    ((adspcs & reset) == reset),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core entered reset ? (read back to double-check the poll result) */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
	    HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
		dev_err(sdev->dev,
			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}
77
/*
 * hda_dsp_core_reset_leave - take the selected DSP cores out of reset
 * @sdev: SOF device context
 * @core_mask: bitmask of cores to release from reset
 *
 * Clears the CRST bits in ADSPCS for @core_mask, polls until the bits read
 * back as cleared, then re-reads ADSPCS as a paranoia check.
 *
 * Return: 0 on success, negative error code on timeout or if any core is
 * still in reset (-EIO).
 */
static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	unsigned int crst;
	u32 adspcs;
	int ret;

	/* clear reset bits for cores */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
					 0);

	/* poll with timeout to check if operation successful */
	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    !(adspcs & crst),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);

	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core left reset ? (read back to double-check the poll result) */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(sdev->dev,
			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}
117
hda_dsp_core_stall_reset(struct snd_sof_dev * sdev,unsigned int core_mask)118 int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
119 {
120 /* stall core */
121 snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
122 HDA_DSP_REG_ADSPCS,
123 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
124 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
125
126 /* set reset state */
127 return hda_dsp_core_reset_enter(sdev, core_mask);
128 }
129
hda_dsp_core_is_enabled(struct snd_sof_dev * sdev,unsigned int core_mask)130 bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
131 {
132 int val;
133 bool is_enable;
134
135 val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
136
137 #define MASK_IS_EQUAL(v, m, field) ({ \
138 u32 _m = field(m); \
139 ((v) & _m) == _m; \
140 })
141
142 is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
143 MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
144 !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
145 !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
146
147 #undef MASK_IS_EQUAL
148
149 dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
150 is_enable, core_mask);
151
152 return is_enable;
153 }
154
hda_dsp_core_run(struct snd_sof_dev * sdev,unsigned int core_mask)155 int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
156 {
157 int ret;
158
159 /* leave reset state */
160 ret = hda_dsp_core_reset_leave(sdev, core_mask);
161 if (ret < 0)
162 return ret;
163
164 /* run core */
165 dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
166 snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
167 HDA_DSP_REG_ADSPCS,
168 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
169 0);
170
171 /* is core now running ? */
172 if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
173 hda_dsp_core_stall_reset(sdev, core_mask);
174 dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
175 core_mask);
176 ret = -EIO;
177 }
178
179 return ret;
180 }
181
182 /*
183 * Power Management.
184 */
185
hda_dsp_core_power_up(struct snd_sof_dev * sdev,unsigned int core_mask)186 int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
187 {
188 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
189 const struct sof_intel_dsp_desc *chip = hda->desc;
190 unsigned int cpa;
191 u32 adspcs;
192 int ret;
193
194 /* restrict core_mask to host managed cores mask */
195 core_mask &= chip->host_managed_cores_mask;
196 /* return if core_mask is not valid */
197 if (!core_mask)
198 return 0;
199
200 /* update bits */
201 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
202 HDA_DSP_ADSPCS_SPA_MASK(core_mask),
203 HDA_DSP_ADSPCS_SPA_MASK(core_mask));
204
205 /* poll with timeout to check if operation successful */
206 cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
207 ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
208 HDA_DSP_REG_ADSPCS, adspcs,
209 (adspcs & cpa) == cpa,
210 HDA_DSP_REG_POLL_INTERVAL_US,
211 HDA_DSP_RESET_TIMEOUT_US);
212 if (ret < 0) {
213 dev_err(sdev->dev,
214 "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
215 __func__);
216 return ret;
217 }
218
219 /* did core power up ? */
220 adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
221 HDA_DSP_REG_ADSPCS);
222 if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
223 HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
224 dev_err(sdev->dev,
225 "error: power up core failed core_mask %xadspcs 0x%x\n",
226 core_mask, adspcs);
227 ret = -EIO;
228 }
229
230 return ret;
231 }
232
hda_dsp_core_power_down(struct snd_sof_dev * sdev,unsigned int core_mask)233 static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
234 {
235 u32 adspcs;
236 int ret;
237
238 /* update bits */
239 snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
240 HDA_DSP_REG_ADSPCS,
241 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
242
243 ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
244 HDA_DSP_REG_ADSPCS, adspcs,
245 !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
246 HDA_DSP_REG_POLL_INTERVAL_US,
247 HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
248 if (ret < 0)
249 dev_err(sdev->dev,
250 "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
251 __func__);
252
253 return ret;
254 }
255
hda_dsp_enable_core(struct snd_sof_dev * sdev,unsigned int core_mask)256 int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
257 {
258 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
259 const struct sof_intel_dsp_desc *chip = hda->desc;
260 int ret;
261
262 /* restrict core_mask to host managed cores mask */
263 core_mask &= chip->host_managed_cores_mask;
264
265 /* return if core_mask is not valid or cores are already enabled */
266 if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
267 return 0;
268
269 /* power up */
270 ret = hda_dsp_core_power_up(sdev, core_mask);
271 if (ret < 0) {
272 dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
273 core_mask);
274 return ret;
275 }
276
277 return hda_dsp_core_run(sdev, core_mask);
278 }
279
hda_dsp_core_reset_power_down(struct snd_sof_dev * sdev,unsigned int core_mask)280 int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
281 unsigned int core_mask)
282 {
283 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
284 const struct sof_intel_dsp_desc *chip = hda->desc;
285 int ret;
286
287 /* restrict core_mask to host managed cores mask */
288 core_mask &= chip->host_managed_cores_mask;
289
290 /* return if core_mask is not valid */
291 if (!core_mask)
292 return 0;
293
294 /* place core in reset prior to power down */
295 ret = hda_dsp_core_stall_reset(sdev, core_mask);
296 if (ret < 0) {
297 dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
298 core_mask);
299 return ret;
300 }
301
302 /* power down core */
303 ret = hda_dsp_core_power_down(sdev, core_mask);
304 if (ret < 0) {
305 dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
306 core_mask, ret);
307 return ret;
308 }
309
310 /* make sure we are in OFF state */
311 if (hda_dsp_core_is_enabled(sdev, core_mask)) {
312 dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
313 core_mask, ret);
314 ret = -EIO;
315 }
316
317 return ret;
318 }
319
hda_dsp_ipc_int_enable(struct snd_sof_dev * sdev)320 void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
321 {
322 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
323 const struct sof_intel_dsp_desc *chip = hda->desc;
324
325 if (sdev->dspless_mode_selected)
326 return;
327
328 /* enable IPC DONE and BUSY interrupts */
329 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
330 HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
331 HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
332
333 /* enable IPC interrupt */
334 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
335 HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
336 }
337
hda_dsp_ipc_int_disable(struct snd_sof_dev * sdev)338 void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
339 {
340 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
341 const struct sof_intel_dsp_desc *chip = hda->desc;
342
343 if (sdev->dspless_mode_selected)
344 return;
345
346 /* disable IPC interrupt */
347 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
348 HDA_DSP_ADSPIC_IPC, 0);
349
350 /* disable IPC BUSY and DONE interrupt */
351 snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
352 HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
353 }
354
hda_dsp_wait_d0i3c_done(struct snd_sof_dev * sdev)355 static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
356 {
357 int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
358 struct snd_sof_pdata *pdata = sdev->pdata;
359 const struct sof_intel_dsp_desc *chip;
360
361 chip = get_chip_info(pdata);
362 while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
363 SOF_HDA_VS_D0I3C_CIP) {
364 if (!retry--)
365 return -ETIMEDOUT;
366 usleep_range(10, 15);
367 }
368
369 return 0;
370 }
371
/*
 * Send a PM_GATE IPC with @flags to the firmware via the IPC PM ops.
 * Silently succeeds when the ops or the set_pm_gate callback is absent.
 */
static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
{
	const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);

	if (!pm_ops || !pm_ops->set_pm_gate)
		return 0;

	return pm_ops->set_pm_gate(sdev, flags);
}
381
/*
 * hda_dsp_update_d0i3c_register - write the D0I3C::I3 bit with CIP handshake
 * @sdev: SOF device context
 * @value: new value for the SOF_HDA_VS_D0I3C_I3 bit (0 or SOF_HDA_VS_D0I3C_I3)
 *
 * The D0I3C register may only be written while its Command-In-Progress bit
 * is clear, so the sequence is: wait for CIP, update I3, delay, wait for
 * CIP again, then read back and verify the bit actually changed.
 *
 * Return: 0 on success, -ETIMEDOUT on a CIP timeout, -EIO if the readback
 * does not match @value.
 */
static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_intel_dsp_desc *chip;
	int ret;
	u8 reg;

	chip = get_chip_info(pdata);

	/* Write to D0I3C after Command-In-Progress bit is cleared */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
		return ret;
	}

	/* Update D0I3C register */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
			    SOF_HDA_VS_D0I3C_I3, value);

	/*
	 * The value written to the D0I3C::I3 bit may not be taken into account immediately.
	 * A delay is recommended before checking if D0I3C::CIP is cleared
	 */
	usleep_range(30, 40);

	/* Wait for cmd in progress to be cleared before exiting the function */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
		return ret;
	}

	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
	/* Confirm d0i3 state changed with paranoia check */
	if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
		dev_err(sdev->dev, "failed to update D0I3C!\n");
		return -EIO;
	}

	trace_sof_intel_D0I3C_updated(sdev, reg);

	return 0;
}
426
427 /*
428 * d0i3 streaming is enabled if all the active streams can
429 * work in d0i3 state and playback is enabled
430 */
hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev * sdev)431 static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev)
432 {
433 struct snd_pcm_substream *substream;
434 struct snd_sof_pcm *spcm;
435 bool playback_active = false;
436 int dir;
437
438 list_for_each_entry(spcm, &sdev->pcm_list, list) {
439 for_each_pcm_streams(dir) {
440 substream = spcm->stream[dir].substream;
441 if (!substream || !substream->runtime)
442 continue;
443
444 if (!spcm->stream[dir].d0i3_compatible)
445 return false;
446
447 if (dir == SNDRV_PCM_STREAM_PLAYBACK)
448 playback_active = true;
449 }
450 }
451
452 return playback_active;
453 }
454
/*
 * hda_dsp_set_D0_state - transition the DSP between D0 substates
 * @sdev: SOF device context
 * @target_state: requested power state (state D0 with a D0I0/D0I3 substate)
 *
 * Validates the transition, programs the D0I3C register accordingly and
 * notifies the firmware with a PM_GATE IPC. If the IPC fails, the D0I3C
 * register update is reverted so hardware and firmware do not disagree.
 *
 * Return: 0 on success, negative error code on an illegal transition,
 * register update failure or IPC failure.
 */
static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
				const struct sof_dsp_power_state *target_state)
{
	u32 flags = 0;
	int ret;
	u8 value = 0;

	/*
	 * Sanity check for illegal state transitions
	 * The only allowed transitions are:
	 * 1. D3 -> D0I0
	 * 2. D0I0 -> D0I3
	 * 3. D0I3 -> D0I0
	 */
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		/* Follow the sequence below for D0 substate transitions */
		break;
	case SOF_DSP_PM_D3:
		/* Follow regular flow for D3 -> D0 transition */
		return 0;
	default:
		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	}

	/* Set flags and register value for D0 target substate */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
		value = SOF_HDA_VS_D0I3C_I3;

		/*
		 * Trace DMA need to be disabled when the DSP enters
		 * D0I3 for S0Ix suspend, but it can be kept enabled
		 * when the DSP enters D0I3 while the system is in S0
		 * for debug purpose.
		 */
		if (!sdev->fw_trace_is_supported ||
		    !hda_enable_trace_D0I3_S0 ||
		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
			flags = HDA_PM_NO_DMA_TRACE;

		if (hda_dsp_d0i3_streaming_applicable(sdev))
			flags |= HDA_PM_PG_STREAMING;
	} else {
		/* prevent power gating in D0I0 */
		flags = HDA_PM_PPG;
	}

	/* update D0I3C register */
	ret = hda_dsp_update_d0i3c_register(sdev, value);
	if (ret < 0)
		return ret;

	/*
	 * Notify the DSP of the state change.
	 * If this IPC fails, revert the D0I3C register update in order
	 * to prevent partial state change.
	 */
	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: PM_GATE ipc error %d\n", ret);
		goto revert;
	}

	return ret;

revert:
	/* fallback to the previous register value */
	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;

	/*
	 * This can fail but return the IPC error to signal that
	 * the state change failed.
	 */
	hda_dsp_update_d0i3c_register(sdev, value);

	return ret;
}
535
536 /* helper to log DSP state */
hda_dsp_state_log(struct snd_sof_dev * sdev)537 static void hda_dsp_state_log(struct snd_sof_dev *sdev)
538 {
539 switch (sdev->dsp_power_state.state) {
540 case SOF_DSP_PM_D0:
541 switch (sdev->dsp_power_state.substate) {
542 case SOF_HDA_DSP_PM_D0I0:
543 dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
544 break;
545 case SOF_HDA_DSP_PM_D0I3:
546 dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
547 break;
548 default:
549 dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
550 sdev->dsp_power_state.substate);
551 break;
552 }
553 break;
554 case SOF_DSP_PM_D1:
555 dev_dbg(sdev->dev, "Current DSP power state: D1\n");
556 break;
557 case SOF_DSP_PM_D2:
558 dev_dbg(sdev->dev, "Current DSP power state: D2\n");
559 break;
560 case SOF_DSP_PM_D3:
561 dev_dbg(sdev->dev, "Current DSP power state: D3\n");
562 break;
563 default:
564 dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
565 sdev->dsp_power_state.state);
566 break;
567 }
568 }
569
570 /*
571 * All DSP power state transitions are initiated by the driver.
572 * If the requested state change fails, the error is simply returned.
573 * Further state transitions are attempted only when the set_power_save() op
574 * is called again either because of a new IPC sent to the DSP or
575 * during system suspend/resume.
576 */
/*
 * hda_dsp_set_power_state - validate and apply a DSP power state change
 * @sdev: SOF device context
 * @target_state: requested DSP power state
 *
 * D0 targets are delegated to hda_dsp_set_D0_state(); D3 is only reachable
 * from D0I0. On success the new state is cached in sdev->dsp_power_state
 * and logged.
 *
 * Return: 0 on success, -EINVAL for disallowed/unsupported transitions,
 * or the error from the D0 substate handler.
 */
static int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
				   const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	switch (target_state->state) {
	case SOF_DSP_PM_D0:
		ret = hda_dsp_set_D0_state(sdev, target_state);
		break;
	case SOF_DSP_PM_D3:
		/* The only allowed transition is: D0I0 -> D3 */
		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
			break;

		dev_err(sdev->dev,
			"error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	default:
		dev_err(sdev->dev, "error: target state unsupported %d\n",
			target_state->state);
		return -EINVAL;
	}
	if (ret < 0) {
		dev_err(sdev->dev,
			"failed to set requested target DSP state %d substate %d\n",
			target_state->state, target_state->substate);
		return ret;
	}

	/* transition accepted: cache and log the new state */
	sdev->dsp_power_state = *target_state;
	hda_dsp_state_log(sdev);
	return ret;
}
612
hda_dsp_set_power_state_ipc3(struct snd_sof_dev * sdev,const struct sof_dsp_power_state * target_state)613 int hda_dsp_set_power_state_ipc3(struct snd_sof_dev *sdev,
614 const struct sof_dsp_power_state *target_state)
615 {
616 /*
617 * When the DSP is already in D0I3 and the target state is D0I3,
618 * it could be the case that the DSP is in D0I3 during S0
619 * and the system is suspending to S0Ix. Therefore,
620 * hda_dsp_set_D0_state() must be called to disable trace DMA
621 * by sending the PM_GATE IPC to the FW.
622 */
623 if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
624 sdev->system_suspend_target == SOF_SUSPEND_S0IX)
625 return hda_dsp_set_power_state(sdev, target_state);
626
627 /*
628 * For all other cases, return without doing anything if
629 * the DSP is already in the target state.
630 */
631 if (target_state->state == sdev->dsp_power_state.state &&
632 target_state->substate == sdev->dsp_power_state.substate)
633 return 0;
634
635 return hda_dsp_set_power_state(sdev, target_state);
636 }
637
hda_dsp_set_power_state_ipc4(struct snd_sof_dev * sdev,const struct sof_dsp_power_state * target_state)638 int hda_dsp_set_power_state_ipc4(struct snd_sof_dev *sdev,
639 const struct sof_dsp_power_state *target_state)
640 {
641 /* Return without doing anything if the DSP is already in the target state */
642 if (target_state->state == sdev->dsp_power_state.state &&
643 target_state->substate == sdev->dsp_power_state.substate)
644 return 0;
645
646 return hda_dsp_set_power_state(sdev, target_state);
647 }
648
649 /*
650 * Audio DSP states may transform as below:-
651 *
652 * Opportunistic D0I3 in S0
653 * Runtime +---------------------+ Delayed D0i3 work timeout
654 * suspend | +--------------------+
655 * +------------+ D0I0(active) | |
656 * | | <---------------+ |
657 * | +--------> | New IPC | |
658 * | |Runtime +--^--+---------^--+--+ (via mailbox) | |
659 * | |resume | | | | | |
660 * | | | | | | | |
661 * | | System| | | | | |
662 * | | resume| | S3/S0IX | | | |
663 * | | | | suspend | | S0IX | |
664 * | | | | | |suspend | |
665 * | | | | | | | |
666 * | | | | | | | |
667 * +-v---+-----------+--v-------+ | | +------+----v----+
668 * | | | +-----------> |
669 * | D3 (suspended) | | | D0I3 |
670 * | | +--------------+ |
671 * | | System resume | |
672 * +----------------------------+ +----------------+
673 *
674 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
675 * ignored the suspend trigger. Otherwise the DSP
676 * is in D3.
677 */
678
/*
 * hda_suspend - common suspend path for system and runtime suspend
 * @sdev: SOF device context
 * @runtime_suspend: true for runtime PM suspend, false for system suspend
 *
 * Decides whether IMR boot can be used on the next resume, quiesces
 * interrupts, suspends the HDA multi-links, powers the DSP down (unless in
 * DSP-less mode), stops the controller and finally puts the link in reset.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	struct hdac_bus *bus = sof_to_bus(sdev);
	bool imr_lost = false;
	int ret, j;

	/*
	 * The memory used for IMR boot loses its content in deeper than S3
	 * state on CAVS platforms.
	 * On ACE platforms due to the system architecture the IMR content is
	 * lost at S3 state already, they are tailored for s2idle use.
	 * We must not try IMR boot on next power up in these cases as it will
	 * fail.
	 */
	if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
	    (chip->hw_ip_version >= SOF_INTEL_ACE_1_0 &&
	     sdev->system_suspend_target == SOF_SUSPEND_S3))
		imr_lost = true;

	/*
	 * In case of firmware crash or boot failure set the skip_imr_boot to true
	 * as well in order to try to re-load the firmware to do a 'cold' boot.
	 */
	if (imr_lost || sdev->fw_state == SOF_FW_CRASHED ||
	    sdev->fw_state == SOF_FW_BOOT_FAILED)
		hda->skip_imr_boot = true;

	ret = chip->disable_interrupts(sdev);
	if (ret < 0)
		return ret;

	/* make sure that no irq handler is pending before shutdown */
	synchronize_irq(sdev->ipc_irq);

	/* arm/disarm jack-detect wake depending on the suspend type */
	hda_codec_jack_wake_enable(sdev, runtime_suspend);

	/* power down all hda links */
	hda_bus_ml_suspend(bus);

	/* in DSP-less mode there are no DSP cores to power down */
	if (sdev->dspless_mode_selected)
		goto skip_dsp;

	ret = chip->power_down_dsp(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to power down DSP during suspend\n");
		return ret;
	}

	/* reset ref counts for all cores */
	for (j = 0; j < chip->cores_num; j++)
		sdev->dsp_core_ref_count[j] = 0;

	/* disable ppcap interrupt */
	hda_dsp_ctrl_ppcap_enable(sdev, false);
	hda_dsp_ctrl_ppcap_int_enable(sdev, false);
skip_dsp:

	/* disable hda bus irq and streams */
	hda_dsp_ctrl_stop_chip(sdev);

	/* disable LP retention mode */
	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);

	/* reset controller */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to reset controller during suspend\n");
		return ret;
	}

	/* display codec can be powered off after link reset */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}
758
/*
 * hda_resume - common resume path for system and runtime resume
 * @sdev: SOF device context
 * @runtime_resume: true for runtime PM resume, false for system resume
 *
 * Powers the display codec, clears TCSEL, re-initializes the HDA
 * controller and (outside DSP-less mode) re-enables the PP capability
 * interrupts. On runtime resume it also refreshes jack-detect state.
 *
 * Return: 0 on success, negative error code if controller init failed.
 */
static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
{
	int ret;

	/* display codec must be powered before link reset */
	hda_codec_i915_display_power(sdev, true);

	/*
	 * clear TCSEL to clear playback on some HD Audio
	 * codecs. PCI TCSEL is defined in the Intel manuals.
	 */
	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);

	/* reset and start hda controller */
	ret = hda_dsp_ctrl_init_chip(sdev);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to start controller after resume\n");
		goto cleanup;
	}

	/* check jack status */
	if (runtime_resume) {
		hda_codec_jack_wake_enable(sdev, false);
		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
			hda_codec_jack_check(sdev);
	}

	if (!sdev->dspless_mode_selected) {
		/* enable ppcap interrupt */
		hda_dsp_ctrl_ppcap_enable(sdev, true);
		hda_dsp_ctrl_ppcap_int_enable(sdev, true);
	}

cleanup:
	/* display codec can be powered off after controller init */
	hda_codec_i915_display_power(sdev, false);

	/*
	 * Propagate the controller init result: the original returned 0
	 * unconditionally, silently swallowing init_chip failures so the
	 * callers continued with a dead controller.
	 */
	return ret;
}
799
/*
 * hda_dsp_resume - system resume handler, targets D0/D0I0
 * @sdev: SOF device context
 *
 * Two paths: if the DSP stayed in D0 (i.e. we were in D0I3), only the
 * multi-links, CORB/RIRB and PCI state need restoring; otherwise the
 * whole controller is re-initialized via hda_resume(). In both cases the
 * DSP power state is set to D0I0 at the end.
 *
 * Return: 0 on success, negative error code on failure.
 */
int hda_dsp_resume(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I0,
	};
	int ret;

	/* resume from D0I3 */
	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
		ret = hda_bus_ml_resume(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power up links",
				ret, __func__);
			return ret;
		}

		/* set up CORB/RIRB buffers if was on before suspend */
		hda_codec_resume_cmd_io(sdev);

		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_state.state, target_state.substate);
			return ret;
		}

		/* restore L1SEN bit (it was set on suspend to allow S0Ix entry) */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, 0);

		/* restore and disable the system wakeup */
		pci_restore_state(pci);
		disable_irq_wake(pci->irq);
		return 0;
	}

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, false);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
851
hda_dsp_runtime_resume(struct snd_sof_dev * sdev)852 int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
853 {
854 const struct sof_dsp_power_state target_state = {
855 .state = SOF_DSP_PM_D0,
856 };
857 int ret;
858
859 /* init hda controller. DSP cores will be powered up during fw boot */
860 ret = hda_resume(sdev, true);
861 if (ret < 0)
862 return ret;
863
864 return snd_sof_dsp_set_power_state(sdev, &target_state);
865 }
866
hda_dsp_runtime_idle(struct snd_sof_dev * sdev)867 int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
868 {
869 struct hdac_bus *hbus = sof_to_bus(sdev);
870
871 if (hbus->codec_powered) {
872 dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
873 (unsigned int)hbus->codec_powered);
874 return -EBUSY;
875 }
876
877 return 0;
878 }
879
hda_dsp_runtime_suspend(struct snd_sof_dev * sdev)880 int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
881 {
882 struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
883 const struct sof_dsp_power_state target_state = {
884 .state = SOF_DSP_PM_D3,
885 };
886 int ret;
887
888 if (!sdev->dspless_mode_selected) {
889 /* cancel any attempt for DSP D0I3 */
890 cancel_delayed_work_sync(&hda->d0i3_work);
891 }
892
893 /* stop hda controller and power dsp off */
894 ret = hda_suspend(sdev, true);
895 if (ret < 0)
896 return ret;
897
898 return snd_sof_dsp_set_power_state(sdev, &target_state);
899 }
900
/*
 * hda_dsp_suspend - system suspend handler
 * @sdev: SOF device context
 * @target_state: requested DSP power state (SOF_DSP_PM_D0 for S0Ix with
 *                D0I3-capable streams, SOF_DSP_PM_D3 otherwise)
 *
 * For a D0 target (S0Ix with the DSP kept in D0I3): move the DSP to D0I3,
 * re-enable L1SEN, stop the CORB/RIRB DMA, suspend the multi-links and arm
 * the IPC IRQ as a wake source. For any other target: stop the controller,
 * power the DSP off and record the new state.
 *
 * Return: 0 on success, negative error code on failure.
 */
int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_dsp_state = {
		.state = target_state,
		.substate = target_state == SOF_DSP_PM_D0 ?
				SOF_HDA_DSP_PM_D0I3 : 0,
	};
	int ret;

	if (!sdev->dspless_mode_selected) {
		/* cancel any attempt for DSP D0I3 */
		cancel_delayed_work_sync(&hda->d0i3_work);
	}

	if (target_state == SOF_DSP_PM_D0) {
		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_dsp_state.state,
				target_dsp_state.substate);
			return ret;
		}

		/* enable L1SEN to make sure the system can enter S0Ix */
		if (hda->l1_disabled)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, HDA_VS_INTEL_EM2_L1SEN);

		/* stop the CORB/RIRB DMA if it is On */
		hda_codec_suspend_cmd_io(sdev);

		/* no link can be powered in s0ix state */
		ret = hda_bus_ml_suspend(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power down links",
				ret, __func__);
			return ret;
		}

		/* enable the system waking up via IPC IRQ */
		enable_irq_wake(pci->irq);
		pci_save_state(pci);
		return 0;
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, false);
	if (ret < 0) {
		dev_err(bus->dev, "error: suspending dsp\n");
		return ret;
	}

	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}
960
hda_dsp_check_for_dma_streams(struct snd_sof_dev * sdev)961 static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
962 {
963 struct hdac_bus *bus = sof_to_bus(sdev);
964 struct hdac_stream *s;
965 unsigned int active_streams = 0;
966 int sd_offset;
967 u32 val;
968
969 list_for_each_entry(s, &bus->stream_list, list) {
970 sd_offset = SOF_STREAM_SD_OFFSET(s);
971 val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
972 sd_offset);
973 if (val & SOF_HDA_SD_CTL_DMA_START)
974 active_streams |= BIT(s->index);
975 }
976
977 return active_streams;
978 }
979
hda_dsp_s5_quirk(struct snd_sof_dev * sdev)980 static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
981 {
982 int ret;
983
984 /*
985 * Do not assume a certain timing between the prior
986 * suspend flow, and running of this quirk function.
987 * This is needed if the controller was just put
988 * to reset before calling this function.
989 */
990 usleep_range(500, 1000);
991
992 /*
993 * Take controller out of reset to flush DMA
994 * transactions.
995 */
996 ret = hda_dsp_ctrl_link_reset(sdev, false);
997 if (ret < 0)
998 return ret;
999
1000 usleep_range(500, 1000);
1001
1002 /* Restore state for shutdown, back to reset */
1003 ret = hda_dsp_ctrl_link_reset(sdev, true);
1004 if (ret < 0)
1005 return ret;
1006
1007 return ret;
1008 }
1009
hda_dsp_shutdown_dma_flush(struct snd_sof_dev * sdev)1010 int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
1011 {
1012 unsigned int active_streams;
1013 int ret, ret2;
1014
1015 /* check if DMA cleanup has been successful */
1016 active_streams = hda_dsp_check_for_dma_streams(sdev);
1017
1018 sdev->system_suspend_target = SOF_SUSPEND_S3;
1019 ret = snd_sof_suspend(sdev->dev);
1020
1021 if (active_streams) {
1022 dev_warn(sdev->dev,
1023 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
1024 active_streams);
1025 ret2 = hda_dsp_s5_quirk(sdev);
1026 if (ret2 < 0)
1027 dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
1028 }
1029
1030 return ret;
1031 }
1032
hda_dsp_shutdown(struct snd_sof_dev * sdev)1033 int hda_dsp_shutdown(struct snd_sof_dev *sdev)
1034 {
1035 sdev->system_suspend_target = SOF_SUSPEND_S3;
1036 return snd_sof_suspend(sdev->dev);
1037 }
1038
hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev * sdev)1039 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
1040 {
1041 int ret;
1042
1043 /* make sure all DAI resources are freed */
1044 ret = hda_dsp_dais_suspend(sdev);
1045 if (ret < 0)
1046 dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);
1047
1048 return ret;
1049 }
1050
hda_dsp_d0i3_work(struct work_struct * work)1051 void hda_dsp_d0i3_work(struct work_struct *work)
1052 {
1053 struct sof_intel_hda_dev *hdev = container_of(work,
1054 struct sof_intel_hda_dev,
1055 d0i3_work.work);
1056 struct hdac_bus *bus = &hdev->hbus.core;
1057 struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
1058 struct sof_dsp_power_state target_state = {
1059 .state = SOF_DSP_PM_D0,
1060 .substate = SOF_HDA_DSP_PM_D0I3,
1061 };
1062 int ret;
1063
1064 /* DSP can enter D0I3 iff only D0I3-compatible streams are active */
1065 if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
1066 /* remain in D0I0 */
1067 return;
1068
1069 /* This can fail but error cannot be propagated */
1070 ret = snd_sof_dsp_set_power_state(sdev, &target_state);
1071 if (ret < 0)
1072 dev_err_ratelimited(sdev->dev,
1073 "error: failed to set DSP state %d substate %d\n",
1074 target_state.state, target_state.substate);
1075 }
1076
hda_dsp_core_get(struct snd_sof_dev * sdev,int core)1077 int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
1078 {
1079 const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
1080 int ret, ret1;
1081
1082 /* power up core */
1083 ret = hda_dsp_enable_core(sdev, BIT(core));
1084 if (ret < 0) {
1085 dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
1086 core, ret);
1087 return ret;
1088 }
1089
1090 /* No need to send IPC for primary core or if FW boot is not complete */
1091 if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
1092 return 0;
1093
1094 /* No need to continue the set_core_state ops is not available */
1095 if (!pm_ops->set_core_state)
1096 return 0;
1097
1098 /* Now notify DSP for secondary cores */
1099 ret = pm_ops->set_core_state(sdev, core, true);
1100 if (ret < 0) {
1101 dev_err(sdev->dev, "failed to enable secondary core '%d' failed with %d\n",
1102 core, ret);
1103 goto power_down;
1104 }
1105
1106 return ret;
1107
1108 power_down:
1109 /* power down core if it is host managed and return the original error if this fails too */
1110 ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
1111 if (ret1 < 0)
1112 dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);
1113
1114 return ret;
1115 }
1116
/*
 * hda_dsp_disable_interrupts - mask DSP-related interrupt sources
 * @sdev: SOF device context
 *
 * Disables SoundWire interrupts and then the DSP IPC interrupts; used as
 * the chip disable_interrupts callback during suspend.
 *
 * Return: always 0.
 */
int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
{
	hda_sdw_int_enable(sdev, false);
	hda_dsp_ipc_int_disable(sdev);

	return 0;
}
1124