xref: /openbmc/linux/sound/soc/sof/intel/hda-dsp.c (revision d95debbd)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 //
3 // This file is provided under a dual BSD/GPLv2 license.  When using or
4 // redistributing this file, you may do so under either license.
5 //
6 // Copyright(c) 2018 Intel Corporation. All rights reserved.
7 //
8 // Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
9 //	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
10 //	    Rander Wang <rander.wang@intel.com>
11 //          Keyon Jie <yang.jie@linux.intel.com>
12 //
13 
14 /*
15  * Hardware interface for generic Intel audio DSP HDA IP
16  */
17 
18 #include <linux/module.h>
19 #include <sound/hdaudio_ext.h>
20 #include <sound/hda_register.h>
21 #include <trace/events/sof_intel.h>
22 #include "../sof-audio.h"
23 #include "../ops.h"
24 #include "hda.h"
25 #include "hda-ipc.h"
26 
/*
 * Debug knob: when true, firmware trace DMA is left enabled while the DSP
 * sits in D0I3 with the system still in S0 (see hda_dsp_set_D0_state(),
 * which skips HDA_PM_NO_DMA_TRACE in that case). The module parameter is
 * only exposed on debug builds and is read-only at runtime (mode 0444);
 * default is false.
 */
static bool hda_enable_trace_D0I3_S0;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
MODULE_PARM_DESC(enable_trace_D0I3_S0,
		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
#endif
33 
34 /*
35  * DSP Core control.
36  */
37 
38 static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
39 {
40 	u32 adspcs;
41 	u32 reset;
42 	int ret;
43 
44 	/* set reset bits for cores */
45 	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
46 	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
47 					 HDA_DSP_REG_ADSPCS,
48 					 reset, reset);
49 
50 	/* poll with timeout to check if operation successful */
51 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
52 					HDA_DSP_REG_ADSPCS, adspcs,
53 					((adspcs & reset) == reset),
54 					HDA_DSP_REG_POLL_INTERVAL_US,
55 					HDA_DSP_RESET_TIMEOUT_US);
56 	if (ret < 0) {
57 		dev_err(sdev->dev,
58 			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
59 			__func__);
60 		return ret;
61 	}
62 
63 	/* has core entered reset ? */
64 	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
65 				  HDA_DSP_REG_ADSPCS);
66 	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
67 		HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
68 		dev_err(sdev->dev,
69 			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
70 			core_mask, adspcs);
71 		ret = -EIO;
72 	}
73 
74 	return ret;
75 }
76 
77 static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
78 {
79 	unsigned int crst;
80 	u32 adspcs;
81 	int ret;
82 
83 	/* clear reset bits for cores */
84 	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
85 					 HDA_DSP_REG_ADSPCS,
86 					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
87 					 0);
88 
89 	/* poll with timeout to check if operation successful */
90 	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
91 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
92 					    HDA_DSP_REG_ADSPCS, adspcs,
93 					    !(adspcs & crst),
94 					    HDA_DSP_REG_POLL_INTERVAL_US,
95 					    HDA_DSP_RESET_TIMEOUT_US);
96 
97 	if (ret < 0) {
98 		dev_err(sdev->dev,
99 			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
100 			__func__);
101 		return ret;
102 	}
103 
104 	/* has core left reset ? */
105 	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
106 				  HDA_DSP_REG_ADSPCS);
107 	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
108 		dev_err(sdev->dev,
109 			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
110 			core_mask, adspcs);
111 		ret = -EIO;
112 	}
113 
114 	return ret;
115 }
116 
117 int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
118 {
119 	/* stall core */
120 	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
121 					 HDA_DSP_REG_ADSPCS,
122 					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
123 					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
124 
125 	/* set reset state */
126 	return hda_dsp_core_reset_enter(sdev, core_mask);
127 }
128 
129 bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
130 {
131 	int val;
132 	bool is_enable;
133 
134 	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
135 
136 #define MASK_IS_EQUAL(v, m, field) ({	\
137 	u32 _m = field(m);		\
138 	((v) & _m) == _m;		\
139 })
140 
141 	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
142 		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
143 		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
144 		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
145 
146 #undef MASK_IS_EQUAL
147 
148 	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
149 		is_enable, core_mask);
150 
151 	return is_enable;
152 }
153 
154 int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
155 {
156 	int ret;
157 
158 	/* leave reset state */
159 	ret = hda_dsp_core_reset_leave(sdev, core_mask);
160 	if (ret < 0)
161 		return ret;
162 
163 	/* run core */
164 	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
165 	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
166 					 HDA_DSP_REG_ADSPCS,
167 					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
168 					 0);
169 
170 	/* is core now running ? */
171 	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
172 		hda_dsp_core_stall_reset(sdev, core_mask);
173 		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
174 			core_mask);
175 		ret = -EIO;
176 	}
177 
178 	return ret;
179 }
180 
181 /*
182  * Power Management.
183  */
184 
185 int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
186 {
187 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
188 	const struct sof_intel_dsp_desc *chip = hda->desc;
189 	unsigned int cpa;
190 	u32 adspcs;
191 	int ret;
192 
193 	/* restrict core_mask to host managed cores mask */
194 	core_mask &= chip->host_managed_cores_mask;
195 	/* return if core_mask is not valid */
196 	if (!core_mask)
197 		return 0;
198 
199 	/* update bits */
200 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
201 				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
202 				HDA_DSP_ADSPCS_SPA_MASK(core_mask));
203 
204 	/* poll with timeout to check if operation successful */
205 	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
206 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
207 					    HDA_DSP_REG_ADSPCS, adspcs,
208 					    (adspcs & cpa) == cpa,
209 					    HDA_DSP_REG_POLL_INTERVAL_US,
210 					    HDA_DSP_RESET_TIMEOUT_US);
211 	if (ret < 0) {
212 		dev_err(sdev->dev,
213 			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
214 			__func__);
215 		return ret;
216 	}
217 
218 	/* did core power up ? */
219 	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
220 				  HDA_DSP_REG_ADSPCS);
221 	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
222 		HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
223 		dev_err(sdev->dev,
224 			"error: power up core failed core_mask %xadspcs 0x%x\n",
225 			core_mask, adspcs);
226 		ret = -EIO;
227 	}
228 
229 	return ret;
230 }
231 
232 static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
233 {
234 	u32 adspcs;
235 	int ret;
236 
237 	/* update bits */
238 	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
239 					 HDA_DSP_REG_ADSPCS,
240 					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
241 
242 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
243 				HDA_DSP_REG_ADSPCS, adspcs,
244 				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
245 				HDA_DSP_REG_POLL_INTERVAL_US,
246 				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
247 	if (ret < 0)
248 		dev_err(sdev->dev,
249 			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
250 			__func__);
251 
252 	return ret;
253 }
254 
255 int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
256 {
257 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
258 	const struct sof_intel_dsp_desc *chip = hda->desc;
259 	int ret;
260 
261 	/* restrict core_mask to host managed cores mask */
262 	core_mask &= chip->host_managed_cores_mask;
263 
264 	/* return if core_mask is not valid or cores are already enabled */
265 	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
266 		return 0;
267 
268 	/* power up */
269 	ret = hda_dsp_core_power_up(sdev, core_mask);
270 	if (ret < 0) {
271 		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
272 			core_mask);
273 		return ret;
274 	}
275 
276 	return hda_dsp_core_run(sdev, core_mask);
277 }
278 
279 int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
280 				  unsigned int core_mask)
281 {
282 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
283 	const struct sof_intel_dsp_desc *chip = hda->desc;
284 	int ret;
285 
286 	/* restrict core_mask to host managed cores mask */
287 	core_mask &= chip->host_managed_cores_mask;
288 
289 	/* return if core_mask is not valid */
290 	if (!core_mask)
291 		return 0;
292 
293 	/* place core in reset prior to power down */
294 	ret = hda_dsp_core_stall_reset(sdev, core_mask);
295 	if (ret < 0) {
296 		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
297 			core_mask);
298 		return ret;
299 	}
300 
301 	/* power down core */
302 	ret = hda_dsp_core_power_down(sdev, core_mask);
303 	if (ret < 0) {
304 		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
305 			core_mask, ret);
306 		return ret;
307 	}
308 
309 	/* make sure we are in OFF state */
310 	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
311 		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
312 			core_mask, ret);
313 		ret = -EIO;
314 	}
315 
316 	return ret;
317 }
318 
319 void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
320 {
321 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
322 	const struct sof_intel_dsp_desc *chip = hda->desc;
323 
324 	/* enable IPC DONE and BUSY interrupts */
325 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
326 			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
327 			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
328 
329 	/* enable IPC interrupt */
330 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
331 				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
332 }
333 
334 void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
335 {
336 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
337 	const struct sof_intel_dsp_desc *chip = hda->desc;
338 
339 	/* disable IPC interrupt */
340 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
341 				HDA_DSP_ADSPIC_IPC, 0);
342 
343 	/* disable IPC BUSY and DONE interrupt */
344 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
345 			HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
346 }
347 
348 static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
349 {
350 	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
351 	struct snd_sof_pdata *pdata = sdev->pdata;
352 	const struct sof_intel_dsp_desc *chip;
353 
354 	chip = get_chip_info(pdata);
355 	while (snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset) &
356 		SOF_HDA_VS_D0I3C_CIP) {
357 		if (!retry--)
358 			return -ETIMEDOUT;
359 		usleep_range(10, 15);
360 	}
361 
362 	return 0;
363 }
364 
365 static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
366 {
367 	const struct sof_ipc_pm_ops *pm_ops = sof_ipc_get_ops(sdev, pm);
368 
369 	if (pm_ops && pm_ops->set_pm_gate)
370 		return pm_ops->set_pm_gate(sdev, flags);
371 
372 	return 0;
373 }
374 
/*
 * Write @value into the D0I3C::I3 bit of the vendor-specific D0I3C
 * register (offset chip->d0i3_offset) and verify it took effect.
 * Returns 0 on success, -ETIMEDOUT if the Command-In-Progress bit never
 * clears (before or after the write), -EIO if the readback disagrees
 * with the requested value.
 */
static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
	struct snd_sof_pdata *pdata = sdev->pdata;
	const struct sof_intel_dsp_desc *chip;
	int ret;
	u8 reg;

	chip = get_chip_info(pdata);

	/* Write to D0I3C after Command-In-Progress bit is cleared */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout before D0I3C update!\n");
		return ret;
	}

	/* Update D0I3C register */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
			    SOF_HDA_VS_D0I3C_I3, value);

	/*
	 * The value written to the D0I3C::I3 bit may not be taken into account immediately.
	 * A delay is recommended before checking if D0I3C::CIP is cleared
	 */
	usleep_range(30, 40);

	/* Wait for cmd in progress to be cleared before exiting the function */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "CIP timeout after D0I3C update!\n");
		return ret;
	}

	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
	/* Confirm d0i3 state changed with paranoia check */
	if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
		dev_err(sdev->dev, "failed to update D0I3C!\n");
		return -EIO;
	}

	trace_sof_intel_D0I3C_updated(sdev, reg);

	return 0;
}
419 
420 /*
421  * d0i3 streaming is enabled if all the active streams can
422  * work in d0i3 state and playback is enabled
423  */
424 static bool hda_dsp_d0i3_streaming_applicable(struct snd_sof_dev *sdev)
425 {
426 	struct snd_pcm_substream *substream;
427 	struct snd_sof_pcm *spcm;
428 	bool playback_active = false;
429 	int dir;
430 
431 	list_for_each_entry(spcm, &sdev->pcm_list, list) {
432 		for_each_pcm_streams(dir) {
433 			substream = spcm->stream[dir].substream;
434 			if (!substream || !substream->runtime)
435 				continue;
436 
437 			if (!spcm->stream[dir].d0i3_compatible)
438 				return false;
439 
440 			if (dir == SNDRV_PCM_STREAM_PLAYBACK)
441 				playback_active = true;
442 		}
443 	}
444 
445 	return playback_active;
446 }
447 
/*
 * Transition the DSP between the D0I0 and D0I3 substates of D0.
 * Updates the D0I3C register first, then notifies the firmware via the
 * PM_GATE IPC; if the IPC fails, the register write is reverted so the
 * hardware and firmware views do not diverge. Returns 0 on success, a
 * negative errno on register-update or IPC failure.
 */
static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
				const struct sof_dsp_power_state *target_state)
{
	u32 flags = 0;
	int ret;
	u8 value = 0;

	/*
	 * Sanity check for illegal state transitions
	 * The only allowed transitions are:
	 * 1. D3 -> D0I0
	 * 2. D0I0 -> D0I3
	 * 3. D0I3 -> D0I0
	 */
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		/* Follow the sequence below for D0 substate transitions */
		break;
	case SOF_DSP_PM_D3:
		/* Follow regular flow for D3 -> D0 transition */
		return 0;
	default:
		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	}

	/* Set flags and register value for D0 target substate */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
		value = SOF_HDA_VS_D0I3C_I3;

		/*
		 * Trace DMA need to be disabled when the DSP enters
		 * D0I3 for S0Ix suspend, but it can be kept enabled
		 * when the DSP enters D0I3 while the system is in S0
		 * for debug purpose.
		 */
		if (!sdev->fw_trace_is_supported ||
		    !hda_enable_trace_D0I3_S0 ||
		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
			flags = HDA_PM_NO_DMA_TRACE;

		if (hda_dsp_d0i3_streaming_applicable(sdev))
			flags |= HDA_PM_PG_STREAMING;
	} else {
		/* prevent power gating in D0I0 */
		flags = HDA_PM_PPG;
	}

	/* update D0I3C register */
	ret = hda_dsp_update_d0i3c_register(sdev, value);
	if (ret < 0)
		return ret;

	/*
	 * Notify the DSP of the state change.
	 * If this IPC fails, revert the D0I3C register update in order
	 * to prevent partial state change.
	 */
	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: PM_GATE ipc error %d\n", ret);
		goto revert;
	}

	return ret;

revert:
	/* fallback to the previous register value */
	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;

	/*
	 * This can fail but return the IPC error to signal that
	 * the state change failed.
	 */
	hda_dsp_update_d0i3c_register(sdev, value);

	return ret;
}
528 
529 /* helper to log DSP state */
530 static void hda_dsp_state_log(struct snd_sof_dev *sdev)
531 {
532 	switch (sdev->dsp_power_state.state) {
533 	case SOF_DSP_PM_D0:
534 		switch (sdev->dsp_power_state.substate) {
535 		case SOF_HDA_DSP_PM_D0I0:
536 			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
537 			break;
538 		case SOF_HDA_DSP_PM_D0I3:
539 			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
540 			break;
541 		default:
542 			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
543 				sdev->dsp_power_state.substate);
544 			break;
545 		}
546 		break;
547 	case SOF_DSP_PM_D1:
548 		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
549 		break;
550 	case SOF_DSP_PM_D2:
551 		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
552 		break;
553 	case SOF_DSP_PM_D3:
554 		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
555 		break;
556 	default:
557 		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
558 			sdev->dsp_power_state.state);
559 		break;
560 	}
561 }
562 
563 /*
564  * All DSP power state transitions are initiated by the driver.
565  * If the requested state change fails, the error is simply returned.
566  * Further state transitions are attempted only when the set_power_save() op
567  * is called again either because of a new IPC sent to the DSP or
568  * during system suspend/resume.
569  */
int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
			    const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	/*
	 * When the DSP is already in D0I3 and the target state is D0I3,
	 * it could be the case that the DSP is in D0I3 during S0
	 * and the system is suspending to S0Ix. Therefore,
	 * hda_dsp_set_D0_state() must be called to disable trace DMA
	 * by sending the PM_GATE IPC to the FW.
	 */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
		goto set_state;

	/*
	 * For all other cases, return without doing anything if
	 * the DSP is already in the target state.
	 */
	if (target_state->state == sdev->dsp_power_state.state &&
	    target_state->substate == sdev->dsp_power_state.substate)
		return 0;

set_state:
	switch (target_state->state) {
	case SOF_DSP_PM_D0:
		/* D0 substate transitions handled by the helper */
		ret = hda_dsp_set_D0_state(sdev, target_state);
		break;
	case SOF_DSP_PM_D3:
		/* The only allowed transition is: D0I0 -> D3 */
		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
			break;

		dev_err(sdev->dev,
			"error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	default:
		dev_err(sdev->dev, "error: target state unsupported %d\n",
			target_state->state);
		return -EINVAL;
	}
	if (ret < 0) {
		dev_err(sdev->dev,
			"failed to set requested target DSP state %d substate %d\n",
			target_state->state, target_state->substate);
		return ret;
	}

	/* record the new state only after the transition succeeded */
	sdev->dsp_power_state = *target_state;
	hda_dsp_state_log(sdev);
	return ret;
}
625 
626 /*
627  * Audio DSP states may transform as below:-
628  *
629  *                                         Opportunistic D0I3 in S0
630  *     Runtime    +---------------------+  Delayed D0i3 work timeout
631  *     suspend    |                     +--------------------+
632  *   +------------+       D0I0(active)  |                    |
633  *   |            |                     <---------------+    |
634  *   |   +-------->                     |    New IPC	|    |
635  *   |   |Runtime +--^--+---------^--+--+ (via mailbox)	|    |
636  *   |   |resume     |  |         |  |			|    |
637  *   |   |           |  |         |  |			|    |
638  *   |   |     System|  |         |  |			|    |
639  *   |   |     resume|  | S3/S0IX |  |                  |    |
640  *   |   |	     |  | suspend |  | S0IX             |    |
641  *   |   |           |  |         |  |suspend           |    |
642  *   |   |           |  |         |  |                  |    |
643  *   |   |           |  |         |  |                  |    |
644  * +-v---+-----------+--v-------+ |  |           +------+----v----+
645  * |                            | |  +----------->                |
646  * |       D3 (suspended)       | |              |      D0I3      |
647  * |                            | +--------------+                |
648  * |                            |  System resume |                |
649  * +----------------------------+		 +----------------+
650  *
651  * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
652  *		 ignored the suspend trigger. Otherwise the DSP
653  *		 is in D3.
654  */
655 
/*
 * Common suspend path shared by system and runtime suspend: disable
 * interrupts, power down the DSP and HDA links, stop the controller and
 * put it in reset, then drop the display codec power reference.
 * @runtime_suspend selects whether codec jack wake is armed.
 * Returns 0 on success or a negative errno from the first failing step.
 */
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	struct hdac_bus *bus = sof_to_bus(sdev);
	int ret, j;

	/*
	 * The memory used for IMR boot loses its content in deeper than S3 state
	 * We must not try IMR boot on next power up (as it will fail).
	 *
	 * In case of firmware crash or boot failure set the skip_imr_boot to true
	 * as well in order to try to re-load the firmware to do a 'cold' boot.
	 */
	if (sdev->system_suspend_target > SOF_SUSPEND_S3 ||
	    sdev->fw_state == SOF_FW_CRASHED ||
	    sdev->fw_state == SOF_FW_BOOT_FAILED)
		hda->skip_imr_boot = true;

	ret = chip->disable_interrupts(sdev);
	if (ret < 0)
		return ret;

	hda_codec_jack_wake_enable(sdev, runtime_suspend);

	/* power down all hda links */
	hda_bus_ml_suspend(bus);

	ret = chip->power_down_dsp(sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to power down DSP during suspend\n");
		return ret;
	}

	/* reset ref counts for all cores */
	for (j = 0; j < chip->cores_num; j++)
		sdev->dsp_core_ref_count[j] = 0;

	/* disable ppcap interrupt */
	hda_dsp_ctrl_ppcap_enable(sdev, false);
	hda_dsp_ctrl_ppcap_int_enable(sdev, false);

	/* disable hda bus irq and streams */
	hda_dsp_ctrl_stop_chip(sdev);

	/* disable LP retention mode */
	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);

	/* reset controller */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to reset controller during suspend\n");
		return ret;
	}

	/* display codec can be powered off after link reset */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}
718 
719 static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
720 {
721 	int ret;
722 
723 	/* display codec must be powered before link reset */
724 	hda_codec_i915_display_power(sdev, true);
725 
726 	/*
727 	 * clear TCSEL to clear playback on some HD Audio
728 	 * codecs. PCI TCSEL is defined in the Intel manuals.
729 	 */
730 	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
731 
732 	/* reset and start hda controller */
733 	ret = hda_dsp_ctrl_init_chip(sdev);
734 	if (ret < 0) {
735 		dev_err(sdev->dev,
736 			"error: failed to start controller after resume\n");
737 		goto cleanup;
738 	}
739 
740 	/* check jack status */
741 	if (runtime_resume) {
742 		hda_codec_jack_wake_enable(sdev, false);
743 		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
744 			hda_codec_jack_check(sdev);
745 	}
746 
747 	/* enable ppcap interrupt */
748 	hda_dsp_ctrl_ppcap_enable(sdev, true);
749 	hda_dsp_ctrl_ppcap_int_enable(sdev, true);
750 
751 cleanup:
752 	/* display codec can powered off after controller init */
753 	hda_codec_i915_display_power(sdev, false);
754 
755 	return 0;
756 }
757 
/*
 * System resume handler. Two paths:
 *  - if the DSP stayed in D0 (D0I3 over S0Ix), only the HDA links,
 *    CORB/RIRB and the D0I0 substate need to be restored;
 *  - otherwise do a full controller re-init via hda_resume(); the DSP
 *    cores are powered later during firmware boot.
 * Returns 0 on success or a negative errno.
 */
int hda_dsp_resume(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I0,
	};
	int ret;

	/* resume from D0I3 */
	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
		ret = hda_bus_ml_resume(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power up links",
				ret, __func__);
			return ret;
		}

		/* set up CORB/RIRB buffers if was on before suspend */
		hda_codec_resume_cmd_io(sdev);

		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_state.state, target_state.substate);
			return ret;
		}

		/* restore L1SEN bit */
		if (hda->l1_support_changed)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, 0);

		/* restore and disable the system wakeup */
		pci_restore_state(pci);
		disable_irq_wake(pci->irq);
		return 0;
	}

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, false);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
809 
810 int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
811 {
812 	const struct sof_dsp_power_state target_state = {
813 		.state = SOF_DSP_PM_D0,
814 	};
815 	int ret;
816 
817 	/* init hda controller. DSP cores will be powered up during fw boot */
818 	ret = hda_resume(sdev, true);
819 	if (ret < 0)
820 		return ret;
821 
822 	return snd_sof_dsp_set_power_state(sdev, &target_state);
823 }
824 
825 int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
826 {
827 	struct hdac_bus *hbus = sof_to_bus(sdev);
828 
829 	if (hbus->codec_powered) {
830 		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
831 			(unsigned int)hbus->codec_powered);
832 		return -EBUSY;
833 	}
834 
835 	return 0;
836 }
837 
838 int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
839 {
840 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
841 	const struct sof_dsp_power_state target_state = {
842 		.state = SOF_DSP_PM_D3,
843 	};
844 	int ret;
845 
846 	/* cancel any attempt for DSP D0I3 */
847 	cancel_delayed_work_sync(&hda->d0i3_work);
848 
849 	/* stop hda controller and power dsp off */
850 	ret = hda_suspend(sdev, true);
851 	if (ret < 0)
852 		return ret;
853 
854 	return snd_sof_dsp_set_power_state(sdev, &target_state);
855 }
856 
/*
 * System suspend handler. If @target_state is D0 (S0Ix with the DSP kept
 * in D0I3), only quiesce the bus: enable L1SEN, stop CORB/RIRB, power
 * down links and arm IPC wake. Otherwise stop the controller and power
 * the DSP off, then record the requested DSP power state.
 * Returns 0 on success or a negative errno.
 */
int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_dsp_state = {
		.state = target_state,
		.substate = target_state == SOF_DSP_PM_D0 ?
				SOF_HDA_DSP_PM_D0I3 : 0,
	};
	int ret;

	/* cancel any attempt for DSP D0I3 */
	cancel_delayed_work_sync(&hda->d0i3_work);

	if (target_state == SOF_DSP_PM_D0) {
		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_dsp_state.state,
				target_dsp_state.substate);
			return ret;
		}

		/* enable L1SEN to make sure the system can enter S0Ix */
		hda->l1_support_changed =
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN,
						HDA_VS_INTEL_EM2_L1SEN);

		/* stop the CORB/RIRB DMA if it is On */
		hda_codec_suspend_cmd_io(sdev);

		/* no link can be powered in s0ix state */
		ret = hda_bus_ml_suspend(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power down links",
				ret, __func__);
			return ret;
		}

		/* enable the system waking up via IPC IRQ */
		enable_irq_wake(pci->irq);
		pci_save_state(pci);
		return 0;
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, false);
	if (ret < 0) {
		dev_err(bus->dev, "error: suspending dsp\n");
		return ret;
	}

	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}
916 
917 static unsigned int hda_dsp_check_for_dma_streams(struct snd_sof_dev *sdev)
918 {
919 	struct hdac_bus *bus = sof_to_bus(sdev);
920 	struct hdac_stream *s;
921 	unsigned int active_streams = 0;
922 	int sd_offset;
923 	u32 val;
924 
925 	list_for_each_entry(s, &bus->stream_list, list) {
926 		sd_offset = SOF_STREAM_SD_OFFSET(s);
927 		val = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR,
928 				       sd_offset);
929 		if (val & SOF_HDA_SD_CTL_DMA_START)
930 			active_streams |= BIT(s->index);
931 	}
932 
933 	return active_streams;
934 }
935 
936 static int hda_dsp_s5_quirk(struct snd_sof_dev *sdev)
937 {
938 	int ret;
939 
940 	/*
941 	 * Do not assume a certain timing between the prior
942 	 * suspend flow, and running of this quirk function.
943 	 * This is needed if the controller was just put
944 	 * to reset before calling this function.
945 	 */
946 	usleep_range(500, 1000);
947 
948 	/*
949 	 * Take controller out of reset to flush DMA
950 	 * transactions.
951 	 */
952 	ret = hda_dsp_ctrl_link_reset(sdev, false);
953 	if (ret < 0)
954 		return ret;
955 
956 	usleep_range(500, 1000);
957 
958 	/* Restore state for shutdown, back to reset */
959 	ret = hda_dsp_ctrl_link_reset(sdev, true);
960 	if (ret < 0)
961 		return ret;
962 
963 	return ret;
964 }
965 
966 int hda_dsp_shutdown_dma_flush(struct snd_sof_dev *sdev)
967 {
968 	unsigned int active_streams;
969 	int ret, ret2;
970 
971 	/* check if DMA cleanup has been successful */
972 	active_streams = hda_dsp_check_for_dma_streams(sdev);
973 
974 	sdev->system_suspend_target = SOF_SUSPEND_S3;
975 	ret = snd_sof_suspend(sdev->dev);
976 
977 	if (active_streams) {
978 		dev_warn(sdev->dev,
979 			 "There were active DSP streams (%#x) at shutdown, trying to recover\n",
980 			 active_streams);
981 		ret2 = hda_dsp_s5_quirk(sdev);
982 		if (ret2 < 0)
983 			dev_err(sdev->dev, "shutdown recovery failed (%d)\n", ret2);
984 	}
985 
986 	return ret;
987 }
988 
/*
 * Shutdown handler: treat shutdown as an S3 suspend and reuse the common
 * suspend flow.
 */
int hda_dsp_shutdown(struct snd_sof_dev *sdev)
{
	sdev->system_suspend_target = SOF_SUSPEND_S3;
	return snd_sof_suspend(sdev->dev);
}
994 
995 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
996 {
997 	int ret;
998 
999 	/* make sure all DAI resources are freed */
1000 	ret = hda_dsp_dais_suspend(sdev);
1001 	if (ret < 0)
1002 		dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);
1003 
1004 	return ret;
1005 }
1006 
1007 void hda_dsp_d0i3_work(struct work_struct *work)
1008 {
1009 	struct sof_intel_hda_dev *hdev = container_of(work,
1010 						      struct sof_intel_hda_dev,
1011 						      d0i3_work.work);
1012 	struct hdac_bus *bus = &hdev->hbus.core;
1013 	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
1014 	struct sof_dsp_power_state target_state = {
1015 		.state = SOF_DSP_PM_D0,
1016 		.substate = SOF_HDA_DSP_PM_D0I3,
1017 	};
1018 	int ret;
1019 
1020 	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
1021 	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
1022 		/* remain in D0I0 */
1023 		return;
1024 
1025 	/* This can fail but error cannot be propagated */
1026 	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
1027 	if (ret < 0)
1028 		dev_err_ratelimited(sdev->dev,
1029 				    "error: failed to set DSP state %d substate %d\n",
1030 				    target_state.state, target_state.substate);
1031 }
1032 
1033 int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
1034 {
1035 	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
1036 	int ret, ret1;
1037 
1038 	/* power up core */
1039 	ret = hda_dsp_enable_core(sdev, BIT(core));
1040 	if (ret < 0) {
1041 		dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
1042 			core, ret);
1043 		return ret;
1044 	}
1045 
1046 	/* No need to send IPC for primary core or if FW boot is not complete */
1047 	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
1048 		return 0;
1049 
1050 	/* No need to continue the set_core_state ops is not available */
1051 	if (!pm_ops->set_core_state)
1052 		return 0;
1053 
1054 	/* Now notify DSP for secondary cores */
1055 	ret = pm_ops->set_core_state(sdev, core, true);
1056 	if (ret < 0) {
1057 		dev_err(sdev->dev, "failed to enable secondary core '%d' failed with %d\n",
1058 			core, ret);
1059 		goto power_down;
1060 	}
1061 
1062 	return ret;
1063 
1064 power_down:
1065 	/* power down core if it is host managed and return the original error if this fails too */
1066 	ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
1067 	if (ret1 < 0)
1068 		dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);
1069 
1070 	return ret;
1071 }
1072 
/*
 * Disable the SoundWire and IPC interrupt sources ahead of suspend.
 * Always returns 0 (kept as int presumably to match the
 * chip->disable_interrupts callback signature used in hda_suspend() —
 * confirm against the sof_intel_dsp_desc definition).
 */
int hda_dsp_disable_interrupts(struct snd_sof_dev *sdev)
{
	hda_sdw_int_enable(sdev, false);
	hda_dsp_ipc_int_disable(sdev);

	return 0;
}
1080