xref: /openbmc/linux/sound/soc/sof/intel/hda-dsp.c (revision 7cc39531)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 //
3 // This file is provided under a dual BSD/GPLv2 license.  When using or
4 // redistributing this file, you may do so under either license.
5 //
6 // Copyright(c) 2018 Intel Corporation. All rights reserved.
7 //
8 // Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
9 //	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
10 //	    Rander Wang <rander.wang@intel.com>
11 //          Keyon Jie <yang.jie@linux.intel.com>
12 //
13 
14 /*
15  * Hardware interface for generic Intel audio DSP HDA IP
16  */
17 
18 #include <linux/module.h>
19 #include <sound/hdaudio_ext.h>
20 #include <sound/hda_register.h>
21 #include "../sof-audio.h"
22 #include "../ops.h"
23 #include "hda.h"
24 #include "hda-ipc.h"
25 
26 static bool hda_enable_trace_D0I3_S0;
27 #if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
28 module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
29 MODULE_PARM_DESC(enable_trace_D0I3_S0,
30 		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
31 #endif
32 
33 /*
34  * DSP Core control.
35  */
36 
/*
 * hda_dsp_core_reset_enter - place DSP core(s) in reset
 * @sdev: SOF device context
 * @core_mask: bitmask of cores to put into reset
 *
 * Sets the CRST bits in ADSPCS for the cores in @core_mask, then polls
 * ADSPCS until the hardware reflects the reset state or the poll times
 * out, and finally re-reads the register to double-check.
 *
 * Return: 0 on success, -EIO if the cores did not enter reset, or the
 * negative error code returned by the register poll.
 */
static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	u32 reset;
	int ret;

	/* set reset bits for cores */
	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 reset, reset);

	/* poll with timeout to check if operation successful */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					HDA_DSP_REG_ADSPCS, adspcs,
					((adspcs & reset) == reset),
					HDA_DSP_REG_POLL_INTERVAL_US,
					HDA_DSP_RESET_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core entered reset ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
		HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
		dev_err(sdev->dev,
			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}
75 
/*
 * hda_dsp_core_reset_leave - release DSP core(s) from reset
 * @sdev: SOF device context
 * @core_mask: bitmask of cores to take out of reset
 *
 * Clears the CRST bits in ADSPCS for the cores in @core_mask, polls
 * until the bits read back as cleared (or the poll times out), and
 * re-reads the register to double-check.
 *
 * Return: 0 on success, -EIO if the cores are still in reset, or the
 * negative error code returned by the register poll.
 */
static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	unsigned int crst;
	u32 adspcs;
	int ret;

	/* clear reset bits for cores */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CRST_MASK(core_mask),
					 0);

	/* poll with timeout to check if operation successful */
	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    HDA_DSP_REG_ADSPCS, adspcs,
					    !(adspcs & crst),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_RESET_TIMEOUT_US);

	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core left reset ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(sdev->dev,
			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}
115 
116 static int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
117 {
118 	/* stall core */
119 	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
120 					 HDA_DSP_REG_ADSPCS,
121 					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
122 					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
123 
124 	/* set reset state */
125 	return hda_dsp_core_reset_enter(sdev, core_mask);
126 }
127 
128 static bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
129 {
130 	int val;
131 	bool is_enable;
132 
133 	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
134 
135 #define MASK_IS_EQUAL(v, m, field) ({	\
136 	u32 _m = field(m);		\
137 	((v) & _m) == _m;		\
138 })
139 
140 	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
141 		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
142 		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
143 		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
144 
145 #undef MASK_IS_EQUAL
146 
147 	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
148 		is_enable, core_mask);
149 
150 	return is_enable;
151 }
152 
/*
 * hda_dsp_core_run - take core(s) out of reset and un-stall them
 * @sdev: SOF device context
 * @core_mask: bitmask of cores to start
 *
 * Return: 0 if the cores report fully enabled afterwards; on failure
 * the cores are stalled and put back into reset and -EIO (or the
 * reset-leave error) is returned.
 */
int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	int ret;

	/* leave reset state */
	ret = hda_dsp_core_reset_leave(sdev, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
					 0);

	/* is core now running ? */
	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
		/* roll back: stall and re-enter reset before reporting failure */
		hda_dsp_core_stall_reset(sdev, core_mask);
		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
			core_mask);
		ret = -EIO;
	}

	return ret;
}
179 
180 /*
181  * Power Management.
182  */
183 
184 int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
185 {
186 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
187 	const struct sof_intel_dsp_desc *chip = hda->desc;
188 	unsigned int cpa;
189 	u32 adspcs;
190 	int ret;
191 
192 	/* restrict core_mask to host managed cores mask */
193 	core_mask &= chip->host_managed_cores_mask;
194 	/* return if core_mask is not valid */
195 	if (!core_mask)
196 		return 0;
197 
198 	/* update bits */
199 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
200 				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
201 				HDA_DSP_ADSPCS_SPA_MASK(core_mask));
202 
203 	/* poll with timeout to check if operation successful */
204 	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
205 	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
206 					    HDA_DSP_REG_ADSPCS, adspcs,
207 					    (adspcs & cpa) == cpa,
208 					    HDA_DSP_REG_POLL_INTERVAL_US,
209 					    HDA_DSP_RESET_TIMEOUT_US);
210 	if (ret < 0) {
211 		dev_err(sdev->dev,
212 			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
213 			__func__);
214 		return ret;
215 	}
216 
217 	/* did core power up ? */
218 	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
219 				  HDA_DSP_REG_ADSPCS);
220 	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
221 		HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
222 		dev_err(sdev->dev,
223 			"error: power up core failed core_mask %xadspcs 0x%x\n",
224 			core_mask, adspcs);
225 		ret = -EIO;
226 	}
227 
228 	return ret;
229 }
230 
/*
 * hda_dsp_core_power_down - power down DSP core(s) via ADSPCS
 * @sdev: SOF device context
 * @core_mask: bitmask of cores to power down
 *
 * Clears the SPA bits for @core_mask and polls until the CPA bits read
 * back as cleared, using the longer power-down timeout.
 *
 * Return: 0 on success, or the negative error from the register poll.
 */
static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	int ret;

	/* update bits */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
					 HDA_DSP_REG_ADSPCS,
					 HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);

	/* poll until CPA clears for all requested cores */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
				HDA_DSP_REG_ADSPCS, adspcs,
				!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
				HDA_DSP_REG_POLL_INTERVAL_US,
				HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);

	return ret;
}
253 
254 int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
255 {
256 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
257 	const struct sof_intel_dsp_desc *chip = hda->desc;
258 	int ret;
259 
260 	/* restrict core_mask to host managed cores mask */
261 	core_mask &= chip->host_managed_cores_mask;
262 
263 	/* return if core_mask is not valid or cores are already enabled */
264 	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
265 		return 0;
266 
267 	/* power up */
268 	ret = hda_dsp_core_power_up(sdev, core_mask);
269 	if (ret < 0) {
270 		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
271 			core_mask);
272 		return ret;
273 	}
274 
275 	return hda_dsp_core_run(sdev, core_mask);
276 }
277 
278 int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
279 				  unsigned int core_mask)
280 {
281 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
282 	const struct sof_intel_dsp_desc *chip = hda->desc;
283 	int ret;
284 
285 	/* restrict core_mask to host managed cores mask */
286 	core_mask &= chip->host_managed_cores_mask;
287 
288 	/* return if core_mask is not valid */
289 	if (!core_mask)
290 		return 0;
291 
292 	/* place core in reset prior to power down */
293 	ret = hda_dsp_core_stall_reset(sdev, core_mask);
294 	if (ret < 0) {
295 		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
296 			core_mask);
297 		return ret;
298 	}
299 
300 	/* power down core */
301 	ret = hda_dsp_core_power_down(sdev, core_mask);
302 	if (ret < 0) {
303 		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
304 			core_mask, ret);
305 		return ret;
306 	}
307 
308 	/* make sure we are in OFF state */
309 	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
310 		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
311 			core_mask, ret);
312 		ret = -EIO;
313 	}
314 
315 	return ret;
316 }
317 
318 void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
319 {
320 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
321 	const struct sof_intel_dsp_desc *chip = hda->desc;
322 
323 	/* enable IPC DONE and BUSY interrupts */
324 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
325 			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
326 			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);
327 
328 	/* enable IPC interrupt */
329 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
330 				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
331 }
332 
333 void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
334 {
335 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
336 	const struct sof_intel_dsp_desc *chip = hda->desc;
337 
338 	/* disable IPC interrupt */
339 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
340 				HDA_DSP_ADSPIC_IPC, 0);
341 
342 	/* disable IPC BUSY and DONE interrupt */
343 	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
344 			HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
345 }
346 
347 static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
348 {
349 	struct hdac_bus *bus = sof_to_bus(sdev);
350 	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
351 
352 	while (snd_hdac_chip_readb(bus, VS_D0I3C) & SOF_HDA_VS_D0I3C_CIP) {
353 		if (!retry--)
354 			return -ETIMEDOUT;
355 		usleep_range(10, 15);
356 	}
357 
358 	return 0;
359 }
360 
361 static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
362 {
363 	struct sof_ipc_pm_gate pm_gate;
364 	struct sof_ipc_reply reply;
365 
366 	memset(&pm_gate, 0, sizeof(pm_gate));
367 
368 	/* configure pm_gate ipc message */
369 	pm_gate.hdr.size = sizeof(pm_gate);
370 	pm_gate.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE;
371 	pm_gate.flags = flags;
372 
373 	/* send pm_gate ipc to dsp */
374 	return sof_ipc_tx_message_no_pm(sdev->ipc, &pm_gate, sizeof(pm_gate),
375 					&reply, sizeof(reply));
376 }
377 
/*
 * hda_dsp_update_d0i3c_register - write the D0I3 bit in VS_D0I3C
 * @sdev: SOF device context
 * @value: new value for the SOF_HDA_VS_D0I3C_I3 bit (0 or the bit itself)
 *
 * Waits for the Command-In-Progress bit to clear both before and after
 * the update, as required by the register's handshake.
 *
 * Return: 0 on success, -ETIMEDOUT if CIP never cleared.
 */
static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
	struct hdac_bus *bus = sof_to_bus(sdev);
	int ret;

	/* Write to D0I3C after Command-In-Progress bit is cleared */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(bus->dev, "CIP timeout before D0I3C update!\n");
		return ret;
	}

	/* Update D0I3C register */
	snd_hdac_chip_updateb(bus, VS_D0I3C, SOF_HDA_VS_D0I3C_I3, value);

	/* Wait for cmd in progress to be cleared before exiting the function */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(bus->dev, "CIP timeout after D0I3C update!\n");
		return ret;
	}

	dev_vdbg(bus->dev, "D0I3C updated, register = 0x%x\n",
		 snd_hdac_chip_readb(bus, VS_D0I3C));

	return 0;
}
405 
/*
 * hda_dsp_set_D0_state - handle transitions between the D0 substates
 * @sdev: SOF device context
 * @target_state: requested DSP power state (state == SOF_DSP_PM_D0)
 *
 * Programs the vendor-specific D0I3C register for the target substate
 * and then notifies the firmware with a PM_GATE IPC. If the IPC fails,
 * the D0I3C write is reverted so hardware and firmware do not end up
 * disagreeing about the current substate.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
				const struct sof_dsp_power_state *target_state)
{
	u32 flags = 0;
	int ret;
	u8 value = 0;

	/*
	 * Sanity check for illegal state transitions
	 * The only allowed transitions are:
	 * 1. D3 -> D0I0
	 * 2. D0I0 -> D0I3
	 * 3. D0I3 -> D0I0
	 */
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		/* Follow the sequence below for D0 substate transitions */
		break;
	case SOF_DSP_PM_D3:
		/* Follow regular flow for D3 -> D0 transition */
		return 0;
	default:
		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	}

	/* Set flags and register value for D0 target substate */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
		value = SOF_HDA_VS_D0I3C_I3;

		/*
		 * Trace DMA needs to be disabled when the DSP enters
		 * D0I3 for S0Ix suspend, but it can be kept enabled
		 * when the DSP enters D0I3 while the system is in S0
		 * for debug purposes.
		 */
		if (!sdev->fw_trace_is_supported ||
		    !hda_enable_trace_D0I3_S0 ||
		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
			flags = HDA_PM_NO_DMA_TRACE;
	} else {
		/* prevent power gating in D0I0 */
		flags = HDA_PM_PPG;
	}

	/* update D0I3C register */
	ret = hda_dsp_update_d0i3c_register(sdev, value);
	if (ret < 0)
		return ret;

	/*
	 * Notify the DSP of the state change.
	 * If this IPC fails, revert the D0I3C register update in order
	 * to prevent partial state change.
	 */
	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: PM_GATE ipc error %d\n", ret);
		goto revert;
	}

	return ret;

revert:
	/* fallback to the previous register value */
	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;

	/*
	 * This can fail but return the IPC error to signal that
	 * the state change failed.
	 */
	hda_dsp_update_d0i3c_register(sdev, value);

	return ret;
}
483 
484 /* helper to log DSP state */
485 static void hda_dsp_state_log(struct snd_sof_dev *sdev)
486 {
487 	switch (sdev->dsp_power_state.state) {
488 	case SOF_DSP_PM_D0:
489 		switch (sdev->dsp_power_state.substate) {
490 		case SOF_HDA_DSP_PM_D0I0:
491 			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
492 			break;
493 		case SOF_HDA_DSP_PM_D0I3:
494 			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
495 			break;
496 		default:
497 			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
498 				sdev->dsp_power_state.substate);
499 			break;
500 		}
501 		break;
502 	case SOF_DSP_PM_D1:
503 		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
504 		break;
505 	case SOF_DSP_PM_D2:
506 		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
507 		break;
508 	case SOF_DSP_PM_D3:
509 		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
510 		break;
511 	default:
512 		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
513 			sdev->dsp_power_state.state);
514 		break;
515 	}
516 }
517 
518 /*
519  * All DSP power state transitions are initiated by the driver.
520  * If the requested state change fails, the error is simply returned.
521  * Further state transitions are attempted only when the set_power_save() op
522  * is called again either because of a new IPC sent to the DSP or
523  * during system suspend/resume.
524  */
/*
 * hda_dsp_set_power_state - request a DSP power state transition
 * @sdev: SOF device context
 * @target_state: requested state/substate pair
 *
 * Return: 0 on success (including no-op requests), negative error code
 * when the transition is disallowed or fails.
 */
int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
			    const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	/*
	 * When the DSP is already in D0I3 and the target state is D0I3,
	 * it could be the case that the DSP is in D0I3 during S0
	 * and the system is suspending to S0Ix. Therefore,
	 * hda_dsp_set_D0_state() must be called to disable trace DMA
	 * by sending the PM_GATE IPC to the FW.
	 */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
		goto set_state;

	/*
	 * For all other cases, return without doing anything if
	 * the DSP is already in the target state.
	 */
	if (target_state->state == sdev->dsp_power_state.state &&
	    target_state->substate == sdev->dsp_power_state.substate)
		return 0;

set_state:
	switch (target_state->state) {
	case SOF_DSP_PM_D0:
		ret = hda_dsp_set_D0_state(sdev, target_state);
		break;
	case SOF_DSP_PM_D3:
		/* The only allowed transition is: D0I0 -> D3 */
		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
			break;

		dev_err(sdev->dev,
			"error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	default:
		dev_err(sdev->dev, "error: target state unsupported %d\n",
			target_state->state);
		return -EINVAL;
	}
	if (ret < 0) {
		dev_err(sdev->dev,
			"failed to set requested target DSP state %d substate %d\n",
			target_state->state, target_state->substate);
		return ret;
	}

	/* record the new state and log it for debugging */
	sdev->dsp_power_state = *target_state;
	hda_dsp_state_log(sdev);
	return ret;
}
580 
581 /*
582  * Audio DSP states may transform as below:-
583  *
584  *                                         Opportunistic D0I3 in S0
585  *     Runtime    +---------------------+  Delayed D0i3 work timeout
586  *     suspend    |                     +--------------------+
587  *   +------------+       D0I0(active)  |                    |
588  *   |            |                     <---------------+    |
589  *   |   +-------->                     |    New IPC	|    |
590  *   |   |Runtime +--^--+---------^--+--+ (via mailbox)	|    |
591  *   |   |resume     |  |         |  |			|    |
592  *   |   |           |  |         |  |			|    |
593  *   |   |     System|  |         |  |			|    |
594  *   |   |     resume|  | S3/S0IX |  |                  |    |
595  *   |   |	     |  | suspend |  | S0IX             |    |
596  *   |   |           |  |         |  |suspend           |    |
597  *   |   |           |  |         |  |                  |    |
598  *   |   |           |  |         |  |                  |    |
599  * +-v---+-----------+--v-------+ |  |           +------+----v----+
600  * |                            | |  +----------->                |
601  * |       D3 (suspended)       | |              |      D0I3      |
602  * |                            | +--------------+                |
603  * |                            |  System resume |                |
604  * +----------------------------+		 +----------------+
605  *
606  * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
607  *		 ignored the suspend trigger. Otherwise the DSP
608  *		 is in D3.
609  */
610 
/*
 * hda_suspend - power down the DSP and quiesce the HDA controller
 * @sdev: SOF device context
 * @runtime_suspend: true on the runtime-PM path (enables jack wake)
 *
 * Return: 0 on success, negative error code on failure.
 */
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
	struct hdac_bus *bus = sof_to_bus(sdev);
#endif
	int ret, j;

	/*
	 * The memory used for IMR boot loses its content in deeper than S3 state
	 * We must not try IMR boot on next power up (as it will fail).
	 */
	if (sdev->system_suspend_target > SOF_SUSPEND_S3)
		hda->skip_imr_boot = true;

	/* disable SoundWire interrupts */
	hda_sdw_int_enable(sdev, false);

	/* disable IPC interrupts */
	hda_dsp_ipc_int_disable(sdev);

#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
	/* on runtime suspend, allow jack events to wake the system */
	hda_codec_jack_wake_enable(sdev, runtime_suspend);

	/* power down all hda links */
	snd_hdac_ext_bus_link_power_down_all(bus);
#endif

	/* power down DSP */
	ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to power down core during suspend\n");
		return ret;
	}

	/* reset ref counts for all cores */
	for (j = 0; j < chip->cores_num; j++)
		sdev->dsp_core_ref_count[j] = 0;

	/* disable ppcap interrupt */
	hda_dsp_ctrl_ppcap_enable(sdev, false);
	hda_dsp_ctrl_ppcap_int_enable(sdev, false);

	/* disable hda bus irq and streams */
	hda_dsp_ctrl_stop_chip(sdev);

	/* disable LP retention mode */
	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);

	/* reset controller */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to reset controller during suspend\n");
		return ret;
	}

	/* display codec can be powered off after link reset */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}
675 
676 static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
677 {
678 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
679 	struct hdac_bus *bus = sof_to_bus(sdev);
680 	struct hdac_ext_link *hlink = NULL;
681 #endif
682 	int ret;
683 
684 	/* display codec must be powered before link reset */
685 	hda_codec_i915_display_power(sdev, true);
686 
687 	/*
688 	 * clear TCSEL to clear playback on some HD Audio
689 	 * codecs. PCI TCSEL is defined in the Intel manuals.
690 	 */
691 	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
692 
693 	/* reset and start hda controller */
694 	ret = hda_dsp_ctrl_init_chip(sdev, true);
695 	if (ret < 0) {
696 		dev_err(sdev->dev,
697 			"error: failed to start controller after resume\n");
698 		goto cleanup;
699 	}
700 
701 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
702 	/* check jack status */
703 	if (runtime_resume) {
704 		hda_codec_jack_wake_enable(sdev, false);
705 		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
706 			hda_codec_jack_check(sdev);
707 	}
708 
709 	/* turn off the links that were off before suspend */
710 	list_for_each_entry(hlink, &bus->hlink_list, list) {
711 		if (!hlink->ref_count)
712 			snd_hdac_ext_bus_link_power_down(hlink);
713 	}
714 
715 	/* check dma status and clean up CORB/RIRB buffers */
716 	if (!bus->cmd_dma_state)
717 		snd_hdac_bus_stop_cmd_io(bus);
718 #endif
719 
720 	/* enable ppcap interrupt */
721 	hda_dsp_ctrl_ppcap_enable(sdev, true);
722 	hda_dsp_ctrl_ppcap_int_enable(sdev, true);
723 
724 cleanup:
725 	/* display codec can powered off after controller init */
726 	hda_codec_i915_display_power(sdev, false);
727 
728 	return 0;
729 }
730 
/*
 * hda_dsp_resume - system resume handler
 * @sdev: SOF device context
 *
 * Handles two distinct cases: a lightweight resume from D0I3 (links,
 * CORB/RIRB, L1SEN and wake IRQ are restored in place) and a full
 * resume from D3 via hda_resume(). Either way the DSP power state is
 * moved to D0/D0I0.
 *
 * Return: 0 on success, negative error code on failure.
 */
int hda_dsp_resume(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I0,
	};
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct hdac_ext_link *hlink = NULL;
#endif
	int ret;

	/* resume from D0I3 */
	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
		/* power up links that were active before suspend */
		list_for_each_entry(hlink, &bus->hlink_list, list) {
			if (hlink->ref_count) {
				ret = snd_hdac_ext_bus_link_power_up(hlink);
				if (ret < 0) {
					dev_err(sdev->dev,
						"error %d in %s: failed to power up links",
						ret, __func__);
					return ret;
				}
			}
		}

		/* set up CORB/RIRB buffers if was on before suspend */
		if (bus->cmd_dma_state)
			snd_hdac_bus_init_cmd_io(bus);
#endif

		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_state.state, target_state.substate);
			return ret;
		}

		/* restore L1SEN bit if it was changed on the suspend path */
		if (hda->l1_support_changed)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, 0);

		/* restore and disable the system wakeup */
		pci_restore_state(pci);
		disable_irq_wake(pci->irq);
		return 0;
	}

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, false);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}
793 
794 int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
795 {
796 	const struct sof_dsp_power_state target_state = {
797 		.state = SOF_DSP_PM_D0,
798 	};
799 	int ret;
800 
801 	/* init hda controller. DSP cores will be powered up during fw boot */
802 	ret = hda_resume(sdev, true);
803 	if (ret < 0)
804 		return ret;
805 
806 	return snd_sof_dsp_set_power_state(sdev, &target_state);
807 }
808 
809 int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
810 {
811 	struct hdac_bus *hbus = sof_to_bus(sdev);
812 
813 	if (hbus->codec_powered) {
814 		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
815 			(unsigned int)hbus->codec_powered);
816 		return -EBUSY;
817 	}
818 
819 	return 0;
820 }
821 
822 int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
823 {
824 	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
825 	const struct sof_dsp_power_state target_state = {
826 		.state = SOF_DSP_PM_D3,
827 	};
828 	int ret;
829 
830 	/* cancel any attempt for DSP D0I3 */
831 	cancel_delayed_work_sync(&hda->d0i3_work);
832 
833 	/* stop hda controller and power dsp off */
834 	ret = hda_suspend(sdev, true);
835 	if (ret < 0)
836 		return ret;
837 
838 	return snd_sof_dsp_set_power_state(sdev, &target_state);
839 }
840 
/*
 * hda_dsp_suspend - system suspend handler
 * @sdev: SOF device context
 * @target_state: requested DSP state (SOF_DSP_PM_D0 for S0Ix with
 *                D0I3-compatible streams active, otherwise D3)
 *
 * For the D0 (S0Ix) case the controller stays up: only the links,
 * CORB/RIRB DMA and L1SEN are reconfigured and the IPC IRQ is armed as
 * a wake source. For the D3 case the controller is fully suspended.
 *
 * Return: 0 on success, negative error code on failure.
 */
int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_dsp_state = {
		.state = target_state,
		/* staying in D0 across suspend implies the D0I3 substate */
		.substate = target_state == SOF_DSP_PM_D0 ?
				SOF_HDA_DSP_PM_D0I3 : 0,
	};
	int ret;

	/* cancel any attempt for DSP D0I3 */
	cancel_delayed_work_sync(&hda->d0i3_work);

	if (target_state == SOF_DSP_PM_D0) {
		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_dsp_state.state,
				target_dsp_state.substate);
			return ret;
		}

		/* enable L1SEN to make sure the system can enter S0Ix */
		hda->l1_support_changed =
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN,
						HDA_VS_INTEL_EM2_L1SEN);

#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
		/* stop the CORB/RIRB DMA if it is On */
		if (bus->cmd_dma_state)
			snd_hdac_bus_stop_cmd_io(bus);

		/* no link can be powered in s0ix state */
		ret = snd_hdac_ext_bus_link_power_down_all(bus);
		if (ret < 0) {
			dev_err(sdev->dev,
				"error %d in %s: failed to power down links",
				ret, __func__);
			return ret;
		}
#endif

		/* enable the system waking up via IPC IRQ */
		enable_irq_wake(pci->irq);
		pci_save_state(pci);
		return 0;
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, false);
	if (ret < 0) {
		dev_err(bus->dev, "error: suspending dsp\n");
		return ret;
	}

	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}
903 
904 int hda_dsp_shutdown(struct snd_sof_dev *sdev)
905 {
906 	sdev->system_suspend_target = SOF_SUSPEND_S3;
907 	return snd_sof_suspend(sdev->dev);
908 }
909 
910 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
911 {
912 	int ret;
913 
914 	/* make sure all DAI resources are freed */
915 	ret = hda_dsp_dais_suspend(sdev);
916 	if (ret < 0)
917 		dev_warn(sdev->dev, "%s: failure in hda_dsp_dais_suspend\n", __func__);
918 
919 	return ret;
920 }
921 
/*
 * hda_dsp_d0i3_work - delayed work that opportunistically enters D0I3
 * @work: the d0i3_work member embedded in struct sof_intel_hda_dev
 *
 * Moves the DSP from D0I0 to D0I3 when only D0I3-compatible streams are
 * active. Errors are only logged (ratelimited) because they cannot be
 * propagated from workqueue context.
 */
void hda_dsp_d0i3_work(struct work_struct *work)
{
	struct sof_intel_hda_dev *hdev = container_of(work,
						      struct sof_intel_hda_dev,
						      d0i3_work.work);
	struct hdac_bus *bus = &hdev->hbus.core;
	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
	struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I3,
	};
	int ret;

	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
		/* remain in D0I0 */
		return;

	/* This can fail but error cannot be propagated */
	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
	if (ret < 0)
		dev_err_ratelimited(sdev->dev,
				    "error: failed to set DSP state %d substate %d\n",
				    target_state.state, target_state.substate);
}
947 
948 int hda_dsp_core_get(struct snd_sof_dev *sdev, int core)
949 {
950 	const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
951 	int ret, ret1;
952 
953 	/* power up core */
954 	ret = hda_dsp_enable_core(sdev, BIT(core));
955 	if (ret < 0) {
956 		dev_err(sdev->dev, "failed to power up core %d with err: %d\n",
957 			core, ret);
958 		return ret;
959 	}
960 
961 	/* No need to send IPC for primary core or if FW boot is not complete */
962 	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE || core == SOF_DSP_PRIMARY_CORE)
963 		return 0;
964 
965 	/* No need to continue the set_core_state ops is not available */
966 	if (!pm_ops->set_core_state)
967 		return 0;
968 
969 	/* Now notify DSP for secondary cores */
970 	ret = pm_ops->set_core_state(sdev, core, true);
971 	if (ret < 0) {
972 		dev_err(sdev->dev, "failed to enable secondary core '%d' failed with %d\n",
973 			core, ret);
974 		goto power_down;
975 	}
976 
977 	return ret;
978 
979 power_down:
980 	/* power down core if it is host managed and return the original error if this fails too */
981 	ret1 = hda_dsp_core_reset_power_down(sdev, BIT(core));
982 	if (ret1 < 0)
983 		dev_err(sdev->dev, "failed to power down core: %d with err: %d\n", core, ret1);
984 
985 	return ret;
986 }
987