// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//
// Special thanks to:
//    Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
//    Michal Sienkiewicz <michal.sienkiewicz@intel.com>
//    Filip Proborszcz
//
// for sharing Intel AudioDSP expertise and helping shape the very
// foundation of this driver
//

#include <linux/module.h>
#include <linux/pci.h>
#include <sound/hda_codec.h>
#include <sound/hda_i915.h>
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hdaudio_ext.h>
#include <sound/intel-dsp-config.h>
#include <sound/intel-nhlt.h>
#include "../../codecs/hda.h"
#include "avs.h"
#include "cldma.h"

static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK;
module_param(pgctl_mask, uint, 0444);
MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override");

static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK;
module_param(cgctl_mask, uint, 0444);
MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override");

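/* Read-modify-write helper for the HDA controller's PCI configuration registers. */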
static void
avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
{
	struct pci_dev *pci = to_pci_dev(bus->dev);
	u32 data;

	pci_read_config_dword(pci, reg, &data);
	data &= ~mask;
	data |= (value & mask);
	pci_write_config_dword(pci, reg, data);
}

void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
{
	u32 value = enable ? 0 : pgctl_mask;

	avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value);
}

static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
{
	u32 value = enable ? cgctl_mask : 0;

	avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value);
}

void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
{
	avs_hdac_clock_gating_enable(&adev->base.core, enable);
}

void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
{
	u32 value = enable ? AZX_VS_EM2_L1SEN : 0;

	snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, value);
}

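/*
 * Read the capture and playback stream counts from GCAP, initialize an
 * extended stream descriptor for each of them and allocate the DMA pages
 * (buffer descriptor lists, position buffer) the streams rely on.
 */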
static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
{
	unsigned int cp_streams, pb_streams;
	unsigned int gcap;

	gcap = snd_hdac_chip_readw(bus, GCAP);
	cp_streams = (gcap >> 8) & 0x0F;
	pb_streams = (gcap >> 12) & 0x0F;
	bus->num_streams = cp_streams + pb_streams;

	snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
	snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);

	return snd_hdac_bus_alloc_stream_pages(bus);
}

static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
{
	struct hdac_ext_link *hlink;
	bool ret;

	avs_hdac_clock_gating_enable(bus, false);
	ret = snd_hdac_bus_init_chip(bus, full_reset);

	/* Reset stream-to-link mapping */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);

	avs_hdac_clock_gating_enable(bus, true);

	/*
	 * Set the DUM bit to address incorrect position reporting for capture
	 * streams. In order to do so, the controller needs to be out of its
	 * reset state.
	 */
	snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);

	return ret;
}

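/*
 * Check for codec presence at the given address by querying the vendor ID of
 * its root node and, on success, create and configure an ASoC HDA codec
 * device for it.
 */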
static int probe_codec(struct hdac_bus *bus, int addr)
{
	struct hda_codec *codec;
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
			   (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res = -1;
	int ret;

	mutex_lock(&bus->cmd_mutex);
	snd_hdac_bus_send_cmd(bus, cmd);
	snd_hdac_bus_get_response(bus, addr, &res);
	mutex_unlock(&bus->cmd_mutex);
	if (res == -1)
		return -EIO;

	dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);

	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
	if (IS_ERR(codec)) {
		dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
		return PTR_ERR(codec);
	}
	/*
	 * Allow avs_core to suspend by forcing the suspended state on all
	 * of its codec child devices. A component interested in dealing
	 * with the HDA codecs directly takes over the PM responsibilities.
	 */
	pm_runtime_set_suspended(hda_codec_dev(codec));

	/* configuring the codec effectively creates a new ASoC component */
	ret = snd_hda_codec_configure(codec);
	if (ret < 0) {
		dev_err(bus->dev, "failed to config codec %d\n", ret);
		return ret;
	}

	return 0;
}

static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
{
	int c;

	/* First try to probe all given codec slots */
	for (c = 0; c < HDA_MAX_CODECS; c++) {
		if (!(bus->codec_mask & BIT(c)))
			continue;

		if (!probe_codec(bus, c))
			/* success, continue probing */
			continue;

		/*
		 * Some BIOSes report codec addresses for codecs
		 * that do not exist.
		 */
		dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
		bus->codec_mask &= ~BIT(c);
		/*
		 * Worse, accessing a non-existent codec often screws up
		 * the controller bus and disturbs further communication.
		 * Thus if an error occurs during probing, it is better to
		 * reset the controller bus to get back to a sane state.
		 */
		snd_hdac_bus_stop_chip(bus);
		avs_hdac_bus_init_chip(bus, true);
	}
}

static void avs_hda_probe_work(struct work_struct *work)
{
	struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
	struct hdac_bus *bus = &adev->base.core;
	struct hdac_ext_link *hlink;
	int ret;

	pm_runtime_set_active(bus->dev); /* clear runtime_error flag */

	ret = snd_hdac_i915_init(bus);
	if (ret < 0)
		dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);
	avs_hdac_bus_probe_codecs(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	/* with all codecs probed, links can be powered down */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		snd_hdac_ext_bus_link_put(bus, hlink);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	ret = avs_dsp_first_boot_firmware(adev);
	if (ret < 0)
		return;

	adev->nhlt = intel_nhlt_init(adev->dev);
	if (!adev->nhlt)
		dev_info(bus->dev, "platform has no NHLT\n");
	avs_debugfs_init(adev);

	avs_register_all_boards(adev);

	/* configure PM */
	pm_runtime_set_autosuspend_delay(bus->dev, 2000);
	pm_runtime_use_autosuspend(bus->dev);
	pm_runtime_mark_last_busy(bus->dev);
	pm_runtime_put_autosuspend(bus->dev);
	pm_runtime_allow(bus->dev);
}

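/*
 * Advance the stream's linear position counter by the number of bytes DMA'd
 * since the last update, taking cyclic buffer wrap-around into account.
 */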
static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
{
	u64 prev_pos, pos, num_bytes;

	div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
	pos = snd_hdac_stream_get_pos_posbuf(stream);

	if (pos < prev_pos)
		num_bytes = (buffer_size - prev_pos) + pos;
	else
		num_bytes = pos - prev_pos;

	stream->curr_pos += num_bytes;
}

/* called from IRQ */
static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
{
	if (stream->substream) {
		snd_pcm_period_elapsed(stream->substream);
	} else if (stream->cstream) {
		u64 buffer_size = stream->cstream->runtime->buffer_size;

		hdac_stream_update_pos(stream, buffer_size);
		snd_compr_fragment_elapsed(stream->cstream);
	}
}

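/*
 * Top half of the interrupt handler: service RIRB (codec response)
 * interrupts and, if any stream raised an interrupt, mask stream interrupts
 * in INTCTL and wake the threaded handler which takes care of them.
 */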
static irqreturn_t hdac_bus_irq_handler(int irq, void *context)
{
	struct hdac_bus *bus = context;
	u32 mask, int_enable;
	u32 status;
	int ret = IRQ_NONE;

	if (!pm_runtime_active(bus->dev))
		return ret;

	spin_lock(&bus->reg_lock);

	status = snd_hdac_chip_readl(bus, INTSTS);
	if (status == 0 || status == UINT_MAX) {
		spin_unlock(&bus->reg_lock);
		return ret;
	}

	/* clear rirb int */
	status = snd_hdac_chip_readb(bus, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE)
			snd_hdac_bus_update_rirb(bus);
		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
	}

	mask = (0x1 << bus->num_streams) - 1;

	status = snd_hdac_chip_readl(bus, INTSTS);
	status &= mask;
	if (status) {
		/* Disable stream interrupts; Re-enable in bottom half */
		int_enable = snd_hdac_chip_readl(bus, INTCTL);
		snd_hdac_chip_writel(bus, INTCTL, (int_enable & (~mask)));
		ret = IRQ_WAKE_THREAD;
	} else {
		ret = IRQ_HANDLED;
	}

	spin_unlock(&bus->reg_lock);
	return ret;
}

static irqreturn_t hdac_bus_irq_thread(int irq, void *context)
{
	struct hdac_bus *bus = context;
	u32 status;
	u32 int_enable;
	u32 mask;
	unsigned long flags;

	status = snd_hdac_chip_readl(bus, INTSTS);

	snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream);

	/* Re-enable stream interrupts */
	mask = (0x1 << bus->num_streams) - 1;
	spin_lock_irqsave(&bus->reg_lock, flags);
	int_enable = snd_hdac_chip_readl(bus, INTCTL);
	snd_hdac_chip_writel(bus, INTCTL, (int_enable | mask));
	spin_unlock_irqrestore(&bus->reg_lock, flags);

	return IRQ_HANDLED;
}

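/*
 * A single MSI or legacy INTx vector is allocated and shared by two handler
 * pairs: one servicing HDA stream interrupts, the other the DSP IPC
 * interrupts.
 */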
static int avs_hdac_acquire_irq(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = to_pci_dev(bus->dev);
	int ret;

	/* request one and check that we only got one interrupt */
	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (ret != 1) {
		dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
		return ret;
	}

	ret = pci_request_irq(pci, 0, hdac_bus_irq_handler, hdac_bus_irq_thread, bus,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
		goto free_vector;
	}

	ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
		goto free_stream_irq;
	}

	return 0;

free_stream_irq:
	pci_free_irq(pci, 0, bus);
free_vector:
	pci_free_irq_vectors(pci);
	return ret;
}

static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hda_bus *bus = &adev->base;
	struct avs_ipc *ipc;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
	if (ret < 0)
		return ret;

	bus->core.use_posbuf = 1;
	bus->core.bdl_pos_adj = 0;
	bus->core.sync_write = 1;
	bus->pci = pci;
	bus->mixer_assigned = -1;
	mutex_init(&bus->prepare_mutex);

	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
	if (!ipc)
		return -ENOMEM;
	ret = avs_ipc_init(ipc, dev);
	if (ret < 0)
		return ret;

	adev->dev = dev;
	adev->spec = (const struct avs_spec *)id->driver_data;
	adev->ipc = ipc;
	adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
	INIT_WORK(&adev->probe_work, avs_hda_probe_work);
	INIT_LIST_HEAD(&adev->comp_list);
	INIT_LIST_HEAD(&adev->path_list);
	INIT_LIST_HEAD(&adev->fw_list);
	init_completion(&adev->fw_ready);
	spin_lock_init(&adev->path_list_lock);
	mutex_init(&adev->modres_mutex);
	mutex_init(&adev->comp_list_mutex);
	mutex_init(&adev->path_mutex);

	return 0;
}

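/*
 * PCI probe limits itself to mapping resources and acquiring interrupts;
 * controller initialization, codec probing and firmware boot are deferred
 * to avs_hda_probe_work().
 */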
static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hdac_bus *bus;
	struct avs_dev *adev;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_intel_dsp_driver_probe(pci);
	if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_AVS)
		return -ENODEV;

	ret = pcim_enable_device(pci);
	if (ret < 0)
		return ret;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	ret = avs_bus_init(adev, pci, id);
	if (ret < 0) {
		dev_err(dev, "failed to init avs bus: %d\n", ret);
		return ret;
	}

	ret = pci_request_regions(pci, "AVS HDAudio");
	if (ret < 0)
		return ret;

	bus = &adev->base.core;
	bus->addr = pci_resource_start(pci, 0);
	bus->remap_addr = pci_ioremap_bar(pci, 0);
	if (!bus->remap_addr) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar0;
	}

	adev->dsp_ba = pci_ioremap_bar(pci, 4);
	if (!adev->dsp_ba) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar4;
	}

	snd_hdac_bus_parse_capabilities(bus);
	if (bus->mlcap)
		snd_hdac_ext_bus_get_ml_capabilities(bus);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	dma_set_max_seg_size(dev, UINT_MAX);

	ret = avs_hdac_bus_init_streams(bus);
	if (ret < 0) {
		dev_err(dev, "failed to init streams: %d\n", ret);
		goto err_init_streams;
	}

	ret = avs_hdac_acquire_irq(adev);
	if (ret < 0) {
		dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
		goto err_acquire_irq;
	}

	pci_set_master(pci);
	pci_set_drvdata(pci, bus);
	device_disable_async_suspend(dev);

	schedule_work(&adev->probe_work);

	return 0;

err_acquire_irq:
	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
err_init_streams:
	iounmap(adev->dsp_ba);
err_remap_bar4:
	iounmap(bus->remap_addr);
err_remap_bar0:
	pci_release_regions(pci);
	return ret;
}

static void avs_pci_shutdown(struct pci_dev *pci)
{
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	snd_hdac_stop_streams(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);
	snd_hdac_ext_bus_link_power_down_all(bus);

	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	if (avs_platattr_test(adev, CLDMA))
		pci_free_irq(pci, 0, &code_loader);
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
}

static void avs_pci_remove(struct pci_dev *pci)
{
	struct hdac_device *hdev, *save;
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	avs_unregister_all_boards(adev);

	avs_debugfs_exit(adev);
	if (adev->nhlt)
		intel_nhlt_free(adev->nhlt);

	if (avs_platattr_test(adev, CLDMA))
		hda_cldma_free(&code_loader);

	snd_hdac_stop_streams_and_chip(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	/* it is safe to remove all codecs from the system now */
	list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
		snd_hda_codec_unregister(hdac_to_hda_codec(hdev));

	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
	/* reverse ml_capabilities */
	snd_hdac_ext_link_free_all(bus);
	snd_hdac_ext_bus_exit(bus);

	avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
	snd_hdac_ext_bus_ppcap_enable(bus, false);

	/* snd_hdac_stop_streams_and_chip does that already? */
	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
	if (bus->audio_component)
		snd_hdac_i915_exit(bus);

	avs_module_info_free(adev);
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	iounmap(bus->remap_addr);
	iounmap(adev->dsp_ba);
	pci_release_regions(pci);

	/* Firmware is not needed anymore */
	avs_release_firmwares(adev);

	/* pm_runtime_forbid() can rpm_resume() which we do not want */
	pm_runtime_disable(&pci->dev);
	pm_runtime_forbid(&pci->dev);
	pm_runtime_enable(&pci->dev);
	pm_runtime_get_noresume(&pci->dev);
}

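/*
 * Partial suspend taken when low-power paths are in use: the DSP is left
 * running, only the CORB/RIRB DMA and HDA links are stopped and the device
 * interrupt is armed as a wakeup source.
 */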
static int avs_suspend_standby(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = adev->base.pci;

	if (bus->cmd_dma_state)
		snd_hdac_bus_stop_cmd_io(bus);

	snd_hdac_ext_bus_link_power_down_all(bus);

	enable_irq_wake(pci->irq);
	pci_save_state(pci);

	return 0;
}

static int __maybe_unused avs_suspend_common(struct avs_dev *adev, bool low_power)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	flush_work(&adev->probe_work);
	if (low_power && adev->num_lp_paths)
		return avs_suspend_standby(adev);

	snd_hdac_ext_bus_link_power_down_all(bus);

	ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
	/*
	 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
	 * Do not block the entire system from suspending in that case.
	 */
	if (ret && ret != -EPERM) {
		dev_err(adev->dev, "set dx failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	avs_ipc_block(adev->ipc);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
	if (ret < 0) {
		dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
		return ret;
	}

	snd_hdac_ext_bus_ppcap_enable(bus, false);
	/* disable LP SRAM retention */
	avs_hda_power_gating_enable(adev, false);
	snd_hdac_bus_stop_chip(bus);
	/* disable CG when putting the controller into reset */
	avs_hdac_clock_gating_enable(bus, false);
	snd_hdac_bus_enter_link_reset(bus);
	avs_hdac_clock_gating_enable(bus, true);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	return 0;
}

static int avs_resume_standby(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = adev->base.pci;

	pci_restore_state(pci);
	disable_irq_wake(pci->irq);

	snd_hdac_ext_bus_link_power_up_all(bus);

	if (bus->cmd_dma_state)
		snd_hdac_bus_init_cmd_io(bus);

	return 0;
}

static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool low_power, bool purge)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	if (low_power && adev->num_lp_paths)
		return avs_resume_standby(adev);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	ret = avs_dsp_boot_firmware(adev, purge);
	if (ret < 0) {
		dev_err(adev->dev, "firmware boot failed: %d\n", ret);
		return ret;
	}

	return 0;
}

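/*
 * System sleep and runtime PM callbacks permit the low-power (DSP kept
 * alive) path when low-power paths are allocated; the hibernation-related
 * callbacks below always perform the full suspend and resume sequence.
 */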
static int __maybe_unused avs_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), true);
}

static int __maybe_unused avs_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true, true);
}

static int __maybe_unused avs_runtime_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), true);
}

static int __maybe_unused avs_runtime_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true, false);
}

static int __maybe_unused avs_freeze(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), false);
}

static int __maybe_unused avs_thaw(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), false, true);
}

static int __maybe_unused avs_poweroff(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev), false);
}

static int __maybe_unused avs_restore(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), false, true);
}

static const struct dev_pm_ops avs_dev_pm = {
	.suspend = avs_suspend,
	.resume = avs_resume,
	.freeze = avs_freeze,
	.thaw = avs_thaw,
	.poweroff = avs_poweroff,
	.restore = avs_restore,
	SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
};

static const struct avs_spec skl_desc = {
	.name = "skl",
	.min_fw_version = {
		.major = 9,
		.minor = 21,
		.hotfix = 0,
		.build = 4732,
	},
	.dsp_ops = &skl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_CLDMA,
	.sram_base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
	.sram_window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status = SKL_ADSP_SRAM_BASE_OFFSET,
};

static const struct avs_spec apl_desc = {
	.name = "apl",
	.min_fw_version = {
		.major = 9,
		.minor = 22,
		.hotfix = 1,
		.build = 4323,
	},
	.dsp_ops = &apl_dsp_ops,
	.core_init_mask = 3,
	.attributes = AVS_PLATATTR_IMR,
	.sram_base_offset = APL_ADSP_SRAM_BASE_OFFSET,
	.sram_window_size = APL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status = APL_ADSP_SRAM_BASE_OFFSET,
};

static const struct pci_device_id avs_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x9d70), (unsigned long)&skl_desc }, /* SKL */
	{ PCI_VDEVICE(INTEL, 0xa170), (unsigned long)&skl_desc }, /* SKL-H */
	{ PCI_VDEVICE(INTEL, 0x9d71), (unsigned long)&skl_desc }, /* KBL */
	{ PCI_VDEVICE(INTEL, 0xa171), (unsigned long)&skl_desc }, /* KBL-H */
	{ PCI_VDEVICE(INTEL, 0xa2f0), (unsigned long)&skl_desc }, /* KBL-S */
	{ PCI_VDEVICE(INTEL, 0xa3f0), (unsigned long)&skl_desc }, /* CML-V */
	{ PCI_VDEVICE(INTEL, 0x5a98), (unsigned long)&apl_desc }, /* APL */
	{ PCI_VDEVICE(INTEL, 0x3198), (unsigned long)&apl_desc }, /* GML */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, avs_ids);

static struct pci_driver avs_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = avs_ids,
	.probe = avs_pci_probe,
	.remove = avs_pci_remove,
	.shutdown = avs_pci_shutdown,
	.driver = {
		.pm = &avs_dev_pm,
	},
};
module_pci_driver(avs_pci_driver);

MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");