// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//
// Special thanks to:
//    Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
//    Michal Sienkiewicz <michal.sienkiewicz@intel.com>
//    Filip Proborszcz
//
// for sharing Intel AudioDSP expertise and helping shape the very
// foundation of this driver
//

#include <linux/module.h>
#include <linux/pci.h>
#include <sound/hda_codec.h>
#include <sound/hda_i915.h>
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hdaudio_ext.h>
#include <sound/intel-dsp-config.h>
#include <sound/intel-nhlt.h>
#include "avs.h"
#include "cldma.h"

/*
 * Read-modify-write a dword in the controller's PCI config space:
 * bits selected by @mask are cleared and then set to (@value & @mask).
 */
static void
avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
{
	struct pci_dev *pci = to_pci_dev(bus->dev);
	u32 data;

	pci_read_config_dword(pci, reg, &data);
	data &= ~mask;
	data |= (value & mask);
	pci_write_config_dword(pci, reg, data);
}

/*
 * Toggle dynamic power gating.
 *
 * Note the inversion: LSRMD is *set* when power gating is being disabled
 * (enable == false), which per the caller in avs_suspend_common() disables
 * LP SRAM retention.
 */
void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
{
	u32 value;

	value = enable ? 0 : AZX_PGCTL_LSRMD_MASK;
	avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL,
				    AZX_PGCTL_LSRMD_MASK, value);
}

/* Toggle miscellaneous backbone dynamic clock gating (MISCBDCGE). */
static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
{
	u32 value;

	value = enable ? AZX_CGCTL_MISCBDCGE_MASK : 0;
	avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, AZX_CGCTL_MISCBDCGE_MASK, value);
}

/* Public wrapper over avs_hdac_clock_gating_enable() taking an avs_dev. */
void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
{
	avs_hdac_clock_gating_enable(&adev->base.core, enable);
}

/* Toggle the L1SEN bit (L1 entry enable) in the vendor-specific EM2 register. */
void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
{
	u32 value;

	value = enable ? AZX_VS_EM2_L1SEN : 0;
	snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, value);
}

/*
 * Discover the controller's stream configuration from the GCAP register
 * (bits 11:8 = capture streams, bits 15:12 = playback streams), initialize
 * the extended-stream descriptors and allocate BDL/position-buffer pages.
 *
 * Returns 0 on success or a negative error code from the page allocation.
 */
static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
{
	unsigned int cp_streams, pb_streams;
	unsigned int gcap;

	gcap = snd_hdac_chip_readw(bus, GCAP);
	cp_streams = (gcap >> 8) & 0x0F;
	pb_streams = (gcap >> 12) & 0x0F;
	bus->num_streams = cp_streams + pb_streams;

	/* Capture streams occupy the low stream indices, playback the high ones. */
	snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
	snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);

	return snd_hdac_bus_alloc_stream_pages(bus);
}

/*
 * Bring the link controller out of reset and prepare it for use.
 *
 * Clock gating is disabled for the duration of the chip init and re-enabled
 * afterwards. Returns the bool result of snd_hdac_bus_init_chip() (true when
 * the codec_mask was read - see its documentation).
 */
static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
{
	struct hdac_ext_link *hlink;
	bool ret;

	avs_hdac_clock_gating_enable(bus, false);
	ret = snd_hdac_bus_init_chip(bus, full_reset);

	/* Reset stream-to-link mapping */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);

	avs_hdac_clock_gating_enable(bus, true);

	/* Set DUM bit to address incorrect position reporting for capture
	 * streams. In order to do so, CTRL needs to be out of reset state
	 */
	snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);

	return ret;
}

/*
 * Verify a codec is present at @addr by querying its vendor ID over the
 * link, then create and configure an hda_codec device for it.
 *
 * Returns 0 on success, -EIO when nothing responds at @addr, or a negative
 * error from device init/configure.
 */
static int probe_codec(struct hdac_bus *bus, int addr)
{
	struct hda_codec *codec;
	/* GET_PARAMETER(VENDOR_ID) verb addressed to the root node of codec @addr */
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
			   (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res = -1;
	int ret;

	mutex_lock(&bus->cmd_mutex);
	snd_hdac_bus_send_cmd(bus, cmd);
	snd_hdac_bus_get_response(bus, addr, &res);
	mutex_unlock(&bus->cmd_mutex);
	if (res == -1)
		return -EIO;

	dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);

	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
	if (IS_ERR(codec)) {
		dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
		return PTR_ERR(codec);
	}
	/*
	 * Allow avs_core suspend by forcing suspended state on all
	 * of its codec child devices. Component interested in
	 * dealing with hda codecs directly takes pm responsibilities
	 */
	pm_runtime_set_suspended(hda_codec_dev(codec));

	/* configure effectively creates new ASoC component */
	ret = snd_hda_codec_configure(codec);
	if (ret < 0) {
		dev_err(bus->dev, "failed to config codec %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Probe every codec address advertised in bus->codec_mask. Addresses that
 * fail to probe are removed from the mask and the controller is reset, as a
 * failed probe can leave the link in a bad state.
 */
static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
{
	int c;

	/* First try to probe all given codec slots */
	for (c = 0; c < HDA_MAX_CODECS; c++) {
		if (!(bus->codec_mask & BIT(c)))
			continue;

		if (!probe_codec(bus, c))
			/* success, continue probing */
			continue;

		/*
		 * Some BIOSen give you wrong codec addresses
		 * that don't exist
		 */
		dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
		bus->codec_mask &= ~BIT(c);
		/*
		 * More badly, accessing to a non-existing
		 * codec often screws up the controller bus,
		 * and disturbs the further communications.
		 * Thus if an error occurs during probing,
		 * better to reset the controller bus to get
		 * back to the sanity state.
		 */
		snd_hdac_bus_stop_chip(bus);
		avs_hdac_bus_init_chip(bus, true);
	}
}

/*
 * Deferred probe, scheduled from avs_pci_probe(): init the link controller,
 * probe codecs, boot the DSP firmware, parse NHLT, register machine boards
 * and finally enable runtime PM. Bails out early (leaving runtime PM
 * untouched) if the firmware fails to boot.
 */
static void avs_hda_probe_work(struct work_struct *work)
{
	struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
	struct hdac_bus *bus = &adev->base.core;
	struct hdac_ext_link *hlink;
	int ret;

	pm_runtime_set_active(bus->dev); /* clear runtime_error flag */

	/* i915 is optional; carry on without HDMI/DP audio if unavailable */
	ret = snd_hdac_i915_init(bus);
	if (ret < 0)
		dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);
	avs_hdac_bus_probe_codecs(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	/* with all codecs probed, links can be powered down */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		snd_hdac_ext_bus_link_put(bus, hlink);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	ret = avs_dsp_first_boot_firmware(adev);
	if (ret < 0)
		return;

	adev->nhlt = intel_nhlt_init(adev->dev);
	if (!adev->nhlt)
		dev_info(bus->dev, "platform has no NHLT\n");

	avs_register_all_boards(adev);

	/* configure PM */
	pm_runtime_set_autosuspend_delay(bus->dev, 2000);
	pm_runtime_use_autosuspend(bus->dev);
	pm_runtime_mark_last_busy(bus->dev);
	pm_runtime_put_autosuspend(bus->dev);
	pm_runtime_allow(bus->dev);
}

/*
 * Advance stream->curr_pos by the number of bytes consumed since the last
 * call, derived from the hardware position buffer. Handles wraparound of
 * the circular buffer of size @buffer_size.
 */
static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
{
	u64 prev_pos, pos, num_bytes;

	div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
	pos = snd_hdac_stream_get_pos_posbuf(stream);

	if (pos < prev_pos)
		num_bytes = (buffer_size - prev_pos) + pos;
	else
		num_bytes = pos - prev_pos;

	stream->curr_pos += num_bytes;
}

/* called from IRQ */
static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
{
	if (stream->substream) {
		/* PCM stream: notify ALSA of an elapsed period */
		snd_pcm_period_elapsed(stream->substream);
	} else if (stream->cstream) {
		/* compress stream: track position manually, then notify */
		u64 buffer_size = stream->cstream->runtime->buffer_size;

		hdac_stream_update_pos(stream, buffer_size);
		snd_compr_fragment_elapsed(stream->cstream);
	}
}

/*
 * Hard-IRQ half of the HDA bus interrupt: handle RIRB responses inline and,
 * when any stream interrupt is pending, mask stream interrupts and wake the
 * threaded handler to process them.
 */
static irqreturn_t hdac_bus_irq_handler(int irq, void *context)
{
	struct hdac_bus *bus = context;
	u32 mask, int_enable;
	u32 status;
	int ret = IRQ_NONE;

	if (!pm_runtime_active(bus->dev))
		return ret;

	spin_lock(&bus->reg_lock);

	status = snd_hdac_chip_readl(bus, INTSTS);
	/* 0 = not ours; all-ones presumably means device inaccessible - TODO confirm */
	if (status == 0 || status == UINT_MAX) {
		spin_unlock(&bus->reg_lock);
		return ret;
	}

	/* clear rirb int */
	status = snd_hdac_chip_readb(bus, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE)
			snd_hdac_bus_update_rirb(bus);
		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
	}

	/* one INTSTS/INTCTL bit per stream descriptor */
	mask = (0x1 << bus->num_streams) - 1;

	status = snd_hdac_chip_readl(bus, INTSTS);
	status &= mask;
	if (status) {
		/* Disable stream interrupts; Re-enable in bottom half */
		int_enable = snd_hdac_chip_readl(bus, INTCTL);
		snd_hdac_chip_writel(bus, INTCTL, (int_enable & (~mask)));
		ret = IRQ_WAKE_THREAD;
	} else {
		ret = IRQ_HANDLED;
	}

	spin_unlock(&bus->reg_lock);
	return ret;
}

/*
 * Threaded half of the HDA bus interrupt: dispatch per-stream handling via
 * hdac_update_stream() and re-enable the stream interrupts masked by the
 * hard-IRQ half.
 */
static irqreturn_t hdac_bus_irq_thread(int irq, void *context)
{
	struct hdac_bus *bus = context;
	u32 status;
	u32 int_enable;
	u32 mask;
	unsigned long flags;

	status = snd_hdac_chip_readl(bus, INTSTS);

	snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream);

	/* Re-enable stream interrupts */
	mask = (0x1 << bus->num_streams) - 1;
	spin_lock_irqsave(&bus->reg_lock, flags);
	int_enable = snd_hdac_chip_readl(bus, INTCTL);
	snd_hdac_chip_writel(bus, INTCTL, (int_enable | mask));
	spin_unlock_irqrestore(&bus->reg_lock, flags);

	return IRQ_HANDLED;
}

/*
 * Allocate a single interrupt vector and install two handler pairs on it:
 * the HDA-bus (stream/RIRB) handlers with @bus as context and the DSP IPC
 * handlers with @adev as context. Undoes all work on failure.
 *
 * Returns 0 on success or a negative error code.
 */
static int avs_hdac_acquire_irq(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	struct pci_dev *pci = to_pci_dev(bus->dev);
	int ret;

	/* request one and check that we only got one interrupt */
	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (ret != 1) {
		dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
		return ret;
	}

	ret = pci_request_irq(pci, 0, hdac_bus_irq_handler, hdac_bus_irq_thread, bus,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
		goto free_vector;
	}

	ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
			      KBUILD_MODNAME);
	if (ret < 0) {
		dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
		goto free_stream_irq;
	}

	return 0;

free_stream_irq:
	pci_free_irq(pci, 0, bus);
free_vector:
	pci_free_irq_vectors(pci);
	return ret;
}

/*
 * One-time software initialization of the avs_dev: hdac bus setup, IPC
 * allocation and init, and all lists/locks/work items used by the driver.
 *
 * Returns 0 on success or a negative error code.
 */
static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hda_bus *bus = &adev->base;
	struct avs_ipc *ipc;
	struct device *dev = &pci->dev;
	int ret;

	ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, NULL);
	if (ret < 0)
		return ret;

	bus->core.use_posbuf = 1;
	bus->core.bdl_pos_adj = 0;
	bus->core.sync_write = 1;
	bus->pci = pci;
	bus->mixer_assigned = -1;
	mutex_init(&bus->prepare_mutex);

	ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
	if (!ipc)
		return -ENOMEM;
	ret = avs_ipc_init(ipc, dev);
	if (ret < 0)
		return ret;

	adev->dev = dev;
	/* platform-specific descriptor (skl_desc/apl_desc) stashed in the id table */
	adev->spec = (const struct avs_spec *)id->driver_data;
	adev->ipc = ipc;
	adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
	INIT_WORK(&adev->probe_work, avs_hda_probe_work);
	INIT_LIST_HEAD(&adev->comp_list);
	INIT_LIST_HEAD(&adev->path_list);
	INIT_LIST_HEAD(&adev->fw_list);
	init_completion(&adev->fw_ready);
	spin_lock_init(&adev->path_list_lock);
	mutex_init(&adev->modres_mutex);
	mutex_init(&adev->comp_list_mutex);
	mutex_init(&adev->path_mutex);

	return 0;
}

/*
 * PCI probe: map BARs (0 = HDA registers, 4 = DSP), set DMA masks, init
 * streams and IRQs, then schedule the heavyweight remainder of probing
 * (codec scan, firmware boot) onto probe_work.
 *
 * Returns 0 on success or a negative error code; unwinds via gotos.
 */
static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct hdac_bus *bus;
	struct avs_dev *adev;
	struct device *dev = &pci->dev;
	int ret;

	/* defer to another driver if the dsp-config arbiter says so */
	ret = snd_intel_dsp_driver_probe(pci);
	if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_AVS)
		return -ENODEV;

	ret = pcim_enable_device(pci);
	if (ret < 0)
		return ret;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	ret = avs_bus_init(adev, pci, id);
	if (ret < 0) {
		dev_err(dev, "failed to init avs bus: %d\n", ret);
		return ret;
	}

	ret = pci_request_regions(pci, "AVS HDAudio");
	if (ret < 0)
		return ret;

	bus = &adev->base.core;
	bus->addr = pci_resource_start(pci, 0);
	bus->remap_addr = pci_ioremap_bar(pci, 0);
	if (!bus->remap_addr) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar0;
	}

	adev->dsp_ba = pci_ioremap_bar(pci, 4);
	if (!adev->dsp_ba) {
		dev_err(bus->dev, "ioremap error\n");
		ret = -ENXIO;
		goto err_remap_bar4;
	}

	snd_hdac_bus_parse_capabilities(bus);
	if (bus->mlcap)
		snd_hdac_ext_bus_get_ml_capabilities(bus);

	/* prefer 64-bit DMA, fall back to 32-bit */
	if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		dma_set_mask(dev, DMA_BIT_MASK(32));
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	ret = avs_hdac_bus_init_streams(bus);
	if (ret < 0) {
		dev_err(dev, "failed to init streams: %d\n", ret);
		goto err_init_streams;
	}

	ret = avs_hdac_acquire_irq(adev);
	if (ret < 0) {
		dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
		goto err_acquire_irq;
	}

	pci_set_master(pci);
	pci_set_drvdata(pci, bus);
	device_disable_async_suspend(dev);

	schedule_work(&adev->probe_work);

	return 0;

err_acquire_irq:
	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_stream_free_all(bus);
err_init_streams:
	iounmap(adev->dsp_ba);
err_remap_bar4:
	iounmap(bus->remap_addr);
err_remap_bar0:
	pci_release_regions(pci);
	return ret;
}

/*
 * PCI remove: tear down in roughly reverse probe order - cancel the deferred
 * probe work, unregister boards and codecs, stop DSP and controller, release
 * IRQs/mappings/regions and firmware, and rebalance runtime PM.
 */
static void avs_pci_remove(struct pci_dev *pci)
{
	struct hdac_device *hdev, *save;
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	avs_unregister_all_boards(adev);

	if (adev->nhlt)
		intel_nhlt_free(adev->nhlt);

	if (avs_platattr_test(adev, CLDMA))
		hda_cldma_free(&code_loader);

	snd_hdac_stop_streams_and_chip(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	/* it is safe to remove all codecs from the system now */
	list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
		snd_hda_codec_unregister(hdac_to_hda_codec(hdev));

	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_stream_free_all(bus);
	/* reverse ml_capabilities */
	snd_hdac_link_free_all(bus);
	snd_hdac_ext_bus_exit(bus);

	avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
	snd_hdac_ext_bus_ppcap_enable(bus, false);

	/* snd_hdac_stop_streams_and_chip does that already? */
	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
	if (bus->audio_component)
		snd_hdac_i915_exit(bus);

	avs_module_info_free(adev);
	/* IPC irq was requested with @adev context, stream irq with @bus */
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	iounmap(bus->remap_addr);
	iounmap(adev->dsp_ba);
	pci_release_regions(pci);

	/* Firmware is not needed anymore */
	avs_release_firmwares(adev);

	/* pm_runtime_forbid() can rpm_resume() which we do not want */
	pm_runtime_disable(&pci->dev);
	pm_runtime_forbid(&pci->dev);
	pm_runtime_enable(&pci->dev);
	pm_runtime_get_noresume(&pci->dev);
}

/*
 * Common suspend path for both system and runtime suspend: power down
 * links, put the DSP into D3 via IPC, disable cores and PP capability,
 * stop the controller and enter link reset.
 *
 * Returns 0 on success or a negative error code.
 */
static int __maybe_unused avs_suspend_common(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	/* make sure the deferred probe finished before tearing anything down */
	flush_work(&adev->probe_work);

	snd_hdac_ext_bus_link_power_down_all(bus);

	ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
	/*
	 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
	 * Do not block entire system from suspending if that's the case.
	 */
	if (ret && ret != -EPERM) {
		dev_err(adev->dev, "set dx failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
	if (ret < 0) {
		dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
		return ret;
	}

	snd_hdac_ext_bus_ppcap_enable(bus, false);
	/* disable LP SRAM retention */
	avs_hda_power_gating_enable(adev, false);
	snd_hdac_bus_stop_chip(bus);
	/* disable CG when putting controller to reset */
	avs_hdac_clock_gating_enable(bus, false);
	snd_hdac_bus_enter_link_reset(bus);
	avs_hdac_clock_gating_enable(bus, true);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	return 0;
}

/*
 * Common resume path for both system and runtime resume: re-init the
 * controller, re-enable PP capability and boot firmware (@purge selects a
 * purge boot), then restore link power state and command DMA state.
 *
 * Returns 0 on success or a negative error code.
 */
static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool purge)
{
	struct hdac_bus *bus = &adev->base.core;
	struct hdac_ext_link *hlink;
	int ret;

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	ret = avs_dsp_boot_firmware(adev, purge);
	if (ret < 0) {
		dev_err(adev->dev, "firmware boot failed: %d\n", ret);
		return ret;
	}

	/* turn off the links that were off before suspend */
	list_for_each_entry(hlink, &bus->hlink_list, list) {
		if (!hlink->ref_count)
			snd_hdac_ext_bus_link_power_down(hlink);
	}

	/* check dma status and clean up CORB/RIRB buffers */
	if (!bus->cmd_dma_state)
		snd_hdac_bus_stop_cmd_io(bus);

	return 0;
}

/* System sleep suspend hook. */
static int __maybe_unused avs_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev));
}

/* System sleep resume hook; always purge-boots the firmware. */
static int __maybe_unused avs_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true);
}

/* Runtime suspend hook; shares the common suspend path. */
static int __maybe_unused avs_runtime_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev));
}

/* Runtime resume hook; always purge-boots the firmware. */
static int __maybe_unused avs_runtime_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true);
}

static const struct dev_pm_ops avs_dev_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(avs_suspend, avs_resume)
	SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
};

/* SkyLake/KabyLake platform descriptor: CLDMA-based code loading. */
static const struct avs_spec skl_desc = {
	.name = "skl",
	.min_fw_version = {
		.major = 9,
		.minor = 21,
		.hotfix = 0,
		.build = 4732,
	},
	.dsp_ops = &skl_dsp_ops,
	.core_init_mask = 1,
	.attributes = AVS_PLATATTR_CLDMA,
	.sram_base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
	.sram_window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status = SKL_ADSP_SRAM_BASE_OFFSET,
};

/* ApolloLake/GeminiLake platform descriptor: IMR-based code loading. */
static const struct avs_spec apl_desc = {
	.name = "apl",
	.min_fw_version = {
		.major = 9,
		.minor = 22,
		.hotfix = 1,
		.build = 4323,
	},
	.dsp_ops = &apl_dsp_ops,
	.core_init_mask = 3,
	.attributes = AVS_PLATATTR_IMR,
	.sram_base_offset = APL_ADSP_SRAM_BASE_OFFSET,
	.sram_window_size = APL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status = APL_ADSP_SRAM_BASE_OFFSET,
};

static const struct pci_device_id avs_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x9d70), (unsigned long)&skl_desc }, /* SKL */
	{ PCI_VDEVICE(INTEL, 0x9d71), (unsigned long)&skl_desc }, /* KBL */
	{ PCI_VDEVICE(INTEL, 0x5a98), (unsigned long)&apl_desc }, /* APL */
	{ PCI_VDEVICE(INTEL, 0x3198), (unsigned long)&apl_desc }, /* GML */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, avs_ids);

static struct pci_driver avs_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = avs_ids,
	.probe = avs_pci_probe,
	.remove = avs_pci_remove,
	.driver = {
		.pm = &avs_dev_pm,
	},
};
module_pci_driver(avs_pci_driver);

MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");