// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
//	    Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>

/*
 * Hardware interface for generic AMD ACP processor
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"

static int smn_write(struct pci_dev *dev, u32 smn_addr, u32 data)
{
	pci_write_config_dword(dev, 0x60, smn_addr);
	pci_write_config_dword(dev, 0x64, data);

	return 0;
}

static int smn_read(struct pci_dev *dev, u32 smn_addr, u32 *data)
{
	pci_write_config_dword(dev, 0x60, smn_addr);
	pci_read_config_dword(dev, 0x64, data);

	return 0;
}

static void init_dma_descriptor(struct acp_dev_data *adata)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int addr;

	addr = desc->sram_pte_offset + sdev->debug_box.offset +
	       offsetof(struct scratch_reg_conf, dma_desc);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_BASE_ADDR, addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_MAX_NUM_DSCR, ACP_MAX_DESC_CNT);
}

static void configure_dma_descriptor(struct acp_dev_data *adata, unsigned short idx,
				     struct dma_descriptor *dscr_info)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int offset;

	offset = ACP_SCRATCH_REG_0 + sdev->debug_box.offset +
		 offsetof(struct scratch_reg_conf, dma_desc) +
		 idx * sizeof(struct dma_descriptor);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset, dscr_info->src_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x4, dscr_info->dest_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x8, dscr_info->tx_cnt.u32_all);
}

static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
			      unsigned int idx, unsigned int dscr_count)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int val, status;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32),
			  ACP_DMA_CH_RST | ACP_DMA_CH_GRACEFUL_RST_EN);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_RST_STS, val,
					    val & (1 << ch), ACP_REG_POLL_INTERVAL,
					    ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_ERROR_STATUS);
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_ERR_STS_0 + ch * sizeof(u32));

		dev_err(sdev->dev, "ACP_DMA_ERR_STS: 0x%x ACP_ERROR_STATUS: 0x%x\n", val, status);
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, (ACP_DMA_CNTL_0 + ch * sizeof(u32)), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_CNT_0 + ch * sizeof(u32), dscr_count);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_STRT_IDX_0 + ch * sizeof(u32), idx);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_PRIO_0 + ch * sizeof(u32), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32), ACP_DMA_CH_RUN);

	return ret;
}

static int acpbus_dma_start(struct acp_dev_data *adata, unsigned int ch,
			    unsigned int dscr_count, struct dma_descriptor *dscr_info)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u16 dscr;

	if (!dscr_info || !dscr_count)
		return -EINVAL;

	for (dscr = 0; dscr < dscr_count; dscr++)
		configure_dma_descriptor(adata, dscr, dscr_info++);

	ret = config_dma_channel(adata, ch, 0, dscr_count);
	if (ret < 0)
		dev_err(sdev->dev, "config dma ch failed:%d\n", ret);

	return ret;
}
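
/*
 * configure_and_run_dma - program DMA descriptors and start ACP DMA channel 0
 * @adata: acp device data
 * @src_addr: source address of the transfer
 * @dest_addr: destination address of the transfer
 * @dsp_data_size: number of bytes to transfer
 *
 * The transfer is split into ACP_PAGE_SIZE sized descriptors (at most
 * ACP_MAX_DESC of them); the last descriptor carries the remaining bytes.
 */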
int configure_and_run_dma(struct acp_dev_data *adata, unsigned int src_addr,
			  unsigned int dest_addr, int dsp_data_size)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int desc_count, index;
	int ret;

	for (desc_count = 0; desc_count < ACP_MAX_DESC && dsp_data_size >= 0;
	     desc_count++, dsp_data_size -= ACP_PAGE_SIZE) {
		adata->dscr_info[desc_count].src_addr = src_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].dest_addr = dest_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].tx_cnt.bits.count = ACP_PAGE_SIZE;
		if (dsp_data_size < ACP_PAGE_SIZE)
			adata->dscr_info[desc_count].tx_cnt.bits.count = dsp_data_size;
	}

	ret = acpbus_dma_start(adata, 0, desc_count, adata->dscr_info);
	if (ret)
		dev_err(sdev->dev, "acpbus_dma_start failed\n");

	/* Clear descriptor array */
	for (index = 0; index < desc_count; index++)
		memset(&adata->dscr_info[index], 0x00, sizeof(struct dma_descriptor));

	return ret;
}

/*
 * psp_mbox_ready - poll the ready bit of the PSP mbox
 * @adata: acp device data
 * @ack: set when waiting for the PSP command acknowledgement, clear when
 *	 checking that the mbox is ready to accept a new command
 */
static int psp_mbox_ready(struct acp_dev_data *adata, bool ack)
{
	struct snd_sof_dev *sdev = adata->dev;
	int timeout;
	u32 data;

	for (timeout = ACP_PSP_TIMEOUT_COUNTER; timeout > 0; timeout--) {
		msleep(20);
		smn_read(adata->smn_dev, MP0_C2PMSG_114_REG, &data);
		if (data & MBOX_READY_MASK)
			return 0;
	}

	dev_err(sdev->dev, "PSP error status %x\n", data & MBOX_STATUS_MASK);

	if (ack)
		return -ETIMEDOUT;

	return -EBUSY;
}

/*
 * psp_send_cmd - send a command to the PSP over the mbox
 * @adata: acp device data
 * @cmd: non-zero integer value for command type
 */
static int psp_send_cmd(struct acp_dev_data *adata, int cmd)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret, timeout;
	u32 data;

	if (!cmd)
		return -EINVAL;

	/* Get a non-zero Doorbell value from PSP */
	for (timeout = ACP_PSP_TIMEOUT_COUNTER; timeout > 0; timeout--) {
		msleep(MBOX_DELAY);
		smn_read(adata->smn_dev, MP0_C2PMSG_73_REG, &data);
		if (data)
			break;
	}

	if (!timeout) {
		dev_err(sdev->dev, "Failed to get Doorbell from MBOX %x\n", MP0_C2PMSG_73_REG);
		return -EINVAL;
	}

	/* Check if PSP is ready for new command */
	ret = psp_mbox_ready(adata, 0);
	if (ret)
		return ret;

	smn_write(adata->smn_dev, MP0_C2PMSG_114_REG, cmd);

	/* Ring the Doorbell for PSP */
	smn_write(adata->smn_dev, MP0_C2PMSG_73_REG, data);

	/* Check MBOX ready as PSP ack */
	ret = psp_mbox_ready(adata, 1);

	return ret;
}
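
/*
 * configure_and_run_sha_dma - stream the DSP firmware image through the SHA DMA
 * @adata: acp device data
 * @image_addr: host address of the firmware image
 * @start_addr: SHA DMA source address
 * @dest_addr: SHA DMA destination address
 * @image_length: firmware image size in bytes
 *
 * Starts the SHA DMA engine and waits until @image_length bytes have been
 * transferred. On Renoir (rev 3) the PSP is then asked to validate the image;
 * finally the firmware qualifier register is polled until the DSP firmware is
 * allowed to run.
 */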
int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
			      unsigned int start_addr, unsigned int dest_addr,
			      unsigned int image_length)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int tx_count, fw_qualifier, val;
	int ret;

	if (!image_addr) {
		dev_err(sdev->dev, "SHA DMA image address is NULL\n");
		return -EINVAL;
	}

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD);
	if (val & ACP_SHA_RUN) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RESET);
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD_STS,
						    val, val & ACP_SHA_RESET,
						    ACP_REG_POLL_INTERVAL,
						    ACP_REG_POLL_TIMEOUT_US);
		if (ret < 0) {
			dev_err(sdev->dev, "SHA DMA Failed to Reset\n");
			return ret;
		}
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
					    tx_count, tx_count == image_length,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "SHA DMA Failed to Transfer Length %x\n", tx_count);
		return ret;
	}

	/* psp_send_cmd only required for renoir platform (rev - 3) */
	if (desc->rev == 3) {
		ret = psp_send_cmd(adata, MBOX_ACP_SHA_DMA_COMMAND);
		if (ret)
			return ret;
	}

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER,
					    fw_qualifier, fw_qualifier & DSP_FW_RUN_ENABLE,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "PSP validation failed\n");
		return ret;
	}

	return 0;
}

int acp_dma_status(struct acp_dev_data *adata, unsigned char ch)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int val;
	int ret = 0;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32));
	if (val & ACP_DMA_CH_RUN) {
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_STS, val, !val,
						    ACP_REG_POLL_INTERVAL,
						    ACP_DMA_COMPLETE_TIMEOUT_US);
		if (ret < 0)
			dev_err(sdev->dev, "DMA_CHANNEL %d status timeout\n", ch);
	}

	return ret;
}

void memcpy_from_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *dst, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	int i, j;

	for (i = 0, j = 0; i < bytes; i = i + 4, j++)
		dst[j] = snd_sof_dsp_read(sdev, ACP_DSP_BAR, reg_offset + i);
}

void memcpy_to_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *src, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	int i, j;

	for (i = 0, j = 0; i < bytes; i = i + 4, j++)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, reg_offset + i, src[j]);
}

static int acp_memory_init(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);

	snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, desc->dsp_intr_base + DSP_SW_INTR_CNTL_OFFSET,
				ACP_DSP_INTR_EN_MASK, ACP_DSP_INTR_EN_MASK);
	init_dma_descriptor(adata);

	return 0;
}
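
/*
 * acp_irq_thread - threaded handler for the ACP interrupt
 * @irq: interrupt number
 * @context: snd_sof_dev pointer passed to request_threaded_irq()
 *
 * SHA DMA completion interrupts raised by the PSP are only acknowledged here.
 * For DSP interrupts the hardware semaphore is acquired before the core
 * irq_thread handler runs and released once the IPC work is done.
 */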
static irqreturn_t acp_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int val, count = ACP_HW_SEM_RETRY_COUNT;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat);
	if (val & ACP_SHA_STAT) {
		/* Clear SHA interrupt raised by PSP */
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, val);
		return IRQ_HANDLED;
	}

	while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
		/* Wait until acquired HW Semaphore lock or timeout */
		count--;
		if (!count) {
			dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
			return IRQ_NONE;
		}
	}

	sof_ops(sdev)->irq_thread(irq, sdev);
	/* Unlock or Release HW Semaphore */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);

	return IRQ_HANDLED;
}

static irqreturn_t acp_irq_handler(int irq, void *dev_id)
{
	struct snd_sof_dev *sdev = dev_id;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int base = desc->dsp_intr_base;
	unsigned int val;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
	if (val) {
		val |= ACP_DSP_TO_HOST_IRQ;
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET, val);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static int acp_power_on(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int base = desc->pgfsm_base;
	unsigned int val;
	int ret;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);

	if (val == ACP_POWERED_ON)
		return 0;

	if (val & ACP_PGFSM_STATUS_MASK)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
				  ACP_PGFSM_CNTL_POWER_ON_MASK);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
					    !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in ACP_PGFSM_STATUS read\n");

	return ret;
}

static int acp_reset(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int val;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_ASSERT_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
					    val & ACP_SOFT_RESET_DONE_MASK,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "timeout asserting reset\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_RELEASE_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout releasing reset\n");

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_clkmux_sel, ACP_CLOCK_ACLK);
	return ret;
}

static int acp_init(struct snd_sof_dev *sdev)
{
	int ret;

	/* power on */
	ret = acp_power_on(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP power on failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x01);
	/* Reset */
	return acp_reset(sdev);
}

int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	int ret;

	ret = acp_reset(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP Reset failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x00);

	return 0;
}
EXPORT_SYMBOL_NS(amd_sof_acp_suspend, SND_SOC_SOF_AMD_COMMON);
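
/*
 * amd_sof_acp_resume - power the ACP block back up on system resume
 * @sdev: SOF device context
 *
 * Counterpart of amd_sof_acp_suspend(): the ACP is powered on and reset via
 * acp_init(), the clock mux is switched back to ACLK, and acp_memory_init()
 * re-enables the DSP interrupt and reprograms the DMA descriptor base.
 */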
int amd_sof_acp_resume(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	int ret;

	ret = acp_init(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP Init failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_clkmux_sel, ACP_CLOCK_ACLK);

	return acp_memory_init(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_resume, SND_SOC_SOF_AMD_COMMON);

int amd_sof_acp_probe(struct snd_sof_dev *sdev)
{
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	struct acp_dev_data *adata;
	const struct sof_amd_acp_desc *chip;
	unsigned int addr;
	int ret;

	chip = get_chip_info(sdev->pdata);
	if (!chip) {
		dev_err(sdev->dev, "no such device supported, chip id:%x\n", pci->device);
		return -EIO;
	}

	adata = devm_kzalloc(sdev->dev, sizeof(struct acp_dev_data),
			     GFP_KERNEL);
	if (!adata)
		return -ENOMEM;

	adata->dev = sdev;
	adata->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
							PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(adata->dmic_dev)) {
		dev_err(sdev->dev, "failed to register platform for dmic codec\n");
		return PTR_ERR(adata->dmic_dev);
	}

	addr = pci_resource_start(pci, ACP_DSP_BAR);
	sdev->bar[ACP_DSP_BAR] = devm_ioremap(sdev->dev, addr, pci_resource_len(pci, ACP_DSP_BAR));
	if (!sdev->bar[ACP_DSP_BAR]) {
		dev_err(sdev->dev, "ioremap error\n");
		ret = -ENXIO;
		goto unregister_dev;
	}

	pci_set_master(pci);

	sdev->pdata->hw_pdata = adata;
	adata->smn_dev = pci_get_device(PCI_VENDOR_ID_AMD, chip->host_bridge_id, NULL);
	if (!adata->smn_dev) {
		dev_err(sdev->dev, "Failed to get host bridge device\n");
		ret = -ENODEV;
		goto unregister_dev;
	}

	sdev->ipc_irq = pci->irq;
	ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
				   IRQF_SHARED, "AudioDSP", sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to register IRQ %d\n",
			sdev->ipc_irq);
		goto free_smn_dev;
	}

	ret = acp_init(sdev);
	if (ret < 0)
		goto free_ipc_irq;

	sdev->dsp_box.offset = 0;
	sdev->dsp_box.size = BOX_SIZE_512;

	sdev->host_box.offset = sdev->dsp_box.offset + sdev->dsp_box.size;
	sdev->host_box.size = BOX_SIZE_512;

	sdev->debug_box.offset = sdev->host_box.offset + sdev->host_box.size;
	sdev->debug_box.size = BOX_SIZE_1024;

	acp_memory_init(sdev);

	acp_dsp_stream_init(sdev);

	return 0;

free_ipc_irq:
	free_irq(sdev->ipc_irq, sdev);
free_smn_dev:
	pci_dev_put(adata->smn_dev);
unregister_dev:
	platform_device_unregister(adata->dmic_dev);
	return ret;
}
EXPORT_SYMBOL_NS(amd_sof_acp_probe, SND_SOC_SOF_AMD_COMMON);

int amd_sof_acp_remove(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;

	if (adata->smn_dev)
		pci_dev_put(adata->smn_dev);

	if (sdev->ipc_irq)
		free_irq(sdev->ipc_irq, sdev);

	if (adata->dmic_dev)
		platform_device_unregister(adata->dmic_dev);

	return acp_reset(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_remove, SND_SOC_SOF_AMD_COMMON);

MODULE_DESCRIPTION("AMD ACP sof driver");
MODULE_LICENSE("Dual BSD/GPL");