// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
//	    Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>

/*
 * Hardware interface for generic AMD ACP processor
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"

/*
 * SMN registers are accessed indirectly through the host bridge: the target
 * register address is written to the index register at config offset 0x60
 * and the payload is transferred through the data register at offset 0x64.
 */
static int smn_write(struct pci_dev *dev, u32 smn_addr, u32 data)
{
	pci_write_config_dword(dev, 0x60, smn_addr);
	pci_write_config_dword(dev, 0x64, data);

	return 0;
}

static int smn_read(struct pci_dev *dev, u32 smn_addr)
{
	u32 data = 0;

	pci_write_config_dword(dev, 0x60, smn_addr);
	pci_read_config_dword(dev, 0x64, &data);

	return data;
}

static void init_dma_descriptor(struct acp_dev_data *adata)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int addr;

	addr = desc->sram_pte_offset + sdev->debug_box.offset +
	       offsetof(struct scratch_reg_conf, dma_desc);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_BASE_ADDR, addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_MAX_NUM_DSCR, ACP_MAX_DESC_CNT);
}

static void configure_dma_descriptor(struct acp_dev_data *adata, unsigned short idx,
				     struct dma_descriptor *dscr_info)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int offset;

	offset = ACP_SCRATCH_REG_0 + sdev->debug_box.offset +
		 offsetof(struct scratch_reg_conf, dma_desc) +
		 idx * sizeof(struct dma_descriptor);

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset, dscr_info->src_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x4, dscr_info->dest_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x8, dscr_info->tx_cnt.u32_all);
}

static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
			      unsigned int idx, unsigned int dscr_count)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int val, status;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32),
			  ACP_DMA_CH_RST | ACP_DMA_CH_GRACEFUL_RST_EN);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_RST_STS, val,
					    val & (1 << ch), ACP_REG_POLL_INTERVAL,
					    ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_ERROR_STATUS);
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_ERR_STS_0 + ch * sizeof(u32));

		dev_err(sdev->dev, "ACP_DMA_ERR_STS :0x%x ACP_ERROR_STATUS :0x%x\n", val, status);
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, (ACP_DMA_CNTL_0 + ch * sizeof(u32)), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_CNT_0 + ch * sizeof(u32), dscr_count);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_STRT_IDX_0 + ch * sizeof(u32), idx);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_PRIO_0 + ch * sizeof(u32), 0);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32), ACP_DMA_CH_RUN);

	return ret;
}
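
/*
 * acpbus_dma_start - program the scratch-region DMA descriptors and start
 * a transfer on the given ACP DMA channel.
 * @adata: acp device data
 * @ch: ACP DMA channel number
 * @dscr_count: number of descriptors to program
 * @dscr_info: descriptor array to be written to scratch memory
 */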
static int acpbus_dma_start(struct acp_dev_data *adata, unsigned int ch,
			    unsigned int dscr_count, struct dma_descriptor *dscr_info)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u16 dscr;

	if (!dscr_info || !dscr_count)
		return -EINVAL;

	for (dscr = 0; dscr < dscr_count; dscr++)
		configure_dma_descriptor(adata, dscr, dscr_info++);

	ret = config_dma_channel(adata, ch, 0, dscr_count);
	if (ret < 0)
		dev_err(sdev->dev, "config dma ch failed:%d\n", ret);

	return ret;
}

int configure_and_run_dma(struct acp_dev_data *adata, unsigned int src_addr,
			  unsigned int dest_addr, int dsp_data_size)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int desc_count, index;
	int ret;

	for (desc_count = 0; desc_count < ACP_MAX_DESC && dsp_data_size >= 0;
	     desc_count++, dsp_data_size -= ACP_PAGE_SIZE) {
		adata->dscr_info[desc_count].src_addr = src_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].dest_addr = dest_addr + desc_count * ACP_PAGE_SIZE;
		adata->dscr_info[desc_count].tx_cnt.bits.count = ACP_PAGE_SIZE;
		if (dsp_data_size < ACP_PAGE_SIZE)
			adata->dscr_info[desc_count].tx_cnt.bits.count = dsp_data_size;
	}

	ret = acpbus_dma_start(adata, 0, desc_count, adata->dscr_info);
	if (ret)
		dev_err(sdev->dev, "acpbus_dma_start failed\n");

	/* Clear descriptor array */
	for (index = 0; index < desc_count; index++)
		memset(&adata->dscr_info[index], 0x00, sizeof(struct dma_descriptor));

	return ret;
}

/*
 * psp_mbox_ready - function to poll ready bit of psp mbox
 * @adata: acp device data
 * @ack: bool variable to check ready bit status or psp ack
 */
static int psp_mbox_ready(struct acp_dev_data *adata, bool ack)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u32 data;

	ret = read_poll_timeout(smn_read, data, data & MBOX_READY_MASK, MBOX_DELAY_US,
				ACP_PSP_TIMEOUT_US, false, adata->smn_dev, MP0_C2PMSG_114_REG);
	if (!ret)
		return 0;

	dev_err(sdev->dev, "PSP error status %x\n", data & MBOX_STATUS_MASK);

	if (ack)
		return -ETIMEDOUT;

	return -EBUSY;
}

/*
 * psp_send_cmd - function to send psp command over mbox
 * @adata: acp device data
 * @cmd: non zero integer value for command type
 */
static int psp_send_cmd(struct acp_dev_data *adata, int cmd)
{
	struct snd_sof_dev *sdev = adata->dev;
	int ret;
	u32 data;

	if (!cmd)
		return -EINVAL;

	/* Get a non-zero Doorbell value from PSP */
	ret = read_poll_timeout(smn_read, data, data, MBOX_DELAY_US, ACP_PSP_TIMEOUT_US, false,
				adata->smn_dev, MP0_C2PMSG_73_REG);

	if (ret) {
		dev_err(sdev->dev, "Failed to get Doorbell from MBOX %x\n", MP0_C2PMSG_73_REG);
		return ret;
	}

	/* Check if PSP is ready for new command */
	ret = psp_mbox_ready(adata, 0);
	if (ret)
		return ret;

	smn_write(adata->smn_dev, MP0_C2PMSG_114_REG, cmd);

	/* Ring the Doorbell for PSP */
	smn_write(adata->smn_dev, MP0_C2PMSG_73_REG, data);

	/* Check MBOX ready as PSP ack */
	ret = psp_mbox_ready(adata, 1);

	return ret;
}
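
/*
 * configure_and_run_sha_dma - stream the firmware image through the SHA DMA
 * engine and wait for the PSP to validate it before the DSP is allowed to run.
 * @adata: acp device data
 * @image_addr: source address of the firmware image
 * @start_addr: SHA DMA start address
 * @dest_addr: SHA DMA destination address
 * @image_length: firmware image length in bytes
 */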
int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
			      unsigned int start_addr, unsigned int dest_addr,
			      unsigned int image_length)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int tx_count, fw_qualifier, val;
	int ret;

	if (!image_addr) {
		dev_err(sdev->dev, "SHA DMA image address is NULL\n");
		return -EINVAL;
	}

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD);
	if (val & ACP_SHA_RUN) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RESET);
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD_STS,
						    val, val & ACP_SHA_RESET,
						    ACP_REG_POLL_INTERVAL,
						    ACP_REG_POLL_TIMEOUT_US);
		if (ret < 0) {
			dev_err(sdev->dev, "SHA DMA Failed to Reset\n");
			return ret;
		}
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
					    tx_count, tx_count == image_length,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "SHA DMA Failed to Transfer Length %x\n", tx_count);
		return ret;
	}

	/* psp_send_cmd only required for renoir platform (rev - 3) */
	if (desc->rev == 3) {
		ret = psp_send_cmd(adata, MBOX_ACP_SHA_DMA_COMMAND);
		if (ret)
			return ret;
	}

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER,
					    fw_qualifier, fw_qualifier & DSP_FW_RUN_ENABLE,
					    ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "PSP validation failed\n");
		return ret;
	}

	return 0;
}

int acp_dma_status(struct acp_dev_data *adata, unsigned char ch)
{
	struct snd_sof_dev *sdev = adata->dev;
	unsigned int val;
	int ret = 0;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32));
	if (val & ACP_DMA_CH_RUN) {
		ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_STS, val, !val,
						    ACP_REG_POLL_INTERVAL,
						    ACP_DMA_COMPLETE_TIMEOUT_US);
		if (ret < 0)
			dev_err(sdev->dev, "DMA_CHANNEL %d status timeout\n", ch);
	}

	return ret;
}

void memcpy_from_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *dst, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	int i, j;

	for (i = 0, j = 0; i < bytes; i = i + 4, j++)
		dst[j] = snd_sof_dsp_read(sdev, ACP_DSP_BAR, reg_offset + i);
}

void memcpy_to_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *src, size_t bytes)
{
	unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
	int i, j;

	for (i = 0, j = 0; i < bytes; i = i + 4, j++)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, reg_offset + i, src[j]);
}

static int acp_memory_init(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);

	snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, desc->dsp_intr_base + DSP_SW_INTR_CNTL_OFFSET,
				ACP_DSP_INTR_EN_MASK, ACP_DSP_INTR_EN_MASK);
	init_dma_descriptor(adata);

	return 0;
}
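
/*
 * Interrupt handling is split in two: acp_irq_handler() acknowledges the DSP
 * software interrupt in hard-IRQ context and wakes acp_irq_thread(), which
 * acquires the hardware semaphore before handing IPC processing to the core
 * SOF IRQ thread. SHA DMA interrupts raised by the PSP are only cleared.
 */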
static irqreturn_t acp_irq_thread(int irq, void *context)
{
	struct snd_sof_dev *sdev = context;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int val, count = ACP_HW_SEM_RETRY_COUNT;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->ext_intr_stat);
	if (val & ACP_SHA_STAT) {
		/* Clear SHA interrupt raised by PSP */
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_stat, val);
		return IRQ_HANDLED;
	}

	while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
		/* Wait until acquired HW Semaphore lock or timeout */
		count--;
		if (!count) {
			dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
			return IRQ_NONE;
		}
	}

	sof_ops(sdev)->irq_thread(irq, sdev);
	/* Unlock or Release HW Semaphore */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);

	return IRQ_HANDLED;
}

static irqreturn_t acp_irq_handler(int irq, void *dev_id)
{
	struct snd_sof_dev *sdev = dev_id;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int base = desc->dsp_intr_base;
	unsigned int val;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
	if (val) {
		val |= ACP_DSP_TO_HOST_IRQ;
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET, val);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static int acp_power_on(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int base = desc->pgfsm_base;
	unsigned int val;
	int ret;

	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);

	if (val == ACP_POWERED_ON)
		return 0;

	if (val & ACP_PGFSM_STATUS_MASK)
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
				  ACP_PGFSM_CNTL_POWER_ON_MASK);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
					    !val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in ACP_PGFSM_STATUS read\n");

	return ret;
}

static int acp_reset(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int val;
	int ret;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_ASSERT_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
					    val & ACP_SOFT_RESET_DONE_MASK,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev, "timeout asserting reset\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_RELEASE_RESET);

	ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
					    ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
	if (ret < 0)
		dev_err(sdev->dev, "timeout in releasing reset\n");

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_clkmux_sel, ACP_CLOCK_ACLK);
	return ret;
}

static int acp_init(struct snd_sof_dev *sdev)
{
	int ret;

	/* power on */
	ret = acp_power_on(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP power on failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x01);
	/* Reset */
	return acp_reset(sdev);
}

int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	int ret;

	ret = acp_reset(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP Reset failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x00);

	return 0;
}
EXPORT_SYMBOL_NS(amd_sof_acp_suspend, SND_SOC_SOF_AMD_COMMON);
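
/*
 * Resume re-runs the full power-on/reset sequence, reselects ACLK as the ACP
 * clock source and restores the DSP interrupt enable and DMA descriptor setup
 * via acp_memory_init().
 */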
int amd_sof_acp_resume(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	int ret;

	ret = acp_init(sdev);
	if (ret) {
		dev_err(sdev->dev, "ACP Init failed\n");
		return ret;
	}

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_clkmux_sel, ACP_CLOCK_ACLK);

	ret = acp_memory_init(sdev);

	return ret;
}
EXPORT_SYMBOL_NS(amd_sof_acp_resume, SND_SOC_SOF_AMD_COMMON);

int amd_sof_acp_probe(struct snd_sof_dev *sdev)
{
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	struct acp_dev_data *adata;
	const struct sof_amd_acp_desc *chip;
	unsigned int addr;
	int ret;

	chip = get_chip_info(sdev->pdata);
	if (!chip) {
		dev_err(sdev->dev, "no such device supported, chip id:%x\n", pci->device);
		return -EIO;
	}

	adata = devm_kzalloc(sdev->dev, sizeof(struct acp_dev_data),
			     GFP_KERNEL);
	if (!adata)
		return -ENOMEM;

	adata->dev = sdev;
	adata->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
							PLATFORM_DEVID_NONE, NULL, 0);
	if (IS_ERR(adata->dmic_dev)) {
		dev_err(sdev->dev, "failed to register platform for dmic codec\n");
		return PTR_ERR(adata->dmic_dev);
	}

	addr = pci_resource_start(pci, ACP_DSP_BAR);
	sdev->bar[ACP_DSP_BAR] = devm_ioremap(sdev->dev, addr, pci_resource_len(pci, ACP_DSP_BAR));
	if (!sdev->bar[ACP_DSP_BAR]) {
		dev_err(sdev->dev, "ioremap error\n");
		ret = -ENXIO;
		goto unregister_dev;
	}

	pci_set_master(pci);

	sdev->pdata->hw_pdata = adata;
	adata->smn_dev = pci_get_device(PCI_VENDOR_ID_AMD, chip->host_bridge_id, NULL);
	if (!adata->smn_dev) {
		dev_err(sdev->dev, "Failed to get host bridge device\n");
		ret = -ENODEV;
		goto unregister_dev;
	}

	sdev->ipc_irq = pci->irq;
	ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
				   IRQF_SHARED, "AudioDSP", sdev);
	if (ret < 0) {
		dev_err(sdev->dev, "failed to register IRQ %d\n",
			sdev->ipc_irq);
		goto free_smn_dev;
	}

	ret = acp_init(sdev);
	if (ret < 0)
		goto free_ipc_irq;

	sdev->dsp_box.offset = 0;
	sdev->dsp_box.size = BOX_SIZE_512;

	sdev->host_box.offset = sdev->dsp_box.offset + sdev->dsp_box.size;
	sdev->host_box.size = BOX_SIZE_512;

	sdev->debug_box.offset = sdev->host_box.offset + sdev->host_box.size;
	sdev->debug_box.size = BOX_SIZE_1024;

	acp_memory_init(sdev);

	acp_dsp_stream_init(sdev);

	return 0;

free_ipc_irq:
	free_irq(sdev->ipc_irq, sdev);
free_smn_dev:
	pci_dev_put(adata->smn_dev);
unregister_dev:
	platform_device_unregister(adata->dmic_dev);
	return ret;
}
EXPORT_SYMBOL_NS(amd_sof_acp_probe, SND_SOC_SOF_AMD_COMMON);

int amd_sof_acp_remove(struct snd_sof_dev *sdev)
{
	struct acp_dev_data *adata = sdev->pdata->hw_pdata;

	if (adata->smn_dev)
		pci_dev_put(adata->smn_dev);

	if (sdev->ipc_irq)
		free_irq(sdev->ipc_irq, sdev);

	if (adata->dmic_dev)
		platform_device_unregister(adata->dmic_dev);

	return acp_reset(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_remove, SND_SOC_SOF_AMD_COMMON);

MODULE_DESCRIPTION("AMD ACP sof driver");
MODULE_LICENSE("Dual BSD/GPL");