/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/irqdomain.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>

#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_acp.h"

#include "acp_gfx_if.h"

#define ACP_TILE_ON_MASK	0x03
#define ACP_TILE_OFF_MASK	0x02
#define ACP_TILE_ON_RETAIN_REG_MASK	0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK	0x20

#define ACP_TILE_P1_MASK	0x3e
#define ACP_TILE_P2_MASK	0x3d
#define ACP_TILE_DSP0_MASK	0x3b
#define ACP_TILE_DSP1_MASK	0x37

#define ACP_TILE_DSP2_MASK	0x2f

#define ACP_DMA_REGS_END	0x146c0
#define ACP_I2S_PLAY_REGS_START	0x14840
#define ACP_I2S_PLAY_REGS_END	0x148b4
#define ACP_I2S_CAP_REGS_START	0x148b8
#define ACP_I2S_CAP_REGS_END	0x1496c

#define ACP_I2S_COMP1_CAP_REG_OFFSET	0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET	0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET	0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET	0x68

#define mmACP_PGFSM_RETAIN_REG	0x51c9
#define mmACP_PGFSM_CONFIG_REG	0x51ca
#define mmACP_PGFSM_READ_REG_0	0x51cc

#define mmACP_MEM_SHUT_DOWN_REQ_LO	0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI	0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO	0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI	0x51fb

#define mmACP_CONTROL	0x5131
#define mmACP_STATUS	0x5133
#define mmACP_SOFT_RESET	0x5134
#define ACP_CONTROL__ClkEn_MASK	0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK	0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK	0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE	0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF

#define ACP_TIMEOUT_LOOP	0x000000FF
#define ACP_DEVS	3
#define ACP_SRC_ID	162

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
	ACP_TILE_DSP0,
	ACP_TILE_DSP1,
	ACP_TILE_DSP2,
};

static int acp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->acp.parent = adev->dev;

	adev->acp.cgs_device =
		amdgpu_cgs_create_device(adev);
	if (!adev->acp.cgs_device)
		return -EINVAL;

	return 0;
}

static int acp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->acp.cgs_device)
		amdgpu_cgs_destroy_device(adev->acp.cgs_device);

	return 0;
}
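
/*
 * Tile power gating goes through the ACP PGFSM registers: the retain
 * register selects which tiles keep their state, the config register takes
 * a per-tile command (0x500 + tile powers a tile down, 0x600 + tile powers
 * it up, as used in the helpers below), and READ_REG_0 + tile reports the
 * tile's power state. The command encoding is not documented here; it is
 * inferred from the sequences in this file.
 */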

/* power off a tile/block within ACP */
static int acp_suspend_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile : %d to suspend\n", tile);
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val &= ACP_TILE_ON_MASK;

	if (val == 0x0) {
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		val = val | (1 << tile);
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x500 + tile);

		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
							+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == ACP_TILE_OFF_MASK)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}

		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);

		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}

/* power on a tile/block within ACP */
static int acp_resume_tile(void *cgs_dev, int tile)
{
	u32 val = 0;
	u32 count = 0;

	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
		pr_err("Invalid ACP tile to resume\n");
		return -1;
	}

	val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
	val = val & ACP_TILE_ON_MASK;

	if (val != 0x0) {
		cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
					0x600 + tile);
		count = ACP_TIMEOUT_LOOP;
		while (true) {
			val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
							+ tile);
			val = val & ACP_TILE_ON_MASK;
			if (val == 0x0)
				break;
			if (--count == 0) {
				pr_err("Timeout reading ACP PGFSM status\n");
				return -ETIMEDOUT;
			}
			udelay(100);
		}
		val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
		if (tile == ACP_TILE_P1)
			val = val & (ACP_TILE_P1_MASK);
		else if (tile == ACP_TILE_P2)
			val = val & (ACP_TILE_P2_MASK);

		cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
	}
	return 0;
}
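
/*
 * acp_pm_domain wraps a generic PM domain around the tile helpers above:
 * power_off suspends all five tiles (P1, P2 and the three DSP tiles) in
 * reverse order, while power_on resumes only P1 and P2 and keeps the DSP
 * tiles suspended, since the DSPs are not used by this driver.
 */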

struct acp_pm_domain {
	void *cgs_dev;
	struct generic_pm_domain gpd;
};

static int acp_poweroff(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		/* Do not return abruptly if any of the power tiles fails to
		 * suspend. Log it and continue powering off the other tiles.
		 */
		for (i = 4; i >= 0; i--) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret)
				pr_err("ACP tile %d suspend failed\n", i);
		}
	}
	return 0;
}

static int acp_poweron(struct generic_pm_domain *genpd)
{
	int i, ret;
	struct acp_pm_domain *apd;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		for (i = 0; i < 2; i++) {
			ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
			if (ret) {
				pr_err("ACP tile %d resume failed\n", i);
				break;
			}
		}

		/* Disable DSPs which are not going to be used */
		for (i = 0; i < 3; i++) {
			ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
			/* Continue suspending the other DSPs, even if one fails */
			if (ret)
				pr_err("ACP DSP %d suspend failed\n", i);
		}
	}
	return 0;
}

static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
	char auto_dev_name[25];
	struct device *dev;

	snprintf(auto_dev_name, sizeof(auto_dev_name),
		 "%s.%d.auto", device_name, r);
	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

	return dev;
}

/**
 * acp_hw_init - start and test ACP block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_init(void *handle)
{
	int r, i;
	uint64_t acp_base;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct i2s_platform_data *i2s_pdata;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	const struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);

	if (!ip_block)
		return -EINVAL;

	r = amd_acp_hw_init(adev->acp.cgs_device,
			ip_block->version->major, ip_block->version->minor);
	/* -ENODEV means the board uses AZ rather than ACP */
	if (r == -ENODEV)
		return 0;
	else if (r)
		return r;

	if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
		return -EINVAL;

	acp_base = adev->rmmio_base;

	if (adev->asic_type != CHIP_STONEY) {
		adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
		if (adev->acp.acp_genpd == NULL)
			return -ENOMEM;

		adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
		adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
		adev->acp.acp_genpd->gpd.power_on = acp_poweron;

		adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;

		pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
	}

	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
				     GFP_KERNEL);

	if (adev->acp.acp_cell == NULL)
		return -ENOMEM;

	adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);

	if (adev->acp.acp_res == NULL) {
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}

	i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
	if (i2s_pdata == NULL) {
		kfree(adev->acp.acp_res);
		kfree(adev->acp.acp_cell);
		return -ENOMEM;
	}
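
	/*
	 * i2s_pdata[0] describes the playback I2S controller and
	 * i2s_pdata[1] the capture controller; Stoney additionally gets the
	 * DesignWare 16-bit index override quirk. The quirk flags themselves
	 * are interpreted by the designware-i2s driver.
	 */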

	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
	}
	i2s_pdata[0].cap = DWC_I2S_PLAY;
	i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
	i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
	switch (adev->asic_type) {
	case CHIP_STONEY:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1 |
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		break;
	default:
		i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
			DW_I2S_QUIRK_COMP_PARAM1;
	}

	i2s_pdata[1].cap = DWC_I2S_RECORD;
	i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
	i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
	i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

	adev->acp.acp_res[0].name = "acp2x_dma";
	adev->acp.acp_res[0].flags = IORESOURCE_MEM;
	adev->acp.acp_res[0].start = acp_base;
	adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

	adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
	adev->acp.acp_res[1].flags = IORESOURCE_MEM;
	adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
	adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

	adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
	adev->acp.acp_res[2].flags = IORESOURCE_MEM;
	adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
	adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

	adev->acp.acp_res[3].name = "acp2x_dma_irq";
	adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
	adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, ACP_SRC_ID);
	adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;

	adev->acp.acp_cell[0].name = "acp_audio_dma";
	adev->acp.acp_cell[0].num_resources = 4;
	adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
	adev->acp.acp_cell[0].platform_data = &adev->asic_type;
	adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

	adev->acp.acp_cell[1].name = "designware-i2s";
	adev->acp.acp_cell[1].num_resources = 1;
	adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
	adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
	adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

	adev->acp.acp_cell[2].name = "designware-i2s";
	adev->acp.acp_cell[2].num_resources = 1;
	adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
	adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
	adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
				    ACP_DEVS);
	if (r)
		return r;

	if (adev->asic_type != CHIP_STONEY) {
		for (i = 0; i < ACP_DEVS; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
			if (r) {
				dev_err(dev, "Failed to add dev to genpd\n");
				return r;
			}
		}
	}
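
	/*
	 * Bring the block up as implemented below: assert the audio soft
	 * reset, wait for the done bit, enable the ACP clock and wait for
	 * the status register to reflect it, then de-assert the reset.
	 * Each wait polls roughly every 100us and gives up after 0xff
	 * iterations.
	 */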

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Enable clock to ACP and wait until the clock is enabled */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val = val | ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to enable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Deassert the SOFT RESET flags */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	return 0;
}

/**
 * acp_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 */
static int acp_hw_fini(void *handle)
{
	int i, ret;
	u32 val = 0;
	u32 count = 0;
	struct device *dev;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* return early if no ACP */
	if (!adev->acp.acp_cell)
		return 0;

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);

	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);

	count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}
	/* Disable ACP clock */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
	val &= ~ACP_CONTROL__ClkEn_MASK;
	cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);

	count = ACP_CLOCK_EN_TIME_OUT_VALUE;

	while (true) {
		val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
		if (val & (u32) 0x1)
			break;
		if (--count == 0) {
			dev_err(&adev->pdev->dev, "Failed to disable ACP clock\n");
			return -ETIMEDOUT;
		}
		udelay(100);
	}

	if (adev->acp.acp_genpd) {
		for (i = 0; i < ACP_DEVS; i++) {
			dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
			ret = pm_genpd_remove_device(dev);
			/* If removal fails, don't give up and try the rest */
			if (ret)
				dev_err(dev, "remove dev from genpd failed\n");
		}
		kfree(adev->acp.acp_genpd);
	}

	mfd_remove_devices(adev->acp.parent);
	kfree(adev->acp.acp_res);
	kfree(adev->acp.acp_cell);

	return 0;
}
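
/*
 * The remaining IP callbacks are no-op stubs: ACP has no dedicated
 * suspend/resume, idle or reset handling at this level, so they only
 * satisfy the amd_ip_funcs interface wired up below.
 */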

static int acp_suspend(void *handle)
{
	return 0;
}

static int acp_resume(void *handle)
{
	return 0;
}

static int acp_early_init(void *handle)
{
	return 0;
}

static bool acp_is_idle(void *handle)
{
	return true;
}

static int acp_wait_for_idle(void *handle)
{
	return 0;
}

static int acp_soft_reset(void *handle)
{
	return 0;
}

static int acp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int acp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs acp_ip_funcs = {
	.name = "acp_ip",
	.early_init = acp_early_init,
	.late_init = NULL,
	.sw_init = acp_sw_init,
	.sw_fini = acp_sw_fini,
	.hw_init = acp_hw_init,
	.hw_fini = acp_hw_fini,
	.suspend = acp_suspend,
	.resume = acp_resume,
	.is_idle = acp_is_idle,
	.wait_for_idle = acp_wait_for_idle,
	.soft_reset = acp_soft_reset,
	.set_clockgating_state = acp_set_clockgating_state,
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
	.rev = 0,
	.funcs = &acp_ip_funcs,
};