// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <drm/drm_gem.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_mmu.h"

static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	/* FIXME: add a banner here */
	gmu->hung = true;

	/* Turn off the hangcheck timer while we are resetting */
	del_timer(&gpu->hangcheck_timer);

	/* Queue the GPU handler because we need to treat this as a recovery */
	kthread_queue_work(gpu->worker, &gpu->recover_work);
}

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}

bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}

/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
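
/*
 * GPU frequency (DCVS) requests take one of two paths below: on newer
 * firmware the index into the GPU frequency table is simply sent over HFI
 * with a6xx_hfi_set_freq(), while on legacy firmware the CPU programs the
 * DCVS registers directly, kicks the GMU with the GMU_OOB_DCVS_SET
 * handshake and reads the result back from REG_A6XX_GMU_DCVS_RETURN.
 */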
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index;
	unsigned long gpu_freq;
	int ret = 0;

	gpu_freq = dev_pm_opp_get_freq(opp);

	if (gpu_freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (gpu_freq == gmu->gpu_freqs[perf_index])
			break;

	gmu->current_perf_index = perf_index;
	gmu->freq = gmu->gpu_freqs[perf_index];

	trace_msm_gmu_freq_change(gmu->freq, perf_index);

	/*
	 * This can get called from devfreq while the hardware is idle. Don't
	 * bring up the power if it isn't already active
	 */
	if (pm_runtime_get_if_in_use(gmu->dev) == 0)
		return;

	if (!gmu->legacy) {
		a6xx_hfi_set_freq(gmu, perf_index);
		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
		pm_runtime_put(gmu->dev);
		return;
	}

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
			((3 & 0xf) << 28) | perf_index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
	pm_runtime_put(gmu->dev);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;
	u32 mask, reset_val;

	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
	if (val <= 0x20010004) {
		mask = 0xffffffff;
		reset_val = 0xbabeface;
	} else {
		mask = 0x1ff;
		reset_val = 0x100;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

	/* Set the log wptr index
	 * note: downstream saves the value in poweroff and restores it here
	 */
	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		(val & mask) == reset_val, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
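
/*
 * Out-of-band (OOB) requests are a simple doorbell protocol with the GMU
 * firmware: the host sets a request bit in REG_A6XX_GMU_HOST2GMU_INTR_SET,
 * polls for the matching ack bit in REG_A6XX_GMU_GMU2HOST_INTR_INFO and then
 * clears the ack. A typical (illustrative) caller brackets direct GPU access
 * with something like:
 *
 *	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_GPU_SET);
 *	if (!ret) {
 *		... touch GPU registers ...
 *		a6xx_gmu_clear_oob(gmu, GMU_OOB_GPU_SET);
 *	}
 *
 * The bit positions differ between legacy (HFI v1) and newer firmware, which
 * is what the *_new fields in the table below capture.
 */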
struct a6xx_gmu_oob_bits {
	int set, ack, set_new, ack_new, clear, clear_new;
	const char *name;
};

/* These are the interrupt / ack bits for each OOB request that are set
 * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
 */
static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
	[GMU_OOB_GPU_SET] = {
		.name = "GPU_SET",
		.set = 16,
		.ack = 24,
		.set_new = 30,
		.ack_new = 31,
		.clear = 24,
		.clear_new = 31,
	},

	[GMU_OOB_PERFCOUNTER_SET] = {
		.name = "PERFCOUNTER",
		.set = 17,
		.ack = 25,
		.set_new = 28,
		.ack_new = 30,
		.clear = 25,
		.clear_new = 29,
	},

	[GMU_OOB_BOOT_SLUMBER] = {
		.name = "BOOT_SLUMBER",
		.set = 22,
		.ack = 30,
		.clear = 30,
	},

	[GMU_OOB_DCVS_SET] = {
		.name = "GPU_DCVS",
		.set = 23,
		.ack = 31,
		.clear = 31,
	},
};

/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return -EINVAL;

	if (gmu->legacy) {
		request = a6xx_gmu_oob_bits[state].set;
		ack = a6xx_gmu_oob_bits[state].ack;
	} else {
		request = a6xx_gmu_oob_bits[state].set_new;
		ack = a6xx_gmu_oob_bits[state].ack_new;
		if (!request || !ack) {
			DRM_DEV_ERROR(gmu->dev,
				"Invalid non-legacy GMU request %s\n",
				a6xx_gmu_oob_bits[state].name);
			return -EINVAL;
		}
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			a6xx_gmu_oob_bits[state].name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int bit;

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return;

	if (gmu->legacy)
		bit = a6xx_gmu_oob_bits[state].clear;
	else
		bit = a6xx_gmu_oob_bits[state].clear_new;

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}

/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	if (!gmu->legacy)
		return 0;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return 0;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	if (!gmu->legacy)
		return;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}

/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	if (!gmu->legacy) {
		ret = a6xx_hfi_send_prep_slumber(gmu);
		goto out;
	}

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

out:
	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
	return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	return msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);
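
/*
 * One-time programming of the RSC and PDC sequencers that take the GPU
 * subsystem in and out of low power states. The sequencer uCode words and
 * TCS command values written below are opaque magic values (presumably
 * mirrored from the downstream driver); on a650 the PDC side is already set
 * up by the AOP firmware, so only the RSC half is programmed there.
 */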
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr;
	uint32_t pdc_address_offset;
	bool pdc_in_aop = false;

	if (!pdcptr)
		goto err;

	if (adreno_is_a650(adreno_gpu))
		pdc_in_aop = true;
	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
		pdc_address_offset = 0x30090;
	else
		pdc_address_offset = 0x30080;

	if (!pdc_in_aop) {
		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
		if (!seqptr)
			goto err;
	}

	/* Disable SDE clock gating */
	gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	if (adreno_is_a650(adreno_gpu)) {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
	} else {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
	}

	if (pdc_in_aop)
		goto setup_pdc;

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
setup_pdc:
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */

#define GMU_PWR_COL_HYST 0x000a1680

/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		fallthrough;
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}

struct block_header {
	u32 addr;
	u32 size;
	u32 type;
	u32 value;
	u32 data[];
};

/* this should be a general kernel helper */
static int in_range(u32 addr, u32 start, u32 size)
{
	return addr >= start && addr < start + size;
}

static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
	if (!in_range(blk->addr, bo->iova, bo->size))
		return false;

	memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
	return true;
}
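
/*
 * Newer GMU firmware images are a stream of struct block_header records:
 * each block carries a target address, a payload size and the payload
 * itself. Blocks aimed at the CM3 ITCM/DTCM are written directly through
 * the register window, and anything else must land in one of the
 * preallocated GMU buffers (icache, dcache or the dummy page) via
 * fw_block_mem(). Legacy firmware is a flat image copied straight into ITCM.
 */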
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
	const struct block_header *blk;
	u32 reg_offset;

	u32 itcm_base = 0x00000000;
	u32 dtcm_base = 0x00040000;

	if (adreno_is_a650(adreno_gpu))
		dtcm_base = 0x10004000;

	if (gmu->legacy) {
		/* Sanity check the size of the firmware that was loaded */
		if (fw_image->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
			(u32*) fw_image->data, fw_image->size);
		return 0;
	}


	for (blk = (const struct block_header *) fw_image->data;
	     (const u8*) blk < fw_image->data + fw_image->size;
	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
		if (blk->size == 0)
			continue;

		if (in_range(blk->addr, itcm_base, SZ_16K)) {
			reg_offset = (blk->addr - itcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
				blk->data, blk->size);
		} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
			reg_offset = (blk->addr - dtcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
				blk->data, blk->size);
		} else if (!fw_block_mem(&gmu->icache, blk) &&
			   !fw_block_mem(&gmu->dcache, blk) &&
			   !fw_block_mem(&gmu->dummy, blk)) {
			DRM_DEV_ERROR(gmu->dev,
				"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
				blk->addr, blk->size, blk->data[0]);
		}
	}

	return 0;
}

static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int ret;
	u32 chipid;

	if (adreno_is_a650(adreno_gpu))
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		ret = a6xx_gmu_fw_load(gmu);
		if (ret)
			return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
		gmu->log.iova | (gmu->log.size / SZ_4K - 1));

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;
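
	/*
	 * On legacy (HFI v1) firmware the CPU votes the GX rail on itself;
	 * newer firmware is assumed to handle this internally, and
	 * a6xx_sptprac_enable() below is likewise a no-op when !gmu->legacy.
	 */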
	if (gmu->legacy) {
		ret = a6xx_gmu_gfx_rail_on(gmu);
		if (ret)
			return ret;
	}

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
	u32 val;

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);
}

/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	a6xx_gmu_rpmh_off(gmu);
}

static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR_OR_NULL(gpu_opp))
		return;

	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
	a6xx_gmu_set_freq(gpu, gpu_opp);
	dev_pm_opp_put(gpu_opp);
}

static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR_OR_NULL(gpu_opp))
		return;

	dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
	dev_pm_opp_put(gpu_opp);
}
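
/*
 * Power up the GMU (and with it the GPU's CX side): enable the clocks at a
 * known rate, cast an initial bus vote, unmask the GMU interrupt, then pick
 * a cold or warm boot path before starting the firmware and the HFI queues.
 * The firmware fault interrupt is only unmasked once boot has succeeded.
 */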
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get_sync(gmu->gxpd);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	a6xx_gmu_set_initial_bw(gpu, gmu);

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	/*
	 * Warm boot path does not work on newer GPUs
	 * Presumably this is because icache/dcache regions must be restored
	 */
	if (!gmu->legacy)
		status = GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the current freq */
	a6xx_gmu_set_initial_freq(gpu, gmu);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->initialized)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

#define GBIF_CLIENT_HALT_MASK BIT(0)
#define GBIF_ARB_HALT_MASK BIT(1)

static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
{
	struct msm_gpu *gpu = &adreno_gpu->base;

	if (!a6xx_has_gbif(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
			0xf) == 0xf);
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);

		return;
	}

	/* Halt new client requests on GBIF */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
		(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);

	/* Halt all AXI requests on GBIF */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
		(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);

	/* The GBIF halt needs to be explicitly cleared */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}

/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(gmu);

		/* If the GMU isn't responding assume it is hung */
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		a6xx_bus_clear_pending_transactions(adreno_gpu);

		/* tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */

		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0%x/0%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);
}


int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct msm_gpu *gpu = &a6xx_gpu->base.base;

	if (!pm_runtime_active(gmu->dev))
		return 0;

	/*
	 * Force the GMU off if we detected a hang, otherwise try to shut it
	 * down gracefully
	 */
	if (gmu->hung)
		a6xx_gmu_force_off(gmu);
	else
		a6xx_gmu_shutdown(gmu);

	/* Remove the bus vote */
	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);

	/*
	 * Make sure the GX domain is off before turning off the GMU (CX)
	 * domain. Usually the GMU does this but only if the shutdown sequence
	 * was successful
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_put_sync(gmu->gxpd);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
	msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);

	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
	msm_gem_address_space_put(gmu->aspace);
}

static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
		size_t size, u64 iova)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct drm_device *dev = a6xx_gpu->base.base.dev;
	uint32_t flags = MSM_BO_WC;
	u64 range_start, range_end;
	int ret;

	size = PAGE_ALIGN(size);
	if (!iova) {
		/* no fixed address - use GMU's uncached range */
		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
		range_end = 0x80000000;
	} else {
		/* range for fixed address */
		range_start = iova;
		range_end = iova + size;
		/* use IOMMU_PRIV for icache/dcache */
		flags |= MSM_BO_MAP_PRIV;
	}

	bo->obj = msm_gem_new(dev, size, flags);
	if (IS_ERR(bo->obj))
		return PTR_ERR(bo->obj);

	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
		range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
	if (ret) {
		drm_gem_object_put(bo->obj);
		return ret;
	}

	bo->virt = msm_gem_get_vaddr(bo->obj);
	bo->size = size;

	return 0;
}
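
/*
 * The GMU gets its own IOMMU address space ("gmu", 0x0 - 0x80000000). A dummy
 * page sits at 0x60000000, dynamic allocations go in the uncached window
 * above it, and the icache/dcache images are pinned at fixed, privately
 * mapped iovas - see a6xx_gmu_memory_alloc() above and a6xx_gmu_init() below.
 */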
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	struct iommu_domain *domain;
	struct msm_mmu *mmu;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENODEV;

	mmu = msm_iommu_new(gmu->dev, domain);
	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
	if (IS_ERR(gmu->aspace)) {
		iommu_domain_free(domain);
		return PTR_ERR(gmu->aspace);
	}

	return 0;
}

/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
		unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int val;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	val = dev_pm_opp_get_level(opp);

	dev_pm_opp_put(opp);

	return val;
}

static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);
	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
				level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, " %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */

		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}
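
/*
 * The resulting vote word packs the primary rail's level value in bits 31:16,
 * the mx.lvl (secondary) index in bits 15:8 and the primary index in bits 7:0,
 * matching what a6xx_gmu_gfx_rail_on() later splits back out for the GX and MX
 * vote index registers.
 */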
/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */

static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */

	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}

static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = devm_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = ioremap(res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
			name, ret);
		return ret;
	}

	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = to_platform_device(gmu->dev);

	if (!gmu->initialized)
		return;

	pm_runtime_force_suspend(gmu->dev);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	gmu->mmio = NULL;
	gmu->rscc = NULL;

	a6xx_gmu_memory_free(gmu);

	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	gmu->initialized = false;
}

int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;

	/* Allocate memory for the GMU dummy page */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000);
	if (ret)
		goto err_memory;

	if (adreno_is_a650(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_16M - SZ_16K, 0x04000);
		if (ret)
			goto err_memory;
	} else if (adreno_is_a640(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_256K - SZ_16K, 0x04000);
		if (ret)
			goto err_memory;

		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
			SZ_256K - SZ_16K, 0x44000);
		if (ret)
			goto err_memory;
	} else {
		/* HFI v1, has sptprac */
		gmu->legacy = true;

		/* Allocate memory for the GMU debug region */
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
		if (ret)
			goto err_memory;
	}

	/* Allocate memory for the HFI queues */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
	if (ret)
		goto err_memory;

	/* Allocate memory for the GMU log region */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
	if (ret)
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio)) {
		ret = PTR_ERR(gmu->mmio);
		goto err_memory;
	}

	if (adreno_is_a650(adreno_gpu)) {
		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
		if (IS_ERR(gmu->rscc))
			goto err_mmio;
	} else {
		gmu->rscc = gmu->mmio + 0x23000;
	}

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err_mmio;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	gmu->initialized = true;

	return 0;

err_mmio:
	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	ret = -ENODEV;

err_memory:
	a6xx_gmu_memory_free(gmu);
err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}