// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <drm/drm_gem.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_mmu.h"

static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	/* FIXME: add a banner here */
	gmu->hung = true;

	/* Turn off the hangcheck timer while we are resetting */
	del_timer(&gpu->hangcheck_timer);

	/* Queue the GPU handler because we need to treat this as a recovery */
	kthread_queue_work(gpu->worker, &gpu->recover_work);
}

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}

/* Check to see if the SPTPRAC is still powered */
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}

/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
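
/*
 * Set the GPU clock rate. There are two paths, depending on the firmware
 * interface: newer (non-legacy) firmware takes an HFI DCVS message with the
 * perf index, while legacy firmware is driven through the DCVS registers
 * plus a set/clear of the DCVS OOB to make it act on them.
 */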
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
		       bool suspended)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index;
	unsigned long gpu_freq;
	int ret = 0;

	gpu_freq = dev_pm_opp_get_freq(opp);

	if (gpu_freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (gpu_freq == gmu->gpu_freqs[perf_index])
			break;

	gmu->current_perf_index = perf_index;
	gmu->freq = gmu->gpu_freqs[perf_index];

	trace_msm_gmu_freq_change(gmu->freq, perf_index);

	/*
	 * This can get called from devfreq while the hardware is idle. Don't
	 * bring up the power if it isn't already active. All we're doing here
	 * is updating the frequency so that when we come back online we're at
	 * the right rate.
	 */
	if (suspended)
		return;

	if (!gmu->legacy) {
		a6xx_hfi_set_freq(gmu, perf_index);
		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
		return;
	}

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
			((3 & 0xf) << 28) | perf_index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;
	u32 mask, reset_val;

	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
	if (val <= 0x20010004) {
		mask = 0xffffffff;
		reset_val = 0xbabeface;
	} else {
		mask = 0x1ff;
		reset_val = 0x100;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

	/* Set the log wptr index
	 * note: downstream saves the value in poweroff and restores it here
	 */
	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		(val & mask) == reset_val, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
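
/*
 * Out of band (OOB) requests are a register-level handshake with the GMU:
 * the host sets a request bit in HOST2GMU_INTR_SET, polls for the matching
 * ack bit in GMU2HOST_INTR_INFO, then clears the ack. Which bits are used
 * depends on the request and on whether the firmware speaks the legacy or
 * the new ("_new") layout - see the table below.
 */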
struct a6xx_gmu_oob_bits {
	int set, ack, set_new, ack_new, clear, clear_new;
	const char *name;
};

/* These are the interrupt / ack bits for each OOB request that are set
 * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
 */
static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
	[GMU_OOB_GPU_SET] = {
		.name = "GPU_SET",
		.set = 16,
		.ack = 24,
		.set_new = 30,
		.ack_new = 31,
		.clear = 24,
		.clear_new = 31,
	},

	[GMU_OOB_PERFCOUNTER_SET] = {
		.name = "PERFCOUNTER",
		.set = 17,
		.ack = 25,
		.set_new = 28,
		.ack_new = 30,
		.clear = 25,
		.clear_new = 29,
	},

	[GMU_OOB_BOOT_SLUMBER] = {
		.name = "BOOT_SLUMBER",
		.set = 22,
		.ack = 30,
		.clear = 30,
	},

	[GMU_OOB_DCVS_SET] = {
		.name = "GPU_DCVS",
		.set = 23,
		.ack = 31,
		.clear = 31,
	},
};

/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;

	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return -EINVAL;

	if (gmu->legacy) {
		request = a6xx_gmu_oob_bits[state].set;
		ack = a6xx_gmu_oob_bits[state].ack;
	} else {
		request = a6xx_gmu_oob_bits[state].set_new;
		ack = a6xx_gmu_oob_bits[state].ack_new;
		if (!request || !ack) {
			DRM_DEV_ERROR(gmu->dev,
				      "Invalid non-legacy GMU request %s\n",
				      a6xx_gmu_oob_bits[state].name);
			return -EINVAL;
		}
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			a6xx_gmu_oob_bits[state].name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int bit;

	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return;

	if (gmu->legacy)
		bit = a6xx_gmu_oob_bits[state].clear;
	else
		bit = a6xx_gmu_oob_bits[state].clear_new;

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}

/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	if (!gmu->legacy)
		return 0;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	if (!gmu->legacy)
		return;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
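
/*
 * The GX arc votes built in a6xx_gmu_rpmh_votes_init() pack the GX vote
 * index in bits 7:0 and the MX vote index in bits 15:8, which is why the
 * vote below is split across the two *_VOTE_IDX registers.
 */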
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	if (!gmu->legacy) {
		ret = a6xx_hfi_send_prep_slumber(gmu);
		goto out;
	}

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

out:
	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
	return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	/* PDC registers are indexed by dword, so convert to a byte offset */
	msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);
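
/*
 * Program the RSC and PDC sequencers that take the GPU power domain in and
 * out of slumber. The sequences below are opaque sequencer microcode,
 * presumably inherited from the downstream driver; on targets where the PDC
 * is owned by the AOP firmware (a650/a660 family) only the RSC side needs
 * to be loaded here.
 */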
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = NULL;
	uint32_t pdc_address_offset;
	bool pdc_in_aop = false;

	if (IS_ERR(pdcptr))
		goto err;

	if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
		pdc_in_aop = true;
	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
		pdc_address_offset = 0x30090;
	else
		pdc_address_offset = 0x30080;

	if (!pdc_in_aop) {
		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
		if (IS_ERR(seqptr))
			goto err;
	}

	/* Disable SDE clock gating */
	gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	if (adreno_is_a650_family(adreno_gpu)) {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
	} else {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
	}

	if (pdc_in_aop)
		goto setup_pdc;

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	if (adreno_is_a618(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
setup_pdc:
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}
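
/*
 * The GMU idle levels used by this driver are, from shallowest to deepest:
 * GMU_IDLE_STATE_ACTIVE (no power savings), GMU_IDLE_STATE_SPTP (SPTP
 * power collapse) and GMU_IDLE_STATE_IFPC (inter-frame power collapse).
 * The switch in a6xx_gmu_power_config() falls through so that IFPC also
 * programs the SPTP hysteresis.
 */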
/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */
#define GMU_PWR_COL_HYST 0x000a1680

/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		fallthrough;
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}

struct block_header {
	u32 addr;
	u32 size;
	u32 type;
	u32 value;
	u32 data[];
};

/* this should be a general kernel helper */
static int in_range(u32 addr, u32 start, u32 size)
{
	return addr >= start && addr < start + size;
}
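
/*
 * Non-legacy GMU firmware images are a sequence of block_header blocks, each
 * effectively a load command: blocks aimed at the ITCM/DTCM ranges are
 * written into the GMU's tightly coupled memories through registers, and
 * anything else is matched by address against one of the preallocated
 * backing BOs (icache/dcache/dummy) in fw_block_mem() below.
 */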
static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
	if (!in_range(blk->addr, bo->iova, bo->size))
		return false;

	memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
	return true;
}

static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
	const struct block_header *blk;
	u32 reg_offset;

	u32 itcm_base = 0x00000000;
	u32 dtcm_base = 0x00040000;

	if (adreno_is_a650_family(adreno_gpu))
		dtcm_base = 0x10004000;

	if (gmu->legacy) {
		/* Sanity check the size of the firmware that was loaded */
		if (fw_image->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
			       (u32*) fw_image->data, fw_image->size);
		return 0;
	}

	for (blk = (const struct block_header *) fw_image->data;
	     (const u8*) blk < fw_image->data + fw_image->size;
	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
		if (blk->size == 0)
			continue;

		if (in_range(blk->addr, itcm_base, SZ_16K)) {
			reg_offset = (blk->addr - itcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
				blk->data, blk->size);
		} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
			reg_offset = (blk->addr - dtcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
				blk->data, blk->size);
		} else if (!fw_block_mem(&gmu->icache, blk) &&
			   !fw_block_mem(&gmu->dcache, blk) &&
			   !fw_block_mem(&gmu->dummy, blk)) {
			DRM_DEV_ERROR(gmu->dev,
				"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
				blk->addr, blk->size, blk->data[0]);
		}
	}

	return 0;
}
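
/*
 * a6xx_gmu_fw_start() hands the firmware a packed chip id, assembled below
 * as follows (the low 8 bits are left unused):
 *
 *   [31:24] core   [23:16] major   [15:12] minor   [11:8] patchid
 */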
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int ret;
	u32 chipid;

	if (adreno_is_a650_family(adreno_gpu)) {
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
	}

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		ret = a6xx_gmu_fw_load(gmu);
		if (ret)
			return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
		  gmu->log.iova | (gmu->log.size / SZ_4K - 1));

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	if (gmu->legacy) {
		ret = a6xx_gmu_gfx_rail_on(gmu);
		if (ret)
			return ret;
	}

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
	u32 val;

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);
}

/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	a6xx_gmu_rpmh_off(gmu);
}

static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR(gpu_opp))
		return;

	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
	a6xx_gmu_set_freq(gpu, gpu_opp, false);
	dev_pm_opp_put(gpu_opp);
}

static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR(gpu_opp))
		return;

	dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
	dev_pm_opp_put(gpu_opp);
}
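
/*
 * Bring the GMU out of suspend: power the rails and clocks, restore the
 * initial bus vote, then boot the firmware (cold boot unless the legacy
 * warm boot path is usable) and restart HFI, before handing DCVS control
 * back to the GMU at the last known frequency.
 */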
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get_sync(gmu->gxpd);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	clk_set_rate(gmu->hub_clk, 150000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	a6xx_gmu_set_initial_bw(gpu, gmu);

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	/*
	 * Warm boot path does not work on newer GPUs
	 * Presumably this is because icache/dcache regions must be restored
	 */
	if (!gmu->legacy)
		status = GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the current freq */
	a6xx_gmu_set_initial_freq(gpu, gmu);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->initialized)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

#define GBIF_CLIENT_HALT_MASK	BIT(0)
#define GBIF_ARB_HALT_MASK	BIT(1)

static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
{
	struct msm_gpu *gpu = &adreno_gpu->base;

	if (!a6xx_has_gbif(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
								0xf) == 0xf);
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);

		return;
	}

	/* Halt new client requests on GBIF */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);

	/* Halt all AXI requests on GBIF */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);

	/* The GBIF halt needs to be explicitly cleared */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}
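
/*
 * REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE reads back the GMU's current
 * power state; this driver treats 0xf as the slumber state (see also
 * a6xx_gmu_notify_slumber(), which checks for the same value).
 */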
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(gmu);

		/* If the GMU isn't responding assume it is hung */
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		a6xx_bus_clear_pending_transactions(adreno_gpu);

		/* tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);
}
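
/* Shut down the GMU: force it off after a hang, otherwise gracefully */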
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct msm_gpu *gpu = &a6xx_gpu->base.base;

	if (!pm_runtime_active(gmu->dev))
		return 0;

	/*
	 * Force the GMU off if we detected a hang, otherwise try to shut it
	 * down gracefully
	 */
	if (gmu->hung)
		a6xx_gmu_force_off(gmu);
	else
		a6xx_gmu_shutdown(gmu);

	/* Remove the bus vote */
	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);

	/*
	 * Make sure the GX domain is off before turning off the GMU (CX)
	 * domain. Usually the GMU does this but only if the shutdown sequence
	 * was successful
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_put_sync(gmu->gxpd);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->log.obj, gmu->aspace);

	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
	msm_gem_address_space_put(gmu->aspace);
}

static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
		size_t size, u64 iova, const char *name)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct drm_device *dev = a6xx_gpu->base.base.dev;
	uint32_t flags = MSM_BO_WC;
	u64 range_start, range_end;
	int ret;

	size = PAGE_ALIGN(size);
	if (!iova) {
		/* no fixed address - use GMU's uncached range */
		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
		range_end = 0x80000000;
	} else {
		/* range for fixed address */
		range_start = iova;
		range_end = iova + size;
		/* use IOMMU_PRIV for icache/dcache */
		flags |= MSM_BO_MAP_PRIV;
	}

	bo->obj = msm_gem_new(dev, size, flags);
	if (IS_ERR(bo->obj))
		return PTR_ERR(bo->obj);

	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
					     range_start, range_end);
	if (ret) {
		drm_gem_object_put(bo->obj);
		return ret;
	}

	bo->virt = msm_gem_get_vaddr(bo->obj);
	bo->size = size;

	msm_gem_object_set_name(bo->obj, name);

	return 0;
}

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	struct iommu_domain *domain;
	struct msm_mmu *mmu;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENODEV;

	mmu = msm_iommu_new(gmu->dev, domain);
	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
	if (IS_ERR(gmu->aspace)) {
		iommu_domain_free(domain);
		return PTR_ERR(gmu->aspace);
	}

	return 0;
}

/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
					   unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int val;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	val = dev_pm_opp_get_level(opp);

	dev_pm_opp_put(opp);

	return val;
}
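
/*
 * Build the RPMh votes for the frequency table of the rail named by 'id'
 * (the primary arc, e.g. "gfx.lvl"), with "mx.lvl" as the dependent
 * secondary arc. Each vote is packed as constructed at the bottom of the
 * loop below:
 *
 *   [31:16] primary arc level   [15:8] secondary index   [7:0] primary index
 */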
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);
	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
				level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}

/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}
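
/*
 * Both the GMU device node and the GPU carry their own OPP tables: the
 * GMU's covers its own core clock, while the GPU's provides the DCVS
 * frequency list (and the arc levels used for the votes, presumably via
 * the standard opp-level property consumed by dev_pm_opp_get_level()).
 */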
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = devm_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "hub");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = ioremap(res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
			      name, ret);
		return ret;
	}

	/* Keep the interrupt disabled until resume enables it */
	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = to_platform_device(gmu->dev);

	if (!gmu->initialized)
		return;

	pm_runtime_force_suspend(gmu->dev);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	gmu->mmio = NULL;
	gmu->rscc = NULL;

	a6xx_gmu_memory_free(gmu);

	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	gmu->initialized = false;
}
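
/*
 * Probe-time setup. The fixed-iova allocations below lay out the GMU
 * address space roughly as: the dummy page at 0x60000000 (with the a660
 * "debug" prealloc region at 0x60400000), icache/dcache at their firmware
 * link addresses, and dynamically placed buffers (hfi, log, legacy debug)
 * in the uncached range above 0x60000000.
 */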
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	mutex_init(&gmu->lock);

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;

	/* A660 now requires handling "prealloc requests" in GMU firmware
	 * For now just hardcode allocations based on the known firmware.
	 * note: there is no indication that these correspond to "dummy" or
	 * "debug" regions, but this "guess" allows reusing these BOs which
	 * are otherwise unused by a660.
	 */
	gmu->dummy.size = SZ_4K;
	if (adreno_is_a660_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
					    0x60400000, "debug");
		if (ret)
			goto err_memory;

		gmu->dummy.size = SZ_8K;
	}

	/* Allocate memory for the GMU dummy page */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
				    0x60000000, "dummy");
	if (ret)
		goto err_memory;

	/* Note that a650 family also includes a660 family: */
	if (adreno_is_a650_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_16M - SZ_16K, 0x04000, "icache");
		if (ret)
			goto err_memory;
	} else if (adreno_is_a640_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_256K - SZ_16K, 0x04000, "icache");
		if (ret)
			goto err_memory;

		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
			SZ_256K - SZ_16K, 0x44000, "dcache");
		if (ret)
			goto err_memory;
	} else {
		/* HFI v1, has sptprac */
		gmu->legacy = true;

		/* Allocate memory for the GMU debug region */
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
		if (ret)
			goto err_memory;
	}

	/* Allocate memory for the HFI queues */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
	if (ret)
		goto err_memory;

	/* Allocate memory for the GMU log region */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
	if (ret)
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio)) {
		ret = PTR_ERR(gmu->mmio);
		goto err_memory;
	}

	if (adreno_is_a650_family(adreno_gpu)) {
		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
		if (IS_ERR(gmu->rscc))
			goto err_mmio;
	} else {
		gmu->rscc = gmu->mmio + 0x23000;
	}

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err_mmio;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	gmu->initialized = true;

	return 0;

err_mmio:
	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	ret = -ENODEV;

err_memory:
	a6xx_gmu_memory_free(gmu);
err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}