1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ 3 4 #include <linux/clk.h> 5 #include <linux/interconnect.h> 6 #include <linux/pm_domain.h> 7 #include <linux/pm_opp.h> 8 #include <soc/qcom/cmd-db.h> 9 #include <drm/drm_gem.h> 10 11 #include "a6xx_gpu.h" 12 #include "a6xx_gmu.xml.h" 13 #include "msm_gem.h" 14 #include "msm_mmu.h" 15 16 static void a6xx_gmu_fault(struct a6xx_gmu *gmu) 17 { 18 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 19 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 20 struct msm_gpu *gpu = &adreno_gpu->base; 21 struct drm_device *dev = gpu->dev; 22 struct msm_drm_private *priv = dev->dev_private; 23 24 /* FIXME: add a banner here */ 25 gmu->hung = true; 26 27 /* Turn off the hangcheck timer while we are resetting */ 28 del_timer(&gpu->hangcheck_timer); 29 30 /* Queue the GPU handler because we need to treat this as a recovery */ 31 queue_work(priv->wq, &gpu->recover_work); 32 } 33 34 static irqreturn_t a6xx_gmu_irq(int irq, void *data) 35 { 36 struct a6xx_gmu *gmu = data; 37 u32 status; 38 39 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS); 40 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); 41 42 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) { 43 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); 44 45 a6xx_gmu_fault(gmu); 46 } 47 48 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR) 49 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); 50 51 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR) 52 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", 53 gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS)); 54 55 return IRQ_HANDLED; 56 } 57 58 static irqreturn_t a6xx_hfi_irq(int irq, void *data) 59 { 60 struct a6xx_gmu *gmu = data; 61 u32 status; 62 63 status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO); 64 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status); 65 66 if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) { 67 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); 68 69 a6xx_gmu_fault(gmu); 70 } 71 72 return IRQ_HANDLED; 73 } 74 75 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) 76 { 77 u32 val; 78 79 /* This can be called from gpu state code so make sure GMU is valid */ 80 if (!gmu->initialized) 81 return false; 82 83 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); 84 85 return !(val & 86 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF | 87 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF)); 88 } 89 90 /* Check to see if the GX rail is still powered */ 91 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) 92 { 93 u32 val; 94 95 /* This can be called from gpu state code so make sure GMU is valid */ 96 if (!gmu->initialized) 97 return false; 98 99 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); 100 101 return !(val & 102 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF | 103 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); 104 } 105 106 static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) 107 { 108 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 109 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 110 struct msm_gpu *gpu = &adreno_gpu->base; 111 int ret; 112 113 /* 114 * This can get called from devfreq while the hardware is idle. Don't 115 * bring up the power if it isn't already active 116 */ 117 if (pm_runtime_get_if_in_use(gmu->dev) == 0) 118 return; 119 120 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); 121 122 gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, 123 ((3 & 0xf) << 28) | index); 124 125 /* 126 * Send an invalid index as a vote for the bus bandwidth and let the 127 * firmware decide on the right vote 128 */ 129 gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff); 130 131 /* Set and clear the OOB for DCVS to trigger the GMU */ 132 a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET); 133 a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); 134 135 ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN); 136 if (ret) 137 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); 138 139 /* 140 * Eventually we will want to scale the path vote with the frequency but 141 * for now leave it at max so that the performance is nominal. 142 */ 143 icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); 144 pm_runtime_put(gmu->dev); 145 } 146 147 void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) 148 { 149 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 150 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 151 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 152 u32 perf_index = 0; 153 154 if (freq == gmu->freq) 155 return; 156 157 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) 158 if (freq == gmu->gpu_freqs[perf_index]) 159 break; 160 161 gmu->current_perf_index = perf_index; 162 163 if (gmu->legacy) 164 __a6xx_gmu_set_freq(gmu, perf_index); 165 else 166 a6xx_hfi_set_freq(gmu, perf_index); 167 168 gmu->freq = gmu->gpu_freqs[perf_index]; 169 } 170 171 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu) 172 { 173 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 174 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 175 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 176 177 return gmu->freq; 178 } 179 180 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) 181 { 182 u32 val; 183 int local = gmu->idle_level; 184 185 /* SPTP and IFPC both report as IFPC */ 186 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) 187 local = GMU_IDLE_STATE_IFPC; 188 189 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); 190 191 if (val == local) { 192 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || 193 !a6xx_gmu_gx_is_on(gmu)) 194 return true; 195 } 196 197 return false; 198 } 199 200 /* Wait for the GMU to get to its most idle state */ 201 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu) 202 { 203 return spin_until(a6xx_gmu_check_idle_level(gmu)); 204 } 205 206 static int a6xx_gmu_start(struct a6xx_gmu *gmu) 207 { 208 int ret; 209 u32 val; 210 211 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); 212 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); 213 214 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, 215 val == 0xbabeface, 100, 10000); 216 217 if (ret) 218 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); 219 220 return ret; 221 } 222 223 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) 224 { 225 u32 val; 226 int ret; 227 228 gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1); 229 230 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val, 231 val & 1, 100, 10000); 232 if (ret) 233 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); 234 235 return ret; 236 } 237 238 /* Trigger a OOB (out of band) request to the GMU */ 239 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) 240 { 241 int ret; 242 u32 val; 243 int request, ack; 244 const char *name; 245 246 switch (state) { 247 case GMU_OOB_GPU_SET: 248 if (gmu->legacy) { 249 request = GMU_OOB_GPU_SET_REQUEST; 250 ack = GMU_OOB_GPU_SET_ACK; 251 } else { 252 request = GMU_OOB_GPU_SET_REQUEST_NEW; 253 ack = GMU_OOB_GPU_SET_ACK_NEW; 254 } 255 name = "GPU_SET"; 256 break; 257 case GMU_OOB_BOOT_SLUMBER: 258 request = GMU_OOB_BOOT_SLUMBER_REQUEST; 259 ack = GMU_OOB_BOOT_SLUMBER_ACK; 260 name = "BOOT_SLUMBER"; 261 break; 262 case GMU_OOB_DCVS_SET: 263 request = GMU_OOB_DCVS_REQUEST; 264 ack = GMU_OOB_DCVS_ACK; 265 name = "GPU_DCVS"; 266 break; 267 default: 268 return -EINVAL; 269 } 270 271 /* Trigger the equested OOB operation */ 272 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request); 273 274 /* Wait for the acknowledge interrupt */ 275 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, 276 val & (1 << ack), 100, 10000); 277 278 if (ret) 279 DRM_DEV_ERROR(gmu->dev, 280 "Timeout waiting for GMU OOB set %s: 0x%x\n", 281 name, 282 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO)); 283 284 /* Clear the acknowledge interrupt */ 285 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack); 286 287 return ret; 288 } 289 290 /* Clear a pending OOB state in the GMU */ 291 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) 292 { 293 if (!gmu->legacy) { 294 WARN_ON(state != GMU_OOB_GPU_SET); 295 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 296 1 << GMU_OOB_GPU_SET_CLEAR_NEW); 297 return; 298 } 299 300 switch (state) { 301 case GMU_OOB_GPU_SET: 302 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 303 1 << GMU_OOB_GPU_SET_CLEAR); 304 break; 305 case GMU_OOB_BOOT_SLUMBER: 306 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 307 1 << GMU_OOB_BOOT_SLUMBER_CLEAR); 308 break; 309 case GMU_OOB_DCVS_SET: 310 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 311 1 << GMU_OOB_DCVS_CLEAR); 312 break; 313 } 314 } 315 316 /* Enable CPU control of SPTP power power collapse */ 317 static int a6xx_sptprac_enable(struct a6xx_gmu *gmu) 318 { 319 int ret; 320 u32 val; 321 322 if (!gmu->legacy) 323 return 0; 324 325 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000); 326 327 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, 328 (val & 0x38) == 0x28, 1, 100); 329 330 if (ret) { 331 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", 332 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); 333 } 334 335 return 0; 336 } 337 338 /* Disable CPU control of SPTP power power collapse */ 339 static void a6xx_sptprac_disable(struct a6xx_gmu *gmu) 340 { 341 u32 val; 342 int ret; 343 344 if (!gmu->legacy) 345 return; 346 347 /* Make sure retention is on */ 348 gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11)); 349 350 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001); 351 352 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, 353 (val & 0x04), 100, 10000); 354 355 if (ret) 356 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", 357 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); 358 } 359 360 /* Let the GMU know we are starting a boot sequence */ 361 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu) 362 { 363 u32 vote; 364 365 /* Let the GMU know we are getting ready for boot */ 366 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0); 367 368 /* Choose the "default" power level as the highest available */ 369 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; 370 371 gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff); 372 gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff); 373 374 /* Let the GMU know the boot sequence has started */ 375 return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); 376 } 377 378 /* Let the GMU know that we are about to go into slumber */ 379 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) 380 { 381 int ret; 382 383 /* Disable the power counter so the GMU isn't busy */ 384 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); 385 386 /* Disable SPTP_PC if the CPU is responsible for it */ 387 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) 388 a6xx_sptprac_disable(gmu); 389 390 if (!gmu->legacy) { 391 ret = a6xx_hfi_send_prep_slumber(gmu); 392 goto out; 393 } 394 395 /* Tell the GMU to get ready to slumber */ 396 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1); 397 398 ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); 399 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); 400 401 if (!ret) { 402 /* Check to see if the GMU really did slumber */ 403 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE) 404 != 0x0f) { 405 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); 406 ret = -ETIMEDOUT; 407 } 408 } 409 410 out: 411 /* Put fence into allow mode */ 412 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); 413 return ret; 414 } 415 416 static int a6xx_rpmh_start(struct a6xx_gmu *gmu) 417 { 418 int ret; 419 u32 val; 420 421 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1); 422 /* Wait for the register to finish posting */ 423 wmb(); 424 425 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, 426 val & (1 << 1), 100, 10000); 427 if (ret) { 428 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); 429 return ret; 430 } 431 432 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, 433 !val, 100, 10000); 434 435 if (ret) { 436 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); 437 return ret; 438 } 439 440 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); 441 442 /* Set up CX GMU counter 0 to count busy ticks */ 443 gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000); 444 gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20); 445 446 /* Enable the power counter */ 447 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1); 448 return 0; 449 } 450 451 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) 452 { 453 int ret; 454 u32 val; 455 456 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); 457 458 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, 459 val, val & (1 << 16), 100, 10000); 460 if (ret) 461 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); 462 463 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); 464 } 465 466 static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) 467 { 468 return msm_writel(value, ptr + (offset << 2)); 469 } 470 471 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, 472 const char *name); 473 474 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) 475 { 476 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 477 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 478 struct platform_device *pdev = to_platform_device(gmu->dev); 479 void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); 480 void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); 481 uint32_t pdc_address_offset; 482 483 if (!pdcptr || !seqptr) 484 goto err; 485 486 if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu)) 487 pdc_address_offset = 0x30090; 488 else if (adreno_is_a650(adreno_gpu)) 489 pdc_address_offset = 0x300a0; 490 else 491 pdc_address_offset = 0x30080; 492 493 /* Disable SDE clock gating */ 494 gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); 495 496 /* Setup RSC PDC handshake for sleep and wakeup */ 497 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); 498 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); 499 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); 500 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); 501 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); 502 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000); 503 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); 504 gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); 505 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); 506 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); 507 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); 508 509 /* Load RSC sequencer uCode for sleep and wakeup */ 510 if (adreno_is_a650(adreno_gpu)) { 511 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0); 512 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab); 513 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581); 514 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2); 515 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad); 516 } else { 517 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); 518 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); 519 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); 520 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); 521 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); 522 } 523 524 /* Load PDC sequencer uCode for power up and power down sequence */ 525 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1); 526 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2); 527 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0); 528 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284); 529 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc); 530 531 /* Set TCS commands used by PDC sequence for low power modes */ 532 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7); 533 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0); 534 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0); 535 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108); 536 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010); 537 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1); 538 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108); 539 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000); 540 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0); 541 542 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108); 543 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset); 544 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0); 545 546 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7); 547 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0); 548 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0); 549 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108); 550 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010); 551 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2); 552 553 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108); 554 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000); 555 if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu)) 556 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2); 557 else 558 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); 559 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108); 560 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset); 561 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3); 562 563 /* Setup GPU PDC */ 564 pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0); 565 pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001); 566 567 /* ensure no writes happen before the uCode is fully written */ 568 wmb(); 569 570 err: 571 if (!IS_ERR_OR_NULL(pdcptr)) 572 iounmap(pdcptr); 573 if (!IS_ERR_OR_NULL(seqptr)) 574 iounmap(seqptr); 575 } 576 577 /* 578 * The lowest 16 bits of this value are the number of XO clock cycles for main 579 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are 580 * for the shorter hysteresis that happens after main - this is 0xa (.5 us) 581 */ 582 583 #define GMU_PWR_COL_HYST 0x000a1680 584 585 /* Set up the idle state for the GMU */ 586 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) 587 { 588 /* Disable GMU WB/RB buffer */ 589 gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1); 590 gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1); 591 gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1); 592 593 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); 594 595 switch (gmu->idle_level) { 596 case GMU_IDLE_STATE_IFPC: 597 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST, 598 GMU_PWR_COL_HYST); 599 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, 600 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | 601 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE); 602 /* Fall through */ 603 case GMU_IDLE_STATE_SPTP: 604 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, 605 GMU_PWR_COL_HYST); 606 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, 607 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | 608 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE); 609 } 610 611 /* Enable RPMh GPU client */ 612 gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0, 613 A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE | 614 A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE | 615 A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE | 616 A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE | 617 A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE | 618 A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE); 619 } 620 621 struct block_header { 622 u32 addr; 623 u32 size; 624 u32 type; 625 u32 value; 626 u32 data[]; 627 }; 628 629 /* this should be a general kernel helper */ 630 static int in_range(u32 addr, u32 start, u32 size) 631 { 632 return addr >= start && addr < start + size; 633 } 634 635 static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk) 636 { 637 if (!in_range(blk->addr, bo->iova, bo->size)) 638 return false; 639 640 memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size); 641 return true; 642 } 643 644 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) 645 { 646 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 647 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 648 const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU]; 649 const struct block_header *blk; 650 u32 reg_offset; 651 652 u32 itcm_base = 0x00000000; 653 u32 dtcm_base = 0x00040000; 654 655 if (adreno_is_a650(adreno_gpu)) 656 dtcm_base = 0x10004000; 657 658 if (gmu->legacy) { 659 /* Sanity check the size of the firmware that was loaded */ 660 if (fw_image->size > 0x8000) { 661 DRM_DEV_ERROR(gmu->dev, 662 "GMU firmware is bigger than the available region\n"); 663 return -EINVAL; 664 } 665 666 gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START, 667 (u32*) fw_image->data, fw_image->size); 668 return 0; 669 } 670 671 672 for (blk = (const struct block_header *) fw_image->data; 673 (const u8*) blk < fw_image->data + fw_image->size; 674 blk = (const struct block_header *) &blk->data[blk->size >> 2]) { 675 if (blk->size == 0) 676 continue; 677 678 if (in_range(blk->addr, itcm_base, SZ_16K)) { 679 reg_offset = (blk->addr - itcm_base) >> 2; 680 gmu_write_bulk(gmu, 681 REG_A6XX_GMU_CM3_ITCM_START + reg_offset, 682 blk->data, blk->size); 683 } else if (in_range(blk->addr, dtcm_base, SZ_16K)) { 684 reg_offset = (blk->addr - dtcm_base) >> 2; 685 gmu_write_bulk(gmu, 686 REG_A6XX_GMU_CM3_DTCM_START + reg_offset, 687 blk->data, blk->size); 688 } else if (!fw_block_mem(&gmu->icache, blk) && 689 !fw_block_mem(&gmu->dcache, blk) && 690 !fw_block_mem(&gmu->dummy, blk)) { 691 DRM_DEV_ERROR(gmu->dev, 692 "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n", 693 blk->addr, blk->size, blk->data[0]); 694 } 695 } 696 697 return 0; 698 } 699 700 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) 701 { 702 static bool rpmh_init; 703 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 704 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 705 int ret; 706 u32 chipid; 707 708 if (adreno_is_a650(adreno_gpu)) 709 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); 710 711 if (state == GMU_WARM_BOOT) { 712 ret = a6xx_rpmh_start(gmu); 713 if (ret) 714 return ret; 715 } else { 716 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], 717 "GMU firmware is not loaded\n")) 718 return -ENOENT; 719 720 /* Turn on register retention */ 721 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); 722 723 /* We only need to load the RPMh microcode once */ 724 if (!rpmh_init) { 725 a6xx_gmu_rpmh_init(gmu); 726 rpmh_init = true; 727 } else { 728 ret = a6xx_rpmh_start(gmu); 729 if (ret) 730 return ret; 731 } 732 733 ret = a6xx_gmu_fw_load(gmu); 734 if (ret) 735 return ret; 736 } 737 738 gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0); 739 gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02); 740 741 /* Write the iova of the HFI table */ 742 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); 743 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); 744 745 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0, 746 (1 << 31) | (0xa << 18) | (0xa0)); 747 748 chipid = adreno_gpu->rev.core << 24; 749 chipid |= adreno_gpu->rev.major << 16; 750 chipid |= adreno_gpu->rev.minor << 12; 751 chipid |= adreno_gpu->rev.patchid << 8; 752 753 gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); 754 755 /* Set up the lowest idle level on the GMU */ 756 a6xx_gmu_power_config(gmu); 757 758 ret = a6xx_gmu_start(gmu); 759 if (ret) 760 return ret; 761 762 if (gmu->legacy) { 763 ret = a6xx_gmu_gfx_rail_on(gmu); 764 if (ret) 765 return ret; 766 } 767 768 /* Enable SPTP_PC if the CPU is responsible for it */ 769 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { 770 ret = a6xx_sptprac_enable(gmu); 771 if (ret) 772 return ret; 773 } 774 775 ret = a6xx_gmu_hfi_start(gmu); 776 if (ret) 777 return ret; 778 779 /* FIXME: Do we need this wmb() here? */ 780 wmb(); 781 782 return 0; 783 } 784 785 #define A6XX_HFI_IRQ_MASK \ 786 (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) 787 788 #define A6XX_GMU_IRQ_MASK \ 789 (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \ 790 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \ 791 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR) 792 793 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) 794 { 795 disable_irq(gmu->gmu_irq); 796 disable_irq(gmu->hfi_irq); 797 798 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0); 799 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0); 800 } 801 802 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) 803 { 804 u32 val; 805 806 /* Make sure there are no outstanding RPMh votes */ 807 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val, 808 (val & 1), 100, 10000); 809 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val, 810 (val & 1), 100, 10000); 811 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val, 812 (val & 1), 100, 10000); 813 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val, 814 (val & 1), 100, 1000); 815 } 816 817 /* Force the GMU off in case it isn't responsive */ 818 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) 819 { 820 /* Flush all the queues */ 821 a6xx_hfi_stop(gmu); 822 823 /* Stop the interrupts */ 824 a6xx_gmu_irq_disable(gmu); 825 826 /* Force off SPTP in case the GMU is managing it */ 827 a6xx_sptprac_disable(gmu); 828 829 /* Make sure there are no outstanding RPMh votes */ 830 a6xx_gmu_rpmh_off(gmu); 831 } 832 833 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) 834 { 835 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 836 struct msm_gpu *gpu = &adreno_gpu->base; 837 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 838 int status, ret; 839 840 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) 841 return 0; 842 843 gmu->hung = false; 844 845 /* Turn on the resources */ 846 pm_runtime_get_sync(gmu->dev); 847 848 /* Use a known rate to bring up the GMU */ 849 clk_set_rate(gmu->core_clk, 200000000); 850 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); 851 if (ret) { 852 pm_runtime_put(gmu->dev); 853 return ret; 854 } 855 856 /* Set the bus quota to a reasonable value for boot */ 857 icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072)); 858 859 /* Enable the GMU interrupt */ 860 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); 861 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK); 862 enable_irq(gmu->gmu_irq); 863 864 /* Check to see if we are doing a cold or warm boot */ 865 status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? 866 GMU_WARM_BOOT : GMU_COLD_BOOT; 867 868 /* 869 * Warm boot path does not work on newer GPUs 870 * Presumably this is because icache/dcache regions must be restored 871 */ 872 if (!gmu->legacy) 873 status = GMU_COLD_BOOT; 874 875 ret = a6xx_gmu_fw_start(gmu, status); 876 if (ret) 877 goto out; 878 879 ret = a6xx_hfi_start(gmu, status); 880 if (ret) 881 goto out; 882 883 /* 884 * Turn on the GMU firmware fault interrupt after we know the boot 885 * sequence is successful 886 */ 887 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); 888 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); 889 enable_irq(gmu->hfi_irq); 890 891 /* Set the GPU to the current freq */ 892 if (gmu->legacy) 893 __a6xx_gmu_set_freq(gmu, gmu->current_perf_index); 894 else 895 a6xx_hfi_set_freq(gmu, gmu->current_perf_index); 896 897 /* 898 * "enable" the GX power domain which won't actually do anything but it 899 * will make sure that the refcounting is correct in case we need to 900 * bring down the GX after a GMU failure 901 */ 902 if (!IS_ERR_OR_NULL(gmu->gxpd)) 903 pm_runtime_get(gmu->gxpd); 904 905 out: 906 /* On failure, shut down the GMU to leave it in a good state */ 907 if (ret) { 908 disable_irq(gmu->gmu_irq); 909 a6xx_rpmh_stop(gmu); 910 pm_runtime_put(gmu->dev); 911 } 912 913 return ret; 914 } 915 916 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) 917 { 918 u32 reg; 919 920 if (!gmu->initialized) 921 return true; 922 923 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); 924 925 if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB) 926 return false; 927 928 return true; 929 } 930 931 #define GBIF_CLIENT_HALT_MASK BIT(0) 932 #define GBIF_ARB_HALT_MASK BIT(1) 933 934 static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) 935 { 936 struct msm_gpu *gpu = &adreno_gpu->base; 937 938 if (!a6xx_has_gbif(adreno_gpu)) { 939 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); 940 spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 941 0xf) == 0xf); 942 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); 943 944 return; 945 } 946 947 /* Halt new client requests on GBIF */ 948 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); 949 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & 950 (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); 951 952 /* Halt all AXI requests on GBIF */ 953 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); 954 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & 955 (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); 956 957 /* The GBIF halt needs to be explicitly cleared */ 958 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); 959 } 960 961 /* Gracefully try to shut down the GMU and by extension the GPU */ 962 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) 963 { 964 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 965 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 966 u32 val; 967 968 /* 969 * The GMU may still be in slumber unless the GPU started so check and 970 * skip putting it back into slumber if so 971 */ 972 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); 973 974 if (val != 0xf) { 975 int ret = a6xx_gmu_wait_for_idle(gmu); 976 977 /* If the GMU isn't responding assume it is hung */ 978 if (ret) { 979 a6xx_gmu_force_off(gmu); 980 return; 981 } 982 983 a6xx_bus_clear_pending_transactions(adreno_gpu); 984 985 /* tell the GMU we want to slumber */ 986 a6xx_gmu_notify_slumber(gmu); 987 988 ret = gmu_poll_timeout(gmu, 989 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, 990 !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), 991 100, 10000); 992 993 /* 994 * Let the user know we failed to slumber but don't worry too 995 * much because we are powering down anyway 996 */ 997 998 if (ret) 999 DRM_DEV_ERROR(gmu->dev, 1000 "Unable to slumber GMU: status = 0%x/0%x\n", 1001 gmu_read(gmu, 1002 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), 1003 gmu_read(gmu, 1004 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); 1005 } 1006 1007 /* Turn off HFI */ 1008 a6xx_hfi_stop(gmu); 1009 1010 /* Stop the interrupts and mask the hardware */ 1011 a6xx_gmu_irq_disable(gmu); 1012 1013 /* Tell RPMh to power off the GPU */ 1014 a6xx_rpmh_stop(gmu); 1015 } 1016 1017 1018 int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) 1019 { 1020 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 1021 struct msm_gpu *gpu = &a6xx_gpu->base.base; 1022 1023 if (!pm_runtime_active(gmu->dev)) 1024 return 0; 1025 1026 /* 1027 * Force the GMU off if we detected a hang, otherwise try to shut it 1028 * down gracefully 1029 */ 1030 if (gmu->hung) 1031 a6xx_gmu_force_off(gmu); 1032 else 1033 a6xx_gmu_shutdown(gmu); 1034 1035 /* Remove the bus vote */ 1036 icc_set_bw(gpu->icc_path, 0, 0); 1037 1038 /* 1039 * Make sure the GX domain is off before turning off the GMU (CX) 1040 * domain. Usually the GMU does this but only if the shutdown sequence 1041 * was successful 1042 */ 1043 if (!IS_ERR_OR_NULL(gmu->gxpd)) 1044 pm_runtime_put_sync(gmu->gxpd); 1045 1046 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); 1047 1048 pm_runtime_put_sync(gmu->dev); 1049 1050 return 0; 1051 } 1052 1053 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) 1054 { 1055 msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false); 1056 msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false); 1057 msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false); 1058 msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false); 1059 msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false); 1060 1061 gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); 1062 msm_gem_address_space_put(gmu->aspace); 1063 } 1064 1065 static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, 1066 size_t size, u64 iova) 1067 { 1068 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 1069 struct drm_device *dev = a6xx_gpu->base.base.dev; 1070 uint32_t flags = MSM_BO_WC; 1071 u64 range_start, range_end; 1072 int ret; 1073 1074 size = PAGE_ALIGN(size); 1075 if (!iova) { 1076 /* no fixed address - use GMU's uncached range */ 1077 range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */ 1078 range_end = 0x80000000; 1079 } else { 1080 /* range for fixed address */ 1081 range_start = iova; 1082 range_end = iova + size; 1083 /* use IOMMU_PRIV for icache/dcache */ 1084 flags |= MSM_BO_MAP_PRIV; 1085 } 1086 1087 bo->obj = msm_gem_new(dev, size, flags); 1088 if (IS_ERR(bo->obj)) 1089 return PTR_ERR(bo->obj); 1090 1091 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, 1092 range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT); 1093 if (ret) { 1094 drm_gem_object_put(bo->obj); 1095 return ret; 1096 } 1097 1098 bo->virt = msm_gem_get_vaddr(bo->obj); 1099 bo->size = size; 1100 1101 return 0; 1102 } 1103 1104 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) 1105 { 1106 struct iommu_domain *domain; 1107 int ret; 1108 1109 domain = iommu_domain_alloc(&platform_bus_type); 1110 if (!domain) 1111 return -ENODEV; 1112 1113 domain->geometry.aperture_start = 0x00000000; 1114 domain->geometry.aperture_end = 0x7fffffff; 1115 1116 gmu->aspace = msm_gem_address_space_create(gmu->dev, domain, "gmu"); 1117 if (IS_ERR(gmu->aspace)) { 1118 iommu_domain_free(domain); 1119 return PTR_ERR(gmu->aspace); 1120 } 1121 1122 ret = gmu->aspace->mmu->funcs->attach(gmu->aspace->mmu); 1123 if (ret) { 1124 msm_gem_address_space_put(gmu->aspace); 1125 return ret; 1126 } 1127 1128 return 0; 1129 } 1130 1131 /* Return the 'arc-level' for the given frequency */ 1132 static unsigned int a6xx_gmu_get_arc_level(struct device *dev, 1133 unsigned long freq) 1134 { 1135 struct dev_pm_opp *opp; 1136 unsigned int val; 1137 1138 if (!freq) 1139 return 0; 1140 1141 opp = dev_pm_opp_find_freq_exact(dev, freq, true); 1142 if (IS_ERR(opp)) 1143 return 0; 1144 1145 val = dev_pm_opp_get_level(opp); 1146 1147 dev_pm_opp_put(opp); 1148 1149 return val; 1150 } 1151 1152 static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, 1153 unsigned long *freqs, int freqs_count, const char *id) 1154 { 1155 int i, j; 1156 const u16 *pri, *sec; 1157 size_t pri_count, sec_count; 1158 1159 pri = cmd_db_read_aux_data(id, &pri_count); 1160 if (IS_ERR(pri)) 1161 return PTR_ERR(pri); 1162 /* 1163 * The data comes back as an array of unsigned shorts so adjust the 1164 * count accordingly 1165 */ 1166 pri_count >>= 1; 1167 if (!pri_count) 1168 return -EINVAL; 1169 1170 sec = cmd_db_read_aux_data("mx.lvl", &sec_count); 1171 if (IS_ERR(sec)) 1172 return PTR_ERR(sec); 1173 1174 sec_count >>= 1; 1175 if (!sec_count) 1176 return -EINVAL; 1177 1178 /* Construct a vote for each frequency */ 1179 for (i = 0; i < freqs_count; i++) { 1180 u8 pindex = 0, sindex = 0; 1181 unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]); 1182 1183 /* Get the primary index that matches the arc level */ 1184 for (j = 0; j < pri_count; j++) { 1185 if (pri[j] >= level) { 1186 pindex = j; 1187 break; 1188 } 1189 } 1190 1191 if (j == pri_count) { 1192 DRM_DEV_ERROR(dev, 1193 "Level %u not found in the RPMh list\n", 1194 level); 1195 DRM_DEV_ERROR(dev, "Available levels:\n"); 1196 for (j = 0; j < pri_count; j++) 1197 DRM_DEV_ERROR(dev, " %u\n", pri[j]); 1198 1199 return -EINVAL; 1200 } 1201 1202 /* 1203 * Look for a level in in the secondary list that matches. If 1204 * nothing fits, use the maximum non zero vote 1205 */ 1206 1207 for (j = 0; j < sec_count; j++) { 1208 if (sec[j] >= level) { 1209 sindex = j; 1210 break; 1211 } else if (sec[j]) { 1212 sindex = j; 1213 } 1214 } 1215 1216 /* Construct the vote */ 1217 votes[i] = ((pri[pindex] & 0xffff) << 16) | 1218 (sindex << 8) | pindex; 1219 } 1220 1221 return 0; 1222 } 1223 1224 /* 1225 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need 1226 * to construct the list of votes on the CPU and send it over. Query the RPMh 1227 * voltage levels and build the votes 1228 */ 1229 1230 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) 1231 { 1232 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 1233 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 1234 struct msm_gpu *gpu = &adreno_gpu->base; 1235 int ret; 1236 1237 /* Build the GX votes */ 1238 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, 1239 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); 1240 1241 /* Build the CX votes */ 1242 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, 1243 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); 1244 1245 return ret; 1246 } 1247 1248 static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs, 1249 u32 size) 1250 { 1251 int count = dev_pm_opp_get_opp_count(dev); 1252 struct dev_pm_opp *opp; 1253 int i, index = 0; 1254 unsigned long freq = 1; 1255 1256 /* 1257 * The OPP table doesn't contain the "off" frequency level so we need to 1258 * add 1 to the table size to account for it 1259 */ 1260 1261 if (WARN(count + 1 > size, 1262 "The GMU frequency table is being truncated\n")) 1263 count = size - 1; 1264 1265 /* Set the "off" frequency */ 1266 freqs[index++] = 0; 1267 1268 for (i = 0; i < count; i++) { 1269 opp = dev_pm_opp_find_freq_ceil(dev, &freq); 1270 if (IS_ERR(opp)) 1271 break; 1272 1273 dev_pm_opp_put(opp); 1274 freqs[index++] = freq++; 1275 } 1276 1277 return index; 1278 } 1279 1280 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) 1281 { 1282 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 1283 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 1284 struct msm_gpu *gpu = &adreno_gpu->base; 1285 1286 int ret = 0; 1287 1288 /* 1289 * The GMU handles its own frequency switching so build a list of 1290 * available frequencies to send during initialization 1291 */ 1292 ret = dev_pm_opp_of_add_table(gmu->dev); 1293 if (ret) { 1294 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); 1295 return ret; 1296 } 1297 1298 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, 1299 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); 1300 1301 /* 1302 * The GMU also handles GPU frequency switching so build a list 1303 * from the GPU OPP table 1304 */ 1305 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, 1306 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); 1307 1308 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; 1309 1310 /* Build the list of RPMh votes that we'll send to the GMU */ 1311 return a6xx_gmu_rpmh_votes_init(gmu); 1312 } 1313 1314 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) 1315 { 1316 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); 1317 1318 if (ret < 1) 1319 return ret; 1320 1321 gmu->nr_clocks = ret; 1322 1323 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, 1324 gmu->nr_clocks, "gmu"); 1325 1326 return 0; 1327 } 1328 1329 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, 1330 const char *name) 1331 { 1332 void __iomem *ret; 1333 struct resource *res = platform_get_resource_byname(pdev, 1334 IORESOURCE_MEM, name); 1335 1336 if (!res) { 1337 DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); 1338 return ERR_PTR(-EINVAL); 1339 } 1340 1341 ret = ioremap(res->start, resource_size(res)); 1342 if (!ret) { 1343 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); 1344 return ERR_PTR(-EINVAL); 1345 } 1346 1347 return ret; 1348 } 1349 1350 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, 1351 const char *name, irq_handler_t handler) 1352 { 1353 int irq, ret; 1354 1355 irq = platform_get_irq_byname(pdev, name); 1356 1357 ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); 1358 if (ret) { 1359 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", 1360 name, ret); 1361 return ret; 1362 } 1363 1364 disable_irq(irq); 1365 1366 return irq; 1367 } 1368 1369 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) 1370 { 1371 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 1372 struct platform_device *pdev = to_platform_device(gmu->dev); 1373 1374 if (!gmu->initialized) 1375 return; 1376 1377 pm_runtime_force_suspend(gmu->dev); 1378 1379 if (!IS_ERR_OR_NULL(gmu->gxpd)) { 1380 pm_runtime_disable(gmu->gxpd); 1381 dev_pm_domain_detach(gmu->gxpd, false); 1382 } 1383 1384 iounmap(gmu->mmio); 1385 if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc")) 1386 iounmap(gmu->rscc); 1387 gmu->mmio = NULL; 1388 gmu->rscc = NULL; 1389 1390 a6xx_gmu_memory_free(gmu); 1391 1392 free_irq(gmu->gmu_irq, gmu); 1393 free_irq(gmu->hfi_irq, gmu); 1394 1395 /* Drop reference taken in of_find_device_by_node */ 1396 put_device(gmu->dev); 1397 1398 gmu->initialized = false; 1399 } 1400 1401 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) 1402 { 1403 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 1404 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 1405 struct platform_device *pdev = of_find_device_by_node(node); 1406 int ret; 1407 1408 if (!pdev) 1409 return -ENODEV; 1410 1411 gmu->dev = &pdev->dev; 1412 1413 of_dma_configure(gmu->dev, node, true); 1414 1415 /* Fow now, don't do anything fancy until we get our feet under us */ 1416 gmu->idle_level = GMU_IDLE_STATE_ACTIVE; 1417 1418 pm_runtime_enable(gmu->dev); 1419 1420 /* Get the list of clocks */ 1421 ret = a6xx_gmu_clocks_probe(gmu); 1422 if (ret) 1423 goto err_put_device; 1424 1425 ret = a6xx_gmu_memory_probe(gmu); 1426 if (ret) 1427 goto err_put_device; 1428 1429 /* Allocate memory for the GMU dummy page */ 1430 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000); 1431 if (ret) 1432 goto err_memory; 1433 1434 if (adreno_is_a650(adreno_gpu)) { 1435 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, 1436 SZ_16M - SZ_16K, 0x04000); 1437 if (ret) 1438 goto err_memory; 1439 } else if (adreno_is_a640(adreno_gpu)) { 1440 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, 1441 SZ_256K - SZ_16K, 0x04000); 1442 if (ret) 1443 goto err_memory; 1444 1445 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, 1446 SZ_256K - SZ_16K, 0x44000); 1447 if (ret) 1448 goto err_memory; 1449 } else { 1450 /* HFI v1, has sptprac */ 1451 gmu->legacy = true; 1452 1453 /* Allocate memory for the GMU debug region */ 1454 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0); 1455 if (ret) 1456 goto err_memory; 1457 } 1458 1459 /* Allocate memory for for the HFI queues */ 1460 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0); 1461 if (ret) 1462 goto err_memory; 1463 1464 /* Map the GMU registers */ 1465 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); 1466 if (IS_ERR(gmu->mmio)) { 1467 ret = PTR_ERR(gmu->mmio); 1468 goto err_memory; 1469 } 1470 1471 if (adreno_is_a650(adreno_gpu)) { 1472 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); 1473 if (IS_ERR(gmu->rscc)) 1474 goto err_mmio; 1475 } else { 1476 gmu->rscc = gmu->mmio + 0x23000; 1477 } 1478 1479 /* Get the HFI and GMU interrupts */ 1480 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); 1481 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); 1482 1483 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) 1484 goto err_mmio; 1485 1486 /* 1487 * Get a link to the GX power domain to reset the GPU in case of GMU 1488 * crash 1489 */ 1490 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); 1491 1492 /* Get the power levels for the GMU and GPU */ 1493 a6xx_gmu_pwrlevels_probe(gmu); 1494 1495 /* Set up the HFI queues */ 1496 a6xx_hfi_init(gmu); 1497 1498 gmu->initialized = true; 1499 1500 return 0; 1501 1502 err_mmio: 1503 iounmap(gmu->mmio); 1504 if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc")) 1505 iounmap(gmu->rscc); 1506 free_irq(gmu->gmu_irq, gmu); 1507 free_irq(gmu->hfi_irq, gmu); 1508 1509 ret = -ENODEV; 1510 1511 err_memory: 1512 a6xx_gmu_memory_free(gmu); 1513 err_put_device: 1514 /* Drop reference taken in of_find_device_by_node */ 1515 put_device(gmu->dev); 1516 1517 return ret; 1518 } 1519