// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <drm/drm_gem.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_mmu.h"

static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;

	/* FIXME: add a banner here */
	gmu->hung = true;

	/* Turn off the hangcheck timer while we are resetting */
	del_timer(&gpu->hangcheck_timer);

	/* Queue the GPU handler because we need to treat this as a recovery */
	queue_work(priv->wq, &gpu->recover_work);
}

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}

bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}

/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}

static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/*
	 * This can get called from devfreq while the hardware is idle. Don't
	 * bring up the power if it isn't already active
	 */
	if (pm_runtime_get_if_in_use(gmu->dev) == 0)
		return;

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	gmu->freq = gmu->gpu_freqs[index];

	/*
	 * Eventually we will want to scale the path vote with the frequency but
	 * for now leave it at max so that the performance is nominal.
	 */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
	pm_runtime_put(gmu->dev);
}

void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index = 0;

	if (freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (freq == gmu->gpu_freqs[perf_index])
			break;

	gmu->current_perf_index = perf_index;

	__a6xx_gmu_set_freq(gmu, perf_index);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		val == 0xbabeface, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
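/*
 * Note on the OOB mechanism as implemented below: the host sets a per-target
 * request bit in HOST2GMU_INTR_SET, polls GMU2HOST_INTR_INFO for the matching
 * ack bit, and finally clears the ack. The request/ack/clear bit positions
 * for each OOB target are selected in the switch statements of
 * a6xx_gmu_set_oob() and a6xx_gmu_clear_oob().
 */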
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}

/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return 0;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}

/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
	return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	return msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);

static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");

	if (!pdcptr || !seqptr)
		goto err;

	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	if (adreno_is_a618(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	if (adreno_is_a618(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	if (adreno_is_a618(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */

#define GMU_PWR_COL_HYST 0x000a1680
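/*
 * Sanity check on the arithmetic (assuming the 19.2 MHz always-on XO clock):
 * 0x1680 = 5760 cycles / 19.2 MHz = 300 us for the main hysteresis, and
 * 0xa = 10 cycles / 19.2 MHz is roughly 0.5 us for the short one.
 */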
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}

static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
	u32 val;

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);
}

/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	a6xx_gmu_rpmh_off(gmu);
}

int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);
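	/*
	 * Note: a6xx_gmu_fw_start() writes 1 to REG_A6XX_GMU_GENERAL_7 on a
	 * cold boot after enabling register retention, so reading back 1 here
	 * is taken to mean the GMU state survived the power cycle and a warm
	 * boot is sufficient.
	 */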
	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the current freq */
	__a6xx_gmu_set_freq(gmu, gmu->current_perf_index);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get(gmu->gxpd);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->initialized)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

#define GBIF_CLIENT_HALT_MASK	BIT(0)
#define GBIF_ARB_HALT_MASK	BIT(1)

static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
{
	struct msm_gpu *gpu = &adreno_gpu->base;

	if (!a6xx_has_gbif(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
			0xf) == 0xf);
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);

		return;
	}

	/* Halt new client requests on GBIF */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
		(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);

	/* Halt all AXI requests on GBIF */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
		(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);

	/* The GBIF halt needs to be explicitly cleared */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}

/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(gmu);

		/* If the GMU isn't responding assume it is hung */
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		a6xx_bus_clear_pending_transactions(adreno_gpu);

		/* Tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct msm_gpu *gpu = &a6xx_gpu->base.base;

	if (!pm_runtime_active(gmu->dev))
		return 0;

	/*
	 * Force the GMU off if we detected a hang, otherwise try to shut it
	 * down gracefully
	 */
	if (gmu->hung)
		a6xx_gmu_force_off(gmu);
	else
		a6xx_gmu_shutdown(gmu);

	/* Remove the bus vote */
	icc_set_bw(gpu->icc_path, 0, 0);

	/*
	 * Make sure the GX domain is off before turning off the GMU (CX)
	 * domain. Usually the GMU does this but only if the shutdown sequence
	 * was successful
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_put_sync(gmu->gxpd);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);

	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
	msm_gem_address_space_put(gmu->aspace);
}

static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
		size_t size, u64 iova)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct drm_device *dev = a6xx_gpu->base.base.dev;
	uint32_t flags = MSM_BO_WC;
	u64 range_start, range_end;
	int ret;

	size = PAGE_ALIGN(size);
	if (!iova) {
		/* no fixed address - use GMU's uncached range */
		range_start = 0x60000000;
		range_end = 0x80000000;
	} else {
		/* range for fixed address */
		range_start = iova;
		range_end = iova + size;
	}

	bo->obj = msm_gem_new(dev, size, flags);
	if (IS_ERR(bo->obj))
		return PTR_ERR(bo->obj);

	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
		range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
	if (ret) {
		drm_gem_object_put(bo->obj);
		return ret;
	}

	bo->virt = msm_gem_get_vaddr(bo->obj);
	bo->size = size;

	return 0;
}

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENODEV;

	domain->geometry.aperture_start = 0x00000000;
	domain->geometry.aperture_end = 0x7fffffff;

	gmu->aspace = msm_gem_address_space_create(gmu->dev, domain, "gmu");
	if (IS_ERR(gmu->aspace)) {
		iommu_domain_free(domain);
		return PTR_ERR(gmu->aspace);
	}

	ret = gmu->aspace->mmu->funcs->attach(gmu->aspace->mmu);
	if (ret) {
		msm_gem_address_space_put(gmu->aspace);
		return ret;
	}

	return 0;
}

/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
		unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int val;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	val = dev_pm_opp_get_level(opp);

	dev_pm_opp_put(opp);

	return val;
}
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);

	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
				level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non-zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}
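/*
 * Worked example of the vote layout constructed above (values are purely
 * illustrative, not taken from a real target): with pri[2] == 128 selected
 * as the primary level and sindex == 1, the packed vote is
 * (128 << 16) | (1 << 8) | 2 == 0x00800102.
 */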
/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */

	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}

static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = dev_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = ioremap(res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
			name, ret);
		return ret;
	}

	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (!gmu->initialized)
		return;

	pm_runtime_force_suspend(gmu->dev);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	iounmap(gmu->mmio);
	gmu->mmio = NULL;

	a6xx_gmu_memory_free(gmu);

	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	gmu->initialized = false;
}

int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;

	/* Allocate memory for the HFI queues */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
	if (ret)
		goto err_memory;

	/* Allocate memory for the GMU debug region */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
	if (ret)
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio)) {
		ret = PTR_ERR(gmu->mmio);
		goto err_memory;
	}

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err_mmio;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	gmu->initialized = true;

	return 0;

err_mmio:
	iounmap(gmu->mmio);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	ret = -ENODEV;

err_memory:
	a6xx_gmu_memory_free(gmu);
err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}