1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ 3 4 #include <linux/clk.h> 5 #include <linux/dma-mapping.h> 6 #include <linux/interconnect.h> 7 #include <linux/pm_domain.h> 8 #include <linux/pm_opp.h> 9 #include <soc/qcom/cmd-db.h> 10 11 #include "a6xx_gpu.h" 12 #include "a6xx_gmu.xml.h" 13 14 static void a6xx_gmu_fault(struct a6xx_gmu *gmu) 15 { 16 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 17 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 18 struct msm_gpu *gpu = &adreno_gpu->base; 19 struct drm_device *dev = gpu->dev; 20 struct msm_drm_private *priv = dev->dev_private; 21 22 /* FIXME: add a banner here */ 23 gmu->hung = true; 24 25 /* Turn off the hangcheck timer while we are resetting */ 26 del_timer(&gpu->hangcheck_timer); 27 28 /* Queue the GPU handler because we need to treat this as a recovery */ 29 queue_work(priv->wq, &gpu->recover_work); 30 } 31 32 static irqreturn_t a6xx_gmu_irq(int irq, void *data) 33 { 34 struct a6xx_gmu *gmu = data; 35 u32 status; 36 37 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS); 38 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); 39 40 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) { 41 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); 42 43 a6xx_gmu_fault(gmu); 44 } 45 46 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR) 47 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); 48 49 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR) 50 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", 51 gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS)); 52 53 return IRQ_HANDLED; 54 } 55 56 static irqreturn_t a6xx_hfi_irq(int irq, void *data) 57 { 58 struct a6xx_gmu *gmu = data; 59 u32 status; 60 61 status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO); 62 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status); 63 64 if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) { 65 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); 66 67 a6xx_gmu_fault(gmu); 68 } 69 70 return IRQ_HANDLED; 71 } 72 73 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) 74 { 75 u32 val; 76 77 /* This can be called from gpu state code so make sure GMU is valid */ 78 if (!gmu->initialized) 79 return false; 80 81 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); 82 83 return !(val & 84 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF | 85 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF)); 86 } 87 88 /* Check to see if the GX rail is still powered */ 89 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) 90 { 91 u32 val; 92 93 /* This can be called from gpu state code so make sure GMU is valid */ 94 if (!gmu->initialized) 95 return false; 96 97 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); 98 99 return !(val & 100 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF | 101 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF)); 102 } 103 104 static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) 105 { 106 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 107 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 108 struct msm_gpu *gpu = &adreno_gpu->base; 109 int ret; 110 111 /* 112 * This can get called from devfreq while the hardware is idle. Don't 113 * bring up the power if it isn't already active 114 */ 115 if (pm_runtime_get_if_in_use(gmu->dev) == 0) 116 return; 117 118 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); 119 120 gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, 121 ((3 & 0xf) << 28) | index); 122 123 /* 124 * Send an invalid index as a vote for the bus bandwidth and let the 125 * firmware decide on the right vote 126 */ 127 gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff); 128 129 /* Set and clear the OOB for DCVS to trigger the GMU */ 130 a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET); 131 a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); 132 133 ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN); 134 if (ret) 135 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); 136 137 gmu->freq = gmu->gpu_freqs[index]; 138 139 /* 140 * Eventually we will want to scale the path vote with the frequency but 141 * for now leave it at max so that the performance is nominal. 142 */ 143 icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); 144 pm_runtime_put(gmu->dev); 145 } 146 147 void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) 148 { 149 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 150 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 151 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 152 u32 perf_index = 0; 153 154 if (freq == gmu->freq) 155 return; 156 157 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) 158 if (freq == gmu->gpu_freqs[perf_index]) 159 break; 160 161 gmu->current_perf_index = perf_index; 162 163 __a6xx_gmu_set_freq(gmu, perf_index); 164 } 165 166 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu) 167 { 168 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 169 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); 170 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 171 172 return gmu->freq; 173 } 174 175 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) 176 { 177 u32 val; 178 int local = gmu->idle_level; 179 180 /* SPTP and IFPC both report as IFPC */ 181 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) 182 local = GMU_IDLE_STATE_IFPC; 183 184 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); 185 186 if (val == local) { 187 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || 188 !a6xx_gmu_gx_is_on(gmu)) 189 return true; 190 } 191 192 return false; 193 } 194 195 /* Wait for the GMU to get to its most idle state */ 196 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu) 197 { 198 return spin_until(a6xx_gmu_check_idle_level(gmu)); 199 } 200 201 static int a6xx_gmu_start(struct a6xx_gmu *gmu) 202 { 203 int ret; 204 u32 val; 205 206 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); 207 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); 208 209 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, 210 val == 0xbabeface, 100, 10000); 211 212 if (ret) 213 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); 214 215 return ret; 216 } 217 218 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) 219 { 220 u32 val; 221 int ret; 222 223 gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1); 224 225 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val, 226 val & 1, 100, 10000); 227 if (ret) 228 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); 229 230 return ret; 231 } 232 233 /* Trigger a OOB (out of band) request to the GMU */ 234 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) 235 { 236 int ret; 237 u32 val; 238 int request, ack; 239 const char *name; 240 241 switch (state) { 242 case GMU_OOB_GPU_SET: 243 request = GMU_OOB_GPU_SET_REQUEST; 244 ack = GMU_OOB_GPU_SET_ACK; 245 name = "GPU_SET"; 246 break; 247 case GMU_OOB_BOOT_SLUMBER: 248 request = GMU_OOB_BOOT_SLUMBER_REQUEST; 249 ack = GMU_OOB_BOOT_SLUMBER_ACK; 250 name = "BOOT_SLUMBER"; 251 break; 252 case GMU_OOB_DCVS_SET: 253 request = GMU_OOB_DCVS_REQUEST; 254 ack = GMU_OOB_DCVS_ACK; 255 name = "GPU_DCVS"; 256 break; 257 default: 258 return -EINVAL; 259 } 260 261 /* Trigger the equested OOB operation */ 262 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request); 263 264 /* Wait for the acknowledge interrupt */ 265 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, 266 val & (1 << ack), 100, 10000); 267 268 if (ret) 269 DRM_DEV_ERROR(gmu->dev, 270 "Timeout waiting for GMU OOB set %s: 0x%x\n", 271 name, 272 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO)); 273 274 /* Clear the acknowledge interrupt */ 275 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack); 276 277 return ret; 278 } 279 280 /* Clear a pending OOB state in the GMU */ 281 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) 282 { 283 switch (state) { 284 case GMU_OOB_GPU_SET: 285 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 286 1 << GMU_OOB_GPU_SET_CLEAR); 287 break; 288 case GMU_OOB_BOOT_SLUMBER: 289 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 290 1 << GMU_OOB_BOOT_SLUMBER_CLEAR); 291 break; 292 case GMU_OOB_DCVS_SET: 293 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 294 1 << GMU_OOB_DCVS_CLEAR); 295 break; 296 } 297 } 298 299 /* Enable CPU control of SPTP power power collapse */ 300 static int a6xx_sptprac_enable(struct a6xx_gmu *gmu) 301 { 302 int ret; 303 u32 val; 304 305 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000); 306 307 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, 308 (val & 0x38) == 0x28, 1, 100); 309 310 if (ret) { 311 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", 312 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); 313 } 314 315 return 0; 316 } 317 318 /* Disable CPU control of SPTP power power collapse */ 319 static void a6xx_sptprac_disable(struct a6xx_gmu *gmu) 320 { 321 u32 val; 322 int ret; 323 324 /* Make sure retention is on */ 325 gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11)); 326 327 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001); 328 329 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, 330 (val & 0x04), 100, 10000); 331 332 if (ret) 333 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", 334 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); 335 } 336 337 /* Let the GMU know we are starting a boot sequence */ 338 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu) 339 { 340 u32 vote; 341 342 /* Let the GMU know we are getting ready for boot */ 343 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0); 344 345 /* Choose the "default" power level as the highest available */ 346 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; 347 348 gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff); 349 gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff); 350 351 /* Let the GMU know the boot sequence has started */ 352 return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); 353 } 354 355 /* Let the GMU know that we are about to go into slumber */ 356 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) 357 { 358 int ret; 359 360 /* Disable the power counter so the GMU isn't busy */ 361 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); 362 363 /* Disable SPTP_PC if the CPU is responsible for it */ 364 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) 365 a6xx_sptprac_disable(gmu); 366 367 /* Tell the GMU to get ready to slumber */ 368 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1); 369 370 ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); 371 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); 372 373 if (!ret) { 374 /* Check to see if the GMU really did slumber */ 375 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE) 376 != 0x0f) { 377 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); 378 ret = -ETIMEDOUT; 379 } 380 } 381 382 /* Put fence into allow mode */ 383 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); 384 return ret; 385 } 386 387 static int a6xx_rpmh_start(struct a6xx_gmu *gmu) 388 { 389 int ret; 390 u32 val; 391 392 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1); 393 /* Wait for the register to finish posting */ 394 wmb(); 395 396 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, 397 val & (1 << 1), 100, 10000); 398 if (ret) { 399 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); 400 return ret; 401 } 402 403 ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, 404 !val, 100, 10000); 405 406 if (ret) { 407 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); 408 return ret; 409 } 410 411 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); 412 413 /* Set up CX GMU counter 0 to count busy ticks */ 414 gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000); 415 gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20); 416 417 /* Enable the power counter */ 418 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1); 419 return 0; 420 } 421 422 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) 423 { 424 int ret; 425 u32 val; 426 427 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); 428 429 ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, 430 val, val & (1 << 16), 100, 10000); 431 if (ret) 432 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); 433 434 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); 435 } 436 437 static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value) 438 { 439 return msm_writel(value, ptr + (offset << 2)); 440 } 441 442 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, 443 const char *name); 444 445 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) 446 { 447 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 448 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 449 struct platform_device *pdev = to_platform_device(gmu->dev); 450 void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); 451 void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); 452 453 if (!pdcptr || !seqptr) 454 goto err; 455 456 /* Disable SDE clock gating */ 457 gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); 458 459 /* Setup RSC PDC handshake for sleep and wakeup */ 460 gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); 461 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); 462 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); 463 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); 464 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); 465 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000); 466 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); 467 gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); 468 gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); 469 gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); 470 gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); 471 472 /* Load RSC sequencer uCode for sleep and wakeup */ 473 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); 474 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); 475 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); 476 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); 477 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); 478 479 /* Load PDC sequencer uCode for power up and power down sequence */ 480 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1); 481 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2); 482 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0); 483 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284); 484 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc); 485 486 /* Set TCS commands used by PDC sequence for low power modes */ 487 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7); 488 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0); 489 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0); 490 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108); 491 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010); 492 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1); 493 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108); 494 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000); 495 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0); 496 497 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108); 498 if (adreno_is_a618(adreno_gpu)) 499 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090); 500 else 501 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080); 502 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0); 503 504 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7); 505 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0); 506 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0); 507 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108); 508 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010); 509 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2); 510 511 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108); 512 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000); 513 if (adreno_is_a618(adreno_gpu)) 514 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2); 515 else 516 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); 517 518 519 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108); 520 if (adreno_is_a618(adreno_gpu)) 521 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090); 522 else 523 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080); 524 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3); 525 526 /* Setup GPU PDC */ 527 pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0); 528 pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001); 529 530 /* ensure no writes happen before the uCode is fully written */ 531 wmb(); 532 533 err: 534 if (!IS_ERR_OR_NULL(pdcptr)) 535 iounmap(pdcptr); 536 if (!IS_ERR_OR_NULL(seqptr)) 537 iounmap(seqptr); 538 } 539 540 /* 541 * The lowest 16 bits of this value are the number of XO clock cycles for main 542 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are 543 * for the shorter hysteresis that happens after main - this is 0xa (.5 us) 544 */ 545 546 #define GMU_PWR_COL_HYST 0x000a1680 547 548 /* Set up the idle state for the GMU */ 549 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) 550 { 551 /* Disable GMU WB/RB buffer */ 552 gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1); 553 554 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); 555 556 switch (gmu->idle_level) { 557 case GMU_IDLE_STATE_IFPC: 558 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST, 559 GMU_PWR_COL_HYST); 560 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, 561 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | 562 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE); 563 /* Fall through */ 564 case GMU_IDLE_STATE_SPTP: 565 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, 566 GMU_PWR_COL_HYST); 567 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, 568 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE | 569 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE); 570 } 571 572 /* Enable RPMh GPU client */ 573 gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0, 574 A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE | 575 A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE | 576 A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE | 577 A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE | 578 A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE | 579 A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE); 580 } 581 582 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) 583 { 584 static bool rpmh_init; 585 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 586 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 587 int i, ret; 588 u32 chipid; 589 u32 *image; 590 591 if (state == GMU_WARM_BOOT) { 592 ret = a6xx_rpmh_start(gmu); 593 if (ret) 594 return ret; 595 } else { 596 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU], 597 "GMU firmware is not loaded\n")) 598 return -ENOENT; 599 600 /* Sanity check the size of the firmware that was loaded */ 601 if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) { 602 DRM_DEV_ERROR(gmu->dev, 603 "GMU firmware is bigger than the available region\n"); 604 return -EINVAL; 605 } 606 607 /* Turn on register retention */ 608 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); 609 610 /* We only need to load the RPMh microcode once */ 611 if (!rpmh_init) { 612 a6xx_gmu_rpmh_init(gmu); 613 rpmh_init = true; 614 } else { 615 ret = a6xx_rpmh_start(gmu); 616 if (ret) 617 return ret; 618 } 619 620 image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data; 621 622 for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++) 623 gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i, 624 image[i]); 625 } 626 627 gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0); 628 gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02); 629 630 /* Write the iova of the HFI table */ 631 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova); 632 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); 633 634 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0, 635 (1 << 31) | (0xa << 18) | (0xa0)); 636 637 chipid = adreno_gpu->rev.core << 24; 638 chipid |= adreno_gpu->rev.major << 16; 639 chipid |= adreno_gpu->rev.minor << 12; 640 chipid |= adreno_gpu->rev.patchid << 8; 641 642 gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); 643 644 /* Set up the lowest idle level on the GMU */ 645 a6xx_gmu_power_config(gmu); 646 647 ret = a6xx_gmu_start(gmu); 648 if (ret) 649 return ret; 650 651 ret = a6xx_gmu_gfx_rail_on(gmu); 652 if (ret) 653 return ret; 654 655 /* Enable SPTP_PC if the CPU is responsible for it */ 656 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { 657 ret = a6xx_sptprac_enable(gmu); 658 if (ret) 659 return ret; 660 } 661 662 ret = a6xx_gmu_hfi_start(gmu); 663 if (ret) 664 return ret; 665 666 /* FIXME: Do we need this wmb() here? */ 667 wmb(); 668 669 return 0; 670 } 671 672 #define A6XX_HFI_IRQ_MASK \ 673 (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) 674 675 #define A6XX_GMU_IRQ_MASK \ 676 (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \ 677 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \ 678 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR) 679 680 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) 681 { 682 disable_irq(gmu->gmu_irq); 683 disable_irq(gmu->hfi_irq); 684 685 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0); 686 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0); 687 } 688 689 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) 690 { 691 u32 val; 692 693 /* Make sure there are no outstanding RPMh votes */ 694 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val, 695 (val & 1), 100, 10000); 696 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val, 697 (val & 1), 100, 10000); 698 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val, 699 (val & 1), 100, 10000); 700 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val, 701 (val & 1), 100, 1000); 702 } 703 704 /* Force the GMU off in case it isn't responsive */ 705 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) 706 { 707 /* Flush all the queues */ 708 a6xx_hfi_stop(gmu); 709 710 /* Stop the interrupts */ 711 a6xx_gmu_irq_disable(gmu); 712 713 /* Force off SPTP in case the GMU is managing it */ 714 a6xx_sptprac_disable(gmu); 715 716 /* Make sure there are no outstanding RPMh votes */ 717 a6xx_gmu_rpmh_off(gmu); 718 } 719 720 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) 721 { 722 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 723 struct msm_gpu *gpu = &adreno_gpu->base; 724 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 725 int status, ret; 726 727 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) 728 return 0; 729 730 gmu->hung = false; 731 732 /* Turn on the resources */ 733 pm_runtime_get_sync(gmu->dev); 734 735 /* Use a known rate to bring up the GMU */ 736 clk_set_rate(gmu->core_clk, 200000000); 737 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); 738 if (ret) { 739 pm_runtime_put(gmu->dev); 740 return ret; 741 } 742 743 /* Set the bus quota to a reasonable value for boot */ 744 icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072)); 745 746 /* Enable the GMU interrupt */ 747 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); 748 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK); 749 enable_irq(gmu->gmu_irq); 750 751 /* Check to see if we are doing a cold or warm boot */ 752 status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? 753 GMU_WARM_BOOT : GMU_COLD_BOOT; 754 755 ret = a6xx_gmu_fw_start(gmu, status); 756 if (ret) 757 goto out; 758 759 ret = a6xx_hfi_start(gmu, status); 760 if (ret) 761 goto out; 762 763 /* 764 * Turn on the GMU firmware fault interrupt after we know the boot 765 * sequence is successful 766 */ 767 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); 768 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); 769 enable_irq(gmu->hfi_irq); 770 771 /* Set the GPU to the current freq */ 772 __a6xx_gmu_set_freq(gmu, gmu->current_perf_index); 773 774 /* 775 * "enable" the GX power domain which won't actually do anything but it 776 * will make sure that the refcounting is correct in case we need to 777 * bring down the GX after a GMU failure 778 */ 779 if (!IS_ERR_OR_NULL(gmu->gxpd)) 780 pm_runtime_get(gmu->gxpd); 781 782 out: 783 /* On failure, shut down the GMU to leave it in a good state */ 784 if (ret) { 785 disable_irq(gmu->gmu_irq); 786 a6xx_rpmh_stop(gmu); 787 pm_runtime_put(gmu->dev); 788 } 789 790 return ret; 791 } 792 793 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) 794 { 795 u32 reg; 796 797 if (!gmu->initialized) 798 return true; 799 800 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); 801 802 if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB) 803 return false; 804 805 return true; 806 } 807 808 #define GBIF_CLIENT_HALT_MASK BIT(0) 809 #define GBIF_ARB_HALT_MASK BIT(1) 810 811 static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) 812 { 813 struct msm_gpu *gpu = &adreno_gpu->base; 814 815 if (!a6xx_has_gbif(adreno_gpu)) { 816 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); 817 spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 818 0xf) == 0xf); 819 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); 820 821 return; 822 } 823 824 /* Halt new client requests on GBIF */ 825 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); 826 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & 827 (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); 828 829 /* Halt all AXI requests on GBIF */ 830 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); 831 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & 832 (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); 833 834 /* The GBIF halt needs to be explicitly cleared */ 835 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); 836 } 837 838 /* Gracefully try to shut down the GMU and by extension the GPU */ 839 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) 840 { 841 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 842 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 843 u32 val; 844 845 /* 846 * The GMU may still be in slumber unless the GPU started so check and 847 * skip putting it back into slumber if so 848 */ 849 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); 850 851 if (val != 0xf) { 852 int ret = a6xx_gmu_wait_for_idle(gmu); 853 854 /* If the GMU isn't responding assume it is hung */ 855 if (ret) { 856 a6xx_gmu_force_off(gmu); 857 return; 858 } 859 860 a6xx_bus_clear_pending_transactions(adreno_gpu); 861 862 /* tell the GMU we want to slumber */ 863 a6xx_gmu_notify_slumber(gmu); 864 865 ret = gmu_poll_timeout(gmu, 866 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, 867 !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), 868 100, 10000); 869 870 /* 871 * Let the user know we failed to slumber but don't worry too 872 * much because we are powering down anyway 873 */ 874 875 if (ret) 876 DRM_DEV_ERROR(gmu->dev, 877 "Unable to slumber GMU: status = 0%x/0%x\n", 878 gmu_read(gmu, 879 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), 880 gmu_read(gmu, 881 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); 882 } 883 884 /* Turn off HFI */ 885 a6xx_hfi_stop(gmu); 886 887 /* Stop the interrupts and mask the hardware */ 888 a6xx_gmu_irq_disable(gmu); 889 890 /* Tell RPMh to power off the GPU */ 891 a6xx_rpmh_stop(gmu); 892 } 893 894 895 int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) 896 { 897 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 898 struct msm_gpu *gpu = &a6xx_gpu->base.base; 899 900 if (!pm_runtime_active(gmu->dev)) 901 return 0; 902 903 /* 904 * Force the GMU off if we detected a hang, otherwise try to shut it 905 * down gracefully 906 */ 907 if (gmu->hung) 908 a6xx_gmu_force_off(gmu); 909 else 910 a6xx_gmu_shutdown(gmu); 911 912 /* Remove the bus vote */ 913 icc_set_bw(gpu->icc_path, 0, 0); 914 915 /* 916 * Make sure the GX domain is off before turning off the GMU (CX) 917 * domain. Usually the GMU does this but only if the shutdown sequence 918 * was successful 919 */ 920 if (!IS_ERR_OR_NULL(gmu->gxpd)) 921 pm_runtime_put_sync(gmu->gxpd); 922 923 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); 924 925 pm_runtime_put_sync(gmu->dev); 926 927 return 0; 928 } 929 930 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo) 931 { 932 if (IS_ERR_OR_NULL(bo)) 933 return; 934 935 dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova); 936 kfree(bo); 937 } 938 939 static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, 940 size_t size) 941 { 942 struct a6xx_gmu_bo *bo; 943 944 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 945 if (!bo) 946 return ERR_PTR(-ENOMEM); 947 948 bo->size = PAGE_ALIGN(size); 949 950 bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL); 951 952 if (!bo->virt) { 953 kfree(bo); 954 return ERR_PTR(-ENOMEM); 955 } 956 957 return bo; 958 } 959 960 /* Return the 'arc-level' for the given frequency */ 961 static unsigned int a6xx_gmu_get_arc_level(struct device *dev, 962 unsigned long freq) 963 { 964 struct dev_pm_opp *opp; 965 unsigned int val; 966 967 if (!freq) 968 return 0; 969 970 opp = dev_pm_opp_find_freq_exact(dev, freq, true); 971 if (IS_ERR(opp)) 972 return 0; 973 974 val = dev_pm_opp_get_level(opp); 975 976 dev_pm_opp_put(opp); 977 978 return val; 979 } 980 981 static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, 982 unsigned long *freqs, int freqs_count, const char *id) 983 { 984 int i, j; 985 const u16 *pri, *sec; 986 size_t pri_count, sec_count; 987 988 pri = cmd_db_read_aux_data(id, &pri_count); 989 if (IS_ERR(pri)) 990 return PTR_ERR(pri); 991 /* 992 * The data comes back as an array of unsigned shorts so adjust the 993 * count accordingly 994 */ 995 pri_count >>= 1; 996 if (!pri_count) 997 return -EINVAL; 998 999 sec = cmd_db_read_aux_data("mx.lvl", &sec_count); 1000 if (IS_ERR(sec)) 1001 return PTR_ERR(sec); 1002 1003 sec_count >>= 1; 1004 if (!sec_count) 1005 return -EINVAL; 1006 1007 /* Construct a vote for each frequency */ 1008 for (i = 0; i < freqs_count; i++) { 1009 u8 pindex = 0, sindex = 0; 1010 unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]); 1011 1012 /* Get the primary index that matches the arc level */ 1013 for (j = 0; j < pri_count; j++) { 1014 if (pri[j] >= level) { 1015 pindex = j; 1016 break; 1017 } 1018 } 1019 1020 if (j == pri_count) { 1021 DRM_DEV_ERROR(dev, 1022 "Level %u not found in the RPMh list\n", 1023 level); 1024 DRM_DEV_ERROR(dev, "Available levels:\n"); 1025 for (j = 0; j < pri_count; j++) 1026 DRM_DEV_ERROR(dev, " %u\n", pri[j]); 1027 1028 return -EINVAL; 1029 } 1030 1031 /* 1032 * Look for a level in in the secondary list that matches. If 1033 * nothing fits, use the maximum non zero vote 1034 */ 1035 1036 for (j = 0; j < sec_count; j++) { 1037 if (sec[j] >= level) { 1038 sindex = j; 1039 break; 1040 } else if (sec[j]) { 1041 sindex = j; 1042 } 1043 } 1044 1045 /* Construct the vote */ 1046 votes[i] = ((pri[pindex] & 0xffff) << 16) | 1047 (sindex << 8) | pindex; 1048 } 1049 1050 return 0; 1051 } 1052 1053 /* 1054 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need 1055 * to construct the list of votes on the CPU and send it over. Query the RPMh 1056 * voltage levels and build the votes 1057 */ 1058 1059 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) 1060 { 1061 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 1062 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 1063 struct msm_gpu *gpu = &adreno_gpu->base; 1064 int ret; 1065 1066 /* Build the GX votes */ 1067 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, 1068 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); 1069 1070 /* Build the CX votes */ 1071 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, 1072 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); 1073 1074 return ret; 1075 } 1076 1077 static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs, 1078 u32 size) 1079 { 1080 int count = dev_pm_opp_get_opp_count(dev); 1081 struct dev_pm_opp *opp; 1082 int i, index = 0; 1083 unsigned long freq = 1; 1084 1085 /* 1086 * The OPP table doesn't contain the "off" frequency level so we need to 1087 * add 1 to the table size to account for it 1088 */ 1089 1090 if (WARN(count + 1 > size, 1091 "The GMU frequency table is being truncated\n")) 1092 count = size - 1; 1093 1094 /* Set the "off" frequency */ 1095 freqs[index++] = 0; 1096 1097 for (i = 0; i < count; i++) { 1098 opp = dev_pm_opp_find_freq_ceil(dev, &freq); 1099 if (IS_ERR(opp)) 1100 break; 1101 1102 dev_pm_opp_put(opp); 1103 freqs[index++] = freq++; 1104 } 1105 1106 return index; 1107 } 1108 1109 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) 1110 { 1111 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 1112 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 1113 struct msm_gpu *gpu = &adreno_gpu->base; 1114 1115 int ret = 0; 1116 1117 /* 1118 * The GMU handles its own frequency switching so build a list of 1119 * available frequencies to send during initialization 1120 */ 1121 ret = dev_pm_opp_of_add_table(gmu->dev); 1122 if (ret) { 1123 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); 1124 return ret; 1125 } 1126 1127 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, 1128 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); 1129 1130 /* 1131 * The GMU also handles GPU frequency switching so build a list 1132 * from the GPU OPP table 1133 */ 1134 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, 1135 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); 1136 1137 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; 1138 1139 /* Build the list of RPMh votes that we'll send to the GMU */ 1140 return a6xx_gmu_rpmh_votes_init(gmu); 1141 } 1142 1143 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) 1144 { 1145 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); 1146 1147 if (ret < 1) 1148 return ret; 1149 1150 gmu->nr_clocks = ret; 1151 1152 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, 1153 gmu->nr_clocks, "gmu"); 1154 1155 return 0; 1156 } 1157 1158 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev, 1159 const char *name) 1160 { 1161 void __iomem *ret; 1162 struct resource *res = platform_get_resource_byname(pdev, 1163 IORESOURCE_MEM, name); 1164 1165 if (!res) { 1166 DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name); 1167 return ERR_PTR(-EINVAL); 1168 } 1169 1170 ret = ioremap(res->start, resource_size(res)); 1171 if (!ret) { 1172 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name); 1173 return ERR_PTR(-EINVAL); 1174 } 1175 1176 return ret; 1177 } 1178 1179 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, 1180 const char *name, irq_handler_t handler) 1181 { 1182 int irq, ret; 1183 1184 irq = platform_get_irq_byname(pdev, name); 1185 1186 ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); 1187 if (ret) { 1188 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", 1189 name, ret); 1190 return ret; 1191 } 1192 1193 disable_irq(irq); 1194 1195 return irq; 1196 } 1197 1198 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) 1199 { 1200 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 1201 1202 if (!gmu->initialized) 1203 return; 1204 1205 pm_runtime_force_suspend(gmu->dev); 1206 1207 if (!IS_ERR_OR_NULL(gmu->gxpd)) { 1208 pm_runtime_disable(gmu->gxpd); 1209 dev_pm_domain_detach(gmu->gxpd, false); 1210 } 1211 1212 iounmap(gmu->mmio); 1213 gmu->mmio = NULL; 1214 1215 a6xx_gmu_memory_free(gmu, gmu->hfi); 1216 1217 free_irq(gmu->gmu_irq, gmu); 1218 free_irq(gmu->hfi_irq, gmu); 1219 1220 /* Drop reference taken in of_find_device_by_node */ 1221 put_device(gmu->dev); 1222 1223 gmu->initialized = false; 1224 } 1225 1226 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) 1227 { 1228 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; 1229 struct platform_device *pdev = of_find_device_by_node(node); 1230 int ret; 1231 1232 if (!pdev) 1233 return -ENODEV; 1234 1235 gmu->dev = &pdev->dev; 1236 1237 /* Pass force_dma false to require the DT to set the dma region */ 1238 ret = of_dma_configure(gmu->dev, node, false); 1239 if (ret) 1240 return ret; 1241 1242 /* Set the mask after the of_dma_configure() */ 1243 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31)); 1244 if (ret) 1245 return ret; 1246 1247 /* Fow now, don't do anything fancy until we get our feet under us */ 1248 gmu->idle_level = GMU_IDLE_STATE_ACTIVE; 1249 1250 pm_runtime_enable(gmu->dev); 1251 1252 /* Get the list of clocks */ 1253 ret = a6xx_gmu_clocks_probe(gmu); 1254 if (ret) 1255 goto err_put_device; 1256 1257 /* Allocate memory for for the HFI queues */ 1258 gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K); 1259 if (IS_ERR(gmu->hfi)) 1260 goto err_memory; 1261 1262 /* Allocate memory for the GMU debug region */ 1263 gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K); 1264 if (IS_ERR(gmu->debug)) 1265 goto err_memory; 1266 1267 /* Map the GMU registers */ 1268 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); 1269 if (IS_ERR(gmu->mmio)) 1270 goto err_memory; 1271 1272 /* Get the HFI and GMU interrupts */ 1273 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); 1274 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); 1275 1276 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) 1277 goto err_mmio; 1278 1279 /* 1280 * Get a link to the GX power domain to reset the GPU in case of GMU 1281 * crash 1282 */ 1283 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); 1284 1285 /* Get the power levels for the GMU and GPU */ 1286 a6xx_gmu_pwrlevels_probe(gmu); 1287 1288 /* Set up the HFI queues */ 1289 a6xx_hfi_init(gmu); 1290 1291 gmu->initialized = true; 1292 1293 return 0; 1294 1295 err_mmio: 1296 iounmap(gmu->mmio); 1297 free_irq(gmu->gmu_irq, gmu); 1298 free_irq(gmu->hfi_irq, gmu); 1299 err_memory: 1300 a6xx_gmu_memory_free(gmu, gmu->hfi); 1301 1302 ret = -ENODEV; 1303 1304 err_put_device: 1305 /* Drop reference taken in of_find_device_by_node */ 1306 put_device(gmu->dev); 1307 1308 return ret; 1309 } 1310