// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	return IRQ_HANDLED;
}

bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (IS_ERR_OR_NULL(gmu->mmio))
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}

/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (IS_ERR_OR_NULL(gmu->mmio))
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}

static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	gmu->freq = gmu->gpu_freqs[index];
}

void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index = 0;

	if (freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (freq == gmu->gpu_freqs[perf_index])
			break;

	__a6xx_gmu_set_freq(gmu, perf_index);
}
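
/* Return the GPU frequency last requested through the DCVS OOB above */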
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		val == 0xbabeface, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}

/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}
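
/*
 * a6xx_gmu_set_oob() and a6xx_gmu_clear_oob() implement the OOB handshake:
 * set the request bit in REG_A6XX_GMU_HOST2GMU_INTR_SET, poll
 * REG_A6XX_GMU_GMU2HOST_INTR_INFO for the matching ack bit, clear the ack,
 * and finally write the corresponding *_CLEAR bit back to
 * REG_A6XX_GMU_HOST2GMU_INTR_SET once the host is done with the OOB state.
 */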

/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return 0;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}

/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
	return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	return msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);

static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");

	if (!pdcptr || !seqptr)
		goto err;

	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	devm_iounmap(gmu->dev, pdcptr);
	devm_iounmap(gmu->dev, seqptr);
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */

#define GMU_PWR_COL_HYST 0x000a1680

/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}

static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
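		/*
		 * With retention enabled this write is also what
		 * a6xx_gmu_resume() reads back to decide between a warm and a
		 * cold boot.
		 */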

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else if (state != GMU_RESET) {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
{
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
		~A6XX_GMU_IRQ_MASK);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		~A6XX_HFI_IRQ_MASK);

	enable_irq(gmu->gmu_irq);
	enable_irq(gmu->hfi_irq);
}

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int ret;
	u32 val;

	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);

	/* Force off the GX GDSC */
	regulator_force_disable(gmu->gx);

	/* Disable the resources */
	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
	pm_runtime_put_sync(gmu->dev);
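
	/*
	 * The RSC is not powered off in this path, and a6xx_gmu_fw_start()
	 * does not call a6xx_rpmh_start() for the GMU_RESET state used below.
	 */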

	/* Re-enable the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
	if (!ret)
		ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);

	/* Set the GPU back to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
		return 0;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);

	/* Set the GPU to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	/* Make sure to turn off the boot OOB request on error */
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->mmio)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);

		/* Temporary until we can recover safely */
		BUG_ON(ret);

		/* tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */

		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}

static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}
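
/*
 * GMU buffers are built from individual pages mapped into the GMU IOMMU
 * domain at the current uncached IOVA base; a6xx_gmu_memory_alloc() then
 * advances the base by the size rounded up to 1MB so that each new buffer
 * starts on a 1MB boundary.
 */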

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);

	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}

/* Return the 'arc-level' for the given frequency */
static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	u32 val = 0;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	np = dev_pm_opp_get_of_node(opp);

	if (np) {
		of_property_read_u32(np, "qcom,level", &val);
		of_node_put(np);
	}

	dev_pm_opp_put(opp);

	return val;
}

static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);

	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
				level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, " %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}
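
/*
 * a6xx_gmu_rpmh_arc_votes_init() packs each vote as the primary ARC level
 * value in bits 31:16, the index into the secondary (mx.lvl) table in bits
 * 15:8 and the index into the primary table in bits 7:0.
 */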

/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}

static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = dev_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
		name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
		return ret;
	}

	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (IS_ERR_OR_NULL(gmu->mmio))
		return;

	pm_runtime_disable(gmu->dev);
	a6xx_gmu_stop(a6xx_gpu);

	a6xx_gmu_irq_disable(gmu);
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);
}

int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);
	gmu->gx = devm_regulator_get(gmu->dev, "vdd");

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		return ret;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		return ret;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err;

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	return 0;

err:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}

	return -ENODEV;
}