// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	return IRQ_HANDLED;
}

/* Check to see if the GX rail is still powered */
static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}

static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	gmu->freq = gmu->gpu_freqs[index];
}

void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index = 0;

	if (freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (freq == gmu->gpu_freqs[perf_index])
			break;

	__a6xx_gmu_set_freq(gmu, perf_index);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}
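
/*
 * Note on idle levels, as used below: gmu->idle_level is one of
 * GMU_IDLE_STATE_ACTIVE (no power management), GMU_IDLE_STATE_SPTP
 * (SP/TP power collapse) or GMU_IDLE_STATE_IFPC (inter-frame power
 * collapse). The check below compares the requested level against what
 * REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE reports; since the hardware
 * reports SPTP as IFPC, the GX rail is checked separately to confirm
 * that IFPC really happened.
 */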

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		val == 0xbabeface, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}

/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}
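
/*
 * A minimal sketch of how the OOB helpers above are used, based on the
 * callers in this file (the sequence is illustrative and error handling
 * is elided):
 *
 *	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);   // raise request, wait for ack
 *	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); // retire the request
 *
 * Each OOB state owns three bits: a REQUEST bit raised in
 * REG_A6XX_GMU_HOST2GMU_INTR_SET, an ACK bit the GMU reflects in
 * REG_A6XX_GMU_GMU2HOST_INTR_INFO, and a CLEAR bit to retire the request.
 */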

/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}

/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
	return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
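
/*
 * pdc_write() below mirrors gmu_write() for the separate PDC register
 * regions: it takes a register word offset and shifts it into a byte
 * offset (offset << 2). For example, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1
 * lands four bytes past REG_A6XX_PDC_GPU_SEQ_MEM_0.
 */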

static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	return msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);

static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");

	/* a6xx_gmu_get_mmio() returns an ERR_PTR on failure, never NULL */
	if (IS_ERR(pdcptr) || IS_ERR(seqptr))
		goto err;

	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR(pdcptr))
		devm_iounmap(gmu->dev, pdcptr);
	if (!IS_ERR(seqptr))
		devm_iounmap(gmu->dev, seqptr);
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis, which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */
#define GMU_PWR_COL_HYST 0x000a1680
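
/*
 * A worked decode of the value above: the main hysteresis in bits [15:0]
 * is 0x1680 = 5760 cycles, which at the 19.2 MHz XO rate implied by the
 * comment is 5760 / 19.2e6 = 300 us. The short hysteresis in bits
 * [31:16] is 0xa = 10 cycles, or roughly 0.5 us.
 */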

/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
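
/*
 * A summary of the boot states handled below, as this file uses them:
 * GMU_COLD_BOOT loads the GMU firmware into ITCM and, once per driver
 * lifetime, programs the RSC/PDC sequencers via a6xx_gmu_rpmh_init();
 * GMU_WARM_BOOT skips the firmware load and only re-runs the RSC
 * handshake in a6xx_rpmh_start(); GMU_RESET reloads the firmware but
 * skips a6xx_rpmh_start(), since the recovery path in a6xx_gmu_reset()
 * has already quiesced the RPMh votes.
 */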

static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else if (state != GMU_RESET) {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	/*
	 * Pack the Adreno revision for the GMU:
	 * core[31:24], major[23:16], minor[15:12], patchid[11:8]
	 */
	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}
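
/*
 * The masks below list the interrupt sources the driver cares about.
 * a6xx_gmu_irq_enable() writes the bitwise NOT of each mask, which
 * suggests the hardware mask registers use 1 to mean "masked off", so
 * ~A6XX_GMU_IRQ_MASK leaves only the watchdog, AHB error and fence
 * error sources unmasked.
 */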

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
{
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
		~A6XX_GMU_IRQ_MASK);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		~A6XX_HFI_IRQ_MASK);

	enable_irq(gmu->gmu_irq);
	enable_irq(gmu->hfi_irq);
}

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int ret;
	u32 val;

	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);

	/* Force off the GX GDSC */
	regulator_force_disable(gmu->gx);

	/* Disable the resources */
	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
	pm_runtime_put_sync(gmu->dev);

	/* Re-enable the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
	if (!ret)
		ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);

	/* Set the GPU back to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}
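
/*
 * The warm/cold decision below keys off REG_A6XX_GMU_GENERAL_7, which
 * a6xx_gmu_fw_start() sets to 1 when it turns on register retention. If
 * the value survived the power cycle, the retained GMU state is
 * presumably intact and a warm boot suffices; otherwise the firmware is
 * reloaded from scratch.
 */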

int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
		return 0;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);

	/* Set the GPU to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	/* Make sure to turn off the boot OOB request on error */
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->mmio)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 val;

	/*
	 * If the GPU never started, the GMU may still be in slumber; check
	 * and skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);

		/* Temporary until we can recover safely */
		BUG_ON(ret);

		/* Tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}

static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}
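
/*
 * A worked example of the allocator above as a6xx_gmu_probe() uses it:
 * the first 16K allocation (the HFI queues) gets iova 0x60000000, the
 * base of the uncached range set up below, and because the base then
 * advances by ALIGN(size, SZ_1M), the second 16K allocation (the debug
 * region) starts at 0x60100000.
 */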

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);

	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}

/* Get the list of RPMh voltage levels from cmd-db */
static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
{
	u32 len = cmd_db_read_aux_data_len(id);

	if (!len)
		return 0;

	if (WARN_ON(len > size))
		return -EINVAL;

	cmd_db_read_aux_data(id, vals, len);

	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	return len >> 1;
}

/* Return the 'arc-level' for the given frequency */
static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	u32 val = 0;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	np = dev_pm_opp_get_of_node(opp);

	if (np) {
		of_property_read_u32(np, "qcom,level", &val);
		of_node_put(np);
	}

	dev_pm_opp_put(opp);

	return val;
}

static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count,
		u16 *pri, int pri_count,
		u16 *sec, int sec_count)
{
	int i, j;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
				level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}
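
/*
 * A worked example of the vote encoding above, with hypothetical table
 * values: if a level first matches primary index 2 where pri[2] = 0x80,
 * and secondary index 1, the packed vote is
 * (0x80 << 16) | (1 << 8) | 2 = 0x00800102: the raw primary level in
 * the upper half and the secondary and primary table indices below it.
 */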

/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	u16 gx[16], cx[16], mx[16];
	u32 gxcount, cxcount, mxcount;
	int ret;

	/* Get the list of available voltage levels for each component */
	gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
	cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
	mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs,
		gx, gxcount, mx, mxcount);

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs,
		cx, cxcount, mx, mxcount);

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}
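
/*
 * Note on the loop above: dev_pm_opp_find_freq_ceil() rounds freq up to
 * the nearest OPP at or above it and writes that rate back through the
 * pointer, so starting at 1 and incrementing past each hit walks the
 * OPP table in ascending order. With hypothetical OPPs at 300 MHz and
 * 710 MHz the result is freqs[] = { 0, 300000000, 710000000 }, where
 * the leading 0 is the "off" level.
 */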

static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = dev_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
		name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
		return ret;
	}

	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (IS_ERR_OR_NULL(gmu->mmio))
		return;

	pm_runtime_disable(gmu->dev);
	a6xx_gmu_stop(a6xx_gpu);

	a6xx_gmu_irq_disable(gmu);
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);
}

int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);
	gmu->gx = devm_regulator_get(gmu->dev, "vdd");

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		return ret;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		return ret;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err;

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	return 0;
err:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}

	return -ENODEV;
}