// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	return IRQ_HANDLED;
}

/* Check to see if the GX rail is still powered */
static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}

static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	gmu->freq = gmu->gpu_freqs[index];
}

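/*
 * Look up the perf index for the requested rate. If the rate is not found in
 * gpu_freqs[], the loop below runs off the end and leaves perf_index at
 * nr_gpu_freqs - 1, i.e. the highest available level.
 */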
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index = 0;

	if (freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (freq == gmu->gpu_freqs[perf_index])
			break;

	__a6xx_gmu_set_freq(gmu, perf_index);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		val == 0xbabeface, 100, 10000);

	if (ret)
		dev_err(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		dev_err(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}

/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		dev_err(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}

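/*
 * SPTPRAC is only driven from the CPU when the idle level is below
 * GMU_IDLE_STATE_SPTP (see a6xx_gmu_fw_start() and a6xx_gmu_notify_slumber()).
 * The power control values written below, 0x778000 to power the block up and
 * 0x778001 to power it down, differ only in bit 0, which presumably selects
 * the power-down sequence; the encoding itself is otherwise undocumented here.
 */
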
/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}

/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			dev_err(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (!ret) {
		gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

		/* Re-enable the power counter */
		gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
		return 0;
	}

	dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
	return ret;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		dev_err(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

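/*
 * The PDC and sequencer register maps are addressed by dword register offset,
 * so pdc_write() shifts the offset left by two to turn it into a byte offset
 * into the mapping.
 */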
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	return msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);

static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");

	if (IS_ERR(pdcptr) || IS_ERR(seqptr))
		goto err;

	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		devm_iounmap(gmu->dev, pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		devm_iounmap(gmu->dev, seqptr);
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis, which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */
#define GMU_PWR_COL_HYST 0x000a1680

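/*
 * Checking the numbers in the comment above, assuming the usual 19.2 MHz XO:
 * 0x1680 = 5760 cycles -> 5760 / 19.2 MHz = 300 us for the main hysteresis,
 * and 0xa = 10 cycles -> 10 / 19.2 MHz ~= 0.52 us for the short one.
 */
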
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}

static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			dev_err(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else if (state != GMU_RESET) {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

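	/*
	 * Pack the chip id for the GMU: core in bits 31:24, major in 23:16,
	 * minor in 15:12 and patchid in 11:8, matching the shifts below.
	 */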
	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
{
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
		~A6XX_GMU_IRQ_MASK);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		~A6XX_HFI_IRQ_MASK);

	enable_irq(gmu->gmu_irq);
	enable_irq(gmu->hfi_irq);
}

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int ret;
	u32 val;

	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);

	/* Force off the GX GDSC */
	regulator_force_disable(gmu->gx);

	/* Disable the resources */
	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
	pm_runtime_put_sync(gmu->dev);

	/* Re-enable the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
	if (!ret)
		ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);

	/* Set the GPU back to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

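/*
 * a6xx_gmu_fw_start() writes 1 to REG_A6XX_GMU_GENERAL_7 on a cold boot while
 * turning on register retention, so reading the register back as 1 here means
 * the GMU state was retained and the warm boot path can be taken.
 */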
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
		return 0;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);

	/* Set the GPU to the highest power frequency */
	__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	/* Make sure to turn off the boot OOB request on error */
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->mmio)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 val;

	/*
	 * The GMU may still be in slumber if the GPU never started, so check
	 * and skip putting it back into slumber in that case
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);

		/* Temporary until we can recover safely */
		BUG_ON(ret);

		/* Tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			dev_err(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}

static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

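	/*
	 * Carve the next chunk out of the GMU's uncached IOVA window (which
	 * a6xx_gmu_memory_probe() starts at 0x60000000); the base is bumped
	 * by a 1MB-aligned amount further down so successive buffers do not
	 * overlap.
	 */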
	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			dev_err(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1 ; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);

	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}

/* Get the list of RPMh voltage levels from cmd-db */
static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
{
	u32 len = cmd_db_read_aux_data_len(id);

	if (!len)
		return 0;

	if (WARN_ON(len > size))
		return -EINVAL;

	cmd_db_read_aux_data(id, vals, len);

	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	return len >> 1;
}

/* Return the 'arc-level' for the given frequency */
static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	u32 val = 0;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	np = dev_pm_opp_get_of_node(opp);

	if (np) {
		of_property_read_u32(np, "qcom,level", &val);
		of_node_put(np);
	}

	dev_pm_opp_put(opp);

	return val;
}

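/*
 * Each vote constructed below packs three fields into a single u32, matching
 * the expression at the bottom of the loop: bits 31:16 carry the raw primary
 * (e.g. gfx.lvl) level value, bits 15:8 the index of the chosen secondary
 * (mx.lvl) level and bits 7:0 the index of the primary level.
 */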
If 984 * nothing fits, use the maximum non zero vote 985 */ 986 987 for (j = 0; j < sec_count; j++) { 988 if (sec[j] >= level) { 989 sindex = j; 990 break; 991 } else if (sec[j]) { 992 sindex = j; 993 } 994 } 995 996 /* Construct the vote */ 997 votes[i] = ((pri[pindex] & 0xffff) << 16) | 998 (sindex << 8) | pindex; 999 } 1000 1001 return 0; 1002 } 1003 1004 /* 1005 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need 1006 * to construct the list of votes on the CPU and send it over. Query the RPMh 1007 * voltage levels and build the votes 1008 */ 1009 1010 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) 1011 { 1012 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 1013 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 1014 struct msm_gpu *gpu = &adreno_gpu->base; 1015 1016 u16 gx[16], cx[16], mx[16]; 1017 u32 gxcount, cxcount, mxcount; 1018 int ret; 1019 1020 /* Get the list of available voltage levels for each component */ 1021 gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx)); 1022 cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx)); 1023 mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx)); 1024 1025 /* Build the GX votes */ 1026 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, 1027 gmu->gpu_freqs, gmu->nr_gpu_freqs, 1028 gx, gxcount, mx, mxcount); 1029 1030 /* Build the CX votes */ 1031 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, 1032 gmu->gmu_freqs, gmu->nr_gmu_freqs, 1033 cx, cxcount, mx, mxcount); 1034 1035 return ret; 1036 } 1037 1038 static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs, 1039 u32 size) 1040 { 1041 int count = dev_pm_opp_get_opp_count(dev); 1042 struct dev_pm_opp *opp; 1043 int i, index = 0; 1044 unsigned long freq = 1; 1045 1046 /* 1047 * The OPP table doesn't contain the "off" frequency level so we need to 1048 * add 1 to the table size to account for it 1049 */ 1050 1051 if (WARN(count + 1 > size, 1052 "The GMU frequency table is being truncated\n")) 1053 count = size - 1; 1054 1055 /* Set the "off" frequency */ 1056 freqs[index++] = 0; 1057 1058 for (i = 0; i < count; i++) { 1059 opp = dev_pm_opp_find_freq_ceil(dev, &freq); 1060 if (IS_ERR(opp)) 1061 break; 1062 1063 dev_pm_opp_put(opp); 1064 freqs[index++] = freq++; 1065 } 1066 1067 return index; 1068 } 1069 1070 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) 1071 { 1072 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); 1073 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 1074 struct msm_gpu *gpu = &adreno_gpu->base; 1075 1076 int ret = 0; 1077 1078 /* 1079 * The GMU handles its own frequency switching so build a list of 1080 * available frequencies to send during initialization 1081 */ 1082 ret = dev_pm_opp_of_add_table(gmu->dev); 1083 if (ret) { 1084 dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n"); 1085 return ret; 1086 } 1087 1088 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, 1089 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); 1090 1091 /* 1092 * The GMU also handles GPU frequency switching so build a list 1093 * from the GPU OPP table 1094 */ 1095 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, 1096 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); 1097 1098 /* Build the list of RPMh votes that we'll send to the GMU */ 1099 return a6xx_gmu_rpmh_votes_init(gmu); 1100 } 1101 1102 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) 1103 { 1104 int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks); 1105 
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ret) {
		dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
		name, gmu);
	if (ret) {
		dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
		return ret;
	}

	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (IS_ERR_OR_NULL(gmu->mmio))
		return;

	pm_runtime_disable(gmu->dev);
	a6xx_gmu_stop(a6xx_gpu);

	a6xx_gmu_irq_disable(gmu);
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);
}

int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);
	gmu->gx = devm_regulator_get(gmu->dev, "vdd");

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		return ret;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		return ret;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err;

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	return 0;
err:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}

	return -ENODEV;
}