// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	return IRQ_HANDLED;
}

/* Check to see if the GX rail is still powered */
static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}

static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		val == 0xbabeface, 100, 10000);

	if (ret)
		dev_err(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}
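/*
 * Note: judging from the poll condition in a6xx_gmu_hfi_start() below, bit 0
 * of REG_A6XX_GMU_HFI_CTRL_STATUS appears to be the firmware's "HFI queues
 * ready" acknowledgment; the rest of the register layout is undocumented
 * here.
 */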
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		dev_err(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}

/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		dev_err(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}

/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	/* Return the poll result so callers actually see a timeout */
	return ret;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
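/*
 * The "vote" consumed below comes from a6xx_gmu_rpmh_arc_votes_init(): the
 * low byte is an index into the primary (GX) ARC table and the next byte an
 * index into the secondary (MX) table, so only the two index bytes are
 * written to the vote registers.
 */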
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			dev_err(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (!ret) {
		gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

		/* Re-enable the power counter */
		gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
		return 0;
	}

	dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
	return ret;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		dev_err(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);
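/*
 * One-time programming of the RSC and PDC sequencers. The uCode words and
 * TCS command values below are opaque hardware sequences (presumably
 * inherited from the vendor driver) that encode the sleep and wakeup
 * handshakes for the GPU power domains.
 */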
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");

	/* a6xx_gmu_get_mmio() returns ERR_PTR() values, not NULL */
	if (IS_ERR(pdcptr) || IS_ERR(seqptr))
		goto err;

	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		devm_iounmap(gmu->dev, pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		devm_iounmap(gmu->dev, seqptr);
}
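/*
 * Assuming the usual 19.2 MHz XO clock, 0x1680 (5760) cycles works out to
 * 300 us and 0xa (10) cycles to roughly 0.5 us, matching the comment below.
 */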
/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis, which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */
#define GMU_PWR_COL_HYST 0x000a1680

/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
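/*
 * Boot the GMU firmware: restart RPMh if needed, load the CM3 image into
 * ITCM on a cold boot, point the firmware at the HFI queue table and set up
 * the idle level. 'state' selects between the cold boot, warm boot and
 * reset paths.
 */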
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			dev_err(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else if (state != GMU_RESET) {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
{
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
		~A6XX_GMU_IRQ_MASK);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		~A6XX_HFI_IRQ_MASK);

	enable_irq(gmu->gmu_irq);
	enable_irq(gmu->hfi_irq);
}

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int ret;
	u32 val;

	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);

	/* Force off the GX GDSC */
	regulator_force_disable(gmu->gx);

	/* Disable the resources */
	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
	pm_runtime_put_sync(gmu->dev);

	/* Re-enable the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
	if (!ret)
		ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);

	/* Set the GPU back to the highest power frequency */
	a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}
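/*
 * REG_A6XX_GMU_GENERAL_7 is written with 1 just before the firmware is
 * loaded in a6xx_gmu_fw_start() and is retained across slumber, so reading
 * it back below distinguishes a warm boot from a cold one.
 */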
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
		return 0;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);

	/* Set the GPU to the highest power frequency */
	a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	/* Make sure to turn off the boot OOB request on error */
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->mmio)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 val;

	/*
	 * The GMU may still be in slumber if the GPU never started, so check
	 * and skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);

		/* Temporary until we can recover safely */
		BUG_ON(ret);

		/* Tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			dev_err(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}
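/*
 * Allocate a buffer shared with the GMU: pages are allocated individually
 * and mapped contiguously into the GMU's uncached IOVA range (the
 * 0x60000000 window set up in a6xx_gmu_memory_probe() below).
 */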
static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			dev_err(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);

	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}

/* Get the list of RPMh voltage levels from cmd-db */
static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
{
	u32 len = cmd_db_read_aux_data_len(id);

	if (!len)
		return 0;

	if (WARN_ON(len > size))
		return -EINVAL;

	cmd_db_read_aux_data(id, vals, len);

	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	return len >> 1;
}

/* Return the 'arc-level' for the given frequency */
static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	u32 val = 0;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	np = dev_pm_opp_get_of_node(opp);

	if (np) {
		of_property_read_u32(np, "qcom,level", &val);
		of_node_put(np);
	}

	dev_pm_opp_put(opp);

	return val;
}

static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count,
		u16 *pri, int pri_count,
		u16 *sec, int sec_count)
{
	int i, j;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			dev_err(dev,
				"Level %u not found in the RPMh list\n",
				level);
			dev_err(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				dev_err(dev, " %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}
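/*
 * As a hypothetical example of the packing above, a vote of 0x00300102
 * would decode as: primary level 0x0030, secondary (MX) table index 1,
 * primary table index 2.
 */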
/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	u16 gx[16], cx[16], mx[16];
	u32 gxcount, cxcount, mxcount;
	int ret;

	/* Get the list of available voltage levels for each component */
	gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
	cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
	mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs,
		gx, gxcount, mx, mxcount);

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs,
		cx, cxcount, mx, mxcount);

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}

static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = dev_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}
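/*
 * Map a named register region for the GMU. Note that failures are returned
 * as ERR_PTR() values, so callers must check with IS_ERR() rather than
 * comparing against NULL.
 */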
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ret) {
		dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
		name, gmu);
	if (ret) {
		dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
		return ret;
	}

	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (IS_ERR_OR_NULL(gmu->mmio))
		return;

	pm_runtime_disable(gmu->dev);
	a6xx_gmu_stop(a6xx_gpu);

	a6xx_gmu_irq_disable(gmu);
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);
}

int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);
	gmu->gx = devm_regulator_get(gmu->dev, "vdd");

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		return ret;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		return ret;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err;

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	return 0;
err:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}

	return -ENODEV;
}