Lines matching refs:gmu — cross-reference listing of every line that uses the gmu identifier in the Adreno a6xx GMU driver (a6xx_gmu.c). Each entry shows the source line number, the code on that line, and the containing function; "argument" and "local" mark entries where gmu is a function parameter or a local variable.

19 static void a6xx_gmu_fault(struct a6xx_gmu *gmu)  in a6xx_gmu_fault()  argument
21 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fault()
26 gmu->hung = true; in a6xx_gmu_fault()
37 struct a6xx_gmu *gmu = data; in a6xx_gmu_irq() local
40 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS); in a6xx_gmu_irq()
41 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); in a6xx_gmu_irq()
44 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n"); in a6xx_gmu_irq()
46 a6xx_gmu_fault(gmu); in a6xx_gmu_irq()
50 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n"); in a6xx_gmu_irq()
53 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n", in a6xx_gmu_irq()
54 gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS)); in a6xx_gmu_irq()
61 struct a6xx_gmu *gmu = data; in a6xx_hfi_irq() local
64 status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO); in a6xx_hfi_irq()
65 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status); in a6xx_hfi_irq()
68 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n"); in a6xx_hfi_irq()
70 a6xx_gmu_fault(gmu); in a6xx_hfi_irq()
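
Taken together, the two interrupt handlers above follow the same read-and-clear pattern: latch the status register, acknowledge it, log the specific error, and escalate fatal conditions through a6xx_gmu_fault(). A minimal sketch of that pattern follows; the A6XX_GMU_AO_HOST_INTERRUPT_STATUS_* bit names come from the a6xx register definitions, and the control flow between the listed fragments is reconstructed, not copied verbatim from the driver.

    static irqreturn_t a6xx_gmu_irq(int irq, void *data)
    {
        struct a6xx_gmu *gmu = data;
        u32 status;

        /* Latch and acknowledge the always-on interrupt status */
        status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
        gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

        if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
            dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
            a6xx_gmu_fault(gmu);    /* mark the GMU hung and queue recovery */
        }

        if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
            dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

        if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
            dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
                                gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

        return IRQ_HANDLED;
    }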
76 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu) in a6xx_gmu_sptprac_is_on() argument
81 if (!gmu->initialized) in a6xx_gmu_sptprac_is_on()
84 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); in a6xx_gmu_sptprac_is_on()
92 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) in a6xx_gmu_gx_is_on() argument
97 if (!gmu->initialized) in a6xx_gmu_gx_is_on()
100 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS); in a6xx_gmu_gx_is_on()
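
Both status checks read REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS and report a block as "on" only when neither its power-off nor its clock-off bit is set. A sketch of the GX check under that reading; the bit names are assumed from the a6xx register headers:

    bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
    {
        u32 val;

        if (!gmu->initialized)
            return false;

        val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

        /* GX is on only if its GDSC is powered and its clock is running */
        return !(val &
                 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
                  A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
    }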
112 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_set_freq() local
119 if (gpu_freq == gmu->freq) in a6xx_gmu_set_freq()
122 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++) in a6xx_gmu_set_freq()
123 if (gpu_freq == gmu->gpu_freqs[perf_index]) in a6xx_gmu_set_freq()
126 gmu->current_perf_index = perf_index; in a6xx_gmu_set_freq()
127 gmu->freq = gmu->gpu_freqs[perf_index]; in a6xx_gmu_set_freq()
129 trace_msm_gmu_freq_change(gmu->freq, perf_index); in a6xx_gmu_set_freq()
140 if (!gmu->legacy) { in a6xx_gmu_set_freq()
141 a6xx_hfi_set_freq(gmu, perf_index); in a6xx_gmu_set_freq()
146 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); in a6xx_gmu_set_freq()
148 gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING, in a6xx_gmu_set_freq()
155 gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff); in a6xx_gmu_set_freq()
158 a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET); in a6xx_gmu_set_freq()
159 a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET); in a6xx_gmu_set_freq()
161 ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN); in a6xx_gmu_set_freq()
163 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); in a6xx_gmu_set_freq()
172 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_get_freq() local
174 return gmu->freq; in a6xx_gmu_get_freq()
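
On the legacy (non-HFI-DCVS) path a frequency change is a register handshake: pick the perf index that matches the requested OPP, program the DCVS perf and bandwidth settings, kick the GMU with the DCVS_SET out-of-band request, and read back the result. A condensed sketch of that sequence, reconstructed from the fragments above; the helper name a6xx_gmu_set_freq_legacy() is hypothetical (in the driver this runs inline in a6xx_gmu_set_freq()) and the exact PERF_SETTING encoding is an assumption:

    static void a6xx_gmu_set_freq_legacy(struct a6xx_gmu *gmu, u32 perf_index)
    {
        int ret;

        gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
        gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
                  ((3 & 0xf) << 28) | perf_index);

        /* 0xff tells the GMU not to touch the bus vote from this path */
        gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

        /* Ring the doorbell and wait for the GMU to acknowledge the vote */
        a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
        a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

        ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
        if (ret)
            dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
    }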
177 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu) in a6xx_gmu_check_idle_level() argument
180 int local = gmu->idle_level; in a6xx_gmu_check_idle_level()
183 if (gmu->idle_level == GMU_IDLE_STATE_SPTP) in a6xx_gmu_check_idle_level()
186 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); in a6xx_gmu_check_idle_level()
189 if (gmu->idle_level != GMU_IDLE_STATE_IFPC || in a6xx_gmu_check_idle_level()
190 !a6xx_gmu_gx_is_on(gmu)) in a6xx_gmu_check_idle_level()
198 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu) in a6xx_gmu_wait_for_idle() argument
200 return spin_until(a6xx_gmu_check_idle_level(gmu)); in a6xx_gmu_wait_for_idle()
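
a6xx_gmu_wait_for_idle() simply spins on a6xx_gmu_check_idle_level() until the requested idle level is reached or a timeout expires. The spin_until() helper is defined elsewhere in the driver; a plausible shape for that kind of bounded-poll macro (an illustrative assumption, including the timeout value, not the driver's actual definition) is:

    /* Poll a condition until it becomes true or a timeout elapses */
    #define spin_until(cond) ({                                     \
        int __ret = -ETIMEDOUT;                                     \
        unsigned long __timeout = jiffies + msecs_to_jiffies(10);   \
        do {                                                        \
            if (cond) {                                             \
                __ret = 0;                                          \
                break;                                              \
            }                                                       \
        } while (time_before(jiffies, __timeout));                  \
        __ret;                                                      \
    })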
203 static int a6xx_gmu_start(struct a6xx_gmu *gmu) in a6xx_gmu_start() argument
209 val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); in a6xx_gmu_start()
218 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); in a6xx_gmu_start()
223 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0); in a6xx_gmu_start()
225 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); in a6xx_gmu_start()
227 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, in a6xx_gmu_start()
231 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); in a6xx_gmu_start()
236 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu) in a6xx_gmu_hfi_start() argument
241 gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1); in a6xx_gmu_hfi_start()
243 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val, in a6xx_gmu_hfi_start()
246 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n"); in a6xx_gmu_hfi_start()
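
Both start paths rely on gmu_poll_timeout() to busy-wait on a GMU register until a condition holds. In the driver this appears to be a thin wrapper over readl_poll_timeout() against the GMU MMIO region, with register offsets given in words; a sketch under that assumption, with illustrative interval/timeout values in the usage line:

    #include <linux/iopoll.h>

    /* Poll a GMU register (word offset) until cond is true or timeout (us) expires */
    #define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout)   \
        readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond,      \
                           interval, timeout)

    /* Usage, as in a6xx_gmu_hfi_start(): wait for the HFI queues to come up */
    ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
                           val & 1, 100, 10000);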
296 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) in a6xx_gmu_set_oob() argument
302 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_set_oob()
307 if (gmu->legacy) { in a6xx_gmu_set_oob()
314 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
322 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request); in a6xx_gmu_set_oob()
325 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val, in a6xx_gmu_set_oob()
329 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_set_oob()
332 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO)); in a6xx_gmu_set_oob()
335 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack); in a6xx_gmu_set_oob()
341 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) in a6xx_gmu_clear_oob() argument
345 WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); in a6xx_gmu_clear_oob()
350 if (gmu->legacy) in a6xx_gmu_clear_oob()
355 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit); in a6xx_gmu_clear_oob()
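
The OOB (out-of-band) helpers implement a doorbell/ack protocol: a6xx_gmu_set_oob() sets a request bit in HOST2GMU_INTR_SET, polls GMU2HOST_INTR_INFO for the matching ack bit, then clears the ack; a6xx_gmu_clear_oob() sets the corresponding clear bit so the GMU can drop the request. Callers must hold gmu->lock, as the WARN_ON_ONCE() lines above enforce. A typical usage sketch; the wrapper function name is hypothetical, while GMU_OOB_GPU_SET is one of the driver's real OOB states:

    /* Hypothetical caller: hold a GPU OOB vote around direct register access */
    static int example_touch_gpu_registers(struct a6xx_gmu *gmu)
    {
        int ret;

        mutex_lock(&gmu->lock);

        ret = a6xx_gmu_set_oob(gmu, GMU_OOB_GPU_SET);
        if (!ret) {
            /* ... GPU registers may be accessed safely here ... */
            a6xx_gmu_clear_oob(gmu, GMU_OOB_GPU_SET);
        }

        mutex_unlock(&gmu->lock);

        return ret;
    }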
359 int a6xx_sptprac_enable(struct a6xx_gmu *gmu) in a6xx_sptprac_enable() argument
364 if (!gmu->legacy) in a6xx_sptprac_enable()
367 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000); in a6xx_sptprac_enable()
369 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, in a6xx_sptprac_enable()
373 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n", in a6xx_sptprac_enable()
374 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); in a6xx_sptprac_enable()
381 void a6xx_sptprac_disable(struct a6xx_gmu *gmu) in a6xx_sptprac_disable() argument
386 if (!gmu->legacy) in a6xx_sptprac_disable()
390 gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11)); in a6xx_sptprac_disable()
392 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001); in a6xx_sptprac_disable()
394 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, in a6xx_sptprac_disable()
398 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n", in a6xx_sptprac_disable()
399 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS)); in a6xx_sptprac_disable()
403 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu) in a6xx_gmu_gfx_rail_on() argument
408 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0); in a6xx_gmu_gfx_rail_on()
411 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1]; in a6xx_gmu_gfx_rail_on()
413 gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff); in a6xx_gmu_gfx_rail_on()
414 gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff); in a6xx_gmu_gfx_rail_on()
417 return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_gfx_rail_on()
421 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) in a6xx_gmu_notify_slumber() argument
426 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0); in a6xx_gmu_notify_slumber()
429 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) in a6xx_gmu_notify_slumber()
430 a6xx_sptprac_disable(gmu); in a6xx_gmu_notify_slumber()
432 if (!gmu->legacy) { in a6xx_gmu_notify_slumber()
433 ret = a6xx_hfi_send_prep_slumber(gmu); in a6xx_gmu_notify_slumber()
438 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1); in a6xx_gmu_notify_slumber()
440 ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_notify_slumber()
441 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER); in a6xx_gmu_notify_slumber()
445 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE) in a6xx_gmu_notify_slumber()
447 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n"); in a6xx_gmu_notify_slumber()
454 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); in a6xx_gmu_notify_slumber()
458 static int a6xx_rpmh_start(struct a6xx_gmu *gmu) in a6xx_rpmh_start() argument
463 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1); in a6xx_rpmh_start()
467 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val, in a6xx_rpmh_start()
470 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n"); in a6xx_rpmh_start()
474 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, in a6xx_rpmh_start()
478 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n"); in a6xx_rpmh_start()
482 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); in a6xx_rpmh_start()
487 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) in a6xx_rpmh_stop() argument
492 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); in a6xx_rpmh_stop()
494 ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, in a6xx_rpmh_stop()
497 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); in a6xx_rpmh_stop()
499 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0); in a6xx_rpmh_stop()
510 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_init() argument
512 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_rpmh_init()
514 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_rpmh_init()
539 gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); in a6xx_gmu_rpmh_init()
542 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); in a6xx_gmu_rpmh_init()
543 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); in a6xx_gmu_rpmh_init()
544 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); in a6xx_gmu_rpmh_init()
545 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); in a6xx_gmu_rpmh_init()
546 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); in a6xx_gmu_rpmh_init()
547 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000); in a6xx_gmu_rpmh_init()
548 gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); in a6xx_gmu_rpmh_init()
549 gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); in a6xx_gmu_rpmh_init()
550 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); in a6xx_gmu_rpmh_init()
551 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); in a6xx_gmu_rpmh_init()
552 gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); in a6xx_gmu_rpmh_init()
556 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0); in a6xx_gmu_rpmh_init()
557 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab); in a6xx_gmu_rpmh_init()
558 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581); in a6xx_gmu_rpmh_init()
559 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2); in a6xx_gmu_rpmh_init()
560 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad); in a6xx_gmu_rpmh_init()
562 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); in a6xx_gmu_rpmh_init()
563 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); in a6xx_gmu_rpmh_init()
564 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); in a6xx_gmu_rpmh_init()
565 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); in a6xx_gmu_rpmh_init()
566 gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); in a6xx_gmu_rpmh_init()
620 a6xx_rpmh_stop(gmu); in a6xx_gmu_rpmh_init()
638 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) in a6xx_gmu_power_config() argument
641 gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1); in a6xx_gmu_power_config()
642 gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1); in a6xx_gmu_power_config()
643 gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1); in a6xx_gmu_power_config()
645 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); in a6xx_gmu_power_config()
647 switch (gmu->idle_level) { in a6xx_gmu_power_config()
649 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST, in a6xx_gmu_power_config()
651 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, in a6xx_gmu_power_config()
656 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST, in a6xx_gmu_power_config()
658 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0, in a6xx_gmu_power_config()
664 gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0, in a6xx_gmu_power_config()
690 static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) in a6xx_gmu_fw_load() argument
692 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fw_load()
704 if (gmu->legacy) { in a6xx_gmu_fw_load()
707 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
712 gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START, in a6xx_gmu_fw_load()
726 gmu_write_bulk(gmu, in a6xx_gmu_fw_load()
731 gmu_write_bulk(gmu, in a6xx_gmu_fw_load()
734 } else if (!fw_block_mem(&gmu->icache, blk) && in a6xx_gmu_fw_load()
735 !fw_block_mem(&gmu->dcache, blk) && in a6xx_gmu_fw_load()
736 !fw_block_mem(&gmu->dummy, blk)) { in a6xx_gmu_fw_load()
737 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_fw_load()
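
For non-legacy firmware the loader walks the image block by block and copies each block into whichever GMU buffer object covers its target address (icache, dcache, or the dummy page), failing if no region matches. A sketch of such a helper, assuming a block_header with addr/size/data fields like the one the driver parses (field names reconstructed, not verbatim):

    static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
    {
        /* The block must fall entirely inside this buffer's IOVA window */
        if (!bo->size || blk->addr < bo->iova ||
            blk->addr + blk->size > bo->iova + bo->size)
            return false;

        memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
        return true;
    }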
746 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) in a6xx_gmu_fw_start() argument
748 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_fw_start()
754 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1); in a6xx_gmu_fw_start()
755 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); in a6xx_gmu_fw_start()
759 ret = a6xx_rpmh_start(gmu); in a6xx_gmu_fw_start()
768 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); in a6xx_gmu_fw_start()
770 ret = a6xx_rpmh_start(gmu); in a6xx_gmu_fw_start()
774 ret = a6xx_gmu_fw_load(gmu); in a6xx_gmu_fw_start()
779 gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0); in a6xx_gmu_fw_start()
780 gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02); in a6xx_gmu_fw_start()
783 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); in a6xx_gmu_fw_start()
784 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); in a6xx_gmu_fw_start()
786 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0, in a6xx_gmu_fw_start()
793 gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052); in a6xx_gmu_fw_start()
806 gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); in a6xx_gmu_fw_start()
808 gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG, in a6xx_gmu_fw_start()
809 gmu->log.iova | (gmu->log.size / SZ_4K - 1)); in a6xx_gmu_fw_start()
812 a6xx_gmu_power_config(gmu); in a6xx_gmu_fw_start()
814 ret = a6xx_gmu_start(gmu); in a6xx_gmu_fw_start()
818 if (gmu->legacy) { in a6xx_gmu_fw_start()
819 ret = a6xx_gmu_gfx_rail_on(gmu); in a6xx_gmu_fw_start()
825 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { in a6xx_gmu_fw_start()
826 ret = a6xx_sptprac_enable(gmu); in a6xx_gmu_fw_start()
831 ret = a6xx_gmu_hfi_start(gmu); in a6xx_gmu_fw_start()
849 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu) in a6xx_gmu_irq_disable() argument
851 disable_irq(gmu->gmu_irq); in a6xx_gmu_irq_disable()
852 disable_irq(gmu->hfi_irq); in a6xx_gmu_irq_disable()
854 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0); in a6xx_gmu_irq_disable()
855 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0); in a6xx_gmu_irq_disable()
858 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_off() argument
863 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val, in a6xx_gmu_rpmh_off()
865 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val, in a6xx_gmu_rpmh_off()
867 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val, in a6xx_gmu_rpmh_off()
869 gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val, in a6xx_gmu_rpmh_off()
874 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) in a6xx_gmu_force_off() argument
876 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_force_off()
884 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); in a6xx_gmu_force_off()
887 a6xx_hfi_stop(gmu); in a6xx_gmu_force_off()
890 a6xx_gmu_irq_disable(gmu); in a6xx_gmu_force_off()
893 a6xx_sptprac_disable(gmu); in a6xx_gmu_force_off()
896 a6xx_gmu_rpmh_off(gmu); in a6xx_gmu_force_off()
899 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7); in a6xx_gmu_force_off()
900 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); in a6xx_gmu_force_off()
906 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); in a6xx_gmu_force_off()
914 static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) in a6xx_gmu_set_initial_freq() argument
917 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_freq()
923 gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ in a6xx_gmu_set_initial_freq()
928 static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) in a6xx_gmu_set_initial_bw() argument
931 unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; in a6xx_gmu_set_initial_bw()
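
Setting the initial frequency reuses the normal DCVS path: look up the OPP for the boot perf index, zero gmu->freq so the early-exit comparison in a6xx_gmu_set_freq() cannot trigger, and vote. A sketch under that reading; the dev_pm_opp_* calls are the standard OPP API, and the exact a6xx_gmu_set_freq() signature (the trailing bool) varies by driver version:

    static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
    {
        struct dev_pm_opp *gpu_opp;
        unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

        gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
        if (IS_ERR(gpu_opp))
            return;

        gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
        a6xx_gmu_set_freq(gpu, gpu_opp, false);

        dev_pm_opp_put(gpu_opp);
    }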
945 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_resume() local
948 if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) in a6xx_gmu_resume()
951 gmu->hung = false; in a6xx_gmu_resume()
954 pm_runtime_get_sync(gmu->dev); in a6xx_gmu_resume()
961 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_resume()
962 pm_runtime_get_sync(gmu->gxpd); in a6xx_gmu_resume()
965 clk_set_rate(gmu->core_clk, 200000000); in a6xx_gmu_resume()
966 clk_set_rate(gmu->hub_clk, 150000000); in a6xx_gmu_resume()
967 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_resume()
969 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
970 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
975 a6xx_gmu_set_initial_bw(gpu, gmu); in a6xx_gmu_resume()
978 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); in a6xx_gmu_resume()
979 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK); in a6xx_gmu_resume()
980 enable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
983 status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? in a6xx_gmu_resume()
990 if (!gmu->legacy) in a6xx_gmu_resume()
993 ret = a6xx_gmu_fw_start(gmu, status); in a6xx_gmu_resume()
997 ret = a6xx_hfi_start(gmu, status); in a6xx_gmu_resume()
1005 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0); in a6xx_gmu_resume()
1006 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK); in a6xx_gmu_resume()
1007 enable_irq(gmu->hfi_irq); in a6xx_gmu_resume()
1010 a6xx_gmu_set_initial_freq(gpu, gmu); in a6xx_gmu_resume()
1015 disable_irq(gmu->gmu_irq); in a6xx_gmu_resume()
1016 a6xx_rpmh_stop(gmu); in a6xx_gmu_resume()
1017 pm_runtime_put(gmu->gxpd); in a6xx_gmu_resume()
1018 pm_runtime_put(gmu->dev); in a6xx_gmu_resume()
1024 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) in a6xx_gmu_isidle() argument
1028 if (!gmu->initialized) in a6xx_gmu_isidle()
1031 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS); in a6xx_gmu_isidle()
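
a6xx_gmu_isidle() reads the always-on CX busy status and treats the GMU as idle only when the "GPU busy ignoring AHB" bit is clear. A sketch of that check; the bit name is taken from the a6xx register definitions and the early return for an uninitialized GMU is reconstructed:

    bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
    {
        u32 reg;

        if (!gmu->initialized)
            return true;

        reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

        /* Busy if anything other than AHB traffic is still outstanding */
        if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
            return false;

        return true;
    }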
1040 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) in a6xx_gmu_shutdown() argument
1042 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_shutdown()
1050 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); in a6xx_gmu_shutdown()
1053 int ret = a6xx_gmu_wait_for_idle(gmu); in a6xx_gmu_shutdown()
1057 a6xx_gmu_force_off(gmu); in a6xx_gmu_shutdown()
1064 ret = a6xx_gmu_notify_slumber(gmu); in a6xx_gmu_shutdown()
1066 a6xx_gmu_force_off(gmu); in a6xx_gmu_shutdown()
1070 ret = gmu_poll_timeout(gmu, in a6xx_gmu_shutdown()
1081 DRM_DEV_ERROR(gmu->dev, in a6xx_gmu_shutdown()
1083 gmu_read(gmu, in a6xx_gmu_shutdown()
1085 gmu_read(gmu, in a6xx_gmu_shutdown()
1090 a6xx_hfi_stop(gmu); in a6xx_gmu_shutdown()
1093 a6xx_gmu_irq_disable(gmu); in a6xx_gmu_shutdown()
1096 a6xx_rpmh_stop(gmu); in a6xx_gmu_shutdown()
1102 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_stop() local
1105 if (!pm_runtime_active(gmu->dev)) in a6xx_gmu_stop()
1112 if (gmu->hung) in a6xx_gmu_stop()
1113 a6xx_gmu_force_off(gmu); in a6xx_gmu_stop()
1115 a6xx_gmu_shutdown(gmu); in a6xx_gmu_stop()
1125 if (!IS_ERR_OR_NULL(gmu->gxpd)) in a6xx_gmu_stop()
1126 pm_runtime_put_sync(gmu->gxpd); in a6xx_gmu_stop()
1128 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); in a6xx_gmu_stop()
1130 pm_runtime_put_sync(gmu->dev); in a6xx_gmu_stop()
1135 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) in a6xx_gmu_memory_free() argument
1137 msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace); in a6xx_gmu_memory_free()
1138 msm_gem_kernel_put(gmu->debug.obj, gmu->aspace); in a6xx_gmu_memory_free()
1139 msm_gem_kernel_put(gmu->icache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1140 msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace); in a6xx_gmu_memory_free()
1141 msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace); in a6xx_gmu_memory_free()
1142 msm_gem_kernel_put(gmu->log.obj, gmu->aspace); in a6xx_gmu_memory_free()
1144 gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); in a6xx_gmu_memory_free()
1145 msm_gem_address_space_put(gmu->aspace); in a6xx_gmu_memory_free()
1148 static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, in a6xx_gmu_memory_alloc() argument
1151 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_memory_alloc()
1174 ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, in a6xx_gmu_memory_alloc()
1189 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) in a6xx_gmu_memory_probe() argument
1193 mmu = msm_iommu_new(gmu->dev, 0); in a6xx_gmu_memory_probe()
1199 gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000); in a6xx_gmu_memory_probe()
1200 if (IS_ERR(gmu->aspace)) in a6xx_gmu_memory_probe()
1201 return PTR_ERR(gmu->aspace); in a6xx_gmu_memory_probe()
1305 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) in a6xx_gmu_rpmh_votes_init() argument
1307 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_rpmh_votes_init()
1313 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1314 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); in a6xx_gmu_rpmh_votes_init()
1317 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, in a6xx_gmu_rpmh_votes_init()
1318 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); in a6xx_gmu_rpmh_votes_init()
1355 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu) in a6xx_gmu_pwrlevels_probe() argument
1357 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); in a6xx_gmu_pwrlevels_probe()
1367 ret = devm_pm_opp_of_add_table(gmu->dev); in a6xx_gmu_pwrlevels_probe()
1369 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n"); in a6xx_gmu_pwrlevels_probe()
1373 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev, in a6xx_gmu_pwrlevels_probe()
1374 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs)); in a6xx_gmu_pwrlevels_probe()
1380 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev, in a6xx_gmu_pwrlevels_probe()
1381 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs)); in a6xx_gmu_pwrlevels_probe()
1383 gmu->current_perf_index = gmu->nr_gpu_freqs - 1; in a6xx_gmu_pwrlevels_probe()
1386 return a6xx_gmu_rpmh_votes_init(gmu); in a6xx_gmu_pwrlevels_probe()
1389 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu) in a6xx_gmu_clocks_probe() argument
1391 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks); in a6xx_gmu_clocks_probe()
1396 gmu->nr_clocks = ret; in a6xx_gmu_clocks_probe()
1398 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1399 gmu->nr_clocks, "gmu"); in a6xx_gmu_clocks_probe()
1401 gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks, in a6xx_gmu_clocks_probe()
1402 gmu->nr_clocks, "hub"); in a6xx_gmu_clocks_probe()
1428 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, in a6xx_gmu_get_irq() argument
1435 ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); in a6xx_gmu_get_irq()
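
Each of the two GMU interrupts is fetched by name from the platform device and requested with the handler shown earlier, then left disabled until a6xx_gmu_resume() enables it. A sketch of that helper, with the lookup and error path reconstructed around the request_irq() line above:

    static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
                                const char *name, irq_handler_t handler)
    {
        int irq, ret;

        irq = platform_get_irq_byname(pdev, name);

        ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
        if (ret) {
            DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
                          name, ret);
            return ret;
        }

        /* Keep the line masked until resume unmasks and enables it */
        disable_irq(irq);

        return irq;
    }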
1450 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_remove() local
1451 struct platform_device *pdev = to_platform_device(gmu->dev); in a6xx_gmu_remove()
1453 mutex_lock(&gmu->lock); in a6xx_gmu_remove()
1454 if (!gmu->initialized) { in a6xx_gmu_remove()
1455 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1459 gmu->initialized = false; in a6xx_gmu_remove()
1461 mutex_unlock(&gmu->lock); in a6xx_gmu_remove()
1463 pm_runtime_force_suspend(gmu->dev); in a6xx_gmu_remove()
1469 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_remove()
1471 if (!IS_ERR_OR_NULL(gmu->gxpd)) { in a6xx_gmu_remove()
1472 pm_runtime_disable(gmu->gxpd); in a6xx_gmu_remove()
1473 dev_pm_domain_detach(gmu->gxpd, false); in a6xx_gmu_remove()
1476 iounmap(gmu->mmio); in a6xx_gmu_remove()
1478 iounmap(gmu->rscc); in a6xx_gmu_remove()
1479 gmu->mmio = NULL; in a6xx_gmu_remove()
1480 gmu->rscc = NULL; in a6xx_gmu_remove()
1483 a6xx_gmu_memory_free(gmu); in a6xx_gmu_remove()
1485 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_remove()
1486 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_remove()
1490 put_device(gmu->dev); in a6xx_gmu_remove()
1496 struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb); in cxpd_notifier_cb() local
1499 complete_all(&gmu->pd_gate); in cxpd_notifier_cb()
1507 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_wrapper_init() local
1513 gmu->dev = &pdev->dev; in a6xx_gmu_wrapper_init()
1515 of_dma_configure(gmu->dev, node, true); in a6xx_gmu_wrapper_init()
1517 pm_runtime_enable(gmu->dev); in a6xx_gmu_wrapper_init()
1520 gmu->legacy = true; in a6xx_gmu_wrapper_init()
1523 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_wrapper_init()
1524 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_wrapper_init()
1525 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_wrapper_init()
1529 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_wrapper_init()
1530 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_wrapper_init()
1531 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_wrapper_init()
1535 if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) { in a6xx_gmu_wrapper_init()
1540 init_completion(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1541 complete_all(&gmu->pd_gate); in a6xx_gmu_wrapper_init()
1542 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_wrapper_init()
1545 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_wrapper_init()
1546 if (IS_ERR(gmu->gxpd)) { in a6xx_gmu_wrapper_init()
1547 ret = PTR_ERR(gmu->gxpd); in a6xx_gmu_wrapper_init()
1551 gmu->initialized = true; in a6xx_gmu_wrapper_init()
1556 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_wrapper_init()
1559 iounmap(gmu->mmio); in a6xx_gmu_wrapper_init()
1562 put_device(gmu->dev); in a6xx_gmu_wrapper_init()
1570 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_gmu_init() local
1577 gmu->dev = &pdev->dev; in a6xx_gmu_init()
1579 of_dma_configure(gmu->dev, node, true); in a6xx_gmu_init()
1582 gmu->idle_level = GMU_IDLE_STATE_ACTIVE; in a6xx_gmu_init()
1584 pm_runtime_enable(gmu->dev); in a6xx_gmu_init()
1587 ret = a6xx_gmu_clocks_probe(gmu); in a6xx_gmu_init()
1591 ret = a6xx_gmu_memory_probe(gmu); in a6xx_gmu_init()
1602 gmu->dummy.size = SZ_4K; in a6xx_gmu_init()
1604 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, in a6xx_gmu_init()
1609 gmu->dummy.size = SZ_8K; in a6xx_gmu_init()
1613 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, in a6xx_gmu_init()
1620 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1631 ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, in a6xx_gmu_init()
1636 ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, in a6xx_gmu_init()
1642 gmu->legacy = true; in a6xx_gmu_init()
1645 ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug"); in a6xx_gmu_init()
1651 ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log"); in a6xx_gmu_init()
1656 ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi"); in a6xx_gmu_init()
1661 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); in a6xx_gmu_init()
1662 if (IS_ERR(gmu->mmio)) { in a6xx_gmu_init()
1663 ret = PTR_ERR(gmu->mmio); in a6xx_gmu_init()
1668 gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); in a6xx_gmu_init()
1669 if (IS_ERR(gmu->rscc)) { in a6xx_gmu_init()
1674 gmu->rscc = gmu->mmio + 0x23000; in a6xx_gmu_init()
1678 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); in a6xx_gmu_init()
1679 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); in a6xx_gmu_init()
1681 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) { in a6xx_gmu_init()
1686 gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx"); in a6xx_gmu_init()
1687 if (IS_ERR(gmu->cxpd)) { in a6xx_gmu_init()
1688 ret = PTR_ERR(gmu->cxpd); in a6xx_gmu_init()
1692 if (!device_link_add(gmu->dev, gmu->cxpd, in a6xx_gmu_init()
1698 init_completion(&gmu->pd_gate); in a6xx_gmu_init()
1699 complete_all(&gmu->pd_gate); in a6xx_gmu_init()
1700 gmu->pd_nb.notifier_call = cxpd_notifier_cb; in a6xx_gmu_init()
1706 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx"); in a6xx_gmu_init()
1709 a6xx_gmu_pwrlevels_probe(gmu); in a6xx_gmu_init()
1712 a6xx_hfi_init(gmu); in a6xx_gmu_init()
1715 a6xx_gmu_rpmh_init(gmu); in a6xx_gmu_init()
1717 gmu->initialized = true; in a6xx_gmu_init()
1722 dev_pm_domain_detach(gmu->cxpd, false); in a6xx_gmu_init()
1725 iounmap(gmu->mmio); in a6xx_gmu_init()
1727 iounmap(gmu->rscc); in a6xx_gmu_init()
1728 free_irq(gmu->gmu_irq, gmu); in a6xx_gmu_init()
1729 free_irq(gmu->hfi_irq, gmu); in a6xx_gmu_init()
1732 a6xx_gmu_memory_free(gmu); in a6xx_gmu_init()
1735 put_device(gmu->dev); in a6xx_gmu_init()