Lines Matching +full:gpu +full:- +full:id

1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
10 #include <linux/nvmem-consumer.h>
17 static void a5xx_dump(struct msm_gpu *gpu);
21 static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in update_shadow_rptr() argument
23 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in update_shadow_rptr()
26 if (a5xx_gpu->has_whereami) { in update_shadow_rptr()
33 void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, in a5xx_flush() argument
36 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_flush()
46 update_shadow_rptr(gpu, ring); in a5xx_flush()
48 spin_lock_irqsave(&ring->preempt_lock, flags); in a5xx_flush()
51 ring->cur = ring->next; in a5xx_flush()
56 spin_unlock_irqrestore(&ring->preempt_lock, flags); in a5xx_flush()
62 if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu)) in a5xx_flush()
63 gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); in a5xx_flush()
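/*
 * Hypothetical usage sketch (example_emit_nop is an invented name, not part
 * of the driver): packets are written into ring->next with OUT_PKT7() and
 * OUT_RING() and only become visible to the CP once a5xx_flush() publishes
 * the new write pointer, mirroring the a5xx_me_init()/a5xx_hw_init() paths
 * further down this listing.
 */
static void example_emit_nop(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	OUT_PKT7(ring, CP_NOP, 0);	/* queue an empty type-7 packet */
	a5xx_flush(gpu, ring, false);	/* publish ring->next and poke CP_RB_WPTR */
}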
66 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a5xx_submit_in_rb() argument
68 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_submit_in_rb()
70 struct msm_ringbuffer *ring = submit->ring; in a5xx_submit_in_rb()
75 for (i = 0; i < submit->nr_cmds; i++) { in a5xx_submit_in_rb()
76 switch (submit->cmd[i].type) { in a5xx_submit_in_rb()
80 if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) in a5xx_submit_in_rb()
85 obj = submit->bos[submit->cmd[i].idx].obj; in a5xx_submit_in_rb()
86 dwords = submit->cmd[i].size; in a5xx_submit_in_rb()
114 a5xx_gpu->last_seqno[ring->id] = submit->seqno; in a5xx_submit_in_rb()
115 a5xx_flush(gpu, ring, true); in a5xx_submit_in_rb()
116 a5xx_preempt_trigger(gpu); in a5xx_submit_in_rb()
122 a5xx_idle(gpu, ring); in a5xx_submit_in_rb()
123 ring->memptrs->fence = submit->seqno; in a5xx_submit_in_rb()
124 msm_gpu_retire(gpu); in a5xx_submit_in_rb()
127 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a5xx_submit() argument
129 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_submit()
131 struct msm_ringbuffer *ring = submit->ring; in a5xx_submit()
134 if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { in a5xx_submit()
135 gpu->cur_ctx_seqno = 0; in a5xx_submit()
136 a5xx_submit_in_rb(gpu, submit); in a5xx_submit()
149 OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id])); in a5xx_submit()
150 OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id])); in a5xx_submit()
158 * user-space to be aware of it and provide additional handling in a5xx_submit()
169 for (i = 0; i < submit->nr_cmds; i++) { in a5xx_submit()
170 switch (submit->cmd[i].type) { in a5xx_submit()
174 if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) in a5xx_submit()
179 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a5xx_submit()
180 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); in a5xx_submit()
181 OUT_RING(ring, submit->cmd[i].size); in a5xx_submit()
187 * Periodically update shadow-wptr if needed, so that we in a5xx_submit()
194 update_shadow_rptr(gpu, ring); in a5xx_submit()
199 * are done rendering - otherwise a lucky preemption would start in a5xx_submit()
215 OUT_RING(ring, submit->seqno); in a5xx_submit()
216 a5xx_gpu->last_seqno[ring->id] = submit->seqno; in a5xx_submit()
227 OUT_RING(ring, submit->seqno); in a5xx_submit()
238 /* Data value - not used if the address above is 0 */ in a5xx_submit()
244 a5xx_flush(gpu, ring, false); in a5xx_submit()
247 a5xx_preempt_trigger(gpu); in a5xx_submit()
444 void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) in a5xx_set_hwcg() argument
446 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_set_hwcg()
462 gpu_write(gpu, regs[i].offset, in a5xx_set_hwcg()
466 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0); in a5xx_set_hwcg()
467 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0); in a5xx_set_hwcg()
470 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0); in a5xx_set_hwcg()
471 gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180); in a5xx_set_hwcg()
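/*
 * Hypothetical usage sketch (example_read_with_hwcg_off is an invented
 * helper): callers that need stable raw register reads, such as the crash
 * dump path later in this listing, bracket the access with
 * a5xx_set_hwcg(gpu, false) and a5xx_set_hwcg(gpu, true).
 */
static u32 example_read_with_hwcg_off(struct msm_gpu *gpu, u32 reg)
{
	u32 val;

	a5xx_set_hwcg(gpu, false);	/* temporarily disable HW clock gating */
	val = gpu_read(gpu, reg);
	a5xx_set_hwcg(gpu, true);	/* restore the normal gated state */

	return val;
}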
474 static int a5xx_me_init(struct msm_gpu *gpu) in a5xx_me_init() argument
476 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_me_init()
477 struct msm_ringbuffer *ring = gpu->rb[0]; in a5xx_me_init()
496 * Force a WFI after every direct-render 3D mode draw and every in a5xx_me_init()
511 a5xx_flush(gpu, ring, true); in a5xx_me_init()
512 return a5xx_idle(gpu, ring) ? 0 : -EINVAL; in a5xx_me_init()
515 static int a5xx_preempt_start(struct msm_gpu *gpu) in a5xx_preempt_start() argument
517 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_preempt_start()
519 struct msm_ringbuffer *ring = gpu->rb[0]; in a5xx_preempt_start()
521 if (gpu->nr_rings == 1) in a5xx_preempt_start()
530 OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id])); in a5xx_preempt_start()
531 OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id])); in a5xx_preempt_start()
554 a5xx_flush(gpu, ring, false); in a5xx_preempt_start()
556 return a5xx_idle(gpu, ring) ? 0 : -EINVAL; in a5xx_preempt_start()
573 a5xx_gpu->has_whereami = true; in a5xx_ucode_check_version()
578 static int a5xx_ucode_load(struct msm_gpu *gpu) in a5xx_ucode_load() argument
580 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_ucode_load()
584 if (!a5xx_gpu->pm4_bo) { in a5xx_ucode_load()
585 a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu, in a5xx_ucode_load()
586 adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova); in a5xx_ucode_load()
589 if (IS_ERR(a5xx_gpu->pm4_bo)) { in a5xx_ucode_load()
590 ret = PTR_ERR(a5xx_gpu->pm4_bo); in a5xx_ucode_load()
591 a5xx_gpu->pm4_bo = NULL; in a5xx_ucode_load()
592 DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n", in a5xx_ucode_load()
597 msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw"); in a5xx_ucode_load()
600 if (!a5xx_gpu->pfp_bo) { in a5xx_ucode_load()
601 a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu, in a5xx_ucode_load()
602 adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova); in a5xx_ucode_load()
604 if (IS_ERR(a5xx_gpu->pfp_bo)) { in a5xx_ucode_load()
605 ret = PTR_ERR(a5xx_gpu->pfp_bo); in a5xx_ucode_load()
606 a5xx_gpu->pfp_bo = NULL; in a5xx_ucode_load()
607 DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n", in a5xx_ucode_load()
612 msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw"); in a5xx_ucode_load()
613 a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo); in a5xx_ucode_load()
616 if (a5xx_gpu->has_whereami) { in a5xx_ucode_load()
617 if (!a5xx_gpu->shadow_bo) { in a5xx_ucode_load()
618 a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, in a5xx_ucode_load()
619 sizeof(u32) * gpu->nr_rings, in a5xx_ucode_load()
621 gpu->aspace, &a5xx_gpu->shadow_bo, in a5xx_ucode_load()
622 &a5xx_gpu->shadow_iova); in a5xx_ucode_load()
624 if (IS_ERR(a5xx_gpu->shadow)) in a5xx_ucode_load()
625 return PTR_ERR(a5xx_gpu->shadow); in a5xx_ucode_load()
627 msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow"); in a5xx_ucode_load()
629 } else if (gpu->nr_rings > 1) { in a5xx_ucode_load()
631 a5xx_preempt_fini(gpu); in a5xx_ucode_load()
632 gpu->nr_rings = 1; in a5xx_ucode_load()
640 static int a5xx_zap_shader_resume(struct msm_gpu *gpu) in a5xx_zap_shader_resume() argument
642 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_zap_shader_resume()
654 DRM_ERROR("%s: zap-shader resume failed: %d\n", in a5xx_zap_shader_resume()
655 gpu->name, ret); in a5xx_zap_shader_resume()
660 static int a5xx_zap_shader_init(struct msm_gpu *gpu) in a5xx_zap_shader_init() argument
670 return a5xx_zap_shader_resume(gpu); in a5xx_zap_shader_init()
672 ret = adreno_zap_shader_load(gpu, GPU_PAS_ID); in a5xx_zap_shader_init()
691 static int a5xx_hw_init(struct msm_gpu *gpu) in a5xx_hw_init() argument
693 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_hw_init()
698 gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); in a5xx_hw_init()
702 gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); in a5xx_hw_init()
704 /* Make all blocks contribute to the GPU BUSY perf counter */ in a5xx_hw_init()
705 gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF); in a5xx_hw_init()
708 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001); in a5xx_hw_init()
710 if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) { in a5xx_hw_init()
712 * Mask out the activity signals from RB1-3 to avoid false in a5xx_hw_init()
716 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11, in a5xx_hw_init()
718 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12, in a5xx_hw_init()
720 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13, in a5xx_hw_init()
722 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14, in a5xx_hw_init()
724 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15, in a5xx_hw_init()
726 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16, in a5xx_hw_init()
728 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17, in a5xx_hw_init()
730 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18, in a5xx_hw_init()
735 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL, in a5xx_hw_init()
739 gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01); in a5xx_hw_init()
742 gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT); in a5xx_hw_init()
745 gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6); in a5xx_hw_init()
748 gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02); in a5xx_hw_init()
751 gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000); in a5xx_hw_init()
752 gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF); in a5xx_hw_init()
753 gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000); in a5xx_hw_init()
754 gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF); in a5xx_hw_init()
756 /* Set the GMEM VA range (0 to gpu->gmem) */ in a5xx_hw_init()
757 gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000); in a5xx_hw_init()
758 gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000); in a5xx_hw_init()
759 gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO, in a5xx_hw_init()
760 0x00100000 + adreno_gpu->info->gmem - 1); in a5xx_hw_init()
761 gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000); in a5xx_hw_init()
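/*
 * Worked example (assuming a part with 1 MB of GMEM): the aperture starts at
 * 0x00100000, so GMEM_RANGE_MAX_LO above works out to
 * 0x00100000 + 0x00100000 - 1 = 0x001FFFFF, i.e. the last byte of GMEM.
 */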
765 gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20); in a5xx_hw_init()
767 gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); in a5xx_hw_init()
769 gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20); in a5xx_hw_init()
770 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030); in a5xx_hw_init()
771 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A); in a5xx_hw_init()
773 gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40); in a5xx_hw_init()
775 gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40); in a5xx_hw_init()
777 gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400); in a5xx_hw_init()
778 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060); in a5xx_hw_init()
779 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16); in a5xx_hw_init()
783 gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, in a5xx_hw_init()
787 gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, in a5xx_hw_init()
790 gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, in a5xx_hw_init()
793 if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) in a5xx_hw_init()
794 gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); in a5xx_hw_init()
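/*
 * Note on gpu_rmw(gpu, reg, mask, or), inferred from its uses in this file
 * (an assumption; the helper itself is defined in msm_gpu.h, outside this
 * listing): a read-modify-write that clears the bits in 'mask' and sets the
 * bits in 'or', roughly:
 *
 *	u32 val = gpu_read(gpu, reg);
 *	gpu_write(gpu, reg, (val & ~mask) | or);
 *
 * so the call above sets bit 8 of PC_DBG_ECO_CNTL without disturbing the
 * rest of the register.
 */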
798 * for 1-SP GPUs, as it is enabled by default. in a5xx_hw_init()
802 gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9)); in a5xx_hw_init()
805 gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29)); in a5xx_hw_init()
808 gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000); in a5xx_hw_init()
811 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); in a5xx_hw_init()
818 * CCU to be interpreted differently. This can cause a GPU fault. This in a5xx_hw_init()
819 * bug is fixed in the latest A510 revision. To enable this bug fix - in a5xx_hw_init()
824 gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0); in a5xx_hw_init()
827 a5xx_set_hwcg(gpu, true); in a5xx_hw_init()
829 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); in a5xx_hw_init()
837 gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7); in a5xx_hw_init()
838 gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1); in a5xx_hw_init()
842 gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit); in a5xx_hw_init()
845 gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10)); in a5xx_hw_init()
848 gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007); in a5xx_hw_init()
851 gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4)); in a5xx_hw_init()
852 gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8)); in a5xx_hw_init()
853 gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16)); in a5xx_hw_init()
854 gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32)); in a5xx_hw_init()
855 gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64)); in a5xx_hw_init()
856 gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64)); in a5xx_hw_init()
859 gpu_write(gpu, REG_A5XX_CP_PROTECT(6), in a5xx_hw_init()
862 gpu_write(gpu, REG_A5XX_CP_PROTECT(7), in a5xx_hw_init()
866 gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64)); in a5xx_hw_init()
867 gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8)); in a5xx_hw_init()
868 gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32)); in a5xx_hw_init()
869 gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1)); in a5xx_hw_init()
872 gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1)); in a5xx_hw_init()
873 gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2)); in a5xx_hw_init()
876 gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8)); in a5xx_hw_init()
877 gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16)); in a5xx_hw_init()
880 gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16)); in a5xx_hw_init()
883 gpu_write(gpu, REG_A5XX_CP_PROTECT(17), in a5xx_hw_init()
886 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0); in a5xx_hw_init()
888 * Disable the trusted memory range - we don't actually support secure in a5xx_hw_init()
892 gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000); in a5xx_hw_init()
893 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); in a5xx_hw_init()
895 /* Put the GPU into 64 bit by default */ in a5xx_hw_init()
896 gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
897 gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
898 gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
899 gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
900 gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
901 gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
902 gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
903 gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
904 gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
905 gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
906 gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
907 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); in a5xx_hw_init()
914 if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) { in a5xx_hw_init()
915 gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23)); in a5xx_hw_init()
916 gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0); in a5xx_hw_init()
919 ret = adreno_hw_init(gpu); in a5xx_hw_init()
924 a5xx_gpmu_ucode_init(gpu); in a5xx_hw_init()
926 gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova); in a5xx_hw_init()
927 gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova); in a5xx_hw_init()
930 gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova); in a5xx_hw_init()
938 gpu_write(gpu, REG_A5XX_CP_RB_CNTL, in a5xx_hw_init()
942 if (a5xx_gpu->shadow_bo) { in a5xx_hw_init()
943 gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR, in a5xx_hw_init()
944 shadowptr(a5xx_gpu, gpu->rb[0])); in a5xx_hw_init()
947 a5xx_preempt_hw_init(gpu); in a5xx_hw_init()
950 gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK); in a5xx_hw_init()
953 gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0); in a5xx_hw_init()
954 ret = a5xx_me_init(gpu); in a5xx_hw_init()
958 ret = a5xx_power_init(gpu); in a5xx_hw_init()
967 OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1); in a5xx_hw_init()
968 OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT)); in a5xx_hw_init()
970 a5xx_flush(gpu, gpu->rb[0], true); in a5xx_hw_init()
971 if (!a5xx_idle(gpu, gpu->rb[0])) in a5xx_hw_init()
972 return -EINVAL; in a5xx_hw_init()
983 ret = a5xx_zap_shader_init(gpu); in a5xx_hw_init()
985 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); in a5xx_hw_init()
986 OUT_RING(gpu->rb[0], 0x00000000); in a5xx_hw_init()
988 a5xx_flush(gpu, gpu->rb[0], true); in a5xx_hw_init()
989 if (!a5xx_idle(gpu, gpu->rb[0])) in a5xx_hw_init()
990 return -EINVAL; in a5xx_hw_init()
991 } else if (ret == -ENODEV) { in a5xx_hw_init()
998 dev_warn_once(gpu->dev->dev, in a5xx_hw_init()
999 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); in a5xx_hw_init()
1000 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0); in a5xx_hw_init()
1005 /* Last step - yield the ringbuffer */ in a5xx_hw_init()
1006 a5xx_preempt_start(gpu); in a5xx_hw_init()
1011 static void a5xx_recover(struct msm_gpu *gpu) in a5xx_recover() argument
1015 adreno_dump_info(gpu); in a5xx_recover()
1019 gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i))); in a5xx_recover()
1023 a5xx_dump(gpu); in a5xx_recover()
1025 gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1); in a5xx_recover()
1026 gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD); in a5xx_recover()
1027 gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0); in a5xx_recover()
1028 adreno_recover(gpu); in a5xx_recover()
1031 static void a5xx_destroy(struct msm_gpu *gpu) in a5xx_destroy() argument
1033 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_destroy()
1036 DBG("%s", gpu->name); in a5xx_destroy()
1038 a5xx_preempt_fini(gpu); in a5xx_destroy()
1040 if (a5xx_gpu->pm4_bo) { in a5xx_destroy()
1041 msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace); in a5xx_destroy()
1042 drm_gem_object_put(a5xx_gpu->pm4_bo); in a5xx_destroy()
1045 if (a5xx_gpu->pfp_bo) { in a5xx_destroy()
1046 msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace); in a5xx_destroy()
1047 drm_gem_object_put(a5xx_gpu->pfp_bo); in a5xx_destroy()
1050 if (a5xx_gpu->gpmu_bo) { in a5xx_destroy()
1051 msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace); in a5xx_destroy()
1052 drm_gem_object_put(a5xx_gpu->gpmu_bo); in a5xx_destroy()
1055 if (a5xx_gpu->shadow_bo) { in a5xx_destroy()
1056 msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace); in a5xx_destroy()
1057 drm_gem_object_put(a5xx_gpu->shadow_bo); in a5xx_destroy()
1064 static inline bool _a5xx_check_idle(struct msm_gpu *gpu) in _a5xx_check_idle() argument
1066 if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY) in _a5xx_check_idle()
1070 * Nearly every abnormality ends up pausing the GPU and triggering a in _a5xx_check_idle()
1073 return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) & in _a5xx_check_idle()
1077 bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a5xx_idle() argument
1079 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_idle()
1082 if (ring != a5xx_gpu->cur_ring) { in a5xx_idle()
1083 WARN(1, "Tried to idle a non-current ringbuffer\n"); in a5xx_idle()
1088 if (!adreno_idle(gpu, ring)) in a5xx_idle()
1091 if (spin_until(_a5xx_check_idle(gpu))) { in a5xx_idle()
1092 DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n", in a5xx_idle()
1093 gpu->name, __builtin_return_address(0), in a5xx_idle()
1094 gpu_read(gpu, REG_A5XX_RBBM_STATUS), in a5xx_idle()
1095 gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS), in a5xx_idle()
1096 gpu_read(gpu, REG_A5XX_CP_RB_RPTR), in a5xx_idle()
1097 gpu_read(gpu, REG_A5XX_CP_RB_WPTR)); in a5xx_idle()
1106 struct msm_gpu *gpu = arg; in a5xx_fault_handler() local
1110 gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)), in a5xx_fault_handler()
1111 gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)), in a5xx_fault_handler()
1112 gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)), in a5xx_fault_handler()
1113 gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)), in a5xx_fault_handler()
1117 snprintf(block, sizeof(block), "%x", info->fsynr1); in a5xx_fault_handler()
1119 return adreno_fault_handler(gpu, iova, flags, info, block, scratch); in a5xx_fault_handler()
1122 static void a5xx_cp_err_irq(struct msm_gpu *gpu) in a5xx_cp_err_irq() argument
1124 u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS); in a5xx_cp_err_irq()
1129 gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0); in a5xx_cp_err_irq()
1136 gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA); in a5xx_cp_err_irq()
1137 val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA); in a5xx_cp_err_irq()
1139 dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n", in a5xx_cp_err_irq()
1144 dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n", in a5xx_cp_err_irq()
1145 gpu_read(gpu, REG_A5XX_CP_HW_FAULT)); in a5xx_cp_err_irq()
1148 dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n"); in a5xx_cp_err_irq()
1151 u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS); in a5xx_cp_err_irq()
1153 dev_err_ratelimited(gpu->dev->dev, in a5xx_cp_err_irq()
1160 u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT); in a5xx_cp_err_irq()
1166 dev_err_ratelimited(gpu->dev->dev, in a5xx_cp_err_irq()
1173 static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status) in a5xx_rbbm_err_irq() argument
1176 u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS); in a5xx_rbbm_err_irq()
1178 dev_err_ratelimited(gpu->dev->dev, in a5xx_rbbm_err_irq()
1185 gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4)); in a5xx_rbbm_err_irq()
1188 gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, in a5xx_rbbm_err_irq()
1193 dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n"); in a5xx_rbbm_err_irq()
1196 dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n", in a5xx_rbbm_err_irq()
1197 gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS)); in a5xx_rbbm_err_irq()
1200 dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n", in a5xx_rbbm_err_irq()
1201 gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS)); in a5xx_rbbm_err_irq()
1204 dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n", in a5xx_rbbm_err_irq()
1205 gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS)); in a5xx_rbbm_err_irq()
1208 dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n"); in a5xx_rbbm_err_irq()
1211 dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n"); in a5xx_rbbm_err_irq()
1214 static void a5xx_uche_err_irq(struct msm_gpu *gpu) in a5xx_uche_err_irq() argument
1216 uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI); in a5xx_uche_err_irq()
1218 addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO); in a5xx_uche_err_irq()
1220 dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n", in a5xx_uche_err_irq()
1224 static void a5xx_gpmu_err_irq(struct msm_gpu *gpu) in a5xx_gpmu_err_irq() argument
1226 dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n"); in a5xx_gpmu_err_irq()
1229 static void a5xx_fault_detect_irq(struct msm_gpu *gpu) in a5xx_fault_detect_irq() argument
1231 struct drm_device *dev = gpu->dev; in a5xx_fault_detect_irq()
1232 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in a5xx_fault_detect_irq()
1235 * If stalled on SMMU fault, we could trip the GPU's hang detection, in a5xx_fault_detect_irq()
1240 if (gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24)) in a5xx_fault_detect_irq()
1243 …DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4… in a5xx_fault_detect_irq()
1244 ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0, in a5xx_fault_detect_irq()
1245 gpu_read(gpu, REG_A5XX_RBBM_STATUS), in a5xx_fault_detect_irq()
1246 gpu_read(gpu, REG_A5XX_CP_RB_RPTR), in a5xx_fault_detect_irq()
1247 gpu_read(gpu, REG_A5XX_CP_RB_WPTR), in a5xx_fault_detect_irq()
1248 gpu_read64(gpu, REG_A5XX_CP_IB1_BASE), in a5xx_fault_detect_irq()
1249 gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ), in a5xx_fault_detect_irq()
1250 gpu_read64(gpu, REG_A5XX_CP_IB2_BASE), in a5xx_fault_detect_irq()
1251 gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ)); in a5xx_fault_detect_irq()
1254 del_timer(&gpu->hangcheck_timer); in a5xx_fault_detect_irq()
1256 kthread_queue_work(gpu->worker, &gpu->recover_work); in a5xx_fault_detect_irq()
1267 static irqreturn_t a5xx_irq(struct msm_gpu *gpu) in a5xx_irq() argument
1269 struct msm_drm_private *priv = gpu->dev->dev_private; in a5xx_irq()
1270 u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS); in a5xx_irq()
1273 * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it in a5xx_irq()
1276 gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, in a5xx_irq()
1279 if (priv->disable_err_irq) { in a5xx_irq()
1286 a5xx_rbbm_err_irq(gpu, status); in a5xx_irq()
1289 a5xx_cp_err_irq(gpu); in a5xx_irq()
1292 a5xx_fault_detect_irq(gpu); in a5xx_irq()
1295 a5xx_uche_err_irq(gpu); in a5xx_irq()
1298 a5xx_gpmu_err_irq(gpu); in a5xx_irq()
1301 a5xx_preempt_trigger(gpu); in a5xx_irq()
1302 msm_gpu_retire(gpu); in a5xx_irq()
1306 a5xx_preempt_irq(gpu); in a5xx_irq()
1342 static void a5xx_dump(struct msm_gpu *gpu) in a5xx_dump() argument
1344 DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n", in a5xx_dump()
1345 gpu_read(gpu, REG_A5XX_RBBM_STATUS)); in a5xx_dump()
1346 adreno_dump(gpu); in a5xx_dump()
1349 static int a5xx_pm_resume(struct msm_gpu *gpu) in a5xx_pm_resume() argument
1351 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_pm_resume()
1355 ret = msm_gpu_pm_resume(gpu); in a5xx_pm_resume()
1362 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055); in a5xx_pm_resume()
1363 a5xx_set_hwcg(gpu, true); in a5xx_pm_resume()
1365 gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0); in a5xx_pm_resume()
1370 gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000); in a5xx_pm_resume()
1375 ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS, in a5xx_pm_resume()
1379 gpu->name, in a5xx_pm_resume()
1380 gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS)); in a5xx_pm_resume()
1385 gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000); in a5xx_pm_resume()
1386 ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS, in a5xx_pm_resume()
1390 gpu->name); in a5xx_pm_resume()
1395 static int a5xx_pm_suspend(struct msm_gpu *gpu) in a5xx_pm_suspend() argument
1397 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_pm_suspend()
1408 gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask); in a5xx_pm_suspend()
1409 spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & in a5xx_pm_suspend()
1412 gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0); in a5xx_pm_suspend()
1419 gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); in a5xx_pm_suspend()
1420 gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); in a5xx_pm_suspend()
1423 ret = msm_gpu_pm_suspend(gpu); in a5xx_pm_suspend()
1427 if (a5xx_gpu->has_whereami) in a5xx_pm_suspend()
1428 for (i = 0; i < gpu->nr_rings; i++) in a5xx_pm_suspend()
1429 a5xx_gpu->shadow[i] = 0; in a5xx_pm_suspend()
1434 static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) in a5xx_get_timestamp() argument
1436 *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO); in a5xx_get_timestamp()
1452 static int a5xx_crashdumper_init(struct msm_gpu *gpu, in a5xx_crashdumper_init() argument
1455 dumper->ptr = msm_gem_kernel_new(gpu->dev, in a5xx_crashdumper_init()
1456 SZ_1M, MSM_BO_WC, gpu->aspace, in a5xx_crashdumper_init()
1457 &dumper->bo, &dumper->iova); in a5xx_crashdumper_init()
1459 if (!IS_ERR(dumper->ptr)) in a5xx_crashdumper_init()
1460 msm_gem_object_set_name(dumper->bo, "crashdump"); in a5xx_crashdumper_init()
1462 return PTR_ERR_OR_ZERO(dumper->ptr); in a5xx_crashdumper_init()
1465 static int a5xx_crashdumper_run(struct msm_gpu *gpu, in a5xx_crashdumper_run() argument
1470 if (IS_ERR_OR_NULL(dumper->ptr)) in a5xx_crashdumper_run()
1471 return -EINVAL; in a5xx_crashdumper_run()
1473 gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova); in a5xx_crashdumper_run()
1475 gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1); in a5xx_crashdumper_run()
1477 return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val, in a5xx_crashdumper_run()
1491 { 0x35, 0xe00, 0x32 }, /* HLSQ non-context */
1496 { 0x3f, 0x0ec0, 0x40 }, /* SP non-context */
1501 { 0x3a, 0x0f00, 0x1c }, /* TP non-context */
1508 static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu, in a5xx_gpu_state_get_hlsq_regs() argument
1516 if (a5xx_crashdumper_init(gpu, &dumper)) in a5xx_gpu_state_get_hlsq_regs()
1529 a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL); in a5xx_gpu_state_get_hlsq_regs()
1530 if (!a5xx_state->hlsqregs) in a5xx_gpu_state_get_hlsq_regs()
1554 if (a5xx_crashdumper_run(gpu, &dumper)) { in a5xx_gpu_state_get_hlsq_regs()
1555 kfree(a5xx_state->hlsqregs); in a5xx_gpu_state_get_hlsq_regs()
1556 msm_gem_kernel_put(dumper.bo, gpu->aspace); in a5xx_gpu_state_get_hlsq_regs()
1561 memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K), in a5xx_gpu_state_get_hlsq_regs()
1564 msm_gem_kernel_put(dumper.bo, gpu->aspace); in a5xx_gpu_state_get_hlsq_regs()
1567 static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu) in a5xx_gpu_state_get() argument
1571 bool stalled = !!(gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24)); in a5xx_gpu_state_get()
1574 return ERR_PTR(-ENOMEM); in a5xx_gpu_state_get()
1577 a5xx_set_hwcg(gpu, false); in a5xx_gpu_state_get()
1580 adreno_gpu_state_get(gpu, &(a5xx_state->base)); in a5xx_gpu_state_get()
1582 a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS); in a5xx_gpu_state_get()
1590 a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state); in a5xx_gpu_state_get()
1592 a5xx_set_hwcg(gpu, true); in a5xx_gpu_state_get()
1594 return &a5xx_state->base; in a5xx_gpu_state_get()
1604 kfree(a5xx_state->hlsqregs); in a5xx_gpu_state_destroy()
1615 return kref_put(&state->ref, a5xx_gpu_state_destroy); in a5xx_gpu_state_put()
1620 static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state, in a5xx_show() argument
1631 adreno_show(gpu, state, p); in a5xx_show()
1634 if (!a5xx_state->hlsqregs) in a5xx_show()
1637 drm_printf(p, "registers-hlsq:\n"); in a5xx_show()
1651 if (a5xx_state->hlsqregs[pos] == 0xdeadbeef) in a5xx_show()
1654 drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n", in a5xx_show()
1655 o << 2, a5xx_state->hlsqregs[pos]); in a5xx_show()
1661 static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu) in a5xx_active_ring() argument
1663 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_active_ring()
1666 return a5xx_gpu->cur_ring; in a5xx_active_ring()
1669 static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) in a5xx_gpu_busy() argument
1673 busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO); in a5xx_gpu_busy()
1674 *out_sample_rate = clk_get_rate(gpu->core_clk); in a5xx_gpu_busy()
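/*
 * Hypothetical consumer sketch (example_busy_percent is an invented helper,
 * not part of the driver): devfreq-style load estimation divides the delta
 * of the busy-cycle counter read above by the number of core-clock cycles
 * available in the same sampling interval. A real kernel build would use
 * div64_u64() for the 64-bit division on 32-bit targets.
 */
static unsigned int example_busy_percent(u64 busy_delta_cycles,
					 unsigned long sample_rate_hz,
					 unsigned int interval_ms)
{
	u64 total_cycles = (u64)sample_rate_hz * interval_ms / 1000;

	if (!total_cycles)
		return 0;

	return (unsigned int)(busy_delta_cycles * 100 / total_cycles);
}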
1679 static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a5xx_get_rptr() argument
1681 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a5xx_get_rptr()
1684 if (a5xx_gpu->has_whereami) in a5xx_get_rptr()
1685 return a5xx_gpu->shadow[ring->id]; in a5xx_get_rptr()
1687 return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR); in a5xx_get_rptr()
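/*
 * Sketch of the per-ring shadow layout implied by the indexing above (an
 * assumption based on this listing; the real shadowptr() macro used by
 * a5xx_hw_init() lives in a5xx_gpu.h): one u32 slot per ring inside
 * shadow_bo, so the GPU-visible address programmed into CP_RB_RPTR_ADDR
 * would be roughly:
 */
static u64 example_shadow_slot_iova(struct a5xx_gpu *a5xx_gpu,
				    struct msm_ringbuffer *ring)
{
	return a5xx_gpu->shadow_iova + ring->id * sizeof(u32);
}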
1724 * If the OPP table specifies an opp-supported-hw property then we have in check_speed_bin()
1752 struct msm_drm_private *priv = dev->dev_private; in a5xx_gpu_init()
1753 struct platform_device *pdev = priv->gpu_pdev; in a5xx_gpu_init()
1754 struct adreno_platform_config *config = pdev->dev.platform_data; in a5xx_gpu_init()
1757 struct msm_gpu *gpu; in a5xx_gpu_init() local
1762 DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n"); in a5xx_gpu_init()
1763 return ERR_PTR(-ENXIO); in a5xx_gpu_init()
1768 return ERR_PTR(-ENOMEM); in a5xx_gpu_init()
1770 adreno_gpu = &a5xx_gpu->base; in a5xx_gpu_init()
1771 gpu = &adreno_gpu->base; in a5xx_gpu_init()
1773 adreno_gpu->registers = a5xx_registers; in a5xx_gpu_init()
1775 a5xx_gpu->lm_leakage = 0x4E001A; in a5xx_gpu_init()
1777 check_speed_bin(&pdev->dev); in a5xx_gpu_init()
1781 if (config->info->revn == 510) in a5xx_gpu_init()
1786 a5xx_destroy(&(a5xx_gpu->base.base)); in a5xx_gpu_init()
1790 if (gpu->aspace) in a5xx_gpu_init()
1791 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler); in a5xx_gpu_init()
1794 a5xx_preempt_init(gpu); in a5xx_gpu_init()
1796 return gpu; in a5xx_gpu_init()