Lines Matching +full:gpu +full:- +full:id

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
14 #include <linux/soc/qcom/llcc-qcom.h>
18 static inline bool _a6xx_check_idle(struct msm_gpu *gpu) in _a6xx_check_idle() argument
20 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in _a6xx_check_idle()
24 if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu)) in _a6xx_check_idle()
28 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) & in _a6xx_check_idle()
32 return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) & in _a6xx_check_idle()
36 static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_idle() argument
39 if (!adreno_idle(gpu, ring)) in a6xx_idle()
42 if (spin_until(_a6xx_check_idle(gpu))) { in a6xx_idle()
43 DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n", in a6xx_idle()
44 gpu->name, __builtin_return_address(0), in a6xx_idle()
45 gpu_read(gpu, REG_A6XX_RBBM_STATUS), in a6xx_idle()
46 gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS), in a6xx_idle()
47 gpu_read(gpu, REG_A6XX_CP_RB_RPTR), in a6xx_idle()
48 gpu_read(gpu, REG_A6XX_CP_RB_WPTR)); in a6xx_idle()
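/*
 * The a6xx_idle() fragments above combine three conditions before the GPU
 * is considered idle: the GMU (when present) reports idle, the RBBM busy
 * bit is clear, and no interrupts are still pending.  A minimal
 * self-contained sketch of that predicate plus a bounded poll follows;
 * gmu_is_idle(), read_status(), read_irq() and both masks are hypothetical
 * stand-ins for a6xx_gmu_isidle() and gpu_read() of RBBM_STATUS /
 * RBBM_INT_0_STATUS.
 */
#include <stdbool.h>
#include <stdio.h>

#define STATUS_BUSY_BIT   (1u << 23)   /* assumed "GPU busy" bit */
#define IRQ_PENDING_MASK  0xffffffffu  /* assumed unserviced-IRQ mask */

static bool     gmu_is_idle(void) { return true; }  /* stub */
static unsigned read_status(void) { return 0; }     /* stub for RBBM_STATUS */
static unsigned read_irq(void)    { return 0; }     /* stub for RBBM_INT_0_STATUS */

static bool check_idle(bool has_gmu)
{
	/* 1) the GMU (when present) must consider the GPU idle ... */
	if (has_gmu && !gmu_is_idle())
		return false;

	/* 2) ... the busy bit must be clear ... */
	if (read_status() & STATUS_BUSY_BIT)
		return false;

	/* 3) ... and no interrupts may still be pending */
	return !(read_irq() & IRQ_PENDING_MASK);
}

static bool wait_for_idle(bool has_gmu, unsigned int max_polls)
{
	/* spin_until()-style bounded poll, as in a6xx_idle() */
	while (max_polls--)
		if (check_idle(has_gmu))
			return true;

	fprintf(stderr, "timeout waiting for GPU to idle\n");
	return false;
}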
55 static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in update_shadow_rptr() argument
57 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in update_shadow_rptr()
61 if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) { in update_shadow_rptr()
68 static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_flush() argument
73 update_shadow_rptr(gpu, ring); in a6xx_flush()
75 spin_lock_irqsave(&ring->preempt_lock, flags); in a6xx_flush()
78 ring->cur = ring->next; in a6xx_flush()
83 spin_unlock_irqrestore(&ring->preempt_lock, flags); in a6xx_flush()
88 gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr); in a6xx_flush()
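/*
 * The a6xx_flush() fragments above show the doorbell pattern: publish the
 * new write position under the preemption lock, then write the computed
 * wptr to the CP.  A hedged sketch of that pattern; struct ring, the
 * pthread mutex and write_wptr() are stand-ins for msm_ringbuffer,
 * ring->preempt_lock and gpu_write(REG_A6XX_CP_RB_WPTR, wptr).
 */
#include <pthread.h>
#include <stdint.h>

struct ring {
	uint32_t *start;        /* base of the ringbuffer */
	uint32_t *cur;          /* last position published to the GPU */
	uint32_t *next;         /* position new commands were written up to */
	uint32_t  size_dwords;  /* ring size in dwords */
	pthread_mutex_t lock;   /* init with PTHREAD_MUTEX_INITIALIZER */
};

static void write_wptr(uint32_t wptr) { (void)wptr; }  /* stub register write */

static void ring_flush(struct ring *ring)
{
	uint32_t wptr;

	/* Publish the new write position atomically w.r.t. preemption */
	pthread_mutex_lock(&ring->lock);
	ring->cur = ring->next;
	wptr = (uint32_t)(ring->cur - ring->start) % ring->size_dwords;
	pthread_mutex_unlock(&ring->lock);

	/* Ring the doorbell: tell the CP where the write pointer now is */
	write_wptr(wptr);
}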
105 bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1; in a6xx_set_pagetable()
110 if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno) in a6xx_set_pagetable()
113 if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) in a6xx_set_pagetable()
146 * lingering in that part of the GPU in a6xx_set_pagetable()
166 /* Re-enable protected mode: */ in a6xx_set_pagetable()
172 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a6xx_submit() argument
174 unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in a6xx_submit()
175 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_submit()
177 struct msm_ringbuffer *ring = submit->ring; in a6xx_submit()
180 a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx); in a6xx_submit()
187 * GPU registers so we need to add 0x1a800 to the register value on A630 in a6xx_submit()
201 for (i = 0; i < submit->nr_cmds; i++) { in a6xx_submit()
202 switch (submit->cmd[i].type) { in a6xx_submit()
206 if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno) in a6xx_submit()
211 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a6xx_submit()
212 OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); in a6xx_submit()
213 OUT_RING(ring, submit->cmd[i].size); in a6xx_submit()
219 * Periodically update shadow-wptr if needed, so that we in a6xx_submit()
226 update_shadow_rptr(gpu, ring); in a6xx_submit()
236 OUT_RING(ring, submit->seqno); in a6xx_submit()
247 OUT_RING(ring, submit->seqno); in a6xx_submit()
250 gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER)); in a6xx_submit()
252 a6xx_flush(gpu, ring); in a6xx_submit()
698 static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state) in a6xx_set_hwcg() argument
700 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_hwcg()
702 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_set_hwcg()
707 if (!adreno_gpu->info->hwcg) in a6xx_set_hwcg()
717 val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL); in a6xx_set_hwcg()
719 /* Don't re-program the registers if they are already correct */ in a6xx_set_hwcg()
727 for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++) in a6xx_set_hwcg()
728 gpu_write(gpu, reg->offset, state ? reg->value : 0); in a6xx_set_hwcg()
734 gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0); in a6xx_set_hwcg()
900 static void a6xx_set_cp_protect(struct msm_gpu *gpu) in a6xx_set_cp_protect() argument
902 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_cp_protect()
933 gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, in a6xx_set_cp_protect()
938 for (i = 0; i < count - 1; i++) { in a6xx_set_cp_protect()
941 gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]); in a6xx_set_cp_protect()
944 gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]); in a6xx_set_cp_protect()
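/*
 * The a6xx_set_cp_protect() fragments above write all but the last table
 * entry into consecutive CP_PROTECT slots and then place the final entry
 * into the last hardware slot (count_max - 1), which locks the protect
 * unit.  A sketch of that pattern; protect_write() is a hypothetical
 * stand-in for gpu_write(REG_A6XX_CP_PROTECT(i), ...), and count >= 1 is
 * assumed.
 */
#include <stddef.h>
#include <stdint.h>

static void protect_write(size_t slot, uint32_t val) { (void)slot; (void)val; }

static void set_cp_protect(const uint32_t *regs, size_t count, size_t count_max)
{
	size_t i;

	/* Program all but the final entry into consecutive slots */
	for (i = 0; i < count - 1; i++)
		protect_write(i, regs[i]);

	/* The final entry lands in the very last slot and locks the block */
	protect_write(count_max - 1, regs[i]);
}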
947 static void a6xx_set_ubwc_config(struct msm_gpu *gpu) in a6xx_set_ubwc_config() argument
949 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_set_ubwc_config()
956 /* Entirely magic, per-GPU-gen value */ in a6xx_set_ubwc_config()
1009 gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, in a6xx_set_ubwc_config()
1013 gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 | in a6xx_set_ubwc_config()
1016 gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 | in a6xx_set_ubwc_config()
1020 gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, min_acc_len << 23 | hbb_lo << 21); in a6xx_set_ubwc_config()
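/*
 * The UBWC fragments above spread one logical quantity, the "highest bank
 * bit", across several *_NC_MODE_CNTL registers.  A hedged sketch of the
 * packing: the hbb_hi/hbb_lo split below is an assumption, while the shift
 * positions (<<4, <<10, <<21, <<23) are the ones visible in the matched
 * lines.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int hbb = 2;            /* assumed: biased highest-bank-bit value */
	unsigned int hbb_lo = hbb & 3;   /* assumed: low two bits */
	unsigned int hbb_hi = hbb >> 2;  /* assumed: remaining high bit */
	unsigned int min_acc_len = 0;    /* assumed: minimum access length */

	uint32_t tpl1 = hbb_hi << 4;                          /* TPL1_NC_MODE_CNTL field */
	uint32_t sp   = hbb_hi << 10;                         /* SP_NC_MODE_CNTL field */
	uint32_t uche = (min_acc_len << 23) | (hbb_lo << 21); /* UCHE_MODE_CNTL fields */

	printf("tpl1=%#x sp=%#x uche=%#x\n", tpl1, sp, uche);
	return 0;
}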
1023 static int a6xx_cp_init(struct msm_gpu *gpu) in a6xx_cp_init() argument
1025 struct msm_ringbuffer *ring = gpu->rb[0]; in a6xx_cp_init()
1048 a6xx_flush(gpu, ring); in a6xx_cp_init()
1049 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; in a6xx_cp_init()
1059 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_ucode_check_version()
1060 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_ucode_check_version() local
1061 const char *sqe_name = adreno_gpu->info->fw[ADRENO_FW_SQE]; in a6xx_ucode_check_version()
1091 a6xx_gpu->has_whereami = true; in a6xx_ucode_check_version()
1096 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
1105 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
1111 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_check_version()
1112 "unknown GPU, add it to a6xx_ucode_check_version()!!\n"); in a6xx_ucode_check_version()
1119 static int a6xx_ucode_load(struct msm_gpu *gpu) in a6xx_ucode_load() argument
1121 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_ucode_load()
1124 if (!a6xx_gpu->sqe_bo) { in a6xx_ucode_load()
1125 a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu, in a6xx_ucode_load()
1126 adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova); in a6xx_ucode_load()
1128 if (IS_ERR(a6xx_gpu->sqe_bo)) { in a6xx_ucode_load()
1129 int ret = PTR_ERR(a6xx_gpu->sqe_bo); in a6xx_ucode_load()
1131 a6xx_gpu->sqe_bo = NULL; in a6xx_ucode_load()
1132 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_ucode_load()
1138 msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw"); in a6xx_ucode_load()
1139 if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) { in a6xx_ucode_load()
1140 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_ucode_load()
1141 drm_gem_object_put(a6xx_gpu->sqe_bo); in a6xx_ucode_load()
1143 a6xx_gpu->sqe_bo = NULL; in a6xx_ucode_load()
1144 return -EPERM; in a6xx_ucode_load()
1152 if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) && in a6xx_ucode_load()
1153 !a6xx_gpu->shadow_bo) { in a6xx_ucode_load()
1154 a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev, in a6xx_ucode_load()
1155 sizeof(u32) * gpu->nr_rings, in a6xx_ucode_load()
1157 gpu->aspace, &a6xx_gpu->shadow_bo, in a6xx_ucode_load()
1158 &a6xx_gpu->shadow_iova); in a6xx_ucode_load()
1160 if (IS_ERR(a6xx_gpu->shadow)) in a6xx_ucode_load()
1161 return PTR_ERR(a6xx_gpu->shadow); in a6xx_ucode_load()
1163 msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow"); in a6xx_ucode_load()
1169 static int a6xx_zap_shader_init(struct msm_gpu *gpu) in a6xx_zap_shader_init() argument
1177 ret = adreno_zap_shader_load(gpu, GPU_PAS_ID); in a6xx_zap_shader_init()
1195 static int hw_init(struct msm_gpu *gpu) in hw_init() argument
1197 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in hw_init()
1199 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in hw_init()
1203 /* Make sure the GMU keeps the GPU on while we set it up */ in hw_init()
1204 ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in hw_init()
1211 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); in hw_init()
1212 gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0); in hw_init()
1213 /* Let's make extra sure that the GPU can access the memory.. */ in hw_init()
1216 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0); in hw_init()
1217 gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0); in hw_init()
1218 /* Let's make extra sure that the GPU can access the memory.. */ in hw_init()
1222 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0); in hw_init()
1228 * Disable the trusted memory range - we don't actually support secure in hw_init()
1232 gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000); in hw_init()
1233 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000); in hw_init()
1236 gpu_write(gpu, REG_A6XX_CP_ADDR_MODE_CNTL, 0x1); in hw_init()
1237 gpu_write(gpu, REG_A6XX_VSC_ADDR_MODE_CNTL, 0x1); in hw_init()
1238 gpu_write(gpu, REG_A6XX_GRAS_ADDR_MODE_CNTL, 0x1); in hw_init()
1239 gpu_write(gpu, REG_A6XX_RB_ADDR_MODE_CNTL, 0x1); in hw_init()
1240 gpu_write(gpu, REG_A6XX_PC_ADDR_MODE_CNTL, 0x1); in hw_init()
1241 gpu_write(gpu, REG_A6XX_HLSQ_ADDR_MODE_CNTL, 0x1); in hw_init()
1242 gpu_write(gpu, REG_A6XX_VFD_ADDR_MODE_CNTL, 0x1); in hw_init()
1243 gpu_write(gpu, REG_A6XX_VPC_ADDR_MODE_CNTL, 0x1); in hw_init()
1244 gpu_write(gpu, REG_A6XX_UCHE_ADDR_MODE_CNTL, 0x1); in hw_init()
1245 gpu_write(gpu, REG_A6XX_SP_ADDR_MODE_CNTL, 0x1); in hw_init()
1246 gpu_write(gpu, REG_A6XX_TPL1_ADDR_MODE_CNTL, 0x1); in hw_init()
1247 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); in hw_init()
1250 a6xx_set_hwcg(gpu, true); in hw_init()
1256 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620); in hw_init()
1257 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620); in hw_init()
1258 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620); in hw_init()
1259 gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); in hw_init()
1260 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3); in hw_init()
1262 gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); in hw_init()
1266 gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); in hw_init()
1268 /* Make all blocks contribute to the GPU BUSY perf counter */ in hw_init()
1269 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff); in hw_init()
1272 gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu); in hw_init()
1273 gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu); in hw_init()
1274 gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu); in hw_init()
1277 /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ in hw_init()
1278 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN, 0x00100000); in hw_init()
1280 gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX, in hw_init()
1281 0x00100000 + adreno_gpu->info->gmem - 1); in hw_init()
1284 gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804); in hw_init()
1285 gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4); in hw_init()
1288 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); in hw_init()
1289 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); in hw_init()
1291 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060); in hw_init()
1292 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16); in hw_init()
1294 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0); in hw_init()
1295 gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); in hw_init()
1299 gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020); in hw_init()
1303 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48); in hw_init()
1304 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47); in hw_init()
1306 gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128); in hw_init()
1312 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); in hw_init()
1314 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200); in hw_init()
1316 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200); in hw_init()
1318 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000); in hw_init()
1320 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000); in hw_init()
1322 gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000); in hw_init()
1325 gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1); in hw_init()
1328 gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1); in hw_init()
1331 gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL(0), PERF_CP_ALWAYS_COUNT); in hw_init()
1333 a6xx_set_ubwc_config(gpu); in hw_init()
1337 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff); in hw_init()
1339 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3ffff); in hw_init()
1341 gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff); in hw_init()
1343 gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1); in hw_init()
1347 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0); in hw_init()
1348 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1, in hw_init()
1350 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2, in hw_init()
1352 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3, in hw_init()
1354 gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4, in hw_init()
1366 a6xx_set_cp_protect(gpu); in hw_init()
1369 gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1); in hw_init()
1370 gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0); in hw_init()
1373 /* Set dualQ + disable afull for A660 GPU */ in hw_init()
1375 gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906); in hw_init()
1378 if (gpu->hw_apriv) { in hw_init()
1379 gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL, in hw_init()
1384 gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK); in hw_init()
1386 ret = adreno_hw_init(gpu); in hw_init()
1390 gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova); in hw_init()
1393 gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova); in hw_init()
1399 if (adreno_gpu->base.hw_apriv) in hw_init()
1400 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT); in hw_init()
1402 gpu_write(gpu, REG_A6XX_CP_RB_CNTL, in hw_init()
1406 if (a6xx_gpu->shadow_bo) { in hw_init()
1407 gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, in hw_init()
1408 shadowptr(a6xx_gpu, gpu->rb[0])); in hw_init()
1412 a6xx_gpu->cur_ring = gpu->rb[0]; in hw_init()
1414 gpu->cur_ctx_seqno = 0; in hw_init()
1417 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); in hw_init()
1419 ret = a6xx_cp_init(gpu); in hw_init()
1430 ret = a6xx_zap_shader_init(gpu); in hw_init()
1432 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); in hw_init()
1433 OUT_RING(gpu->rb[0], 0x00000000); in hw_init()
1435 a6xx_flush(gpu, gpu->rb[0]); in hw_init()
1436 if (!a6xx_idle(gpu, gpu->rb[0])) in hw_init()
1437 return -EINVAL; in hw_init()
1438 } else if (ret == -ENODEV) { in hw_init()
1445 dev_warn_once(gpu->dev->dev, in hw_init()
1446 "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n"); in hw_init()
1447 gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0); in hw_init()
1457 * Tell the GMU that we are done touching the GPU and it can start power in hw_init()
1460 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); in hw_init()
1462 if (a6xx_gpu->gmu.legacy) { in hw_init()
1464 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER); in hw_init()
1470 static int a6xx_hw_init(struct msm_gpu *gpu) in a6xx_hw_init() argument
1472 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_hw_init()
1476 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_hw_init()
1477 ret = hw_init(gpu); in a6xx_hw_init()
1478 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_hw_init()
1483 static void a6xx_dump(struct msm_gpu *gpu) in a6xx_dump() argument
1485 DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", in a6xx_dump()
1486 gpu_read(gpu, REG_A6XX_RBBM_STATUS)); in a6xx_dump()
1487 adreno_dump(gpu); in a6xx_dump()
1490 static void a6xx_recover(struct msm_gpu *gpu) in a6xx_recover() argument
1492 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_recover()
1494 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_recover()
1497 adreno_dump_info(gpu); in a6xx_recover()
1500 DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i, in a6xx_recover()
1501 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i))); in a6xx_recover()
1504 a6xx_dump(gpu); in a6xx_recover()
1510 a6xx_gpu->hung = true; in a6xx_recover()
1513 gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); in a6xx_recover()
1515 pm_runtime_dont_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1518 mutex_lock(&gpu->active_lock); in a6xx_recover()
1519 active_submits = gpu->active_submits; in a6xx_recover()
1525 gpu->active_submits = 0; in a6xx_recover()
1531 /* Reset the GPU to a clean state */ in a6xx_recover()
1532 a6xx_gpu_sw_reset(gpu, true); in a6xx_recover()
1533 a6xx_gpu_sw_reset(gpu, false); in a6xx_recover()
1536 reinit_completion(&gmu->pd_gate); in a6xx_recover()
1537 dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb); in a6xx_recover()
1538 dev_pm_genpd_synced_poweroff(gmu->cxpd); in a6xx_recover()
1542 pm_runtime_put(&gpu->pdev->dev); in a6xx_recover()
1545 pm_runtime_put_sync(&gpu->pdev->dev); in a6xx_recover()
1547 if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000))) in a6xx_recover()
1548 DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n"); in a6xx_recover()
1550 dev_pm_genpd_remove_notifier(gmu->cxpd); in a6xx_recover()
1552 pm_runtime_use_autosuspend(&gpu->pdev->dev); in a6xx_recover()
1555 pm_runtime_get(&gpu->pdev->dev); in a6xx_recover()
1557 pm_runtime_get_sync(&gpu->pdev->dev); in a6xx_recover()
1559 gpu->active_submits = active_submits; in a6xx_recover()
1560 mutex_unlock(&gpu->active_lock); in a6xx_recover()
1562 msm_gpu_hw_init(gpu); in a6xx_recover()
1563 a6xx_gpu->hung = false; in a6xx_recover()
1566 static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid) in a6xx_uche_fault_block() argument
1577 * The source of the data depends on the mid ID read from FSYNR1 in a6xx_uche_fault_block()
1578 * and the client ID read from the UCHE block in a6xx_uche_fault_block()
1580 val = gpu_read(gpu, REG_A6XX_UCHE_CLIENT_PF); in a6xx_uche_fault_block()
1586 /* For mid=2 the source is TP or VFD except when the client id is 0 */ in a6xx_uche_fault_block()
1594 static const char *a6xx_fault_block(struct msm_gpu *gpu, u32 id) in a6xx_fault_block() argument
1596 if (id == 0) in a6xx_fault_block()
1598 else if (id == 4) in a6xx_fault_block()
1600 else if (id == 6) in a6xx_fault_block()
1603 return a6xx_uche_fault_block(gpu, id); in a6xx_fault_block()
1608 struct msm_gpu *gpu = arg; in a6xx_fault_handler() local
1613 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)), in a6xx_fault_handler()
1614 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)), in a6xx_fault_handler()
1615 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)), in a6xx_fault_handler()
1616 gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)), in a6xx_fault_handler()
1620 block = a6xx_fault_block(gpu, info->fsynr1 & 0xff); in a6xx_fault_handler()
1622 return adreno_fault_handler(gpu, iova, flags, info, block, scratch); in a6xx_fault_handler()
1625 static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu) in a6xx_cp_hw_err_irq() argument
1627 u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS); in a6xx_cp_hw_err_irq()
1632 gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1); in a6xx_cp_hw_err_irq()
1633 val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA); in a6xx_cp_hw_err_irq()
1634 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1640 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1644 dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n", in a6xx_cp_hw_err_irq()
1645 gpu_read(gpu, REG_A6XX_CP_HW_FAULT)); in a6xx_cp_hw_err_irq()
1648 u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS); in a6xx_cp_hw_err_irq()
1650 dev_err_ratelimited(&gpu->pdev->dev, in a6xx_cp_hw_err_irq()
1657 dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n"); in a6xx_cp_hw_err_irq()
1660 dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n"); in a6xx_cp_hw_err_irq()
1663 dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n"); in a6xx_cp_hw_err_irq()
1667 static void a6xx_fault_detect_irq(struct msm_gpu *gpu) in a6xx_fault_detect_irq() argument
1669 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_fault_detect_irq()
1671 struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); in a6xx_fault_detect_irq()
1674 * If stalled on SMMU fault, we could trip the GPU's hang detection, in a6xx_fault_detect_irq()
1679 if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT) in a6xx_fault_detect_irq()
1683 * Force the GPU to stay on until after we finish in a6xx_fault_detect_irq()
1687 gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1); in a6xx_fault_detect_irq()
1689 DRM_DEV_ERROR(&gpu->pdev->dev, in a6xx_fault_detect_irq()
1690 …"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", in a6xx_fault_detect_irq()
1691 ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0, in a6xx_fault_detect_irq()
1692 gpu_read(gpu, REG_A6XX_RBBM_STATUS), in a6xx_fault_detect_irq()
1693 gpu_read(gpu, REG_A6XX_CP_RB_RPTR), in a6xx_fault_detect_irq()
1694 gpu_read(gpu, REG_A6XX_CP_RB_WPTR), in a6xx_fault_detect_irq()
1695 gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), in a6xx_fault_detect_irq()
1696 gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), in a6xx_fault_detect_irq()
1697 gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), in a6xx_fault_detect_irq()
1698 gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE)); in a6xx_fault_detect_irq()
1701 del_timer(&gpu->hangcheck_timer); in a6xx_fault_detect_irq()
1703 kthread_queue_work(gpu->worker, &gpu->recover_work); in a6xx_fault_detect_irq()
1706 static irqreturn_t a6xx_irq(struct msm_gpu *gpu) in a6xx_irq() argument
1708 struct msm_drm_private *priv = gpu->dev->dev_private; in a6xx_irq()
1709 u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS); in a6xx_irq()
1711 gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status); in a6xx_irq()
1713 if (priv->disable_err_irq) in a6xx_irq()
1717 a6xx_fault_detect_irq(gpu); in a6xx_irq()
1720 dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n"); in a6xx_irq()
1723 a6xx_cp_hw_err_irq(gpu); in a6xx_irq()
1726 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n"); in a6xx_irq()
1729 dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n"); in a6xx_irq()
1732 dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n"); in a6xx_irq()
1735 msm_gpu_retire(gpu); in a6xx_irq()
1742 llcc_slice_deactivate(a6xx_gpu->llc_slice); in a6xx_llc_deactivate()
1743 llcc_slice_deactivate(a6xx_gpu->htw_llc_slice); in a6xx_llc_deactivate()
1748 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; in a6xx_llc_activate()
1749 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_llc_activate() local
1752 if (IS_ERR(a6xx_gpu->llc_mmio)) in a6xx_llc_activate()
1755 if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { in a6xx_llc_activate()
1756 u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); in a6xx_llc_activate()
1766 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | in a6xx_llc_activate()
1774 if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) { in a6xx_llc_activate()
1775 if (!a6xx_gpu->have_mmu500) { in a6xx_llc_activate()
1776 u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice); in a6xx_llc_activate()
1787 * Program the slice IDs for the various GPU blocks and GPU MMU in a6xx_llc_activate()
1790 if (!a6xx_gpu->have_mmu500) { in a6xx_llc_activate()
1803 gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); in a6xx_llc_activate()
1808 /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */ in a6xx_llc_slices_destroy()
1809 if (adreno_has_gmu_wrapper(&a6xx_gpu->base)) in a6xx_llc_slices_destroy()
1812 llcc_slice_putd(a6xx_gpu->llc_slice); in a6xx_llc_slices_destroy()
1813 llcc_slice_putd(a6xx_gpu->htw_llc_slice); in a6xx_llc_slices_destroy()
1821 /* No LLCC on non-RPMh (and by extension, non-GMU) SoCs */ in a6xx_llc_slices_init()
1822 if (adreno_has_gmu_wrapper(&a6xx_gpu->base)) in a6xx_llc_slices_init()
1829 phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0); in a6xx_llc_slices_init()
1830 a6xx_gpu->have_mmu500 = (phandle && in a6xx_llc_slices_init()
1831 of_device_is_compatible(phandle, "arm,mmu-500")); in a6xx_llc_slices_init()
1834 if (a6xx_gpu->have_mmu500) in a6xx_llc_slices_init()
1835 a6xx_gpu->llc_mmio = NULL; in a6xx_llc_slices_init()
1837 a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem"); in a6xx_llc_slices_init()
1839 a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU); in a6xx_llc_slices_init()
1840 a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW); in a6xx_llc_slices_init()
1842 if (IS_ERR_OR_NULL(a6xx_gpu->llc_slice) && IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice)) in a6xx_llc_slices_init()
1843 a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL); in a6xx_llc_slices_init()
1854 struct msm_gpu *gpu = &adreno_gpu->base; in a6xx_bus_clear_pending_transactions() local
1857 gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST); in a6xx_bus_clear_pending_transactions()
1858 spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) & in a6xx_bus_clear_pending_transactions()
1861 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK); in a6xx_bus_clear_pending_transactions()
1862 spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & in a6xx_bus_clear_pending_transactions()
1864 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); in a6xx_bus_clear_pending_transactions()
1871 gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1); in a6xx_bus_clear_pending_transactions()
1872 spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1); in a6xx_bus_clear_pending_transactions()
1876 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); in a6xx_bus_clear_pending_transactions()
1877 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & in a6xx_bus_clear_pending_transactions()
1881 gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); in a6xx_bus_clear_pending_transactions()
1882 spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & in a6xx_bus_clear_pending_transactions()
1886 gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); in a6xx_bus_clear_pending_transactions()
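/*
 * The GBIF/VBIF halt fragments above all follow the same request-then-poll
 * shape: set a halt mask, spin until the matching ack bits are set, then
 * drop the request.  A hedged sketch of that shape; write_halt() and
 * read_ack() stand in for gpu_write()/gpu_read() on a halt/ack register
 * pair.
 */
#include <stdbool.h>
#include <stdint.h>

static void     write_halt(uint32_t mask) { (void)mask; }
static uint32_t read_ack(void)            { return ~0u; }  /* stub: pretend ack'd */

static bool halt_and_wait(uint32_t mask, unsigned int max_polls)
{
	/* Request the halt ... */
	write_halt(mask);

	/* ... and spin until the hardware acknowledges every requested bit */
	while (max_polls--)
		if ((read_ack() & mask) == mask) {
			write_halt(0);  /* drop the request once drained */
			return true;
		}

	return false;
}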
1889 void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert) in a6xx_gpu_sw_reset() argument
1892 if (adreno_is_a610(to_adreno_gpu(gpu))) in a6xx_gpu_sw_reset()
1895 gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert); in a6xx_gpu_sw_reset()
1897 gpu_read(gpu, REG_A6XX_RBBM_SW_RESET_CMD); in a6xx_gpu_sw_reset()
1905 static int a6xx_gmu_pm_resume(struct msm_gpu *gpu) in a6xx_gmu_pm_resume() argument
1907 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gmu_pm_resume()
1911 gpu->needs_hw_init = true; in a6xx_gmu_pm_resume()
1915 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_resume()
1917 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_resume()
1921 msm_devfreq_resume(gpu); in a6xx_gmu_pm_resume()
1928 static int a6xx_pm_resume(struct msm_gpu *gpu) in a6xx_pm_resume() argument
1930 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_pm_resume()
1932 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_pm_resume()
1933 unsigned long freq = gpu->fast_rate; in a6xx_pm_resume()
1937 gpu->needs_hw_init = true; in a6xx_pm_resume()
1941 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_pm_resume()
1943 opp = dev_pm_opp_find_freq_ceil(&gpu->pdev->dev, &freq); in a6xx_pm_resume()
1951 dev_pm_opp_set_opp(&gpu->pdev->dev, opp); in a6xx_pm_resume()
1953 pm_runtime_resume_and_get(gmu->dev); in a6xx_pm_resume()
1954 pm_runtime_resume_and_get(gmu->gxpd); in a6xx_pm_resume()
1956 ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); in a6xx_pm_resume()
1963 /* If anything goes south, tear the GPU down piece by piece.. */ in a6xx_pm_resume()
1966 pm_runtime_put(gmu->gxpd); in a6xx_pm_resume()
1967 pm_runtime_put(gmu->dev); in a6xx_pm_resume()
1968 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_pm_resume()
1971 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_pm_resume()
1974 msm_devfreq_resume(gpu); in a6xx_pm_resume()
1979 static int a6xx_gmu_pm_suspend(struct msm_gpu *gpu) in a6xx_gmu_pm_suspend() argument
1981 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gmu_pm_suspend()
1989 msm_devfreq_suspend(gpu); in a6xx_gmu_pm_suspend()
1991 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_suspend()
1993 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gmu_pm_suspend()
1997 if (a6xx_gpu->shadow_bo) in a6xx_gmu_pm_suspend()
1998 for (i = 0; i < gpu->nr_rings; i++) in a6xx_gmu_pm_suspend()
1999 a6xx_gpu->shadow[i] = 0; in a6xx_gmu_pm_suspend()
2001 gpu->suspend_count++; in a6xx_gmu_pm_suspend()
2006 static int a6xx_pm_suspend(struct msm_gpu *gpu) in a6xx_pm_suspend() argument
2008 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_pm_suspend()
2010 struct a6xx_gmu *gmu = &a6xx_gpu->gmu; in a6xx_pm_suspend()
2015 msm_devfreq_suspend(gpu); in a6xx_pm_suspend()
2017 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_pm_suspend()
2025 clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); in a6xx_pm_suspend()
2027 pm_runtime_put_sync(gmu->gxpd); in a6xx_pm_suspend()
2028 dev_pm_opp_set_opp(&gpu->pdev->dev, NULL); in a6xx_pm_suspend()
2029 pm_runtime_put_sync(gmu->dev); in a6xx_pm_suspend()
2031 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_pm_suspend()
2033 if (a6xx_gpu->shadow_bo) in a6xx_pm_suspend()
2034 for (i = 0; i < gpu->nr_rings; i++) in a6xx_pm_suspend()
2035 a6xx_gpu->shadow[i] = 0; in a6xx_pm_suspend()
2037 gpu->suspend_count++; in a6xx_pm_suspend()
2042 static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value) in a6xx_gmu_get_timestamp() argument
2044 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gmu_get_timestamp()
2047 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gmu_get_timestamp()
2049 /* Force the GPU power on so we can read this register */ in a6xx_gmu_get_timestamp()
2050 a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); in a6xx_gmu_get_timestamp()
2052 *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); in a6xx_gmu_get_timestamp()
2054 a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); in a6xx_gmu_get_timestamp()
2056 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gmu_get_timestamp()
2061 static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) in a6xx_get_timestamp() argument
2063 *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER); in a6xx_get_timestamp()
2067 static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu) in a6xx_active_ring() argument
2069 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_active_ring()
2072 return a6xx_gpu->cur_ring; in a6xx_active_ring()
2075 static void a6xx_destroy(struct msm_gpu *gpu) in a6xx_destroy() argument
2077 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_destroy()
2080 if (a6xx_gpu->sqe_bo) { in a6xx_destroy()
2081 msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); in a6xx_destroy()
2082 drm_gem_object_put(a6xx_gpu->sqe_bo); in a6xx_destroy()
2085 if (a6xx_gpu->shadow_bo) { in a6xx_destroy()
2086 msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace); in a6xx_destroy()
2087 drm_gem_object_put(a6xx_gpu->shadow_bo); in a6xx_destroy()
2099 static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) in a6xx_gpu_busy() argument
2101 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gpu_busy()
2108 busy_cycles = gmu_read64(&a6xx_gpu->gmu, in a6xx_gpu_busy()
2115 static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, in a6xx_gpu_set_freq() argument
2118 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_gpu_set_freq()
2121 mutex_lock(&a6xx_gpu->gmu.lock); in a6xx_gpu_set_freq()
2122 a6xx_gmu_set_freq(gpu, opp, suspended); in a6xx_gpu_set_freq()
2123 mutex_unlock(&a6xx_gpu->gmu.lock); in a6xx_gpu_set_freq()
2127 a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) in a6xx_create_address_space() argument
2129 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_create_address_space()
2134 * This allows GPU to set the bus attributes required to use system in a6xx_create_address_space()
2137 if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice) && in a6xx_create_address_space()
2138 !device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY)) in a6xx_create_address_space()
2141 return adreno_iommu_create_address_space(gpu, pdev, quirks); in a6xx_create_address_space()
2145 a6xx_create_private_address_space(struct msm_gpu *gpu) in a6xx_create_private_address_space() argument
2149 mmu = msm_iommu_pagetable_create(gpu->aspace->mmu); in a6xx_create_private_address_space()
2155 "gpu", 0x100000000ULL, in a6xx_create_private_address_space()
2156 adreno_private_address_space_size(gpu)); in a6xx_create_private_address_space()
2159 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_get_rptr() argument
2161 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in a6xx_get_rptr()
2164 if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) in a6xx_get_rptr()
2165 return a6xx_gpu->shadow[ring->id]; in a6xx_get_rptr()
2167 return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR); in a6xx_get_rptr()
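/*
 * a6xx_get_rptr() above prefers a memory "shadow" of the CP read pointer
 * (kept current by the whereami mechanism seen in update_shadow_rptr())
 * over a register read.  A small sketch of that choice; read_rptr_reg() is
 * a stand-in for gpu_read(REG_A6XX_CP_RB_RPTR).
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t read_rptr_reg(void) { return 0; }  /* stub MMIO read */

static uint32_t get_rptr(const volatile uint32_t *shadow, bool have_shadow)
{
	/* The shadow avoids an MMIO read on every retire/hangcheck pass */
	return have_shadow ? *shadow : read_rptr_reg();
}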
2170 static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in a6xx_progress() argument
2173 .ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE), in a6xx_progress()
2174 .ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE), in a6xx_progress()
2175 .ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE), in a6xx_progress()
2176 .ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE), in a6xx_progress()
2193 cp_state.ib1_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB1) >> 16; in a6xx_progress()
2194 cp_state.ib2_rem += gpu_read(gpu, REG_A6XX_CP_ROQ_AVAIL_IB2) >> 16; in a6xx_progress()
2196 progress = !!memcmp(&cp_state, &ring->last_cp_state, sizeof(cp_state)); in a6xx_progress()
2198 ring->last_cp_state = cp_state; in a6xx_progress()
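/*
 * a6xx_progress() above decides "still making progress" by snapshotting CP
 * state and comparing it against the previous snapshot.  A minimal sketch
 * of that idea with a hypothetical cp_state layout:
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct cp_state {
	uint64_t ib1_base, ib2_base;
	uint32_t ib1_rem, ib2_rem;
};

static bool made_progress(struct cp_state *last, const struct cp_state *now)
{
	/* Any change in the snapshot counts as forward progress */
	bool progress = memcmp(now, last, sizeof(*now)) != 0;

	*last = *now;  /* remember the snapshot for the next hangcheck pass */
	return progress;
}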
2205 if (!info->speedbins) in fuse_to_supp_hw()
2208 for (int i = 0; info->speedbins[i].fuse != SHRT_MAX; i++) in fuse_to_supp_hw()
2209 if (info->speedbins[i].fuse == fuse) in fuse_to_supp_hw()
2210 return BIT(info->speedbins[i].speedbin); in fuse_to_supp_hw()
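/*
 * fuse_to_supp_hw() above maps a fused speed-bin value to a one-hot
 * "supported hardware" mask.  A sketch with an assumed table layout; the
 * SHRT_MAX terminator matches the matched lines, while both fallback
 * return values are assumptions about how the caller treats "no table"
 * and "unknown fuse".
 */
#include <limits.h>
#include <stdint.h>

struct speedbin_entry { uint16_t fuse; uint16_t speedbin; };

static uint32_t fuse_to_mask(const struct speedbin_entry *bins, uint32_t fuse)
{
	if (!bins)
		return UINT32_MAX;  /* assumed: no table means everything is supported */

	for (int i = 0; bins[i].fuse != SHRT_MAX; i++)
		if (bins[i].fuse == fuse)
			return 1u << bins[i].speedbin;

	return UINT32_MAX;  /* assumed: unknown fuse; the caller warns about it */
}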
2223 * -ENOENT means that the platform doesn't support speedbin which is in a6xx_set_supported_hw()
2226 if (ret == -ENOENT) { in a6xx_set_supported_hw()
2230 "failed to read speed-bin. Some OPPs may not be supported by hardware\n"); in a6xx_set_supported_hw()
2238 "missing support for speed-bin: %u. Some OPPs may not be supported by hardware\n", in a6xx_set_supported_hw()
2312 struct msm_drm_private *priv = dev->dev_private; in a6xx_gpu_init()
2313 struct platform_device *pdev = priv->gpu_pdev; in a6xx_gpu_init()
2314 struct adreno_platform_config *config = pdev->dev.platform_data; in a6xx_gpu_init()
2318 struct msm_gpu *gpu; in a6xx_gpu_init() local
2323 return ERR_PTR(-ENOMEM); in a6xx_gpu_init()
2325 adreno_gpu = &a6xx_gpu->base; in a6xx_gpu_init()
2326 gpu = &adreno_gpu->base; in a6xx_gpu_init()
2328 mutex_init(&a6xx_gpu->gmu.lock); in a6xx_gpu_init()
2330 adreno_gpu->registers = NULL; in a6xx_gpu_init()
2333 node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0); in a6xx_gpu_init()
2337 adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper"); in a6xx_gpu_init()
2339 adreno_gpu->base.hw_apriv = in a6xx_gpu_init()
2340 !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV); in a6xx_gpu_init()
2344 ret = a6xx_set_supported_hw(&pdev->dev, config->info); in a6xx_gpu_init()
2356 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2365 priv->gpu_clamp_to_idle = true; in a6xx_gpu_init()
2373 a6xx_destroy(&(a6xx_gpu->base.base)); in a6xx_gpu_init()
2377 if (gpu->aspace) in a6xx_gpu_init()
2378 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, in a6xx_gpu_init()
2381 return gpu; in a6xx_gpu_init()