Lines Matching refs:gpu

30 static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,  in zap_shader_load_mdt()  argument
33 struct device *dev = &gpu->pdev->dev; in zap_shader_load_mdt()
85 ret = request_firmware_direct(&fw, fwname, gpu->dev->dev); in zap_shader_load_mdt()
90 fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); in zap_shader_load_mdt()
140 if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) { in zap_shader_load_mdt()
176 int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) in adreno_zap_shader_load() argument
178 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_zap_shader_load()
179 struct platform_device *pdev = gpu->pdev; in adreno_zap_shader_load()
191 return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); in adreno_zap_shader_load()
195 adreno_create_address_space(struct msm_gpu *gpu, in adreno_create_address_space() argument
198 return adreno_iommu_create_address_space(gpu, pdev, 0); in adreno_create_address_space()
202 adreno_iommu_create_address_space(struct msm_gpu *gpu, in adreno_iommu_create_address_space() argument
211 mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks); in adreno_iommu_create_address_space()
236 u64 adreno_private_address_space_size(struct msm_gpu *gpu) in adreno_private_address_space_size() argument
238 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_private_address_space_size()
253 int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, in adreno_fault_handler() argument
258 bool do_devcoredump = info && !READ_ONCE(gpu->crashstate); in adreno_fault_handler()
265 gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); in adreno_fault_handler()
295 del_timer(&gpu->hangcheck_timer); in adreno_fault_handler()
297 gpu->fault_info.ttbr0 = info->ttbr0; in adreno_fault_handler()
298 gpu->fault_info.iova = iova; in adreno_fault_handler()
299 gpu->fault_info.flags = flags; in adreno_fault_handler()
300 gpu->fault_info.type = type; in adreno_fault_handler()
301 gpu->fault_info.block = block; in adreno_fault_handler()
303 kthread_queue_work(gpu->worker, &gpu->fault_work); in adreno_fault_handler()
309 int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx, in adreno_get_param() argument
312 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_get_param()
340 pm_runtime_get_sync(&gpu->pdev->dev); in adreno_get_param()
341 ret = adreno_gpu->funcs->get_timestamp(gpu, value); in adreno_get_param()
342 pm_runtime_put_autosuspend(&gpu->pdev->dev); in adreno_get_param()
348 *value = gpu->nr_rings * NR_SCHED_PRIORITIES; in adreno_get_param()
355 *value = gpu->global_faults + ctx->aspace->faults; in adreno_get_param()
357 *value = gpu->global_faults; in adreno_get_param()
360 *value = gpu->suspend_count; in adreno_get_param()
363 if (ctx->aspace == gpu->aspace) in adreno_get_param()
368 if (ctx->aspace == gpu->aspace) in adreno_get_param()
373 DBG("%s: invalid param: %u", gpu->name, param); in adreno_get_param()
378 int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx, in adreno_set_param() argument
404 mutex_lock(&gpu->lock); in adreno_set_param()
415 mutex_unlock(&gpu->lock); in adreno_set_param()
422 return msm_file_private_set_sysprof(ctx, gpu, value); in adreno_set_param()
424 DBG("%s: invalid param: %u", gpu->name, param); in adreno_set_param()
538 struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, in adreno_fw_create_bo() argument
544 ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4, in adreno_fw_create_bo()
545 MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); in adreno_fw_create_bo()
557 int adreno_hw_init(struct msm_gpu *gpu) in adreno_hw_init() argument
559 VERB("%s", gpu->name); in adreno_hw_init()
561 for (int i = 0; i < gpu->nr_rings; i++) { in adreno_hw_init()
562 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_hw_init()
587 struct msm_gpu *gpu = &adreno_gpu->base; in get_rptr() local
589 return gpu->funcs->get_rptr(gpu, ring); in get_rptr()
592 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) in adreno_active_ring() argument
594 return gpu->rb[0]; in adreno_active_ring()
597 void adreno_recover(struct msm_gpu *gpu) in adreno_recover() argument
599 struct drm_device *dev = gpu->dev; in adreno_recover()
605 gpu->funcs->pm_suspend(gpu); in adreno_recover()
606 gpu->funcs->pm_resume(gpu); in adreno_recover()
608 ret = msm_gpu_hw_init(gpu); in adreno_recover()
615 void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg) in adreno_flush() argument
632 gpu_write(gpu, reg, wptr); in adreno_flush()
635 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in adreno_idle() argument
637 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_idle()
646 gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr); in adreno_idle()
651 int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state) in adreno_gpu_state_get() argument
653 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_gpu_state_get()
656 WARN_ON(!mutex_is_locked(&gpu->lock)); in adreno_gpu_state_get()
662 for (i = 0; i < gpu->nr_rings; i++) { in adreno_gpu_state_get()
665 state->ring[i].fence = gpu->rb[i]->memptrs->fence; in adreno_gpu_state_get()
666 state->ring[i].iova = gpu->rb[i]->iova; in adreno_gpu_state_get()
667 state->ring[i].seqno = gpu->rb[i]->fctx->last_fence; in adreno_gpu_state_get()
668 state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]); in adreno_gpu_state_get()
669 state->ring[i].wptr = get_wptr(gpu->rb[i]); in adreno_gpu_state_get()
676 if (gpu->rb[i]->start[j]) in adreno_gpu_state_get()
682 memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2); in adreno_gpu_state_get()
708 state->registers[pos++] = gpu_read(gpu, addr); in adreno_gpu_state_get()
830 void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, in adreno_show() argument
833 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_show()
862 for (i = 0; i < gpu->nr_rings; i++) { in adreno_show()
907 void adreno_dump_info(struct msm_gpu *gpu) in adreno_dump_info() argument
909 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_dump_info()
916 for (i = 0; i < gpu->nr_rings; i++) { in adreno_dump_info()
917 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_dump_info()
929 void adreno_dump(struct msm_gpu *gpu) in adreno_dump() argument
931 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_dump()
938 printk("IO:region %s 00000000 00020000\n", gpu->name); in adreno_dump()
945 uint32_t val = gpu_read(gpu, addr); in adreno_dump()
953 struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); in ring_freewords()
964 DRM_DEV_ERROR(ring->gpu->dev->dev, in adreno_wait_ring()
970 struct msm_gpu *gpu) in adreno_get_pwrlevels() argument
972 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_get_pwrlevels()
977 gpu->fast_rate = 0; in adreno_get_pwrlevels()
1003 gpu->fast_rate = freq; in adreno_get_pwrlevels()
1006 DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); in adreno_get_pwrlevels()
1064 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_init() local
1073 gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1; in adreno_gpu_init()
1106 ret = adreno_get_pwrlevels(dev, gpu); in adreno_gpu_init()
1120 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_cleanup() local
1121 struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL; in adreno_gpu_cleanup()
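
Taken together, the references above show the idiom this file is built around: the generic msm_gpu core passes every helper a struct msm_gpu *gpu, the Adreno layer widens it with to_adreno_gpu() (lines 178, 238, 312, ...), and code that starts from the wrapper reaches back down through &adreno_gpu->base (lines 587, 1064, 1120). Below is a minimal sketch of that embedding pattern, not the driver's actual definitions: the real structs in msm_gpu.h and adreno_gpu.h carry many more fields, and example_callback() is a hypothetical function named only for illustration.

	/*
	 * Sketch only: trimmed stand-ins for the real msm_gpu / adreno_gpu
	 * definitions, kept just large enough to show the container_of()
	 * relationship the listing relies on.
	 */
	#include <linux/kernel.h>	/* container_of() */

	struct adreno_info;		/* opaque here; defined in adreno_gpu.h */

	struct msm_gpu {
		const char *name;
		int nr_rings;
		/* ... */
	};

	struct adreno_gpu {
		struct msm_gpu base;		/* generic core embedded by value */
		const struct adreno_info *info;	/* per-chip data, e.g. ->zapfw */
		/* ... */
	};

	/* Same shape as the driver's helper: recover the wrapper from the core. */
	#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

	/*
	 * Hypothetical callback for illustration: entry points receive the
	 * generic msm_gpu pointer and immediately widen it, exactly as the
	 * adreno_* functions in the listing do.
	 */
	static u64 example_callback(struct msm_gpu *gpu)
	{
		struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

		/* ... use adreno_gpu->info, or pass &adreno_gpu->base back down ... */
		return adreno_gpu->info ? 1 : 0;
	}

The reverse direction works without a cast at all: functions that already hold the adreno_gpu, such as adreno_gpu_init() and adreno_gpu_cleanup() at the end of the listing, simply take &adreno_gpu->base when they need to call back into the generic msm_gpu helpers.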