Lines matching "zap-shader"

Matched lines from the msm/adreno GPU driver (adreno_gpu.c), grouped below by
the function they occur in; non-matching lines between them are omitted.
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/nvmem-consumer.h>

/* in zap_shader_load_mdt(): */
struct device *dev = &gpu->pdev->dev;
return -EINVAL;
np = of_get_child_by_name(dev->of_node, "zap-shader");
return -ENODEV;
mem_np = of_parse_phandle(np, "memory-region", 0);
return -EINVAL;
 * Check for a firmware-name property. This is the new scheme
 * keys, allowing us to have a different zap fw path for different
 * If the firmware-name property is found, we bypass the
 * If the firmware-name property is not found, for backwards
of_property_read_string_index(np, "firmware-name", 0, &signed_fwname);
ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
 * For new targets, we require the firmware-name property,
 * if a zap-shader is required, rather than falling back
return -ENOENT;
ret = -E2BIG;
ret = -ENOMEM;
 * with upstream linux-firmware it would be in a qcom/ subdir..
if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
 * If the scm call returns -EOPNOTSUPP we assume that this target
 * doesn't need/support the zap shader so quietly fail
if (ret == -EOPNOTSUPP)
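
The comment fragments above describe the zap firmware selection policy: a
"firmware-name" property in the zap-shader DT node names a device-specific
signed image and is used directly, while its absence falls back to the legacy
per-target default for backwards compatibility. A minimal standalone sketch of
that precedence (file names invented; the driver itself goes through
of_property_read_string_index() and request_firmware_direct()):

#include <stdio.h>

/* Hedged sketch of the selection order described above; names and
 * paths are illustrative, not the kernel's. */
static const char *pick_zap_fw(const char *dt_firmware_name,
			       const char *legacy_default)
{
	/* "firmware-name" in the zap-shader DT node wins outright... */
	if (dt_firmware_name)
		return dt_firmware_name;
	/* ...otherwise fall back to the per-target default. */
	return legacy_default;
}

int main(void)
{
	printf("%s\n", pick_zap_fw(NULL, "a630_zap.mdt"));
	printf("%s\n", pick_zap_fw("qcom/sm8150/a640_zap.mbn", "a640_zap.mdt"));
	return 0;
}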

/* in adreno_zap_shader_load(): */
struct platform_device *pdev = gpu->pdev;
/* Short cut if we determine the zap shader isn't available/needed */
return -ENODEV;
DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
return -EPROBE_DEFER;
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);

/* in adreno_iommu_create_address_space(): */
mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
start = max_t(u64, SZ_16M, geometry->aperture_start);
size = geometry->aperture_end - start + 1;
mmu->funcs->destroy(mmu);

/* in adreno_private_address_space_size(): */
if (adreno_gpu->info->address_space_size)
return adreno_gpu->info->address_space_size;

/* in adreno_fault_handler(): */
bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
 * adreno-smmu-priv
if (info->fsr & ARM_SMMU_FSR_TF)
else if (info->fsr & ARM_SMMU_FSR_PF)
else if (info->fsr & ARM_SMMU_FSR_EF)
info->ttbr0, iova,
del_timer(&gpu->hangcheck_timer);
gpu->fault_info.ttbr0 = info->ttbr0;
gpu->fault_info.iova = iova;
gpu->fault_info.flags = flags;
gpu->fault_info.type = type;
gpu->fault_info.block = block;
kthread_queue_work(gpu->worker, &gpu->fault_work);
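
The three fsr tests above classify the SMMU fault status register into a
human-readable type string before the fault details are stashed in
gpu->fault_info for the worker. A self-contained sketch of the same
classification; the bit positions here are illustrative stand-ins, the real
masks come from the arm-smmu register layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the kernel uses ARM_SMMU_FSR_TF/PF/EF. */
#define FSR_TF (1u << 1)	/* translation fault */
#define FSR_PF (1u << 3)	/* permission fault */
#define FSR_EF (1u << 4)	/* external fault */

static const char *fault_type(uint32_t fsr)
{
	if (fsr & FSR_TF)
		return "TRANSLATION";
	else if (fsr & FSR_PF)
		return "PERMISSION";
	else if (fsr & FSR_EF)
		return "EXTERNAL";
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", fault_type(FSR_TF));	/* TRANSLATION */
	return 0;
}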

/* in adreno_get_param(): */
return -EINVAL;
*value = adreno_gpu->info->revn;
*value = adreno_gpu->info->gmem;
*value = adreno_gpu->chip_id;
if (!adreno_gpu->info->revn)
*value |= ((uint64_t) adreno_gpu->speedbin) << 32;
*value = adreno_gpu->base.fast_rate;
if (adreno_gpu->funcs->get_timestamp) {
pm_runtime_get_sync(&gpu->pdev->dev);
ret = adreno_gpu->funcs->get_timestamp(gpu, value);
pm_runtime_put_autosuspend(&gpu->pdev->dev);
return -EINVAL;
*value = gpu->nr_rings * NR_SCHED_PRIORITIES;
if (ctx->aspace)
*value = gpu->global_faults + ctx->aspace->faults;
*value = gpu->global_faults;
*value = gpu->suspend_count;
if (ctx->aspace == gpu->aspace)
return -EINVAL;
*value = ctx->aspace->va_start;
if (ctx->aspace == gpu->aspace)
return -EINVAL;
*value = ctx->aspace->va_size;
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
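
The chip-id fragment above ORs the 16-bit speedbin into the upper half of the
returned 64-bit value when the target has no legacy revn. A worked example of
that packing, with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t value = 0x06030001;	/* example chip id */
	uint16_t speedbin = 0x0090;	/* example fuse value */

	value |= ((uint64_t)speedbin) << 32;	/* same shift as the driver */

	/* upper 32 bits carry the speedbin, lower 32 the chip id */
	printf("param = 0x%016llx\n", (unsigned long long)value);
	return 0;
}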

/* in adreno_set_param(): */
return -EINVAL;
return -EINVAL;
mutex_lock(&gpu->lock);
paramp = &ctx->comm;
paramp = &ctx->cmdline;
mutex_unlock(&gpu->lock);
return -EPERM;
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;

/* in adreno_request_fw(): */
struct drm_device *drm = adreno_gpu->base.dev;
return ERR_PTR(-ENOMEM);
if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
(adreno_gpu->fwloc == FW_LOCATION_NEW)) {
ret = request_firmware_direct(&fw, newname, drm->dev);
DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
adreno_gpu->fwloc = FW_LOCATION_NEW;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
(adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
ret = request_firmware_direct(&fw, fwname, drm->dev);
DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
(adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
ret = request_firmware(&fw, newname, drm->dev);
DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
adreno_gpu->fwloc = FW_LOCATION_HELPER;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
fw = ERR_PTR(-ENOENT);
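
The three blocks above try firmware sources in a fixed order: the qcom/
subdirectory via request_firmware_direct(), the bare legacy name via
request_firmware_direct(), and finally request_firmware() with the
usermode-helper fallback; whichever succeeds first is latched in
adreno_gpu->fwloc so later loads go straight to the same place. A hedged
sketch of that latch-on-first-success pattern (try_load() is a hypothetical
stand-in for the firmware loader):

#include <stdio.h>

enum fw_location { FW_UNKNOWN, FW_NEW, FW_LEGACY, FW_HELPER };

/* Hypothetical stand-in for request_firmware*(); 0 means success. */
static int try_load(const char *name) { return name[0] == 'q' ? 0 : -2; }

static int load_latched(enum fw_location *loc, const char *fwname)
{
	char newname[64];

	snprintf(newname, sizeof(newname), "qcom/%s", fwname);

	/* Probe a location only while *loc is unknown, or reuse the
	 * location latched by an earlier successful load. */
	if (*loc == FW_UNKNOWN || *loc == FW_NEW) {
		if (!try_load(newname)) { *loc = FW_NEW; return 0; }
	}
	if (*loc == FW_UNKNOWN || *loc == FW_LEGACY) {
		if (!try_load(fwname)) { *loc = FW_LEGACY; return 0; }
	}
	if (*loc == FW_UNKNOWN || *loc == FW_HELPER) {
		if (!try_load(newname)) { *loc = FW_HELPER; return 0; }
	}
	return -1;
}

int main(void)
{
	enum fw_location loc = FW_UNKNOWN;
	int ret = load_latched(&loc, "a630_sqe.fw");

	printf("ret=%d loc=%d\n", ret, loc);	/* ret=0 loc=1 (FW_NEW) */
	return 0;
}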

/* in adreno_load_fw(): */
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
if (!adreno_gpu->info->fw[i])
if (adreno_gpu->fw[i])
fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
adreno_gpu->fw[i] = fw;

/* in adreno_fw_create_bo(): */
ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
memcpy(ptr, &fw->data[4], fw->size - 4);

/* in adreno_hw_init(): */
VERB("%s", gpu->name);
for (int i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
ring->cur = ring->start;
ring->next = ring->start;
ring->memptrs->rptr = 0;
if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) {
ring->memptrs->fence = ring->fctx->last_fence;
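
On re-init each ring is rewound (cur and next back to start, rptr zeroed), and
if the completed-fence value read back from memptrs is somehow ahead of the
last fence ever submitted, it is clamped back so stale completions are not
believed. A sketch of that clamp, assuming fence_before() is the usual
wraparound-safe seqno comparison (an assumption, not shown in this excerpt):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe seqno comparison in the spirit of fence_before():
 * the signed difference handles the 32-bit wrap. */
static bool fence_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t last_submitted = 100;	/* fctx->last_fence */
	uint32_t last_completed = 103;	/* scribbled memptrs->fence */

	/* An "impossible" fence: completion ahead of submission.
	 * Clamp it back to the last submitted seqno. */
	if (fence_before(last_submitted, last_completed))
		last_completed = last_submitted;

	printf("completed=%u\n", last_completed);	/* 100 */
	return 0;
}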

/* in get_rptr(): */
struct msm_gpu *gpu = &adreno_gpu->base;
return gpu->funcs->get_rptr(gpu, ring);

/* in adreno_active_ring(): */
return gpu->rb[0];

/* in adreno_recover(): */
struct drm_device *dev = gpu->dev;
// XXX pm-runtime?? we *need* the device to be off after this
// so maybe continuing to call ->pm_suspend/resume() is better?
gpu->funcs->pm_suspend(gpu);
gpu->funcs->pm_resume(gpu);
DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);

/* in adreno_flush(): */
ring->cur = ring->next;
 * the ringbuffer and rb->next hasn't wrapped to zero yet

/* in adreno_idle(): */
gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);

/* in adreno_gpu_state_get(): */
WARN_ON(!mutex_is_locked(&gpu->lock));
kref_init(&state->ref);
ktime_get_real_ts64(&state->time);
for (i = 0; i < gpu->nr_rings; i++) {
state->ring[i].fence = gpu->rb[i]->memptrs->fence;
state->ring[i].iova = gpu->rb[i]->iova;
state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
state->ring[i].wptr = get_wptr(gpu->rb[i]);
size = state->ring[i].wptr;
for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
if (gpu->rb[i]->start[j])
state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
if (state->ring[i].data) {
memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
state->ring[i].data_size = size << 2;
if (!adreno_gpu->registers)
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
count += adreno_gpu->registers[i + 1] -
adreno_gpu->registers[i] + 1;
state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
if (state->registers) {
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
u32 start = adreno_gpu->registers[i];
u32 end = adreno_gpu->registers[i + 1];
state->registers[pos++] = addr;
state->registers[pos++] = gpu_read(gpu, addr);
state->nr_registers = count;
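
The snapshot sizing above starts from wptr but then scans to the end of the
ring for non-zero dwords, growing the captured size so commands past wptr
(for example from a wrapped or hung ring) still land in the dump. A standalone
sketch of that scan with toy values:

#include <stdint.h>
#include <stdio.h>

#define RB_DWORDS 16	/* stand-in for MSM_GPU_RINGBUFFER_SZ >> 2 */

int main(void)
{
	uint32_t rb[RB_DWORDS] = { 1, 2, 3, 0, 0, 7, 0, 0 };
	uint32_t wptr = 3;	/* dwords known-submitted */
	uint32_t size = wptr, j;

	/* Extend the captured size to the last non-zero dword past
	 * wptr, as the snapshot loop above does. */
	for (j = wptr; j < RB_DWORDS; j++)
		if (rb[j])
			size = j + 1;

	printf("capture %u dwords\n", size);	/* 6 */
	return 0;
}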

/* in adreno_gpu_state_destroy(): */
for (i = 0; i < ARRAY_SIZE(state->ring); i++)
kvfree(state->ring[i].data);
for (i = 0; state->bos && i < state->nr_bos; i++)
kvfree(state->bos[i].data);
kfree(state->bos);
kfree(state->comm);
kfree(state->cmd);
kfree(state->registers);

/* in adreno_gpu_state_put(): */
return kref_put(&state->ref, adreno_gpu_state_kref_destroy);

/* in adreno_gpu_ascii85_encode(): */
buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s",

/* in adreno_show_object(): */
 * Only dump the non-zero part of the buffer - rarely will

/* in adreno_show(): */
adreno_gpu->info->revn,
ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
if (state->fault_info.ttbr0) {
const struct msm_gpu_fault_info *info = &state->fault_info;
drm_puts(p, "fault-info:\n");
drm_printf(p, " - ttbr0=%.16llx\n", info->ttbr0);
drm_printf(p, " - iova=%.16lx\n", info->iova);
drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
drm_printf(p, " - type=%s\n", info->type);
drm_printf(p, " - source=%s\n", info->block);
drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
for (i = 0; i < gpu->nr_rings; i++) {
drm_printf(p, " - id: %d\n", i);
drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
drm_printf(p, " last-fence: %u\n", state->ring[i].seqno);
drm_printf(p, " retired-fence: %u\n", state->ring[i].fence);
drm_printf(p, " rptr: %u\n", state->ring[i].rptr);
drm_printf(p, " wptr: %u\n", state->ring[i].wptr);
adreno_show_object(p, &state->ring[i].data,
state->ring[i].data_size, &state->ring[i].encoded);
if (state->bos) {
for (i = 0; i < state->nr_bos; i++) {
drm_printf(p, " - iova: 0x%016llx\n",
state->bos[i].iova);
drm_printf(p, " size: %zd\n", state->bos[i].size);
drm_printf(p, " name: %-32s\n", state->bos[i].name);
adreno_show_object(p, &state->bos[i].data,
state->bos[i].size, &state->bos[i].encoded);
if (state->nr_registers) {
for (i = 0; i < state->nr_registers; i++) {
drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
state->registers[i * 2] << 2,
state->registers[(i * 2) + 1]);

/* in adreno_dump_info(): */
adreno_gpu->info->revn,
ADRENO_CHIPID_ARGS(adreno_gpu->chip_id));
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
ring->memptrs->fence,
ring->fctx->last_fence);

/* in adreno_dump(): */
if (!adreno_gpu->registers)
printk("IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
uint32_t start = adreno_gpu->registers[i];
uint32_t end = adreno_gpu->registers[i+1];
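
Both this dump loop and the crash-state capture in adreno_gpu_state_get() walk
adreno_gpu->registers the same way: a flat array of {start, end} dword-offset
pairs terminated by ~0. A sketch of that walk, counting how many registers the
pairs cover (offsets invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* {start, end} dword offsets, ~0-terminated, as in the driver. */
	static const uint32_t registers[] = { 0x0000, 0x0002,
					      0x0010, 0x0011, ~0u };
	uint32_t i, addr, count = 0;

	for (i = 0; registers[i] != ~0u; i += 2)
		for (addr = registers[i]; addr <= registers[i + 1]; addr++)
			count++;	/* one read per register */

	printf("%u registers\n", count);	/* 3 + 2 = 5 */
	return 0;
}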

/* in ring_freewords(): */
struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
/* Use ring->next to calculate free size */
uint32_t wptr = ring->next - ring->start;
return (rptr + (size - 1) - wptr) % size;
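
The return expression above is classic circular-buffer arithmetic: one slot is
kept unused so that wptr == rptr unambiguously means empty, and the free space
is (rptr + size - 1 - wptr) mod size. A worked example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size = 1024;	/* ring size in dwords (example) */
	uint32_t rptr = 100, wptr = 90;

	/* One slot stays unused so full and empty are distinguishable. */
	uint32_t freewords = (rptr + (size - 1) - wptr) % size;

	printf("%u free dwords\n", freewords);	/* 9: slots 90..98 */
	return 0;
}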

/* in adreno_wait_ring(): */
DRM_DEV_ERROR(ring->gpu->dev->dev,
ring->id);

/* in adreno_get_pwrlevels(): */
gpu->fast_rate = 0;
if (ret == -ENODEV) {
return -ENODEV;
gpu->fast_rate = freq;
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);

/* in adreno_gpu_ocmem_init(): */
if (PTR_ERR(ocmem) == -ENODEV) {
ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->info->gmem);
adreno_ocmem->ocmem = ocmem;
adreno_ocmem->base = ocmem_hdl->addr;
adreno_ocmem->hdl = ocmem_hdl;
if (WARN_ON(ocmem_hdl->len != adreno_gpu->info->gmem))
return -ENOMEM;

/* in adreno_gpu_ocmem_cleanup(): */
if (adreno_ocmem && adreno_ocmem->base)
ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS,
adreno_ocmem->hdl);

/* in adreno_gpu_init(): */
struct device *dev = &pdev->dev;
struct adreno_platform_config *config = dev->platform_data;
struct msm_gpu *gpu = &adreno_gpu->base;
adreno_gpu->funcs = funcs;
adreno_gpu->info = config->info;
adreno_gpu->chip_id = config->chip_id;
gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
gpu->pdev = pdev;
adreno_gpu->info->family < ADRENO_6XX_GEN1) {
adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin);
ADRENO_CHIPID_ARGS(config->chip_id));
return -ENOMEM;
adreno_gpu->info->inactive_period);
return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,

/* in adreno_gpu_cleanup(): */
struct msm_gpu *gpu = &adreno_gpu->base;
struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
pm_runtime_disable(&priv->gpu_pdev->dev);
msm_gpu_cleanup(&adreno_gpu->base);