Lines matching refs: pdd

71 struct kfd_process_device *pdd; in kfd_lock_pdd_by_id() local
74 pdd = kfd_process_device_data_by_id(p, gpu_id); in kfd_lock_pdd_by_id()
76 if (pdd) in kfd_lock_pdd_by_id()
77 return pdd; in kfd_lock_pdd_by_id()
83 static inline void kfd_unlock_pdd(struct kfd_process_device *pdd) in kfd_unlock_pdd() argument
85 mutex_unlock(&pdd->process->mutex); in kfd_unlock_pdd()
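
The lines above (71-85) belong to a pair of small helpers, apparently in amdkfd's kfd_chardev.c, that pair the per-process mutex with the per-GPU PDD lookup. A minimal reconstruction of what these helpers look like, assuming the usual upstream layout; the statements beyond the listed lines are inferred, not quoted:

static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p,
							     __u32 gpu_id)
{
	struct kfd_process_device *pdd;

	/* Take the per-process lock before looking up the per-GPU data. */
	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, gpu_id);

	if (pdd)
		return pdd;	/* success: caller must call kfd_unlock_pdd() */

	/* No PDD for this gpu_id: drop the lock and report failure. */
	mutex_unlock(&p->mutex);
	return NULL;
}

static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
{
	mutex_unlock(&pdd->process->mutex);
}

On success the process mutex is still held, which is why the caller shown later in this listing (lines 1046-1052) always finishes with kfd_unlock_pdd().
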
305 struct kfd_process_device *pdd; in kfd_ioctl_create_queue() local
322 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_create_queue()
323 if (!pdd) { in kfd_ioctl_create_queue()
328 dev = pdd->dev; in kfd_ioctl_create_queue()
330 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_create_queue()
331 if (IS_ERR(pdd)) { in kfd_ioctl_create_queue()
336 if (!pdd->qpd.proc_doorbells) { in kfd_ioctl_create_queue()
337 err = kfd_alloc_process_doorbells(dev->kfd, pdd); in kfd_ioctl_create_queue()
353 wptr_vm = drm_priv_to_vm(pdd->drm_priv); in kfd_ioctl_create_queue()
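
Lines 305-353 show the lookup-then-bind sequence that kfd_ioctl_create_queue() shares with several ioctls further down (set_memory_policy, set_trap_handler, alloc_memory_of_gpu, map_memory_to_gpu). A hedged sketch of that flow, written as a hypothetical helper that does not exist in kfd_chardev.c, with the caller's locking and error labels assumed:

/* Hypothetical helper; illustrates the shared control flow only. */
static struct kfd_process_device *lookup_and_bind(struct kfd_process *p,
						  __u32 gpu_id, int *err)
{
	struct kfd_process_device *pdd;
	struct kfd_node *dev;

	/* Resolve the caller-supplied gpu_id to this process's PDD. */
	pdd = kfd_process_device_data_by_id(p, gpu_id);
	if (!pdd) {
		*err = -EINVAL;		/* unknown gpu_id */
		return NULL;
	}
	dev = pdd->dev;

	/* Establish the process<->device binding (GPUVM setup) if needed. */
	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		*err = PTR_ERR(pdd);
		return NULL;
	}

	/* Doorbell pages are allocated lazily, on first use. */
	if (!pdd->qpd.proc_doorbells) {
		*err = kfd_alloc_process_doorbells(dev->kfd, pdd);
		if (*err)
			return NULL;
	}
	return pdd;
}
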
576 struct kfd_process_device *pdd; in kfd_ioctl_set_memory_policy() local
590 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_memory_policy()
591 if (!pdd) { in kfd_ioctl_set_memory_policy()
597 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_set_memory_policy()
598 if (IS_ERR(pdd)) { in kfd_ioctl_set_memory_policy()
610 if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm, in kfd_ioctl_set_memory_policy()
611 &pdd->qpd, in kfd_ioctl_set_memory_policy()
630 struct kfd_process_device *pdd; in kfd_ioctl_set_trap_handler() local
634 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_trap_handler()
635 if (!pdd) { in kfd_ioctl_set_trap_handler()
640 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_set_trap_handler()
641 if (IS_ERR(pdd)) { in kfd_ioctl_set_trap_handler()
646 kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr); in kfd_ioctl_set_trap_handler()
684 struct kfd_process_device *pdd; in kfd_ioctl_get_clock_counters() local
687 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_get_clock_counters()
689 if (pdd) in kfd_ioctl_get_clock_counters()
691 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev); in kfd_ioctl_get_clock_counters()
721 struct kfd_process_device *pdd = p->pdds[i]; in kfd_ioctl_get_process_apertures() local
725 pAperture->gpu_id = pdd->dev->id; in kfd_ioctl_get_process_apertures()
726 pAperture->lds_base = pdd->lds_base; in kfd_ioctl_get_process_apertures()
727 pAperture->lds_limit = pdd->lds_limit; in kfd_ioctl_get_process_apertures()
728 pAperture->gpuvm_base = pdd->gpuvm_base; in kfd_ioctl_get_process_apertures()
729 pAperture->gpuvm_limit = pdd->gpuvm_limit; in kfd_ioctl_get_process_apertures()
730 pAperture->scratch_base = pdd->scratch_base; in kfd_ioctl_get_process_apertures()
731 pAperture->scratch_limit = pdd->scratch_limit; in kfd_ioctl_get_process_apertures()
736 "gpu id %u\n", pdd->dev->id); in kfd_ioctl_get_process_apertures()
738 "lds_base %llX\n", pdd->lds_base); in kfd_ioctl_get_process_apertures()
740 "lds_limit %llX\n", pdd->lds_limit); in kfd_ioctl_get_process_apertures()
742 "gpuvm_base %llX\n", pdd->gpuvm_base); in kfd_ioctl_get_process_apertures()
744 "gpuvm_limit %llX\n", pdd->gpuvm_limit); in kfd_ioctl_get_process_apertures()
746 "scratch_base %llX\n", pdd->scratch_base); in kfd_ioctl_get_process_apertures()
748 "scratch_limit %llX\n", pdd->scratch_limit); in kfd_ioctl_get_process_apertures()
796 struct kfd_process_device *pdd = p->pdds[i]; in kfd_ioctl_get_process_apertures_new() local
798 pa[i].gpu_id = pdd->dev->id; in kfd_ioctl_get_process_apertures_new()
799 pa[i].lds_base = pdd->lds_base; in kfd_ioctl_get_process_apertures_new()
800 pa[i].lds_limit = pdd->lds_limit; in kfd_ioctl_get_process_apertures_new()
801 pa[i].gpuvm_base = pdd->gpuvm_base; in kfd_ioctl_get_process_apertures_new()
802 pa[i].gpuvm_limit = pdd->gpuvm_limit; in kfd_ioctl_get_process_apertures_new()
803 pa[i].scratch_base = pdd->scratch_base; in kfd_ioctl_get_process_apertures_new()
804 pa[i].scratch_limit = pdd->scratch_limit; in kfd_ioctl_get_process_apertures_new()
807 "gpu id %u\n", pdd->dev->id); in kfd_ioctl_get_process_apertures_new()
809 "lds_base %llX\n", pdd->lds_base); in kfd_ioctl_get_process_apertures_new()
811 "lds_limit %llX\n", pdd->lds_limit); in kfd_ioctl_get_process_apertures_new()
813 "gpuvm_base %llX\n", pdd->gpuvm_base); in kfd_ioctl_get_process_apertures_new()
815 "gpuvm_limit %llX\n", pdd->gpuvm_limit); in kfd_ioctl_get_process_apertures_new()
817 "scratch_base %llX\n", pdd->scratch_base); in kfd_ioctl_get_process_apertures_new()
819 "scratch_limit %llX\n", pdd->scratch_limit); in kfd_ioctl_get_process_apertures_new()
902 struct kfd_process_device *pdd; in kfd_ioctl_set_scratch_backing_va() local
907 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_set_scratch_backing_va()
908 if (!pdd) { in kfd_ioctl_set_scratch_backing_va()
912 dev = pdd->dev; in kfd_ioctl_set_scratch_backing_va()
914 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_set_scratch_backing_va()
915 if (IS_ERR(pdd)) { in kfd_ioctl_set_scratch_backing_va()
916 err = PTR_ERR(pdd); in kfd_ioctl_set_scratch_backing_va()
920 pdd->qpd.sh_hidden_private_base = args->va_addr; in kfd_ioctl_set_scratch_backing_va()
925 pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va) in kfd_ioctl_set_scratch_backing_va()
927 dev->adev, args->va_addr, pdd->qpd.vmid); in kfd_ioctl_set_scratch_backing_va()
941 struct kfd_process_device *pdd; in kfd_ioctl_get_tile_config() local
946 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_get_tile_config()
948 if (!pdd) in kfd_ioctl_get_tile_config()
951 amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config); in kfd_ioctl_get_tile_config()
985 struct kfd_process_device *pdd; in kfd_ioctl_acquire_vm() local
994 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_acquire_vm()
995 if (!pdd) { in kfd_ioctl_acquire_vm()
1000 if (pdd->drm_file) { in kfd_ioctl_acquire_vm()
1001 ret = pdd->drm_file == drm_file ? 0 : -EBUSY; in kfd_ioctl_acquire_vm()
1005 ret = kfd_process_device_init_vm(pdd, drm_file); in kfd_ioctl_acquire_vm()
1046 struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id); in kfd_ioctl_get_available_memory() local
1048 if (!pdd) in kfd_ioctl_get_available_memory()
1050 args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev, in kfd_ioctl_get_available_memory()
1051 pdd->dev->node_id); in kfd_ioctl_get_available_memory()
1052 kfd_unlock_pdd(pdd); in kfd_ioctl_get_available_memory()
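
kfd_ioctl_get_available_memory() (lines 1046-1052) is the one caller in this listing that uses the kfd_lock_pdd_by_id()/kfd_unlock_pdd() pair. A reconstruction of the full handler, assuming the standard amdkfd ioctl signature and the args struct from kfd_ioctl.h:

static int kfd_ioctl_get_available_memory(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_available_memory_args *args = data;
	struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);

	if (!pdd)
		return -EINVAL;
	/* Query available VRAM for this node while holding p->mutex. */
	args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
							      pdd->dev->node_id);
	kfd_unlock_pdd(pdd);
	return 0;
}
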
1060 struct kfd_process_device *pdd; in kfd_ioctl_alloc_memory_of_gpu() local
1103 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_alloc_memory_of_gpu()
1104 if (!pdd) { in kfd_ioctl_alloc_memory_of_gpu()
1109 dev = pdd->dev; in kfd_ioctl_alloc_memory_of_gpu()
1119 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_alloc_memory_of_gpu()
1120 if (IS_ERR(pdd)) { in kfd_ioctl_alloc_memory_of_gpu()
1121 err = PTR_ERR(pdd); in kfd_ioctl_alloc_memory_of_gpu()
1130 offset = kfd_get_process_doorbells(pdd); in kfd_ioctl_alloc_memory_of_gpu()
1149 pdd->drm_priv, (struct kgd_mem **) &mem, &offset, in kfd_ioctl_alloc_memory_of_gpu()
1155 idr_handle = kfd_process_device_create_obj_handle(pdd, mem); in kfd_ioctl_alloc_memory_of_gpu()
1167 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size)); in kfd_ioctl_alloc_memory_of_gpu()
1186 pdd->drm_priv, NULL); in kfd_ioctl_alloc_memory_of_gpu()
1198 struct kfd_process_device *pdd; in kfd_ioctl_free_memory_of_gpu() local
1214 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1215 if (!pdd) { in kfd_ioctl_free_memory_of_gpu()
1222 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1228 ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, in kfd_ioctl_free_memory_of_gpu()
1229 (struct kgd_mem *)mem, pdd->drm_priv, &size); in kfd_ioctl_free_memory_of_gpu()
1236 pdd, GET_IDR_HANDLE(args->handle)); in kfd_ioctl_free_memory_of_gpu()
1238 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size); in kfd_ioctl_free_memory_of_gpu()
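
The GET_GPU_ID()/GET_IDR_HANDLE() accessors used in lines 1214-1236 split the 64-bit handle that kfd_ioctl_alloc_memory_of_gpu() hands back to userspace: the GPU id sits in the upper 32 bits and the per-PDD IDR handle in the lower 32. A sketch of that encoding; the macro names match kfd_priv.h, but the exact definitions here are assumed:

/* Assumed to mirror the handle helpers in kfd_priv.h. */
#define MAKE_HANDLE(gpu_id, idr_handle)	\
	(((uint64_t)(gpu_id) << 32) + (idr_handle))
#define GET_GPU_ID(handle)	((handle) >> 32)
#define GET_IDR_HANDLE(handle)	((handle) & 0xFFFFFFFF)

Freeing a BO therefore first resolves the PDD from the gpu_id half of the handle, then translates the IDR half through kfd_process_device_translate_handle().
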
1250 struct kfd_process_device *pdd, *peer_pdd; in kfd_ioctl_map_memory_to_gpu() local
1280 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_map_memory_to_gpu()
1281 if (!pdd) { in kfd_ioctl_map_memory_to_gpu()
1285 dev = pdd->dev; in kfd_ioctl_map_memory_to_gpu()
1287 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_map_memory_to_gpu()
1288 if (IS_ERR(pdd)) { in kfd_ioctl_map_memory_to_gpu()
1289 err = PTR_ERR(pdd); in kfd_ioctl_map_memory_to_gpu()
1293 mem = kfd_process_device_translate_handle(pdd, in kfd_ioctl_map_memory_to_gpu()
1368 struct kfd_process_device *pdd, *peer_pdd; in kfd_ioctl_unmap_memory_from_gpu() local
1397 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle)); in kfd_ioctl_unmap_memory_from_gpu()
1398 if (!pdd) { in kfd_ioctl_unmap_memory_from_gpu()
1403 mem = kfd_process_device_translate_handle(pdd, in kfd_ioctl_unmap_memory_from_gpu()
1426 flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd); in kfd_ioctl_unmap_memory_from_gpu()
1428 err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev, in kfd_ioctl_unmap_memory_from_gpu()
1568 struct kfd_process_device *pdd; in kfd_ioctl_import_dmabuf() local
1580 pdd = kfd_process_device_data_by_id(p, args->gpu_id); in kfd_ioctl_import_dmabuf()
1581 if (!pdd) { in kfd_ioctl_import_dmabuf()
1586 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_import_dmabuf()
1587 if (IS_ERR(pdd)) { in kfd_ioctl_import_dmabuf()
1588 r = PTR_ERR(pdd); in kfd_ioctl_import_dmabuf()
1592 r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf, in kfd_ioctl_import_dmabuf()
1593 args->va_addr, pdd->drm_priv, in kfd_ioctl_import_dmabuf()
1599 idr_handle = kfd_process_device_create_obj_handle(pdd, mem); in kfd_ioctl_import_dmabuf()
1613 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem, in kfd_ioctl_import_dmabuf()
1614 pdd->drm_priv, NULL); in kfd_ioctl_import_dmabuf()
1625 struct kfd_process_device *pdd; in kfd_ioctl_export_dmabuf() local
1637 pdd = kfd_get_process_device_data(dev, p); in kfd_ioctl_export_dmabuf()
1638 if (!pdd) { in kfd_ioctl_export_dmabuf()
1643 mem = kfd_process_device_translate_handle(pdd, in kfd_ioctl_export_dmabuf()
1678 struct kfd_process_device *pdd; in kfd_ioctl_smi_events() local
1682 pdd = kfd_process_device_data_by_id(p, args->gpuid); in kfd_ioctl_smi_events()
1684 if (!pdd) in kfd_ioctl_smi_events()
1687 return kfd_smi_event_open(pdd->dev, &args->anon_fd); in kfd_ioctl_smi_events()
1806 struct kfd_process_device *pdd = p->pdds[i]; in criu_checkpoint_devices() local
1808 device_buckets[i].user_gpu_id = pdd->user_gpu_id; in criu_checkpoint_devices()
1809 device_buckets[i].actual_gpu_id = pdd->dev->id; in criu_checkpoint_devices()
1846 struct kfd_process_device *pdd = p->pdds[i]; in get_process_num_bos() local
1850 idr_for_each_entry(&pdd->alloc_idr, mem, id) { in get_process_num_bos()
1853 if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base) in get_process_num_bos()
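
get_process_num_bos() (lines 1846-1853) walks every PDD's alloc_idr and counts the BOs that CRIU must checkpoint, skipping the reserved allocation at the base of the GPUVM aperture (the complementary check appears at line 1929 in criu_checkpoint_bos()). A reconstruction, assuming the upstream shape of the function:

static int get_process_num_bos(struct kfd_process *p)
{
	int num_of_bos = 0;
	int i;

	/* Run over all PDDs of the process. */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];
		void *mem;
		int id;

		idr_for_each_entry(&pdd->alloc_idr, mem, id) {
			struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;

			/* Count everything except the BO reserved at gpuvm_base. */
			if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
				num_of_bos++;
		}
	}
	return num_of_bos;
}
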
1908 struct kfd_process_device *pdd = p->pdds[pdd_index]; in criu_checkpoint_bos() local
1912 idr_for_each_entry(&pdd->alloc_idr, mem, id) { in criu_checkpoint_bos()
1929 if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base) in criu_checkpoint_bos()
1935 bo_bucket->gpu_id = pdd->user_gpu_id; in criu_checkpoint_bos()
1963 KFD_MMAP_GPU_ID(pdd->dev->id); in criu_checkpoint_bos()
1967 KFD_MMAP_GPU_ID(pdd->dev->id); in criu_checkpoint_bos()
2213 struct kfd_process_device *pdd; in criu_restore_devices() local
2232 pdd = kfd_get_process_device_data(dev, p); in criu_restore_devices()
2233 if (!pdd) { in criu_restore_devices()
2239 pdd->user_gpu_id = device_buckets[i].user_gpu_id; in criu_restore_devices()
2249 if (pdd->drm_file) { in criu_restore_devices()
2255 if (kfd_process_device_init_vm(pdd, drm_file)) { in criu_restore_devices()
2267 pdd = kfd_bind_process_to_device(dev, p); in criu_restore_devices()
2268 if (IS_ERR(pdd)) { in criu_restore_devices()
2269 ret = PTR_ERR(pdd); in criu_restore_devices()
2273 if (!pdd->qpd.proc_doorbells) { in criu_restore_devices()
2274 ret = kfd_alloc_process_doorbells(dev->kfd, pdd); in criu_restore_devices()
2291 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, in criu_restore_memory_of_gpu() argument
2303 kfd_doorbell_process_slice(pdd->dev->kfd)) in criu_restore_memory_of_gpu()
2306 offset = kfd_get_process_doorbells(pdd); in criu_restore_memory_of_gpu()
2315 offset = pdd->dev->adev->rmmio_remap.bus_addr; in criu_restore_memory_of_gpu()
2324 ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr, in criu_restore_memory_of_gpu()
2325 bo_bucket->size, pdd->drm_priv, kgd_mem, in criu_restore_memory_of_gpu()
2336 idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle, in criu_restore_memory_of_gpu()
2341 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv, in criu_restore_memory_of_gpu()
2347 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id); in criu_restore_memory_of_gpu()
2349 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id); in criu_restore_memory_of_gpu()
2355 WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size); in criu_restore_memory_of_gpu()
2364 struct kfd_process_device *pdd; in criu_restore_bo() local
2373 pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id); in criu_restore_bo()
2374 if (!pdd) { in criu_restore_bo()
2379 ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem); in criu_restore_bo()
2762 struct kfd_process_device *pdd = p->pdds[i]; in runtime_enable() local
2764 if (pdd->qpd.queue_count) in runtime_enable()
2774 if (pdd->dev->kfd->shared_resources.enable_mes) in runtime_enable()
2775 kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); in runtime_enable()
2784 struct kfd_process_device *pdd = p->pdds[i]; in runtime_enable() local
2786 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) { in runtime_enable()
2787 amdgpu_gfx_off_ctrl(pdd->dev->adev, false); in runtime_enable()
2788 pdd->dev->kfd2kgd->enable_debug_trap( in runtime_enable()
2789 pdd->dev->adev, in runtime_enable()
2791 pdd->dev->vm_info.last_vmid_kfd); in runtime_enable()
2792 } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { in runtime_enable()
2793 pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap( in runtime_enable()
2794 pdd->dev->adev, in runtime_enable()
2846 struct kfd_process_device *pdd = p->pdds[i]; in runtime_disable() local
2848 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) in runtime_disable()
2849 amdgpu_gfx_off_ctrl(pdd->dev->adev, true); in runtime_disable()
2857 struct kfd_process_device *pdd = p->pdds[i]; in runtime_disable() local
2859 if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { in runtime_disable()
2860 pdd->spi_dbg_override = in runtime_disable()
2861 pdd->dev->kfd2kgd->disable_debug_trap( in runtime_disable()
2862 pdd->dev->adev, in runtime_disable()
2864 pdd->dev->vm_info.last_vmid_kfd); in runtime_disable()
2866 if (!pdd->dev->kfd->shared_resources.enable_mes) in runtime_disable()
2867 debug_refresh_runlist(pdd->dev->dqm); in runtime_disable()
2869 kfd_dbg_set_mes_debug_mode(pdd, in runtime_disable()
2870 !kfd_dbg_has_cwsr_workaround(pdd->dev)); in runtime_disable()
2902 struct kfd_process_device *pdd = NULL; in kfd_ioctl_set_debug_trap() local
2987 pdd = kfd_process_device_data_by_id(target, user_gpu_id); in kfd_ioctl_set_debug_trap()
2988 if (user_gpu_id == -EINVAL || !pdd) { in kfd_ioctl_set_debug_trap()
3045 r = kfd_dbg_trap_set_dev_address_watch(pdd, in kfd_ioctl_set_debug_trap()
3052 r = kfd_dbg_trap_clear_dev_address_watch(pdd, in kfd_ioctl_set_debug_trap()