Lines matching refs: dev
302 struct kfd_node *dev; in kfd_ioctl_create_queue() local
328 dev = pdd->dev; in kfd_ioctl_create_queue()
330 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_create_queue()
337 err = kfd_alloc_process_doorbells(dev->kfd, pdd); in kfd_ioctl_create_queue()
347 if (dev->kfd->shared_resources.enable_mes && in kfd_ioctl_create_queue()
348 ((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) in kfd_ioctl_create_queue()
374 err = amdgpu_amdkfd_map_gtt_bo_to_gart(dev->adev, wptr_bo); in kfd_ioctl_create_queue()
383 dev->id); in kfd_ioctl_create_queue()
385 err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo, in kfd_ioctl_create_queue()
396 if (KFD_IS_SOC15(dev)) in kfd_ioctl_create_queue()
415 kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0); in kfd_ioctl_create_queue()
420 amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&wptr_bo); in kfd_ioctl_create_queue()
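The matches above outline the per-GPU setup in kfd_ioctl_create_queue(): the process-device data is resolved to its kfd_node, the process is bound to that device, and per-process doorbells are allocated before the queue itself is created. A condensed sketch of that pattern; the gpu-id lookup helper and the error labels are assumptions, not part of the matches:

	/* Resolve the target GPU from the ioctl's gpu_id (lookup helper assumed). */
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;
		goto err_pdd;
	}
	dev = pdd->dev;

	/* Bind the process to the device before creating queues on it. */
	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto err_bind_process;
	}

	/* Per-process doorbell pages are allocated before the first queue. */
	err = kfd_alloc_process_doorbells(dev->kfd, pdd);
	if (err)
		goto err_bind_process;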
597 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_set_memory_policy()
610 if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm, in kfd_ioctl_set_memory_policy()
640 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_set_trap_handler()
691 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev); in kfd_ioctl_get_clock_counters()
725 pAperture->gpu_id = pdd->dev->id; in kfd_ioctl_get_process_apertures()
736 "gpu id %u\n", pdd->dev->id); in kfd_ioctl_get_process_apertures()
798 pa[i].gpu_id = pdd->dev->id; in kfd_ioctl_get_process_apertures_new()
807 "gpu id %u\n", pdd->dev->id); in kfd_ioctl_get_process_apertures_new()
903 struct kfd_node *dev; in kfd_ioctl_set_scratch_backing_va() local
912 dev = pdd->dev; in kfd_ioctl_set_scratch_backing_va()
914 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_set_scratch_backing_va()
924 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && in kfd_ioctl_set_scratch_backing_va()
925 pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va) in kfd_ioctl_set_scratch_backing_va()
926 dev->kfd2kgd->set_scratch_backing_va( in kfd_ioctl_set_scratch_backing_va()
927 dev->adev, args->va_addr, pdd->qpd.vmid); in kfd_ioctl_set_scratch_backing_va()
951 amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config); in kfd_ioctl_get_tile_config()
1022 bool kfd_dev_is_large_bar(struct kfd_node *dev) in kfd_dev_is_large_bar() argument
1029 if (dev->local_mem_info.local_mem_size_private == 0 && in kfd_dev_is_large_bar()
1030 dev->local_mem_info.local_mem_size_public > 0) in kfd_dev_is_large_bar()
1033 if (dev->local_mem_info.local_mem_size_public == 0 && in kfd_dev_is_large_bar()
1034 dev->kfd->adev->gmc.is_app_apu) { in kfd_dev_is_large_bar()
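Taken together, the two conditions matched at 1029-1034 mean kfd_dev_is_large_bar() reports a device as large-BAR when all of its local memory is host-visible, or when it has no dedicated public VRAM but is an APP APU. A plausible reconstruction of the function; any debug override and the exact return structure are assumed:

bool kfd_dev_is_large_bar(struct kfd_node *dev)
{
	/* All local memory is public (CPU-visible): classic large BAR. */
	if (dev->local_mem_info.local_mem_size_private == 0 &&
	    dev->local_mem_info.local_mem_size_public > 0)
		return true;

	/* No public VRAM at all, but an APP APU: system memory acts as large BAR. */
	if (dev->local_mem_info.local_mem_size_public == 0 &&
	    dev->kfd->adev->gmc.is_app_apu)
		return true;

	return false;
}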
1050 args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev, in kfd_ioctl_get_available_memory()
1051 pdd->dev->node_id); in kfd_ioctl_get_available_memory()
1062 struct kfd_node *dev; in kfd_ioctl_alloc_memory_of_gpu() local
1109 dev = pdd->dev; in kfd_ioctl_alloc_memory_of_gpu()
1113 !kfd_dev_is_large_bar(dev)) { in kfd_ioctl_alloc_memory_of_gpu()
1119 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_alloc_memory_of_gpu()
1126 if (args->size != kfd_doorbell_process_slice(dev->kfd)) { in kfd_ioctl_alloc_memory_of_gpu()
1140 offset = dev->adev->rmmio_remap.bus_addr; in kfd_ioctl_alloc_memory_of_gpu()
1148 dev->adev, args->va_addr, args->size, in kfd_ioctl_alloc_memory_of_gpu()
1185 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem, in kfd_ioctl_alloc_memory_of_gpu()
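Within kfd_ioctl_alloc_memory_of_gpu(), the matches show doorbell and MMIO allocations being special-cased before the generic GPUVM allocation: a doorbell BO must be exactly one process doorbell slice, an MMIO BO maps the remapped register page at dev->adev->rmmio_remap.bus_addr, and a failed handle creation is unwound with amdgpu_amdkfd_gpuvm_free_memory_of_gpu(). A condensed sketch; the flag names, doorbell-offset helper, and error handling are assumed:

	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
		/* Doorbell BOs must cover exactly one process doorbell slice. */
		if (args->size != kfd_doorbell_process_slice(dev->kfd))
			return -EINVAL;
		offset = kfd_get_process_doorbells(pdd);
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
		/* MMIO BOs map the remapped register page exposed to user space. */
		if (args->size != PAGE_SIZE)
			return -EINVAL;
		offset = dev->adev->rmmio_remap.bus_addr;
		if (!offset)
			return -ENOMEM;
	}

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
			dev->adev, args->va_addr, args->size, pdd->drm_priv,
			(struct kgd_mem **)&mem, &offset, flags, false);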
1228 ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, in kfd_ioctl_free_memory_of_gpu()
1252 struct kfd_node *dev; in kfd_ioctl_map_memory_to_gpu() local
1285 dev = pdd->dev; in kfd_ioctl_map_memory_to_gpu()
1287 pdd = kfd_bind_process_to_device(dev, p); in kfd_ioctl_map_memory_to_gpu()
1309 peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p); in kfd_ioctl_map_memory_to_gpu()
1316 peer_pdd->dev->adev, (struct kgd_mem *)mem, in kfd_ioctl_map_memory_to_gpu()
1319 struct pci_dev *pdev = peer_pdd->dev->adev->pdev; in kfd_ioctl_map_memory_to_gpu()
1321 dev_err(dev->adev->dev, in kfd_ioctl_map_memory_to_gpu()
1333 err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true); in kfd_ioctl_map_memory_to_gpu()
1417 peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv); in kfd_ioctl_unmap_memory_from_gpu()
1426 flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd); in kfd_ioctl_unmap_memory_from_gpu()
1428 err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev, in kfd_ioctl_unmap_memory_from_gpu()
1472 struct kfd_node *dev; in kfd_ioctl_alloc_queue_gws() local
1478 dev = q->device; in kfd_ioctl_alloc_queue_gws()
1484 if (!dev->gws) { in kfd_ioctl_alloc_queue_gws()
1489 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { in kfd_ioctl_alloc_queue_gws()
1494 if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) || in kfd_ioctl_alloc_queue_gws()
1495 kfd_dbg_has_cwsr_workaround(dev))) { in kfd_ioctl_alloc_queue_gws()
1500 retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL); in kfd_ioctl_alloc_queue_gws()
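For kfd_ioctl_alloc_queue_gws(), the matches spell out the gating conditions before GWS is attached to a queue: the device must expose GWS, the scheduler must not be running in the no-HWS policy, and a debug-trapped process is refused when GWS debugging is unsupported or the CWSR workaround is in effect. A sketch of those checks; the specific error codes and the unlock path are assumed:

	if (!dev->gws)
		return -ENODEV;

	/* GWS relies on the hardware scheduler managing the queues. */
	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return -ENODEV;

	/* Debug-trapped processes need GWS debug support and no CWSR workaround. */
	if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) ||
				      kfd_dbg_has_cwsr_workaround(dev)))
		return -EBUSY;

	retval = pqm_set_gws(&p->pqm, args->queue_id,
			     args->num_gws ? dev->gws : NULL);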
1515 struct kfd_node *dev = NULL; in kfd_ioctl_get_dmabuf_info() local
1524 for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++) in kfd_ioctl_get_dmabuf_info()
1525 if (dev && !kfd_devcgroup_check_permission(dev)) in kfd_ioctl_get_dmabuf_info()
1527 if (!dev) in kfd_ioctl_get_dmabuf_info()
1537 r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd, in kfd_ioctl_get_dmabuf_info()
1545 args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id; in kfd_ioctl_get_dmabuf_info()
1547 args->gpu_id = dev->id; in kfd_ioctl_get_dmabuf_info()
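The matches at 1524-1547 show how kfd_ioctl_get_dmabuf_info() selects a device: it enumerates topology nodes until it finds one the caller's device cgroup permits, fails if none is usable, and later reports either the node derived from the dma-buf's owning adev or this fallback node's id. A sketch of the enumeration step; only the loop break and the error code are assumed:

	struct kfd_node *dev = NULL;
	int i;

	/* Pick the first topology node this process's device cgroup allows. */
	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
		if (dev && !kfd_devcgroup_check_permission(dev))
			break;
	if (!dev)
		return -EINVAL;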
1586 pdd = kfd_bind_process_to_device(pdd->dev, p); in kfd_ioctl_import_dmabuf()
1592 r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf, in kfd_ioctl_import_dmabuf()
1613 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem, in kfd_ioctl_import_dmabuf()
1627 struct kfd_node *dev; in kfd_ioctl_export_dmabuf() local
1631 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); in kfd_ioctl_export_dmabuf()
1632 if (!dev) in kfd_ioctl_export_dmabuf()
1637 pdd = kfd_get_process_device_data(dev, p); in kfd_ioctl_export_dmabuf()
1687 return kfd_smi_event_open(pdd->dev, &args->anon_fd); in kfd_ioctl_smi_events()
1809 device_buckets[i].actual_gpu_id = pdd->dev->id; in criu_checkpoint_devices()
1963 KFD_MMAP_GPU_ID(pdd->dev->id); in criu_checkpoint_bos()
1967 KFD_MMAP_GPU_ID(pdd->dev->id); in criu_checkpoint_bos()
1972 if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem)) in criu_checkpoint_bos()
2212 struct kfd_node *dev; in criu_restore_devices() local
2224 dev = kfd_device_by_id(device_buckets[i].actual_gpu_id); in criu_restore_devices()
2225 if (!dev) { in criu_restore_devices()
2232 pdd = kfd_get_process_device_data(dev, p); in criu_restore_devices()
2267 pdd = kfd_bind_process_to_device(dev, p); in criu_restore_devices()
2274 ret = kfd_alloc_process_doorbells(dev->kfd, pdd); in criu_restore_devices()
2303 kfd_doorbell_process_slice(pdd->dev->kfd)) in criu_restore_memory_of_gpu()
2315 offset = pdd->dev->adev->rmmio_remap.bus_addr; in criu_restore_memory_of_gpu()
2324 ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr, in criu_restore_memory_of_gpu()
2341 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv, in criu_restore_memory_of_gpu()
2347 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id); in criu_restore_memory_of_gpu()
2349 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id); in criu_restore_memory_of_gpu()
2395 peer = peer_pdd->dev; in criu_restore_bo()
2774 if (pdd->dev->kfd->shared_resources.enable_mes) in runtime_enable()
2775 kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); in runtime_enable()
2786 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) { in runtime_enable()
2787 amdgpu_gfx_off_ctrl(pdd->dev->adev, false); in runtime_enable()
2788 pdd->dev->kfd2kgd->enable_debug_trap( in runtime_enable()
2789 pdd->dev->adev, in runtime_enable()
2791 pdd->dev->vm_info.last_vmid_kfd); in runtime_enable()
2792 } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { in runtime_enable()
2793 pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap( in runtime_enable()
2794 pdd->dev->adev, in runtime_enable()
2848 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) in runtime_disable()
2849 amdgpu_gfx_off_ctrl(pdd->dev->adev, true); in runtime_disable()
2859 if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { in runtime_disable()
2861 pdd->dev->kfd2kgd->disable_debug_trap( in runtime_disable()
2862 pdd->dev->adev, in runtime_disable()
2864 pdd->dev->vm_info.last_vmid_kfd); in runtime_disable()
2866 if (!pdd->dev->kfd->shared_resources.enable_mes) in runtime_disable()
2867 debug_refresh_runlist(pdd->dev->dqm); in runtime_disable()
2870 !kfd_dbg_has_cwsr_workaround(pdd->dev)); in runtime_disable()
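The runtime_enable()/runtime_disable() matches show how the debug trap is toggled per device: nodes without RLC restore support keep GFXOFF forced off while the trap is armed, per-VMID capable nodes program the trap through the kfd2kgd enable/disable hooks, and the change is propagated either by refreshing the runlist or, with MES enabled, by updating the MES debug mode. A condensed sketch of the disable path over all process devices; the loop framing and the TTMP teardown done elsewhere are assumed:

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		/* Re-enable GFXOFF on nodes that had to hold it off for the trap. */
		if (!kfd_dbg_is_rlc_restore_supported(pdd->dev))
			amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
			pdd->spi_dbg_override =
				pdd->dev->kfd2kgd->disable_debug_trap(
					pdd->dev->adev, false,
					pdd->dev->vm_info.last_vmid_kfd);

			if (!pdd->dev->kfd->shared_resources.enable_mes)
				debug_refresh_runlist(pdd->dev->dqm);
			else
				kfd_dbg_set_mes_debug_mode(pdd,
					!kfd_dbg_has_cwsr_workaround(pdd->dev));
		}
	}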
3349 static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process, in kfd_mmio_mmap() argument
3360 address = dev->adev->rmmio_remap.bus_addr; in kfd_mmio_mmap()
3386 struct kfd_node *dev = NULL; in kfd_mmap() local
3397 dev = kfd_device_by_id(gpu_id); in kfd_mmap()
3401 if (!dev) in kfd_mmap()
3403 return kfd_doorbell_mmap(dev, process, vma); in kfd_mmap()
3409 if (!dev) in kfd_mmap()
3411 return kfd_reserved_mem_mmap(dev, process, vma); in kfd_mmap()
3413 if (!dev) in kfd_mmap()
3415 return kfd_mmio_mmap(dev, process, vma); in kfd_mmap()
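Finally, the kfd_mmap() matches show the dispatch at the bottom of the file: the GPU id is decoded from the mmap offset, looked up with kfd_device_by_id(), and every mapping type that needs a device bails out when the lookup failed. A hedged sketch of the dispatcher; the offset-decoding macros, the events case, and the exact error codes are assumed:

static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct kfd_process *process;
	struct kfd_node *dev = NULL;
	unsigned long mmap_offset;
	unsigned int gpu_id;

	process = kfd_get_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	/* The GPU id and mapping type are packed into the mmap offset. */
	mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
	gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
	if (gpu_id)
		dev = kfd_device_by_id(gpu_id);

	switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
	case KFD_MMAP_TYPE_DOORBELL:
		if (!dev)
			return -ENODEV;
		return kfd_doorbell_mmap(dev, process, vma);

	case KFD_MMAP_TYPE_EVENTS:
		/* Event pages are per process, no device needed. */
		return kfd_event_mmap(process, vma);

	case KFD_MMAP_TYPE_RESERVED_MEM:
		if (!dev)
			return -ENODEV;
		return kfd_reserved_mem_mmap(dev, process, vma);

	case KFD_MMAP_TYPE_MMIO:
		if (!dev)
			return -ENODEV;
		return kfd_mmio_mmap(dev, process, vma);
	}

	return -EFAULT;
}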