Lines matching refs:p: cross-reference hits for the identifier p (the struct kfd_process pointer) in the amdkfd process-management code. Each hit shows the source line number, the matching line, and the enclosing function; the trailing local/argument/member tags give the symbol's role at that site.
302 struct kfd_process *p = container_of(attr, struct kfd_process, in kfd_procfs_show() local
305 return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid); in kfd_procfs_show()
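kfd_procfs_show (line 302) recovers the owning kfd_process from the bare attribute pointer with container_of, then prints the PASID into the single page that sysfs provides. A minimal sketch of the same show-callback pattern, using an illustrative struct foo that is not from the driver:

    #include <linux/kobject.h>
    #include <linux/mm.h>           /* PAGE_SIZE */
    #include <linux/sysfs.h>

    struct foo {
        struct attribute attr_value;    /* embedded, so container_of works */
        int value;
    };

    /* The raw sysfs_ops ->show() gets only the attribute pointer;
     * walk back to the structure that embeds it. */
    static ssize_t foo_show(struct kobject *kobj, struct attribute *attr,
                            char *buffer)
    {
        struct foo *f = container_of(attr, struct foo, attr_value);

        /* sysfs hands the callback exactly one PAGE_SIZE buffer. */
        return snprintf(buffer, PAGE_SIZE, "%d\n", f->value);
    }

Newer kernels would use sysfs_emit(buffer, ...) here, which enforces the PAGE_SIZE bound itself.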
534 static void kfd_procfs_add_sysfs_stats(struct kfd_process *p) in kfd_procfs_add_sysfs_stats() argument
540 if (!p || !p->kobj) in kfd_procfs_add_sysfs_stats()
549 for (i = 0; i < p->n_pdds; i++) { in kfd_procfs_add_sysfs_stats()
550 struct kfd_process_device *pdd = p->pdds[i]; in kfd_procfs_add_sysfs_stats()
560 p->kobj, in kfd_procfs_add_sysfs_stats()
581 static void kfd_procfs_add_sysfs_counters(struct kfd_process *p) in kfd_procfs_add_sysfs_counters() argument
587 if (!p || !p->kobj) in kfd_procfs_add_sysfs_counters()
597 for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) { in kfd_procfs_add_sysfs_counters()
598 struct kfd_process_device *pdd = p->pdds[i]; in kfd_procfs_add_sysfs_counters()
608 p->kobj, counters_dir_filename); in kfd_procfs_add_sysfs_counters()
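kfd_procfs_add_sysfs_counters (line 597) uses for_each_set_bit to visit only the devices whose bit is set in the SVM support bitmap, instead of testing every index by hand. A small sketch of the idiom with an illustrative bitmap (the names below are assumptions, not driver code):

    #include <linux/bitmap.h>
    #include <linux/bitops.h>
    #include <linux/printk.h>

    #define MAX_DEVS 32

    static void visit_supported(void)
    {
        DECLARE_BITMAP(supported, MAX_DEVS) = { 0 };
        unsigned int i;

        __set_bit(0, supported);
        __set_bit(3, supported);

        /* i takes only the set-bit positions: 0, then 3. */
        for_each_set_bit(i, supported, MAX_DEVS)
            pr_info("device %u supports SVM counters\n", i);
    }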
626 static void kfd_procfs_add_sysfs_files(struct kfd_process *p) in kfd_procfs_add_sysfs_files() argument
630 if (!p || !p->kobj) in kfd_procfs_add_sysfs_files()
638 for (i = 0; i < p->n_pdds; i++) { in kfd_procfs_add_sysfs_files()
639 struct kfd_process_device *pdd = p->pdds[i]; in kfd_procfs_add_sysfs_files()
643 kfd_sysfs_create_file(p->kobj, &pdd->attr_vram, in kfd_procfs_add_sysfs_files()
648 kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma, in kfd_procfs_add_sysfs_files()
916 struct kfd_process *p; in find_process() local
920 p = find_process_by_mm(thread->mm); in find_process()
921 if (p && ref) in find_process()
922 kref_get(&p->ref); in find_process()
925 return p; in find_process()
928 void kfd_unref_process(struct kfd_process *p) in kfd_unref_process() argument
930 kref_put(&p->ref, kfd_process_ref_release); in kfd_unref_process()
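Lines 916-930 are the two halves of the process refcounting contract: find_process takes a reference with kref_get while the lookup path still pins the object, and kfd_unref_process drops it with kref_put, naming the release callback that runs on the final put. A minimal sketch of the kref pattern (struct obj and its helpers are illustrative):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
        struct kref ref;
        /* ... payload ... */
    };

    static void obj_release(struct kref *ref)
    {
        struct obj *o = container_of(ref, struct obj, ref);

        kfree(o);
    }

    /* Caller must hold whatever lock keeps @o findable, so this
     * kref_get() cannot race with the final kref_put(). */
    static struct obj *obj_get(struct obj *o)
    {
        if (o)
            kref_get(&o->ref);
        return o;
    }

    static void obj_put(struct obj *o)
    {
        kref_put(&o->ref, obj_release); /* obj_release() on last put */
    }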
937 struct kfd_process *p = NULL; in kfd_lookup_process_by_pid() local
947 p = find_process(task, true); in kfd_lookup_process_by_pid()
951 return p; in kfd_lookup_process_by_pid()
956 struct kfd_process *p = pdd->process; in kfd_process_device_free_bos() local
967 for (i = 0; i < p->n_pdds; i++) { in kfd_process_device_free_bos()
968 struct kfd_process_device *peer_pdd = p->pdds[i]; in kfd_process_device_free_bos()
986 static void kfd_process_kunmap_signal_bo(struct kfd_process *p) in kfd_process_kunmap_signal_bo() argument
992 kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle)); in kfd_process_kunmap_signal_bo()
996 mutex_lock(&p->mutex); in kfd_process_kunmap_signal_bo()
998 pdd = kfd_get_process_device_data(kdev, p); in kfd_process_kunmap_signal_bo()
1003 pdd, GET_IDR_HANDLE(p->signal_handle)); in kfd_process_kunmap_signal_bo()
1010 mutex_unlock(&p->mutex); in kfd_process_kunmap_signal_bo()
1013 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p) in kfd_process_free_outstanding_kfd_bos() argument
1017 for (i = 0; i < p->n_pdds; i++) in kfd_process_free_outstanding_kfd_bos()
1018 kfd_process_device_free_bos(p->pdds[i]); in kfd_process_free_outstanding_kfd_bos()
1021 static void kfd_process_destroy_pdds(struct kfd_process *p) in kfd_process_destroy_pdds() argument
1025 for (i = 0; i < p->n_pdds; i++) { in kfd_process_destroy_pdds()
1026 struct kfd_process_device *pdd = p->pdds[i]; in kfd_process_destroy_pdds()
1029 pdd->dev->id, p->pasid); in kfd_process_destroy_pdds()
1062 p->pdds[i] = NULL; in kfd_process_destroy_pdds()
1064 p->n_pdds = 0; in kfd_process_destroy_pdds()
1067 static void kfd_process_remove_sysfs(struct kfd_process *p) in kfd_process_remove_sysfs() argument
1072 if (!p->kobj) in kfd_process_remove_sysfs()
1075 sysfs_remove_file(p->kobj, &p->attr_pasid); in kfd_process_remove_sysfs()
1076 kobject_del(p->kobj_queues); in kfd_process_remove_sysfs()
1077 kobject_put(p->kobj_queues); in kfd_process_remove_sysfs()
1078 p->kobj_queues = NULL; in kfd_process_remove_sysfs()
1080 for (i = 0; i < p->n_pdds; i++) { in kfd_process_remove_sysfs()
1081 pdd = p->pdds[i]; in kfd_process_remove_sysfs()
1083 sysfs_remove_file(p->kobj, &pdd->attr_vram); in kfd_process_remove_sysfs()
1084 sysfs_remove_file(p->kobj, &pdd->attr_sdma); in kfd_process_remove_sysfs()
1095 for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) { in kfd_process_remove_sysfs()
1096 pdd = p->pdds[i]; in kfd_process_remove_sysfs()
1106 kobject_del(p->kobj); in kfd_process_remove_sysfs()
1107 kobject_put(p->kobj); in kfd_process_remove_sysfs()
1108 p->kobj = NULL; in kfd_process_remove_sysfs()
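kfd_process_remove_sysfs (lines 1067-1108) follows the standard sysfs teardown order: remove the attribute files, kobject_del() to unlink the node from sysfs immediately, kobject_put() to drop the reference, then clear the pointer so a repeat call is a no-op. A condensed sketch of that order (identifiers are illustrative):

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    struct node_state {
        struct kobject *kobj;       /* e.g. from kobject_create_and_add() */
        struct attribute attr_value;
    };

    static void node_state_remove_sysfs(struct node_state *ns)
    {
        if (!ns->kobj)              /* idempotent: already torn down */
            return;

        sysfs_remove_file(ns->kobj, &ns->attr_value);

        kobject_del(ns->kobj);      /* unlink from sysfs right away   */
        kobject_put(ns->kobj);      /* free once the last ref is gone */
        ns->kobj = NULL;            /* guard against double teardown  */
    }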
1118 struct kfd_process *p = container_of(work, struct kfd_process, in kfd_process_wq_release() local
1121 kfd_process_dequeue_from_all_devices(p); in kfd_process_wq_release()
1122 pqm_uninit(&p->pqm); in kfd_process_wq_release()
1128 dma_fence_signal(p->ef); in kfd_process_wq_release()
1130 kfd_process_remove_sysfs(p); in kfd_process_wq_release()
1132 kfd_process_kunmap_signal_bo(p); in kfd_process_wq_release()
1133 kfd_process_free_outstanding_kfd_bos(p); in kfd_process_wq_release()
1134 svm_range_list_fini(p); in kfd_process_wq_release()
1136 kfd_process_destroy_pdds(p); in kfd_process_wq_release()
1137 dma_fence_put(p->ef); in kfd_process_wq_release()
1139 kfd_event_free_process(p); in kfd_process_wq_release()
1141 kfd_pasid_free(p->pasid); in kfd_process_wq_release()
1142 mutex_destroy(&p->mutex); in kfd_process_wq_release()
1144 put_task_struct(p->lead_thread); in kfd_process_wq_release()
1146 kfree(p); in kfd_process_wq_release()
1151 struct kfd_process *p = container_of(ref, struct kfd_process, ref); in kfd_process_ref_release() local
1153 INIT_WORK(&p->release_work, kfd_process_wq_release); in kfd_process_ref_release()
1154 queue_work(kfd_process_wq, &p->release_work); in kfd_process_ref_release()
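kfd_process_ref_release (lines 1151-1154) cannot tear the process down inline: the final kref_put may happen in a context that must not sleep, while the teardown takes mutexes and frees BOs. It therefore packages the teardown into release_work and punts it to a workqueue. A sketch of the release-via-workqueue idiom (the driver queues to its own kfd_process_wq; this sketch uses the system workqueue):

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct obj {
        struct kref ref;
        struct work_struct release_work;
    };

    static void obj_wq_release(struct work_struct *work)
    {
        struct obj *o = container_of(work, struct obj, release_work);

        /* Process context: safe to sleep, lock, and free here. */
        kfree(o);
    }

    static void obj_ref_release(struct kref *ref)
    {
        struct obj *o = container_of(ref, struct obj, ref);

        /* The last put may come from atomic context; defer the
         * sleepable teardown instead of doing it inline. */
        INIT_WORK(&o->release_work, obj_wq_release);
        schedule_work(&o->release_work);
    }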
1160 struct kfd_process *p = find_process_by_mm(mm); in kfd_process_alloc_notifier() local
1164 return p ? &p->mmu_notifier : ERR_PTR(-ESRCH); in kfd_process_alloc_notifier()
1172 static void kfd_process_notifier_release_internal(struct kfd_process *p) in kfd_process_notifier_release_internal() argument
1176 cancel_delayed_work_sync(&p->eviction_work); in kfd_process_notifier_release_internal()
1177 cancel_delayed_work_sync(&p->restore_work); in kfd_process_notifier_release_internal()
1179 for (i = 0; i < p->n_pdds; i++) { in kfd_process_notifier_release_internal()
1180 struct kfd_process_device *pdd = p->pdds[i]; in kfd_process_notifier_release_internal()
1183 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup) in kfd_process_notifier_release_internal()
1188 p->mm = NULL; in kfd_process_notifier_release_internal()
1189 kfd_dbg_trap_disable(p); in kfd_process_notifier_release_internal()
1191 if (atomic_read(&p->debugged_process_count) > 0) { in kfd_process_notifier_release_internal()
1197 if (target->debugger_process && target->debugger_process == p) { in kfd_process_notifier_release_internal()
1201 if (atomic_read(&p->debugged_process_count) == 0) in kfd_process_notifier_release_internal()
1209 mmu_notifier_put(&p->mmu_notifier); in kfd_process_notifier_release_internal()
1215 struct kfd_process *p; in kfd_process_notifier_release() local
1221 p = container_of(mn, struct kfd_process, mmu_notifier); in kfd_process_notifier_release()
1222 if (WARN_ON(p->mm != mm)) in kfd_process_notifier_release()
1237 hash_del_rcu(&p->kfd_processes); in kfd_process_notifier_release()
1241 kfd_process_notifier_release_internal(p); in kfd_process_notifier_release()
1258 struct kfd_process *p; in kfd_cleanup_processes() local
1270 hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) { in kfd_cleanup_processes()
1271 hash_del_rcu(&p->kfd_processes); in kfd_cleanup_processes()
1273 hlist_add_head(&p->kfd_processes, &cleanup_list); in kfd_cleanup_processes()
1277 hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes) in kfd_cleanup_processes()
1278 kfd_process_notifier_release_internal(p); in kfd_cleanup_processes()
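kfd_cleanup_processes (lines 1258-1278) unhashes every process while holding the table lock, parks the unhashed entries on a private hlist, and only then runs the heavyweight release path on each entry with the lock dropped. A sketch of the unlink-then-process pattern (names illustrative; like the driver, it must let an RCU grace period elapse before the entries are actually freed):

    #include <linux/hashtable.h>
    #include <linux/spinlock.h>

    struct obj {
        struct hlist_node node;
    };

    static DEFINE_HASHTABLE(obj_table, 6);
    static DEFINE_SPINLOCK(obj_table_lock);

    static void obj_final_release(struct obj *o);

    static void cleanup_all_objs(void)
    {
        HLIST_HEAD(cleanup_list);
        struct hlist_node *tmp;
        struct obj *o;
        unsigned int bkt;

        /* Unhash everything quickly under the lock ... */
        spin_lock(&obj_table_lock);
        hash_for_each_safe(obj_table, bkt, tmp, o, node) {
            hash_del_rcu(&o->node);
            /* Reuse the now-unhashed node to park the entry. */
            hlist_add_head(&o->node, &cleanup_list);
        }
        spin_unlock(&obj_table_lock);

        /* ... then do the slow teardown without the lock held. */
        hlist_for_each_entry_safe(o, tmp, &cleanup_list, node)
            obj_final_release(o);
    }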
1287 int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) in kfd_process_init_cwsr_apu() argument
1292 if (p->has_cwsr) in kfd_process_init_cwsr_apu()
1295 for (i = 0; i < p->n_pdds; i++) { in kfd_process_init_cwsr_apu()
1296 struct kfd_node *dev = p->pdds[i]->dev; in kfd_process_init_cwsr_apu()
1297 struct qcm_process_device *qpd = &p->pdds[i]->qpd; in kfd_process_init_cwsr_apu()
1318 kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled); in kfd_process_init_cwsr_apu()
1325 p->has_cwsr = true; in kfd_process_init_cwsr_apu()
1396 bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) in kfd_process_xnack_mode() argument
1414 for (i = 0; i < p->n_pdds; i++) { in kfd_process_xnack_mode()
1415 struct kfd_node *dev = p->pdds[i]->dev; in kfd_process_xnack_mode()
1558 struct kfd_process *p) in kfd_get_process_device_data() argument
1562 for (i = 0; i < p->n_pdds; i++) in kfd_get_process_device_data()
1563 if (p->pdds[i]->dev == dev) in kfd_get_process_device_data()
1564 return p->pdds[i]; in kfd_get_process_device_data()
1570 struct kfd_process *p) in kfd_create_process_device_data() argument
1575 if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE)) in kfd_create_process_device_data()
1585 pdd->qpd.pqm = &p->pqm; in kfd_create_process_device_data()
1588 pdd->process = p; in kfd_create_process_device_data()
1611 p->pdds[p->n_pdds++] = pdd; in kfd_create_process_device_data()
1647 struct kfd_process *p; in kfd_process_device_init_vm() local
1662 p = pdd->process; in kfd_process_device_init_vm()
1666 &p->kgd_process_info, in kfd_process_device_init_vm()
1667 &p->ef); in kfd_process_device_init_vm()
1682 ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid); in kfd_process_device_init_vm()
1709 struct kfd_process *p) in kfd_bind_process_to_device() argument
1714 pdd = kfd_get_process_device_data(dev, p); in kfd_bind_process_to_device()
1779 struct kfd_process *p, *ret_p = NULL; in kfd_lookup_process_by_pasid() local
1784 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { in kfd_lookup_process_by_pasid()
1785 if (p->pasid == pasid) { in kfd_lookup_process_by_pasid()
1786 kref_get(&p->ref); in kfd_lookup_process_by_pasid()
1787 ret_p = p; in kfd_lookup_process_by_pasid()
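kfd_lookup_process_by_pasid (lines 1779-1787) scans the process table under a read-side lock and takes a kref before that lock is released, so the caller gets a reference that outlives the table walk. A sketch of lookup-and-get (names illustrative; the driver actually uses SRCU for its read side):

    #include <linux/hashtable.h>
    #include <linux/kref.h>
    #include <linux/rcupdate.h>

    struct obj {
        struct hlist_node node;
        struct kref ref;
        u32 pasid;
    };

    static DEFINE_HASHTABLE(obj_table, 6);

    static struct obj *lookup_obj_by_pasid(u32 pasid)
    {
        struct obj *o, *found = NULL;
        unsigned int bkt;

        rcu_read_lock();
        hash_for_each_rcu(obj_table, bkt, o, node) {
            if (o->pasid == pasid) {
                /* Grab the ref while the read lock still
                 * guarantees the object is not freed. */
                kref_get(&o->ref);
                found = o;
                break;
            }
        }
        rcu_read_unlock();

        return found;   /* caller must drop it with kref_put() */
    }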
1800 struct kfd_process *p; in kfd_lookup_process_by_mm() local
1804 p = find_process_by_mm(mm); in kfd_lookup_process_by_mm()
1805 if (p) in kfd_lookup_process_by_mm()
1806 kref_get(&p->ref); in kfd_lookup_process_by_mm()
1810 return p; in kfd_lookup_process_by_mm()
1818 int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger) in kfd_process_evict_queues() argument
1824 for (i = 0; i < p->n_pdds; i++) { in kfd_process_evict_queues()
1825 struct kfd_process_device *pdd = p->pdds[i]; in kfd_process_evict_queues()
1827 kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid, in kfd_process_evict_queues()
1849 for (i = 0; i < p->n_pdds; i++) { in kfd_process_evict_queues()
1850 struct kfd_process_device *pdd = p->pdds[i]; in kfd_process_evict_queues()
1855 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); in kfd_process_evict_queues()
1868 int kfd_process_restore_queues(struct kfd_process *p) in kfd_process_restore_queues() argument
1873 for (i = 0; i < p->n_pdds; i++) { in kfd_process_restore_queues()
1874 struct kfd_process_device *pdd = p->pdds[i]; in kfd_process_restore_queues()
1876 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid); in kfd_process_restore_queues()
1890 int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id) in kfd_process_gpuidx_from_gpuid() argument
1894 for (i = 0; i < p->n_pdds; i++) in kfd_process_gpuidx_from_gpuid()
1895 if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id) in kfd_process_gpuidx_from_gpuid()
1901 kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node, in kfd_process_gpuid_from_node() argument
1906 for (i = 0; i < p->n_pdds; i++) in kfd_process_gpuid_from_node()
1907 if (p->pdds[i] && p->pdds[i]->dev == node) { in kfd_process_gpuid_from_node()
1908 *gpuid = p->pdds[i]->user_gpu_id; in kfd_process_gpuid_from_node()
1918 struct kfd_process *p; in evict_process_worker() local
1926 p = container_of(dwork, struct kfd_process, eviction_work); in evict_process_worker()
1927 WARN_ONCE(p->last_eviction_seqno != p->ef->seqno, in evict_process_worker()
1936 flush_delayed_work(&p->restore_work); in evict_process_worker()
1938 pr_debug("Started evicting pasid 0x%x\n", p->pasid); in evict_process_worker()
1939 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM); in evict_process_worker()
1941 dma_fence_signal(p->ef); in evict_process_worker()
1942 dma_fence_put(p->ef); in evict_process_worker()
1943 p->ef = NULL; in evict_process_worker()
1944 queue_delayed_work(kfd_restore_wq, &p->restore_work, in evict_process_worker()
1947 pr_debug("Finished evicting pasid 0x%x\n", p->pasid); in evict_process_worker()
1949 pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid); in evict_process_worker()
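evict_process_worker (lines 1918-1949) is a delayed-work handler: it converts the raw work_struct back to its delayed_work with to_delayed_work, then to the owning process with container_of, and on success arms the restore work to run later. A sketch of a delayed-work handler recovering its owner (names and the delay value are illustrative):

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct obj {
        struct delayed_work evict_work;
        struct delayed_work restore_work;
    };

    static void obj_evict_worker(struct work_struct *work)
    {
        /* Handlers receive the embedded work_struct; walk back to
         * the delayed_work, then to the object that embeds it. */
        struct delayed_work *dwork = to_delayed_work(work);
        struct obj *o = container_of(dwork, struct obj, evict_work);

        /* ... evict queues ... then schedule the restore step. */
        schedule_delayed_work(&o->restore_work, msecs_to_jiffies(2000));
    }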
1955 struct kfd_process *p; in restore_process_worker() local
1963 p = container_of(dwork, struct kfd_process, restore_work); in restore_process_worker()
1964 pr_debug("Started restoring pasid 0x%x\n", p->pasid); in restore_process_worker()
1976 p->last_restore_timestamp = get_jiffies_64(); in restore_process_worker()
1978 if (p->kgd_process_info) in restore_process_worker()
1979 ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, in restore_process_worker()
1980 &p->ef); in restore_process_worker()
1983 p->pasid, PROCESS_BACK_OFF_TIME_MS); in restore_process_worker()
1984 ret = queue_delayed_work(kfd_restore_wq, &p->restore_work, in restore_process_worker()
1990 ret = kfd_process_restore_queues(p); in restore_process_worker()
1992 pr_debug("Finished restoring pasid 0x%x\n", p->pasid); in restore_process_worker()
1994 pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid); in restore_process_worker()
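restore_process_worker (lines 1955-1994) retries itself on failure: if restoring the process BOs fails (line 1983), it re-queues its own delayed work after PROCESS_BACK_OFF_TIME_MS rather than giving up. A sketch of a self-rearming back-off worker (the constant and names are illustrative):

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    #define BACK_OFF_TIME_MS 100        /* illustrative back-off period */

    struct obj {
        struct delayed_work restore_work;
    };

    static int try_restore(struct obj *o); /* may fail transiently */

    static void obj_restore_worker(struct work_struct *work)
    {
        struct delayed_work *dwork = to_delayed_work(work);
        struct obj *o = container_of(dwork, struct obj, restore_work);

        if (try_restore(o)) {
            /* Transient failure: re-arm ourselves after a back-off
             * rather than failing the object permanently. */
            queue_delayed_work(system_wq, &o->restore_work,
                               msecs_to_jiffies(BACK_OFF_TIME_MS));
        }
    }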
1999 struct kfd_process *p; in kfd_suspend_all_processes() local
2004 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { in kfd_suspend_all_processes()
2005 cancel_delayed_work_sync(&p->eviction_work); in kfd_suspend_all_processes()
2006 flush_delayed_work(&p->restore_work); in kfd_suspend_all_processes()
2008 if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND)) in kfd_suspend_all_processes()
2009 pr_err("Failed to suspend process 0x%x\n", p->pasid); in kfd_suspend_all_processes()
2010 dma_fence_signal(p->ef); in kfd_suspend_all_processes()
2011 dma_fence_put(p->ef); in kfd_suspend_all_processes()
2012 p->ef = NULL; in kfd_suspend_all_processes()
2019 struct kfd_process *p; in kfd_resume_all_processes() local
2023 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { in kfd_resume_all_processes()
2024 if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) { in kfd_resume_all_processes()
2026 p->pasid); in kfd_resume_all_processes()
2137 struct kfd_process *p; in kfd_process_close_interrupt_drain() local
2139 p = kfd_lookup_process_by_pasid(pasid); in kfd_process_close_interrupt_drain()
2141 if (!p) in kfd_process_close_interrupt_drain()
2144 WRITE_ONCE(p->irq_drain_is_open, false); in kfd_process_close_interrupt_drain()
2145 wake_up_all(&p->wait_irq_drain); in kfd_process_close_interrupt_drain()
2146 kfd_unref_process(p); in kfd_process_close_interrupt_drain()
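kfd_process_close_interrupt_drain (lines 2137-2146) looks up the process by PASID, flips the drain flag with WRITE_ONCE, wakes every sleeper on the waitqueue, and drops its lookup reference. The sleeping side presumably pairs this with wait_event and READ_ONCE; a sketch of both halves under that assumption (names illustrative):

    #include <linux/wait.h>

    struct obj {
        bool drain_is_open;
        wait_queue_head_t wait_drain;
    };

    /* Sleeper: block until the drain is closed. */
    static int wait_for_drain_closed(struct obj *o)
    {
        /* wait_event_interruptible() re-checks the condition on
         * every wakeup, so a plain flag plus *_ONCE suffices. */
        return wait_event_interruptible(o->wait_drain,
                                        !READ_ONCE(o->drain_is_open));
    }

    /* Waker: close the drain and release all sleepers. */
    static void close_drain(struct obj *o)
    {
        WRITE_ONCE(o->drain_is_open, false);
        wake_up_all(&o->wait_drain);
    }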
2151 struct kfd_process *p; member
2159 struct kfd_process *p; in send_exception_work_handler() local
2170 p = workarea->p; in send_exception_work_handler()
2172 mm = get_task_mm(p->lead_thread); in send_exception_work_handler()
2179 q = pqm_get_user_queue(&p->pqm, workarea->queue_id); in send_exception_work_handler()
2192 kfd_set_event(p, ev_id); in send_exception_work_handler()
2199 int kfd_send_exception_to_runtime(struct kfd_process *p, in kfd_send_exception_to_runtime() argument
2207 worker.p = p; in kfd_send_exception_to_runtime()
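kfd_send_exception_to_runtime (lines 2199-2207) packs its arguments into a work-area struct (the struct kfd_process *p member at line 2151) that embeds the work_struct, so the handler can recover everything with a single container_of. A sketch of an on-stack work item carrying arguments, assuming the caller flushes the work before its stack frame goes away:

    #include <linux/workqueue.h>

    struct send_workarea {
        struct work_struct work;
        struct obj *o;          /* arguments for the handler */
        u32 queue_id;
    };

    static void send_work_handler(struct work_struct *work)
    {
        struct send_workarea *wa =
            container_of(work, struct send_workarea, work);

        /* ... act on wa->o and wa->queue_id ... */
    }

    static void send_to_runtime(struct obj *o, u32 queue_id)
    {
        struct send_workarea wa;

        INIT_WORK_ONSTACK(&wa.work, send_work_handler);
        wa.o = o;
        wa.queue_id = queue_id;

        schedule_work(&wa.work);
        flush_work(&wa.work);           /* wa is on our stack: wait */
        destroy_work_on_stack(&wa.work);
    }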
2218 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id) in kfd_process_device_data_by_id() argument
2223 for (i = 0; i < p->n_pdds; i++) { in kfd_process_device_data_by_id()
2224 struct kfd_process_device *pdd = p->pdds[i]; in kfd_process_device_data_by_id()
2233 int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id) in kfd_process_get_user_gpu_id() argument
2240 for (i = 0; i < p->n_pdds; i++) { in kfd_process_get_user_gpu_id()
2241 struct kfd_process_device *pdd = p->pdds[i]; in kfd_process_get_user_gpu_id()
2253 struct kfd_process *p; in kfd_debugfs_mqds_by_process() local
2259 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { in kfd_debugfs_mqds_by_process()
2261 p->lead_thread->tgid, p->pasid); in kfd_debugfs_mqds_by_process()
2263 mutex_lock(&p->mutex); in kfd_debugfs_mqds_by_process()
2264 r = pqm_debugfs_mqds(m, &p->pqm); in kfd_debugfs_mqds_by_process()
2265 mutex_unlock(&p->mutex); in kfd_debugfs_mqds_by_process()
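The debugfs dump (lines 2253-2265) walks the process table under its sleepable read lock and takes each process's own mutex around the queue dump, so one slow process does not stall the others. A sketch of a seq_file handler with per-object locking; SRCU is used because mutex_lock() may sleep, which plain rcu_read_lock() would forbid (names illustrative):

    #include <linux/hashtable.h>
    #include <linux/mutex.h>
    #include <linux/seq_file.h>
    #include <linux/srcu.h>

    struct obj {
        struct hlist_node node;
        struct mutex lock;
        u32 pasid;
    };

    static DEFINE_HASHTABLE(obj_table, 6);
    DEFINE_STATIC_SRCU(obj_srcu);

    static int objs_debugfs_show(struct seq_file *m, void *data)
    {
        struct obj *o;
        unsigned int bkt;
        int idx;

        idx = srcu_read_lock(&obj_srcu);
        hash_for_each_rcu(obj_table, bkt, o, node) {
            seq_printf(m, "Object with PASID 0x%x:\n", o->pasid);

            /* Per-object lock only; others remain usable. */
            mutex_lock(&o->lock);
            /* ... dump this object's queues into m ... */
            mutex_unlock(&o->lock);
        }
        srcu_read_unlock(&obj_srcu, idx);

        return 0;
    }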