/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>

#include "amd_shared.h"
#include "amdgpu.h"

#define KFD_MAX_RING_ENTRY_SIZE 8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id. Identifies which GPU the offset belongs to
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	62
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)
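/*
 * Illustrative sketch, not part of the driver interface: how the macros above
 * compose and decode an mmap offset (in pages, matching vm_pgoff). The helper
 * names are hypothetical.
 */
static inline uint64_t kfd_example_doorbell_mmap_offset(uint32_t gpu_id)
{
	/* MMAP type in BITS[63:62], gpu_id in BITS[61:46] */
	return KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
}

static inline uint32_t kfd_example_gpu_id_from_offset(uint64_t vm_pgoff)
{
	/* Recover BITS[61:46]; the type is (vm_pgoff & KFD_MMAP_TYPE_MASK) */
	return KFD_MMAP_GET_GPU_ID(vm_pgoff);
}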
/*
 * When working with cp scheduler we should assign the HIQ manually or via
 * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
 * definitions for Kaveri. In Kaveri only the queues of the first ME
 * participate in the cp scheduling; taking that into account, we set the
 * HIQ slot in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
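/*
 * Illustrative usage sketch (hypothetical struct and helper): the macro above
 * derives both the allocation size and the cast from the pointer itself, so
 * the type name is written only once.
 */
static inline void *kfd_example_alloc(void)
{
	struct kfd_example { int a; } *p = kfd_alloc_struct(p);

	return p;	/* NULL on allocation failure */
}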
#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for user-mode trap handler setup in daisy-chain mode.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE	\
	(KFD_MAX_NUM_OF_PROCESSES *		\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

#define KFD_UNMAP_LATENCY_MS	(4000)

/*
 * 512 = 0x200
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * 512 8-byte doorbell distance (i.e. one page away) ensures that SDMA RLC
 * (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly in
 * the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
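/*
 * Illustrative arithmetic for the mirror offset above (hypothetical helper):
 * with 8-byte doorbells, an index distance of 512 is 512 * 8 = 4096 bytes,
 * exactly one 4K page, so the (2*i+1) doorbell keeps the same lower 12
 * address bits as the (2*i) doorbell one page away.
 */
static inline unsigned int kfd_example_sdma_rlc_pair_index(unsigned int idx)
{
	return idx + KFD_QUEUE_DOORBELL_MIRROR_OFFSET;
}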
/**
 * enum kfd_ioctl_flags - KFD ioctl flags
 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
 * userspace can use a given ioctl.
 */
enum kfd_ioctl_flags {
	/*
	 * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
	 * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
	 * perform privileged operations and load arbitrary data into MQDs and
	 * eventually HQD registers when the queue is mapped by HWS. In order to
	 * prevent this we should perform additional security checks.
	 *
	 * This is equivalent to callers with the CHECKPOINT_RESTORE capability.
	 *
	 * Note: Since earlier versions of docker do not support CHECKPOINT_RESTORE,
	 * we also allow ioctls with SYS_ADMIN capability.
	 */
	KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
};
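/*
 * Illustrative sketch of the check described above (hypothetical helper,
 * assumes <linux/capability.h> is available): an ioctl flagged with
 * KFD_IOC_FLAG_CHECKPOINT_RESTORE is allowed for callers holding either
 * CAP_CHECKPOINT_RESTORE or, for older container runtimes, CAP_SYS_ADMIN.
 */
static inline bool kfd_example_ioctl_permitted(unsigned int ioctl_flags)
{
	if (!(ioctl_flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE))
		return true;
	return capable(CAP_CHECKPOINT_RESTORE) || capable(CAP_SYS_ADMIN);
}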
/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter used to simulate a large-BAR machine on
 * machines that are not large-BAR enabled.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization, can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;

/*
 * Don't evict process queues on vm fault
 */
extern int amdgpu_no_queue_eviction_on_vm_fault;

/* Enable eviction debug messages */
extern bool debug_evictions;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
	((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
	 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)))

struct kfd_node;

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	uint32_t gfx_target_version;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	uint32_t no_atomic_fw_version;
	unsigned int num_sdma_queues_per_engine;
	unsigned int num_reserved_sdma_queues_per_engine;
	uint64_t reserved_sdma_queues_bitmap;
};

unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev);

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

#define MAX_KFD_NODES	8

struct kfd_dev;

struct kfd_node {
	struct amdgpu_device *adev;	/* Duplicated here along with keeping
					 * a copy in kfd_dev to save a hop
					 */
	const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with
					      * keeping a copy in kfd_dev to
					      * save a hop
					      */
	struct kfd_vmid_info vm_info;
	unsigned int id;		/* topology stub index */
	unsigned int num_xcc_per_node;
	unsigned int start_xcc_id;	/* Starting XCC instance
					 * number for the node
					 */
	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	/* Global GWS resource shared between processes */
	void *gws;
	bool gws_debug_workaround;

	/* Clients watching SMI events */
	struct list_head smi_clients;
	spinlock_t smi_lock;
	uint32_t reset_seq_num;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;

	/* SPM process id */
	unsigned int spm_pasid;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	unsigned int compute_vmid_bitmap;

	struct kfd_dev *kfd;
};

struct kfd_dev {
	struct amdgpu_device *adev;

	struct kfd_device_info device_info;

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
					 * doorbell BAR to the first KFD
					 * doorbell in dwords. GFX reserves
					 * the segment before this offset.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_local_mem_info local_mem_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	bool init_complete;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t mec2_fw_version;
	uint16_t sdma_fw_version;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* Use IOMMU v2 flag */
	bool use_iommu_v2;

	/* Compute Profile ref. count */
	atomic_t compute_profile;

	struct ida doorbell_ida;
	unsigned int max_doorbell_slices;

	int noretry;

	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
	struct dev_pagemap pgmap;

	struct kfd_node *nodes[MAX_KFD_NODES];
	unsigned int num_nodes;
};
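/*
 * Illustrative sketch (hypothetical helper): with the split above, per-ASIC
 * state lives in struct kfd_dev while each partition gets its own struct
 * kfd_node, so device-wide code walks nodes[0..num_nodes). The VMID lookup
 * shown here is an assumption for illustration only.
 */
static inline struct kfd_node *kfd_example_node_by_vmid(struct kfd_dev *kfd,
							unsigned int vmid)
{
	unsigned int i;

	for (i = 0; i < kfd->num_nodes; i++)
		if (kfd->nodes[i]->compute_vmid_bitmap & (1U << vmid))
			return kfd->nodes[i];
	return NULL;
}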
enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);

/**
 * enum kfd_unmap_queues_filter - Enum for queue filters.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 * in the run list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 *
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3
};
/**
 * enum kfd_queue_type - Enum for various queue types.
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 */
enum kfd_queue_type  {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ,
	KFD_QUEUE_TYPE_SDMA_XGMI
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
	KFD_QUEUE_PRIORITY_MINIMUM = 0,
	KFD_QUEUE_PRIORITY_MAXIMUM = 15
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero in
 * this field indicates that the queue is not active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field updates automatically by the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packet written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update this field after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue means
 * that the queue can access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
 * @is_gws should be protected by the DQM lock, since changing it can yield the
 * possibility of updating DQM state on number of GWS queues.
 *
 * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
 * of the queue.
 *
 * This structure represents the queue properties for each queue no matter if
 * it's user mode or kernel mode queue.
 *
 */

struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_active;
	bool is_gws;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues*/
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted)
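/*
 * Illustrative sketch of the submission protocol documented above
 * (hypothetical helper, heavily simplified; real user-mode queues update
 * write_ptr from user space): the doorbell write must follow the write_ptr
 * update so the H/W never sees a doorbell for unpublished packets.
 */
static inline void kfd_example_submit(struct queue_properties *q,
				      uint32_t new_wptr)
{
	*q->write_ptr = new_wptr;		/* publish new packet count */
	wmb();					/* order wptr before doorbell */
	writel(new_wptr, q->doorbell_ptr);	/* notify the H/W */
}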
enum mqd_update_flag {
	UPDATE_FLAG_CU_MASK = 0,
};

struct mqd_update_info {
	union {
		struct {
			uint32_t count; /* Must be a multiple of 32 */
			uint32_t *ptr;
		} cu_mask;
	};
	enum mqd_update_flag update_flag;
};
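/*
 * Illustrative sketch (hypothetical helper and values): preparing a CU-mask
 * update for 64 CUs. The count is in bits and must be a multiple of 32
 * because the mask is passed around as an array of 32-bit words.
 */
static inline void kfd_example_fill_cu_mask(struct mqd_update_info *minfo,
					    uint32_t *mask_words)
{
	minfo->cu_mask.count = 64;	/* 64 CUs -> two 32-bit words */
	minfo->cu_mask.ptr = mask_words;
	minfo->update_flag = UPDATE_FLAG_CU_MASK;
}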
/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD (memory queue descriptor).
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine id
 * that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Points to gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process	*process;
	struct kfd_node		*device;
	void *gws;

	/* procfs */
	struct kobject kobj;

	void *gang_ctx_bo;
	uint64_t gang_ctx_gpu_addr;
	void *gang_ctx_cpu_ptr;

	struct amdgpu_bo *wptr_bo;
};

enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_DIQ,		/* for diq */
	KFD_MQD_TYPE_MAX
};

enum KFD_PIPE_PRIORITY {
	KFD_PIPE_PRIORITY_CS_LOW = 0,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process	*process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/* This flag tells us if this process has a GWS-capable
	 * queue that will be mapped into the runlist. It's
	 * possible to request a GWS BO, but not have the queue
	 * currently mapped, and this changes how the MAP_PROCESS
	 * PM4 packet is configured.
	 */
	bool mapped_gws_queue;

	/* All the memory management data should be here too */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	struct kgd_mem *cwsr_mem;
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	struct kgd_mem *ib_mem;
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
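/*
 * Illustrative round trip for the handle macros above (hypothetical helper
 * and values): packing gpu_id 0x1002 with idr_handle 7 yields
 * 0x0000100200000007, and the two GET_* macros recover the original fields.
 */
static inline bool kfd_example_handle_roundtrip(void)
{
	uint64_t handle = MAKE_HANDLE(0x1002, 7);

	return GET_GPU_ID(handle) == 0x1002 && GET_IDR_HANDLE(handle) == 7;
}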
enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

#define MAX_SYSFS_FILENAME_LEN 15

/*
 * SDMA counter runs at 100MHz frequency.
 * We display SDMA activity in microsecond granularity in sysfs.
 * As a result, the divisor is 100.
 */
#define SDMA_ACTIVITY_DIVISOR  100
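/*
 * Illustrative conversion for the divisor above (hypothetical helper): at
 * 100 MHz one tick is 10 ns, so ticks / 100 gives microseconds, e.g.
 * 250000 ticks -> 2500 us.
 */
static inline uint64_t kfd_example_sdma_ticks_to_us(uint64_t ticks)
{
	return ticks / SDMA_ACTIVITY_DIVISOR;
}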
/* Data that is per-process-per device. */
struct kfd_process_device {
	/* The device that owns this data. */
	struct kfd_node *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *drm_priv;
	atomic64_t tlb_seq;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in IOMMU callback
	 * function.
	 */
	bool already_dequeued;
	bool runtime_inuse;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* VRAM usage */
	uint64_t vram_usage;
	struct attribute attr_vram;
	char vram_filename[MAX_SYSFS_FILENAME_LEN];

	/* SDMA activity tracking */
	uint64_t sdma_past_activity_counter;
	struct attribute attr_sdma;
	char sdma_filename[MAX_SYSFS_FILENAME_LEN];

	/* Eviction activity tracking */
	uint64_t last_evict_timestamp;
	atomic64_t evict_duration_counter;
	struct attribute attr_evict;

	struct kobject *kobj_stats;
	unsigned int doorbell_index;

	/*
	 * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
	 * that is associated with the device encoded by "this" struct
	 * instance. The value reflects CU usage by all of the waves launched
	 * by this process on this device. A very important property of the
	 * occupancy parameter is that its value is a snapshot of current use.
	 *
	 * Following is to be noted regarding how this parameter is reported:
	 *
	 * The number of waves that a CU can launch is limited by a couple of
	 * parameters. These are encoded by the struct amdgpu_cu_info instance
	 * that is part of every device definition. For GFX9 devices this
	 * translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
	 * do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
	 * when they do use scratch memory. This could change for future
	 * devices and therefore this example should be considered as a guide.
	 *
	 * All CUs of a device are available for the process. This may not be
	 * true under certain conditions - e.g. CU masking.
	 *
	 * Finally, the number of CUs occupied by a process is affected by both
	 * the number of CUs a device has and the number of other competing
	 * processes.
	 */
	struct attribute attr_cu_occupancy;

	/* sysfs counters for GPU retry fault and page migration tracking */
	struct kobject *kobj_counters;
	struct attribute attr_faults;
	struct attribute attr_page_in;
	struct attribute attr_page_out;
	uint64_t faults;
	uint64_t page_in;
	uint64_t page_out;

	/*
	 * If this process has been checkpointed before, then the user
	 * application will use the original gpu_id on the
	 * checkpointed node to refer to this device.
	 */
	uint32_t user_gpu_id;

	void *proc_ctx_bo;
	uint64_t proc_ctx_gpu_addr;
	void *proc_ctx_cpu_ptr;
};
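/*
 * Illustrative arithmetic for the @cu_occupancy note above (hypothetical
 * helper; the amdgpu_cu_info field names come from the comment itself):
 * e.g. GFX9 with 4 SIMDs per CU and 10 waves per SIMD gives 4 * 10 = 40
 * waves per CU without scratch, capped at max_scratch_slots_per_cu (32)
 * with scratch.
 */
static inline uint32_t kfd_example_max_waves_per_cu(
		const struct amdgpu_cu_info *cu_info, bool uses_scratch)
{
	return uses_scratch ? cu_info->max_scratch_slots_per_cu :
			cu_info->simd_per_cu * cu_info->max_waves_per_simd;
}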
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

struct svm_range_list {
	struct mutex lock;
	struct rb_root_cached objects;
	struct list_head list;
	struct work_struct deferred_list_work;
	struct list_head deferred_range_list;
	struct list_head criu_svm_metadata_list;
	spinlock_t deferred_list_lock;
	atomic_t evicted_ranges;
	atomic_t drain_pagefaults;
	struct delayed_work restore_work;
	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
	struct task_struct *faulting_task;
};

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	u32 pasid;

	/*
	 * Array of kfd_process_device pointers,
	 * one for each device the process is using.
	 */
	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
	uint32_t n_pdds;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	u64 signal_handle;
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and new one will be created
	 * during restore
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;

	/* Kobj for our procfs */
	struct kobject *kobj;
	struct kobject *kobj_queues;
	struct attribute attr_pasid;

	/* shared virtual memory registered by this process */
	struct svm_range_list svms;

	bool xnack_enabled;

	atomic_t poison;
	/* Queues are in paused state because we are in the process of doing a CRIU checkpoint */
	bool queues_paused;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;

/**
 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
 *
 * @filep: pointer to file structure.
 * @p: amdkfd process pointer.
 * @data: pointer to arg that was copied from user.
 *
 * Return: returns ioctl completion code.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
bool kfd_dev_is_large_bar(struct kfd_node *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
int kfd_process_gpuid_from_adev(struct kfd_process *p,
			struct amdgpu_device *adev, uint32_t *gpuid,
			uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
				uint32_t gpuidx, uint32_t *gpuid) {
	return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
}
static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
				struct kfd_process *p, uint32_t gpuidx) {
	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
}
void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
							 uint32_t gpu_id);

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
						struct kfd_process *p);

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);

int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle);
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
					struct kfd_process_device *pdd,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
				unsigned int *doorbell_index);
void kfd_free_process_doorbells(struct kfd_dev *kfd,
				unsigned int doorbell_index);
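/*
 * Illustrative usage sketch for the kernel-doorbell API above (hypothetical
 * helper, error handling reduced to a NULL check): acquire a doorbell slot,
 * ring it, release it.
 */
static inline void kfd_example_ring_kernel_doorbell(struct kfd_dev *kfd,
						    u32 wptr)
{
	unsigned int db_off;
	u32 __iomem *db = kfd_get_kernel_doorbell(kfd, &db_off);

	if (db) {
		write_kernel_doorbell(db, wptr);
		kfd_release_kernel_doorbell(kfd, db);
	}
}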
/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_node *gpu);
int kfd_topology_remove_device(struct kfd_node *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
		uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
		uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);

/* Interrupts */
int kfd_interrupt_init(struct kfd_node *dev);
void kfd_interrupt_exit(struct kfd_node *dev);
bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_node *dev,
				const uint32_t *ih_ring_entry,
				uint32_t *patched_ihre, bool *flag);
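/*
 * Illustrative sketch (hypothetical helper, simplified): the declarations
 * above implement the ISR/worker split noted in struct kfd_node - filter an
 * IH entry, copy the interesting ones into the SW ring, and defer processing
 * to interrupt_work on ih_wq.
 */
static inline void kfd_example_handle_ih_entry(struct kfd_node *node,
					       const uint32_t *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;

	if (interrupt_is_wanted(node, ih_ring_entry, patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(node,
			is_patched ? patched_ihre : ih_ring_entry))
		queue_work(node->ih_wq, &node->interrupt_work);
}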
/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr);

/* CRIU */
/*
 * KFD_CRIU_PRIV_VERSION must be incremented each time a change is made to
 * any of the CRIU private structures:
 * kfd_criu_process_priv_data
 * kfd_criu_device_priv_data
 * kfd_criu_bo_priv_data
 * kfd_criu_queue_priv_data
 * kfd_criu_event_priv_data
 * kfd_criu_svm_range_priv_data
 */

#define KFD_CRIU_PRIV_VERSION 1

struct kfd_criu_process_priv_data {
	uint32_t version;
	uint32_t xnack_mode;
};

struct kfd_criu_device_priv_data {
	/* For future use */
	uint64_t reserved;
};

struct kfd_criu_bo_priv_data {
	uint64_t user_addr;
	uint32_t idr_handle;
	uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
};

/*
 * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data
 * and kfd_criu_svm_range_priv_data are the object type
 */
enum kfd_criu_object_type {
	KFD_CRIU_OBJECT_TYPE_QUEUE,
	KFD_CRIU_OBJECT_TYPE_EVENT,
	KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
};

struct kfd_criu_svm_range_priv_data {
	uint32_t object_type;
	uint64_t start_addr;
	uint64_t size;
	/* Variable length array of attributes */
	struct kfd_ioctl_svm_attribute attrs[];
};
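/*
 * Illustrative sketch (not built): because each queue/event/svm_range
 * private record starts with its object type, a restore loop can peek at
 * the first 4 bytes of the already copied-in blob to pick the right
 * handler. The helper name and buffer are assumptions of this example.
 */
#if 0
static int example_dispatch_criu_object(const uint8_t *priv, uint64_t size)
{
	uint32_t object_type;

	if (size < sizeof(object_type))
		return -EINVAL;
	memcpy(&object_type, priv, sizeof(object_type));

	switch (object_type) {
	case KFD_CRIU_OBJECT_TYPE_QUEUE:
		return 0;	/* would call the queue restore path */
	case KFD_CRIU_OBJECT_TYPE_EVENT:
		return 0;	/* would call the event restore path */
	case KFD_CRIU_OBJECT_TYPE_SVM_RANGE:
		return 0;	/* would call the SVM range restore path */
	default:
		return -EINVAL;
	}
}
#endif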
struct kfd_criu_queue_priv_data {
	uint32_t object_type;
	uint64_t q_address;
	uint64_t q_size;
	uint64_t read_ptr_addr;
	uint64_t write_ptr_addr;
	uint64_t doorbell_off;
	uint64_t eop_ring_buffer_address;
	uint64_t ctx_save_restore_area_address;
	uint32_t gpu_id;
	uint32_t type;
	uint32_t format;
	uint32_t q_id;
	uint32_t priority;
	uint32_t q_percent;
	uint32_t doorbell_id;
	uint32_t gws;
	uint32_t sdma_id;
	uint32_t eop_ring_buffer_size;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint32_t mqd_size;
};

struct kfd_criu_event_priv_data {
	uint32_t object_type;
	uint64_t user_handle;
	uint32_t event_id;
	uint32_t auto_reset;
	uint32_t type;
	uint32_t signaled;

	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};
};

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes);

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);

int kfd_criu_checkpoint_events(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_event(struct file *devkfd,
			   struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);
/* CRIU - End */
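/*
 * Illustrative sketch (not built): the checkpoint helpers above support a
 * two-pass protocol - first query how much private data the queues need so
 * userspace can size its buffer, then stream the records out at a
 * caller-maintained offset. Error paths and the surrounding CRIU ioctl
 * plumbing are omitted; the function name is an assumption of this example.
 */
#if 0
static int example_checkpoint_queues(struct kfd_process *p,
				     uint8_t __user *user_priv)
{
	uint32_t num_queues;
	uint64_t priv_size, offset = 0;
	int ret;

	/* Pass 1: report the total private-data size to the caller. */
	ret = kfd_process_get_queue_info(p, &num_queues, &priv_size);
	if (ret)
		return ret;

	/* Pass 2: write each queue's record, advancing the offset. */
	return kfd_criu_checkpoint_queues(p, user_priv, &offset);
}
#endif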
/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_node *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid,
			struct amdgpu_bo *wptr_bo,
			const struct kfd_criu_queue_priv_data *q_data,
			const void *restore_mqd,
			const void *restore_ctl_stack,
			uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
			struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
						unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size);

int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
			      uint64_t fence_value,
			      unsigned int timeout_ms);

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  u32 *mqd_size,
				  u32 *ctl_stack_size);
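/*
 * Illustrative sketch (not built) of the normal, non-CRIU queue lifecycle
 * through the process queue manager: on this path the CRIU-only parameters
 * are simply NULL. Queue properties setup is elided, and the wrapper
 * function itself is an assumption of this example.
 */
#if 0
static int example_user_queue_roundtrip(struct process_queue_manager *pqm,
					struct kfd_node *dev,
					struct file *filep,
					struct queue_properties *props)
{
	unsigned int qid;
	uint32_t doorbell_offset_in_process = 0;
	int ret;

	ret = pqm_create_queue(pqm, dev, filep, props, &qid,
			       NULL /* wptr_bo */, NULL /* q_data */,
			       NULL /* restore_mqd */,
			       NULL /* restore_ctl_stack */,
			       &doorbell_offset_in_process);
	if (ret)
		return ret;

	/* ... the queue runs; qid identifies it for later pqm_* calls ... */

	return pqm_destroy_queue(pqm, qid);
}
#endif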
/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
	bool is_over_subscription;

	const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint64_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int query_status_size;
	int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint64_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset);

void pm_release_ib(struct packet_manager *pm);

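/*
 * Illustrative sketch (not built) of the fence handshake behind the
 * KFD_FENCE_* values above: the driver primes a fence location with
 * KFD_FENCE_INIT, asks the scheduler firmware to write KFD_FENCE_COMPLETED
 * there via a query-status packet, then polls with
 * amdkfd_fence_wait_timeout(). The CPU/GPU address pair mirrors how the
 * device queue manager uses its fence BO, but the parameter names here are
 * assumptions of this example.
 */
#if 0
static int example_fence_roundtrip(struct packet_manager *pm,
				   uint64_t *fence_cpu_addr,
				   uint64_t fence_gpu_addr,
				   unsigned int timeout_ms)
{
	int ret;

	/* Prime the fence so a stale "completed" value cannot satisfy us. */
	*fence_cpu_addr = KFD_FENCE_INIT;

	ret = pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
	if (ret)
		return ret;

	/* Poll until the firmware writes the completion value or we time out. */
	return amdkfd_fence_wait_timeout(fence_cpu_addr,
					 KFD_FENCE_COMPLETED, timeout_ms);
}
#endif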
/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
extern const struct kfd_event_interrupt_class event_interrupt_class_v11;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

int kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t *user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_node *dev,
			    u32 pasid, unsigned long address,
			    bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(u32 pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);

int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
				struct kfd_vm_fault_info *info);

void kfd_signal_reset_event(struct kfd_node *dev);

void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
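/*
 * Illustrative sketch (not built) of the event API flow as driven from the
 * ioctl layer: create an event, block on it, then tear it down. The wait
 * payload normally arrives from userspace as an array of kfd_event_data
 * and is passed through untouched here; the wrapper function and timeout
 * value are assumptions of this example.
 */
#if 0
static int example_event_roundtrip(struct file *devkfd, struct kfd_process *p,
				   void __user *event_data)
{
	uint32_t event_id, trigger_data, slot;
	uint64_t page_offset;
	uint32_t timeout_ms = 1000, wait_result;
	int ret;

	ret = kfd_event_create(devkfd, p, KFD_IOC_EVENT_SIGNAL,
			       true /* auto_reset */, 0 /* node_id */,
			       &event_id, &trigger_data, &page_offset, &slot);
	if (ret)
		return ret;

	/* Blocks until the event signals, the timeout expires, or a signal. */
	ret = kfd_wait_on_events(p, 1, event_data, true /* all */,
				 &timeout_ms, &wait_result);

	kfd_event_destroy(p, event_id);
	return ret;
}
#endif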
/*
 * True for ASICs that must flush TLBs after unmapping: GC 9.4.0 and 9.4.2
 * always, GC 9.4.1 only from SDMA firmware version 18 onwards.
 */
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
		dev->adev->sdma.instance[0].fw_version >= 18) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}

bool kfd_is_locked(void);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
void kfd_dec_compute_active(struct kfd_node *dev);

/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
	struct drm_device *ddev = adev_to_drm(kfd->adev);

	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
					  ddev->render->index,
					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
	return 0;
#endif
}

/* Returns true if @node is the first partition node of its adapter */
static inline bool kfd_is_first_node(struct kfd_node *node)
{
	return (node == node->kfd->nodes[0]);
}

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif