/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>

#include "amd_shared.h"
#include "amdgpu.h"

#define KFD_MAX_RING_ENTRY_SIZE	8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16
/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id. Identifies the GPU to which the offset belongs
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 *  defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	62
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset)	((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)

/*
 * When working with the cp scheduler we should assign the HIQ manually or
 * via the amdgpu driver to a fixed hqd slot; here are the fixed HIQ hqd slot
 * definitions for Kaveri. In Kaveri only the first ME's queues participate
 * in cp scheduling, so with that in mind we set the HIQ slot in the
 * second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
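/*
 * Illustrative sketch of the KFD_MMAP_* encoding above (the gpu_id value is
 * made up). The encoded value is a page offset, so user space shifts it by
 * PAGE_SHIFT to get the byte offset it passes to mmap() on /dev/kfd:
 *
 *	uint64_t pgoff = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(0x1234);
 *	// mmap(..., fd, pgoff << PAGE_SHIFT);
 *	// the kernel side recovers the owner from vma->vm_pgoff:
 *	uint64_t gpu_id = KFD_MMAP_GET_GPU_ID(pgoff);	// == 0x1234
 */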
/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for user-mode trap handler setup in daisy-chain mode.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

#define KFD_UNMAP_LATENCY_MS	(4000)

#define KFD_MAX_SDMA_QUEUES	128

/*
 * 512 = 0x200
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * 512 8-byte doorbell distance (i.e. one page away) ensures that SDMA RLC
 * (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly in
 * the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
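/*
 * Worked example for the mirror offset above (values taken from the comment,
 * not additional driver logic): with 8-byte SDMA doorbells, RLC doorbell
 * (2*i+1) sits KFD_QUEUE_DOORBELL_MIRROR_OFFSET slots after doorbell (2*i):
 *
 *	512 slots * 8 bytes/slot = 4096 bytes = exactly one 4K page away
 *
 * which keeps both doorbells inside the range programmed in registers like
 * BIF_SDMA0_DOORBELL_RANGE.
 */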
/**
 * enum kfd_ioctl_flags - KFD ioctl flags
 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
 * userspace can use a given ioctl.
 */
enum kfd_ioctl_flags {
	/*
	 * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
	 * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
	 * perform privileged operations and load arbitrary data into MQDs and
	 * eventually HQD registers when the queue is mapped by HWS. In order
	 * to prevent this we should perform additional security checks.
	 *
	 * This is equivalent to callers with the CHECKPOINT_RESTORE
	 * capability.
	 *
	 * Note: Since earlier versions of docker do not support
	 * CHECKPOINT_RESTORE, we also allow ioctls with SYS_ADMIN capability.
	 */
	KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
};

/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter used to simulate a large-bar machine on machines
 * that do not have a large BAR.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization, can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;

/* Don't evict process queues on vm fault */
extern int amdgpu_no_queue_eviction_on_vm_fault;

/* Enable eviction debug messages */
extern bool debug_evictions;

extern struct mutex kfd_processes_mutex;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
	((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
	 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)))

struct kfd_node;
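/*
 * Illustrative sketch (hypothetical caller): the version helpers above let
 * code branch on the GC IP version rather than on chip names, e.g.
 *
 *	if (KFD_IS_SOC15(node))
 *		... take the SOC15+ path (8-byte SDMA doorbells) ...
 *	if (KFD_SUPPORT_XNACK_PER_PROCESS(node))
 *		... allow per-process XNACK selection ...
 */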
struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	uint32_t gfx_target_version;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	uint32_t no_atomic_fw_version;
	unsigned int num_sdma_queues_per_engine;
	unsigned int num_reserved_sdma_queues_per_engine;
	uint64_t reserved_sdma_queues_bitmap;
};

unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev);

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

#define MAX_KFD_NODES	8

struct kfd_dev;

struct kfd_node {
	unsigned int node_id;
	struct amdgpu_device *adev;	/* Duplicated here along with keeping
					 * a copy in kfd_dev to save a hop
					 */
	const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with
					      * keeping a copy in kfd_dev to
					      * save a hop
					      */
	struct kfd_vmid_info vm_info;
	unsigned int id;		/* topology stub index */
	uint32_t xcc_mask;		/* Instance mask of XCCs present */
	struct amdgpu_xcp *xcp;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;
	uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	/* Global GWS resource shared between processes */
	void *gws;
	bool gws_debug_workaround;

	/* Clients watching SMI events */
	struct list_head smi_clients;
	spinlock_t smi_lock;
	uint32_t reset_seq_num;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;

	/* SPM process id */
	unsigned int spm_pasid;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	unsigned int compute_vmid_bitmap;

	struct kfd_local_mem_info local_mem_info;

	struct kfd_dev *kfd;
};

struct kfd_dev {
	struct amdgpu_device *adev;

	struct kfd_device_info device_info;

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
					 * doorbell BAR to the first KFD
					 * doorbell in dwords. GFX reserves
					 * the segment before this offset.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* Pointer to the doorbell page
					   * used by the kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	bool init_complete;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t mec2_fw_version;
	uint16_t sdma_fw_version;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* Use IOMMU v2 flag */
	bool use_iommu_v2;

	/* Compute Profile ref. count */
	atomic_t compute_profile;

	struct ida doorbell_ida;
	unsigned int max_doorbell_slices;

	int noretry;

	struct kfd_node *nodes[MAX_KFD_NODES];
	unsigned int num_nodes;
};

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);

/**
 * enum kfd_unmap_queues_filter - Enum for queue filters.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 * in the run list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 *
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3
};

/**
 * enum kfd_queue_type - Enum for various queue types.
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ,
	KFD_QUEUE_TYPE_SDMA_XGMI
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
	KFD_QUEUE_PRIORITY_MINIMUM = 0,
	KFD_QUEUE_PRIORITY_MAXIMUM = 15
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field indicates that the queue is not active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field is updated automatically by the
 * H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of a new packet written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update this field after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue.
 * An interop queue can access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
 * @is_gws should be protected by the DQM lock, since changing it can yield the
 * possibility of updating DQM state on the number of GWS queues.
 *
 * @vmid: If the scheduling mode is no cp scheduling, the field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * whether it's a user mode or kernel mode queue.
 *
 */

struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_active;
	bool is_gws;
	uint32_t pm4_target_xcc;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted)

enum mqd_update_flag {
	UPDATE_FLAG_CU_MASK = 0,
};

struct mqd_update_info {
	union {
		struct {
			uint32_t count; /* Must be a multiple of 32 */
			uint32_t *ptr;
		} cu_mask;
	};
	enum mqd_update_flag update_flag;
};
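/*
 * Illustrative sketch (hypothetical caller, mask values made up): packing a
 * CU-mask update in struct mqd_update_info before handing it to the MQD
 * manager. count counts bits, so it must be a multiple of 32:
 *
 *	uint32_t mask[2] = { 0xffffffff, 0x0000ffff };
 *	struct mqd_update_info minfo = {
 *		.cu_mask.count = 64,
 *		.cu_mask.ptr = mask,
 *		.update_flag = UPDATE_FLAG_CU_MASK,
 *	};
 */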
/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD (memory queue descriptor).
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Points to the gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process *process;
	struct kfd_node *device;
	void *gws;

	/* procfs */
	struct kobject kobj;

	void *gang_ctx_bo;
	uint64_t gang_ctx_gpu_addr;
	void *gang_ctx_cpu_ptr;

	struct amdgpu_bo *wptr_bo;
};

enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_DIQ,		/* for diq */
	KFD_MQD_TYPE_MAX
};

enum KFD_PIPE_PRIORITY {
	KFD_PIPE_PRIORITY_CS_LOW = 0,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/* This flag tells us if this process has a GWS-capable
	 * queue that will be mapped into the runlist. It's
	 * possible to request a GWS BO, but not have the queue
	 * currently mapped, and this changes how the MAP_PROCESS
	 * PM4 packet is configured.
	 */
	bool mapped_gws_queue;

	/* All the memory management data should be here too */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	struct kgd_mem *cwsr_mem;
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	struct kgd_mem *ib_mem;
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
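/*
 * Illustrative sketch (made-up values): composing and splitting a buffer
 * handle with the helpers above:
 *
 *	uint64_t handle = MAKE_HANDLE(0x1234, 7);  // 0x0000123400000007
 *	uint32_t gpu_id = GET_GPU_ID(handle);      // 0x1234
 *	uint32_t idr    = GET_IDR_HANDLE(handle);  // 7
 */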
enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

#define MAX_SYSFS_FILENAME_LEN 15

/*
 * SDMA counter runs at 100MHz frequency.
 * We display SDMA activity in microsecond granularity in sysfs.
 * As a result, the divisor is 100.
 */
#define SDMA_ACTIVITY_DIVISOR  100

/* Data that is per-process-per device. */
struct kfd_process_device {
	/* The device that owns this data. */
	struct kfd_node *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *drm_priv;
	atomic64_t tlb_seq;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell whether the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in the IOMMU callback
	 * function.
	 */
	bool already_dequeued;
	bool runtime_inuse;

	/* Is this process/pasid bound to this device?
	 * (amd_iommu_bind_pasid)
	 */
	enum kfd_pdd_bound bound;

	/* VRAM usage */
	uint64_t vram_usage;
	struct attribute attr_vram;
	char vram_filename[MAX_SYSFS_FILENAME_LEN];

	/* SDMA activity tracking */
	uint64_t sdma_past_activity_counter;
	struct attribute attr_sdma;
	char sdma_filename[MAX_SYSFS_FILENAME_LEN];

	/* Eviction activity tracking */
	uint64_t last_evict_timestamp;
	atomic64_t evict_duration_counter;
	struct attribute attr_evict;

	struct kobject *kobj_stats;
	unsigned int doorbell_index;

	/*
	 * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
	 * that is associated with the device encoded by "this" struct
	 * instance. The value reflects CU usage by all of the waves launched
	 * by this process on this device. A very important property of the
	 * occupancy parameter is that its value is a snapshot of current use.
	 *
	 * Following is to be noted regarding how this parameter is reported:
	 *
	 * The number of waves that a CU can launch is limited by a couple of
	 * parameters. These are encoded by the struct amdgpu_cu_info instance
	 * that is part of every device definition. For GFX9 devices this
	 * translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
	 * do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
	 * when they do use scratch memory. This could change for future
	 * devices and therefore this example should be considered as a guide.
	 *
	 * All CU's of a device are available for the process. This may not be
	 * true under certain conditions - e.g. CU masking.
	 *
	 * Finally, the number of CU's occupied by a process is affected both
	 * by the number of CU's the device has and by the number of other
	 * competing processes.
	 */
	struct attribute attr_cu_occupancy;

	/* sysfs counters for GPU retry fault and page migration tracking */
	struct kobject *kobj_counters;
	struct attribute attr_faults;
	struct attribute attr_page_in;
	struct attribute attr_page_out;
	uint64_t faults;
	uint64_t page_in;
	uint64_t page_out;

	/* Tracks debug per-vmid request settings */
	uint32_t spi_dbg_override;
	uint32_t spi_dbg_launch_mode;
	uint32_t watch_points[4];

	/*
	 * If this process has been checkpointed before, then the user
	 * application will use the original gpu_id on the
	 * checkpointed node to refer to this device.
	 */
	uint32_t user_gpu_id;

	void *proc_ctx_bo;
	uint64_t proc_ctx_gpu_addr;
	void *proc_ctx_cpu_ptr;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

struct svm_range_list {
	struct mutex lock;
	struct rb_root_cached objects;
	struct list_head list;
	struct work_struct deferred_list_work;
	struct list_head deferred_range_list;
	struct list_head criu_svm_metadata_list;
	spinlock_t deferred_list_lock;
	atomic_t evicted_ranges;
	atomic_t drain_pagefaults;
	struct delayed_work restore_work;
	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
	struct task_struct *faulting_task;
};

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	u32 pasid;

	/*
	 * Array of kfd_process_device pointers,
	 * one for each device the process is using.
	 */
	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
	uint32_t n_pdds;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	u64 signal_handle;
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore.
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;

	/* Indicates device process is debug attached with reserved vmid.
	 */
	bool debug_trap_enabled;

	/* per-process-per device debug event fd file */
	struct file *dbg_ev_file;

	/* If the process is a kfd debugger, we need to know so we can clean
	 * up at exit time. If a process enables debugging on itself, it does
	 * its own clean-up, so we don't set the flag here. We track this by
	 * counting the number of processes this process is debugging.
	 */
	atomic_t debugged_process_count;

	/* If the process is being debugged, this is the debugger process */
	struct kfd_process *debugger_process;

	/* Kobj for our procfs */
	struct kobject *kobj;
	struct kobject *kobj_queues;
	struct attribute attr_pasid;

	/* Keep track of cwsr init */
	bool has_cwsr;

	/* Exception code enable mask and status */
	uint64_t exception_enable_mask;

	/* shared virtual memory registered by this process */
	struct svm_range_list svms;

	bool xnack_enabled;

	/* Tracks debug per-vmid request for debug flags */
	bool dbg_flags;

	atomic_t poison;
	/* Queues are in paused state because we are in the process of doing
	 * a CRIU checkpoint
	 */
	bool queues_paused;

	/* Tracks runtime enable status */
	struct kfd_runtime_info runtime_info;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;
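/*
 * Illustrative sketch (mirroring the lookup pattern in kfd_process.c, shown
 * here as an assumption): readers walk kfd_processes_table under SRCU, e.g.
 *
 *	int idx = srcu_read_lock(&kfd_processes_srcu);
 *	hash_for_each_rcu(kfd_processes_table, tmp, p, kfd_processes)
 *		... inspect p; take a kref if it must outlive the lock ...
 *	srcu_read_unlock(&kfd_processes_srcu, idx);
 */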
/**
 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
 *
 * @filep: pointer to file structure.
 * @p: amdkfd process pointer.
 * @data: pointer to arg that was copied from user.
 *
 * Return: returns ioctl completion code.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
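/*
 * Illustrative sketch (hypothetical handler name, not an entry copied from
 * the real table in kfd_chardev.c): a descriptor ties an ioctl number to its
 * handler and to the kfd_ioctl_flags checked before dispatch:
 *
 *	static const struct amdkfd_ioctl_desc desc = {
 *		.cmd = AMDKFD_IOC_CRIU_OP,
 *		.flags = KFD_IOC_FLAG_CHECKPOINT_RESTORE,
 *		.func = kfd_ioctl_criu,		// hypothetical handler
 *		.name = "AMDKFD_IOC_CRIU_OP",
 *	};
 */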
bool kfd_dev_is_large_bar(struct kfd_node *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct task_struct *thread);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
				uint32_t *gpuid, uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
				uint32_t gpuidx, uint32_t *gpuid) {
	return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
}
static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
				struct kfd_process *p, uint32_t gpuidx) {
	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
}

void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
							 uint32_t gpu_id);

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
						      struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
						       struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
							  struct kfd_process *p);

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);

int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					   int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle);
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);
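/*
 * Illustrative sketch (hypothetical flow): a buffer object is tracked in the
 * per-device IDR, so its typical lifecycle with the handle API above is:
 *
 *	int handle = kfd_process_device_create_obj_handle(pdd, mem);
 *	...
 *	void *mem = kfd_process_device_translate_handle(pdd, handle);
 *	...
 *	kfd_process_device_remove_obj_handle(pdd, handle);
 */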
/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
					struct kfd_process_device *pdd,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
				unsigned int *doorbell_index);
void kfd_free_process_doorbells(struct kfd_dev *kfd,
				unsigned int doorbell_index);

/* GTT Sub-Allocator */
int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_node *gpu);
int kfd_topology_remove_device(struct kfd_node *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
					uint32_t vmid)
{
	return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
	       (node->compute_vmid_bitmap & (1 << vmid)) != 0;
}
static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
					uint32_t node_id, uint32_t vmid) {
	struct kfd_dev *dev = adev->kfd.dev;
	uint32_t i;

	if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
		return dev->nodes[0];

	for (i = 0; i < dev->num_nodes; i++)
		if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid))
			return dev->nodes[i];

	return NULL;
}
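/*
 * Illustrative sketch (hypothetical interrupt path): on multi-node GFX 9.4.3
 * parts, an IH client can use the helper above to route an entry to the node
 * that owns the (node_id, vmid) pair:
 *
 *	struct kfd_node *node = kfd_node_by_irq_ids(adev, node_id, vmid);
 *	if (node && node->interrupts_active)
 *		enqueue_ih_ring_entry(node, ih_ring_entry);
 */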
int kfd_numa_node_to_apic_id(int numa_node_id);
void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);

/* Interrupts */
int kfd_interrupt_init(struct kfd_node *dev);
void kfd_interrupt_exit(struct kfd_node *dev);
bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_node *dev,
			 const uint32_t *ih_ring_entry,
			 uint32_t *patched_ihre, bool *flag);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr);

/* CWSR initialization */
int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);

/* CRIU */
/*
 * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any of
 * the CRIU private structures:
 * kfd_criu_process_priv_data
 * kfd_criu_device_priv_data
 * kfd_criu_bo_priv_data
 * kfd_criu_queue_priv_data
 * kfd_criu_event_priv_data
 * kfd_criu_svm_range_priv_data
 */

#define KFD_CRIU_PRIV_VERSION 1

struct kfd_criu_process_priv_data {
	uint32_t version;
	uint32_t xnack_mode;
};

struct kfd_criu_device_priv_data {
	/* For future use */
	uint64_t reserved;
};

struct kfd_criu_bo_priv_data {
	uint64_t user_addr;
	uint32_t idr_handle;
	uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
};

/*
 * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data and
 * kfd_criu_svm_range_priv_data are the object type
 */
enum kfd_criu_object_type {
	KFD_CRIU_OBJECT_TYPE_QUEUE,
	KFD_CRIU_OBJECT_TYPE_EVENT,
	KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
};

struct kfd_criu_svm_range_priv_data {
	uint32_t object_type;
	uint64_t start_addr;
	uint64_t size;
	/* Variable length array of attributes */
	struct kfd_ioctl_svm_attribute attrs[];
};
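/*
 * Illustrative sketch: the checkpoint blob for one SVM range is the fixed
 * header above plus a trailing attribute array, so its size for a
 * hypothetical attribute count "nattrs" is simply:
 *
 *	size_t priv_size = sizeof(struct kfd_criu_svm_range_priv_data) +
 *			   nattrs * sizeof(struct kfd_ioctl_svm_attribute);
 */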
struct kfd_criu_queue_priv_data {
	uint32_t object_type;
	uint64_t q_address;
	uint64_t q_size;
	uint64_t read_ptr_addr;
	uint64_t write_ptr_addr;
	uint64_t doorbell_off;
	uint64_t eop_ring_buffer_address;
	uint64_t ctx_save_restore_area_address;
	uint32_t gpu_id;
	uint32_t type;
	uint32_t format;
	uint32_t q_id;
	uint32_t priority;
	uint32_t q_percent;
	uint32_t doorbell_id;
	uint32_t gws;
	uint32_t sdma_id;
	uint32_t eop_ring_buffer_size;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint32_t mqd_size;
};

struct kfd_criu_event_priv_data {
	uint32_t object_type;
	uint64_t user_handle;
	uint32_t event_id;
	uint32_t auto_reset;
	uint32_t type;
	uint32_t signaled;

	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};
};

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes);

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);

int kfd_criu_checkpoint_events(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_event(struct file *devkfd,
			   struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);
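/*
 * Illustrative sketch (an outline under assumptions, not the driver's ioctl
 * implementation): a restore path can dispatch on the object type stored in
 * the first 4 bytes of each private-data blob before calling the matching
 * kfd_criu_restore_*() helper above.
 *
 *	uint32_t object_type;
 *
 *	if (copy_from_user(&object_type, user_priv_data + priv_data_offset,
 *			   sizeof(object_type)))
 *		return -EFAULT;
 *
 *	switch (object_type) {
 *	case KFD_CRIU_OBJECT_TYPE_QUEUE:
 *		ret = kfd_criu_restore_queue(p, user_priv_data,
 *					     &priv_data_offset,
 *					     max_priv_data_size);
 *		break;
 *	case KFD_CRIU_OBJECT_TYPE_EVENT:
 *		ret = kfd_criu_restore_event(devkfd, p, user_priv_data,
 *					     &priv_data_offset,
 *					     max_priv_data_size);
 *		break;
 *	...
 *	}
 */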
/* CRIU - End */

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
				       enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
		     struct kfd_node *dev,
		     struct file *f,
		     struct queue_properties *properties,
		     unsigned int *qid,
		     struct amdgpu_bo *wptr_bo,
		     const struct kfd_criu_queue_priv_data *q_data,
		     const void *restore_mqd,
		     const void *restore_ctl_stack,
		     uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
				struct queue_properties *p);
int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
		   struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
		void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
					  unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
				 unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size);
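/*
 * Illustrative sketch (assumptions noted): creating and destroying a user
 * queue through the process queue manager.  In a plain (non-CRIU) creation
 * the wptr/restore arguments are assumed to be NULL; "pqm", "dev", "filep"
 * and "props" are placeholders supplied by the caller.
 *
 *	unsigned int qid;
 *	uint32_t doorbell_off;
 *	int err;
 *
 *	err = pqm_create_queue(pqm, dev, filep, &props, &qid,
 *			       NULL, NULL, NULL, NULL, &doorbell_off);
 *	if (err)
 *		return err;
 *	...
 *	pqm_destroy_queue(pqm, qid);
 */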
int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
			      uint64_t fence_value,
			      unsigned int timeout_ms);

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  u32 *mqd_size,
				  u32 *ctl_stack_size);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
	bool is_over_subscription;

	const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset);
	int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
			uint32_t grace_period);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint64_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int set_grace_period_size;
	int query_status_size;
	int release_mem_size;
};
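/*
 * Illustrative sketch: the packet manager stays ASIC-agnostic by sizing and
 * building packets purely through the hooks above.  Roughly, for a map-queues
 * packet (buffer acquisition from pm->priv_queue omitted here):
 *
 *	uint32_t *buffer;	// at least pm->pmf->map_queues_size bytes
 *
 *	retval = pm->pmf->map_queues(pm, buffer, q, is_static);
 *
 * kfd_vi_pm_funcs, kfd_v9_pm_funcs and kfd_aldebaran_pm_funcs below provide
 * the per-ASIC implementations of these hooks.
 */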
extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
			  struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			 uint64_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset);

void pm_release_ib(struct packet_manager *pm);

int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);

/* The following PM functions can be shared between VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
extern const struct kfd_event_interrupt_class event_interrupt_class_v11;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

int kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t *user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_node *dev,
			    u32 pasid, unsigned long address,
			    bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(u32 pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);
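/*
 * Illustrative sketch (assumption-laden, not the ioctl implementation):
 * creating an auto-reset signal event on node 0 and then blocking on it.
 * KFD_IOC_EVENT_SIGNAL comes from the uapi header; "event_data" stands for a
 * user-space array of event descriptors passed to kfd_wait_on_events().
 *
 *	uint32_t event_id, trigger_data, slot, wait_result;
 *	uint64_t page_off;
 *	uint32_t timeout_ms = 1000;
 *
 *	ret = kfd_event_create(devkfd, p, KFD_IOC_EVENT_SIGNAL, true, 0,
 *			       &event_id, &trigger_data, &page_off, &slot);
 *	...
 *	ret = kfd_wait_on_events(p, 1, event_data, true, &timeout_ms,
 *				 &wait_result);
 */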
int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
			       struct kfd_vm_fault_info *info);

void kfd_signal_reset_event(struct kfd_node *dev);

void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);

static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}

bool kfd_is_locked(void);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
void kfd_dec_compute_active(struct kfd_node *dev);

/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
	struct drm_device *ddev = adev_to_drm(kfd->adev);

	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
					  ddev->render->index,
					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
	return 0;
#endif
}

static inline bool kfd_is_first_node(struct kfd_node *node)
{
	return (node == node->kfd->nodes[0]);
}
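/*
 * Illustrative sketch: ioctl/mmap style entry points are expected to gate
 * device access on the cgroup check above before touching the node; the
 * error code below is this example's choice, not necessarily what callers use.
 *
 *	if (kfd_devcgroup_check_permission(node))
 *		return -EPERM;
 */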
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif