/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>

#include "amd_shared.h"
#include "amdgpu.h"

#define KFD_MAX_RING_ENTRY_SIZE	8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id. Identifies the GPU to which the offset belongs
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	62
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)
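
/*
 * Illustrative sketch (not part of the driver; the kfd_example_* name is
 * hypothetical): how the fields above combine into a single mmap offset.
 * The result is a page offset, per the vm_pgoff note above.
 */
static inline uint64_t kfd_example_doorbell_mmap_offset(uint32_t gpu_id)
{
	/* type bits select the doorbell aperture, gpu_id selects the GPU */
	return KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
}
/*
 * Decoding goes the other way: KFD_MMAP_GET_GPU_ID(offset) recovers the
 * gpu_id (assuming it fits in KFD_GPU_ID_HASH_WIDTH bits), and
 * (offset & KFD_MMAP_TYPE_MASK) identifies the MMAP type.
 */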

/*
 * When working with cp scheduler we should assign the HIQ manually or via
 * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
 * definitions for Kaveri. In Kaveri only the first ME's queues participate
 * in cp scheduling; with that in mind, we set the HIQ slot in the
 * second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for user-mode trap handler setup in daisy-chain mode.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

#define KFD_UNMAP_LATENCY_MS	(4000)

#define KFD_MAX_SDMA_QUEUES	128

/*
 * 512 = 0x200
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * 512 8-byte doorbell distance (i.e. one page away) ensures that SDMA RLC
 * (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly in
 * the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
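
/*
 * Illustrative arithmetic for the mirror offset above (hypothetical helper,
 * assuming the 8-byte SOC15 SDMA doorbells described in the comment):
 * 512 slots * 8 bytes = 4096 bytes, so the (2*i+1) doorbell sits exactly
 * one 4 KiB page after its (2*i) partner.
 */
static inline unsigned int
kfd_example_sdma_mirror_doorbell(unsigned int doorbell_index)
{
	return doorbell_index + KFD_QUEUE_DOORBELL_MIRROR_OFFSET;
}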

/**
 * enum kfd_ioctl_flags - KFD ioctl flags
 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
 * userspace can use a given ioctl.
 */
enum kfd_ioctl_flags {
	/*
	 * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
	 * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
	 * perform privileged operations and load arbitrary data into MQDs and
	 * eventually HQD registers when the queue is mapped by HWS. In order to
	 * prevent this we should perform additional security checks.
	 *
	 * This is equivalent to callers with the CHECKPOINT_RESTORE capability.
	 *
	 * Note: Since earlier versions of docker do not support CHECKPOINT_RESTORE,
	 * we also allow ioctls with SYS_ADMIN capability.
	 */
	KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
};

/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter used to simulate a large-bar machine on
 * machines without large bar enabled.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization; can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;

/*
 * Don't evict process queues on vm fault
 */
extern int amdgpu_no_queue_eviction_on_vm_fault;

/* Enable eviction debug messages */
extern bool debug_evictions;

extern struct mutex kfd_processes_mutex;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
	((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
	(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)))

struct kfd_node;

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	uint32_t gfx_target_version;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	uint32_t no_atomic_fw_version;
	unsigned int num_sdma_queues_per_engine;
	unsigned int num_reserved_sdma_queues_per_engine;
	uint64_t reserved_sdma_queues_bitmap;
};

unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev);

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

#define MAX_KFD_NODES	8

struct kfd_dev;

struct kfd_node {
	unsigned int node_id;
	struct amdgpu_device *adev;	/* Duplicated here along with keeping
					 * a copy in kfd_dev to save a hop
					 */
	const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with
					      * keeping a copy in kfd_dev to
					      * save a hop
					      */
	struct kfd_vmid_info vm_info;
	unsigned int id;		/* topology stub index */
	uint32_t xcc_mask;		/* Instance mask of XCCs present */
	struct amdgpu_xcp *xcp;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;
	uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	/* Global GWS resource shared between processes */
	void *gws;
	bool gws_debug_workaround;

	/* Clients watching SMI events */
	struct list_head smi_clients;
	spinlock_t smi_lock;
	uint32_t reset_seq_num;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;

	/* SPM process id */
	unsigned int spm_pasid;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	unsigned int compute_vmid_bitmap;

	struct kfd_local_mem_info local_mem_info;

	struct kfd_dev *kfd;
};
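
/*
 * Illustrative use of the IP-version helpers defined earlier (hypothetical
 * function, not part of the driver): code that must branch per GFX
 * generation typically compares KFD_GC_VERSION() against IP_VERSION()
 * values, or uses KFD_IS_SOC15() for the "GFX v9 or newer" test.
 */
static inline bool kfd_example_node_needs_soc15_path(struct kfd_node *node)
{
	return KFD_IS_SOC15(node);
}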

struct kfd_dev {
	struct amdgpu_device *adev;

	struct kfd_device_info device_info;

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
					 * doorbell BAR to the first KFD
					 * doorbell in dwords. GFX reserves
					 * the segment before this offset.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	bool init_complete;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t mec2_fw_version;
	uint16_t sdma_fw_version;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* Use IOMMU v2 flag */
	bool use_iommu_v2;

	/* Compute Profile ref. count */
	atomic_t compute_profile;

	struct ida doorbell_ida;
	unsigned int max_doorbell_slices;

	int noretry;

	struct kfd_node *nodes[MAX_KFD_NODES];
	unsigned int num_nodes;

	/* Track per device allocated watch points */
	uint32_t alloc_watch_ids;
	spinlock_t watch_points_lock;
};

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);

/**
 * enum kfd_unmap_queues_filter - Enum for queue filters.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 * in the run list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to
 * a specific process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3
};

/**
 * enum kfd_queue_type - Enum for various queue types.
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 */
enum kfd_queue_type  {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ,
	KFD_QUEUE_TYPE_SDMA_XGMI
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
	KFD_QUEUE_PRIORITY_MINIMUM = 0,
	KFD_QUEUE_PRIORITY_MAXIMUM = 15
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to 0xf, where 0xf is the highest
 * priority. Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field marks the queue as inactive.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field is updated automatically by the
 * H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update this field after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue means
 * that the queue can access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
 * @is_gws should be protected by the DQM lock, since changing it can yield the
 * possibility of updating DQM state on number of GWS queues.
 *
 * @vmid: If the scheduling mode is no cp scheduling, the field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue no matter if
 * it's user mode or kernel mode queue.
 *
 */

struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_suspended;
	bool is_being_destroyed;
	bool is_active;
	bool is_gws;
	uint32_t pm4_target_xcc;
	bool is_dbg_wa;
	bool is_user_cu_masked;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
	uint64_t exception_status;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted &&		\
			    !(q).is_suspended)
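
/*
 * Conceptual submission sketch (illustrative only; the real sequence is
 * performed from user space and kfd_example_* is hypothetical): per the
 * @doorbell_ptr documentation above, the new write_ptr must be published
 * before the doorbell is rung, so the HW never samples a stale packet count.
 */
static inline void kfd_example_submit(struct queue_properties *q,
				      uint32_t new_write_ptr)
{
	WRITE_ONCE(*q->write_ptr, new_write_ptr);	/* publish count */
	wmb();						/* order vs. doorbell */
	writel(new_write_ptr, q->doorbell_ptr);		/* notify the HW */
}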

enum mqd_update_flag {
	UPDATE_FLAG_DBG_WA_ENABLE = 1,
	UPDATE_FLAG_DBG_WA_DISABLE = 2,
};

struct mqd_update_info {
	union {
		struct {
			uint32_t count; /* Must be a multiple of 32 */
			uint32_t *ptr;
		} cu_mask;
	};
	enum mqd_update_flag update_flag;
};

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD (memory queue descriptor).
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine id
 * that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Pointing to gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process	*process;
	struct kfd_node		*device;
	void *gws;

	/* procfs */
	struct kobject kobj;

	void *gang_ctx_bo;
	uint64_t gang_ctx_gpu_addr;
	void *gang_ctx_cpu_ptr;

	struct amdgpu_bo *wptr_bo;
};

enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_DIQ,		/* for diq */
	KFD_MQD_TYPE_MAX
};

enum KFD_PIPE_PRIORITY {
	KFD_PIPE_PRIORITY_CS_LOW = 0,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process	*process;
	struct list_head	queues;
	unsigned long		*queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/* This flag tells us if this process has a GWS-capable
	 * queue that will be mapped into the runlist. It's
	 * possible to request a GWS BO, but not have the queue
	 * currently mapped, and this changes how the MAP_PROCESS
	 * PM4 packet is configured.
	 */
	bool mapped_gws_queue;

	/* All the memory management data should be here too */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	struct kgd_mem *cwsr_mem;
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	struct kgd_mem *ib_mem;
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
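
/*
 * Illustrative round trip through the handle helpers above (hypothetical
 * function, not part of the driver): packing and unpacking are inverses
 * as long as both inputs are plain 32-bit values.
 */
static inline bool kfd_example_handle_roundtrip(uint32_t gpu_id,
						uint32_t idr_handle)
{
	uint64_t handle = MAKE_HANDLE(gpu_id, idr_handle);

	return GET_GPU_ID(handle) == gpu_id &&
	       GET_IDR_HANDLE(handle) == idr_handle;
}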

enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

#define MAX_SYSFS_FILENAME_LEN 15

/*
 * SDMA counter runs at 100 MHz frequency.
 * We display SDMA activity in microsecond granularity in sysfs.
 * As a result, the divisor is 100.
 */
#define SDMA_ACTIVITY_DIVISOR  100
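
/*
 * Illustrative conversion (hypothetical helper): at 100 MHz, 100 counter
 * ticks elapse per microsecond, so dividing the activity counter by
 * SDMA_ACTIVITY_DIVISOR yields the microsecond value shown in sysfs.
 */
static inline uint64_t kfd_example_sdma_ticks_to_us(uint64_t ticks)
{
	return ticks / SDMA_ACTIVITY_DIVISOR;
}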

/* Data that is per-process-per device. */
struct kfd_process_device {
	/* The device that owns this data. */
	struct kfd_node *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *drm_priv;
	atomic64_t tlb_seq;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in IOMMU callback
	 * function.
	 */
	bool already_dequeued;
	bool runtime_inuse;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* VRAM usage */
	uint64_t vram_usage;
	struct attribute attr_vram;
	char vram_filename[MAX_SYSFS_FILENAME_LEN];

	/* SDMA activity tracking */
	uint64_t sdma_past_activity_counter;
	struct attribute attr_sdma;
	char sdma_filename[MAX_SYSFS_FILENAME_LEN];

	/* Eviction activity tracking */
	uint64_t last_evict_timestamp;
	atomic64_t evict_duration_counter;
	struct attribute attr_evict;

	struct kobject *kobj_stats;
	unsigned int doorbell_index;

	/*
	 * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
	 * that is associated with device encoded by "this" struct instance. The
	 * value reflects CU usage by all of the waves launched by this process
	 * on this device. A very important property of occupancy parameter is
	 * that its value is a snapshot of current use.
	 *
	 * Following is to be noted regarding how this parameter is reported:
	 *
	 * The number of waves that a CU can launch is limited by a couple of
	 * parameters. These are encoded by the struct amdgpu_cu_info instance
	 * that is part of every device definition. For GFX9 devices this
	 * translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
	 * do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
	 * when they do use scratch memory. This could change for future
	 * devices and therefore this example should be considered as a guide.
	 *
	 * All CUs of a device are available for the process. This may not be
	 * true under certain conditions - e.g. CU masking.
	 *
	 * Finally, the number of CUs occupied by a process is affected both
	 * by the number of CUs the device has and by the number of other
	 * competing processes.
	 */
	struct attribute attr_cu_occupancy;

	/* sysfs counters for GPU retry fault and page migration tracking */
	struct kobject *kobj_counters;
	struct attribute attr_faults;
	struct attribute attr_page_in;
	struct attribute attr_page_out;
	uint64_t faults;
	uint64_t page_in;
	uint64_t page_out;

	/* Exception code status */
	uint64_t exception_status;
	void *vm_fault_exc_data;
	size_t vm_fault_exc_data_size;

	/* Tracks debug per-vmid request settings */
	uint32_t spi_dbg_override;
	uint32_t spi_dbg_launch_mode;
	uint32_t watch_points[4];
	uint32_t alloc_watch_ids;

	/*
	 * If this process has been checkpointed before, then the user
	 * application will use the original gpu_id on the
	 * checkpointed node to refer to this device.
	 */
	uint32_t user_gpu_id;

	void *proc_ctx_bo;
	uint64_t proc_ctx_gpu_addr;
	void *proc_ctx_cpu_ptr;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

struct svm_range_list {
	struct mutex			lock;
	struct rb_root_cached		objects;
	struct list_head		list;
	struct work_struct		deferred_list_work;
	struct list_head		deferred_range_list;
	struct list_head		criu_svm_metadata_list;
	spinlock_t			deferred_list_lock;
	atomic_t			evicted_ranges;
	atomic_t			drain_pagefaults;
	struct delayed_work		restore_work;
	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
	struct task_struct		*faulting_task;
};

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	u32 pasid;

	/*
	 * Array of kfd_process_device pointers,
	 * one for each device the process is using.
	 */
	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
	uint32_t n_pdds;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	u64 signal_handle;
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;

	/* Indicates device process is debug attached with reserved vmid. */
	bool debug_trap_enabled;

	/* per-process-per device debug event fd file */
	struct file *dbg_ev_file;

	/* If the process is a kfd debugger, we need to know so we can clean
	 * up at exit time. If a process enables debugging on itself, it does
	 * its own clean-up, so we don't set the flag here. We track this by
	 * counting the number of processes this process is debugging.
	 */
	atomic_t debugged_process_count;

	/* If the process is being debugged, this is the debugger process */
	struct kfd_process *debugger_process;

	/* Kobj for our procfs */
	struct kobject *kobj;
	struct kobject *kobj_queues;
	struct attribute attr_pasid;

	/* Keeps track of CWSR init */
	bool has_cwsr;

	/* Exception code enable mask and status */
	uint64_t exception_enable_mask;
	uint64_t exception_status;

	/* Used to drain stale interrupts */
	wait_queue_head_t wait_irq_drain;
	bool irq_drain_is_open;

	/* shared virtual memory registered by this process */
	struct svm_range_list svms;

	bool xnack_enabled;

	/* Work area for debugger event writer worker. */
	struct work_struct debug_event_workarea;

	/* Tracks debug per-vmid request for debug flags */
	bool dbg_flags;

	atomic_t poison;
	/* Queues are in paused state because we are in the process of doing a CRIU checkpoint */
	bool queues_paused;

	/* Tracks runtime enable status */
	struct semaphore runtime_enable_sema;
	bool is_runtime_retry;
	struct kfd_runtime_info runtime_info;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;

/**
 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
 *
 * @filep: pointer to file structure.
 * @p: amdkfd process pointer.
 * @data: pointer to arg that was copied from user.
 *
 * Return: returns ioctl completion code.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};

bool kfd_dev_is_large_bar(struct kfd_node *dev);
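
/*
 * Illustrative handler shape matching amdkfd_ioctl_t (hypothetical ioctl,
 * not part of the driver): by the time a handler runs, @data already
 * points at a kernel copy of the user argument, and the return value
 * becomes the ioctl completion code.
 */
static inline int kfd_example_ioctl_noop(struct file *filep,
					 struct kfd_process *p, void *data)
{
	return 0;
}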

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct task_struct *thread);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
				uint32_t *gpuid, uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
				uint32_t gpuidx, uint32_t *gpuid)
{
	return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
}
static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
				struct kfd_process *p, uint32_t gpuidx)
{
	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
}

void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
							 uint32_t gpu_id);

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
						      struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
						       struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
						       struct kfd_process *p);

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);

int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle);
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
10979d7d0248SFelix Kuehling void write_kernel_doorbell64(void __iomem *db, u64 value);
1098339903faSYong Zhao unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
109959d7115dSMukul Joshi 					struct kfd_process_device *pdd,
1100ef568db7SFelix Kuehling 					unsigned int doorbell_id);
110159d7115dSMukul Joshi phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
110259d7115dSMukul Joshi int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
110359d7115dSMukul Joshi 				unsigned int *doorbell_index);
110459d7115dSMukul Joshi void kfd_free_process_doorbells(struct kfd_dev *kfd,
110559d7115dSMukul Joshi 				unsigned int doorbell_index);
11066e81090bSOded Gabbay /* GTT Sub-Allocator */
11076e81090bSOded Gabbay 
11088dc1db31SMukul Joshi int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
11096e81090bSOded Gabbay 			struct kfd_mem_obj **mem_obj);
11106e81090bSOded Gabbay 
11118dc1db31SMukul Joshi int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);
11126e81090bSOded Gabbay 
11134a488a7aSOded Gabbay extern struct device *kfd_device;
11144a488a7aSOded Gabbay 
1115de9f26bbSKent Russell /* KFD's procfs */
1116de9f26bbSKent Russell void kfd_procfs_init(void);
1117de9f26bbSKent Russell void kfd_procfs_shutdown(void);
11186d220a7eSAmber Lin int kfd_procfs_add_queue(struct queue *q);
11196d220a7eSAmber Lin void kfd_procfs_del_queue(struct queue *q);
1120de9f26bbSKent Russell 
11215b5c4e40SEvgeny Pinchuk /* Topology */
11225b5c4e40SEvgeny Pinchuk int kfd_topology_init(void);
11235b5c4e40SEvgeny Pinchuk void kfd_topology_shutdown(void);
11248dc1db31SMukul Joshi int kfd_topology_add_device(struct kfd_node *gpu);
11258dc1db31SMukul Joshi int kfd_topology_remove_device(struct kfd_node *gpu);
11263a87177eSHarish Kasiviswanathan struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
11273a87177eSHarish Kasiviswanathan 			uint32_t proximity_domain);
112846d18d51SMukul Joshi struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
112946d18d51SMukul Joshi 			uint32_t proximity_domain);
113044d8cc6fSYong Zhao struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
11318dc1db31SMukul Joshi struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
11328dc1db31SMukul Joshi struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
1133f5fe7edfSMukul Joshi static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
1134f5fe7edfSMukul Joshi 					uint32_t vmid)
11355fb34bd9SAlex Sierra {
1136f5fe7edfSMukul Joshi 	return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
1137f5fe7edfSMukul Joshi 		(node->compute_vmid_bitmap & (1 << vmid)) != 0;
11385fb34bd9SAlex Sierra }
11395fb34bd9SAlex Sierra static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
1140f5fe7edfSMukul Joshi 					uint32_t node_id, uint32_t vmid) {
11415fb34bd9SAlex Sierra 	struct kfd_dev *dev = adev->kfd.dev;
11425fb34bd9SAlex Sierra 	uint32_t i;
11435fb34bd9SAlex Sierra 
11445fb34bd9SAlex Sierra 	if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
11455fb34bd9SAlex Sierra 		return dev->nodes[0];
11465fb34bd9SAlex Sierra 
11475fb34bd9SAlex Sierra 	for (i = 0; i < dev->num_nodes; i++)
1148f5fe7edfSMukul Joshi 		if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid))
11495fb34bd9SAlex Sierra 			return dev->nodes[i];
11505fb34bd9SAlex Sierra 
11515fb34bd9SAlex Sierra 	return NULL;
11525fb34bd9SAlex Sierra }
11538dc1db31SMukul Joshi int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
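/*
 * Example (illustrative sketch only): walking every device known to the
 * topology. kfd_topology_enum_kfd_devices() is assumed to return non-zero
 * once @idx runs past the last topology entry and to report CPU-only
 * nodes as a NULL *kdev; use_gpu_node() is a hypothetical consumer.
 *
 *	struct kfd_node *kdev;
 *	uint8_t idx = 0;
 *
 *	while (!kfd_topology_enum_kfd_devices(idx, &kdev)) {
 *		if (kdev)		// NULL means a CPU-only node
 *			use_gpu_node(kdev);
 *		idx++;
 *	}
 */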
1154520b8fb7SFelix Kuehling int kfd_numa_node_to_apic_id(int numa_node_id);
11556127896fSHuang Rui void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);
11565b5c4e40SEvgeny Pinchuk 
11574a488a7aSOded Gabbay /* Interrupts */
115812fb1ad7SJonathan Kim #define KFD_IRQ_FENCE_CLIENTID	0xff
115912fb1ad7SJonathan Kim #define KFD_IRQ_FENCE_SOURCEID	0xff
116012fb1ad7SJonathan Kim #define KFD_IRQ_IS_FENCE(client, source)			\
116112fb1ad7SJonathan Kim 				((client) == KFD_IRQ_FENCE_CLIENTID &&	\
116212fb1ad7SJonathan Kim 				(source) == KFD_IRQ_FENCE_SOURCEID)
11638dc1db31SMukul Joshi int kfd_interrupt_init(struct kfd_node *dev);
11648dc1db31SMukul Joshi void kfd_interrupt_exit(struct kfd_node *dev);
11658dc1db31SMukul Joshi bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry);
11668dc1db31SMukul Joshi bool interrupt_is_wanted(struct kfd_node *dev,
116758e69886SLan Xiao 			const uint32_t *ih_ring_entry,
116858e69886SLan Xiao 			uint32_t *patched_ihre, bool *flag);
116912fb1ad7SJonathan Kim int kfd_process_drain_interrupts(struct kfd_process_device *pdd);
117012fb1ad7SJonathan Kim void kfd_process_close_interrupt_drain(unsigned int pasid);
11714a488a7aSOded Gabbay 
117219f6d2a6SOded Gabbay /* amdkfd Apertures */
117319f6d2a6SOded Gabbay int kfd_init_apertures(struct kfd_process *process);
117419f6d2a6SOded Gabbay 
11757c9631afSJay Cornwall void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
11767c9631afSJay Cornwall 				  uint64_t tba_addr,
11777c9631afSJay Cornwall 				  uint64_t tma_addr);
117850cff45eSJay Cornwall void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
117950cff45eSJay Cornwall 				     bool enabled);
11807c9631afSJay Cornwall 
11810ab2d753SJonathan Kim /* CWSR initialization */
11820ab2d753SJonathan Kim int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);
11830ab2d753SJonathan Kim 
118436988070SRajneesh Bhardwaj /* CRIU */
118536988070SRajneesh Bhardwaj /*
118636988070SRajneesh Bhardwaj  * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any of the CRIU private
118736988070SRajneesh Bhardwaj  * structures:
118836988070SRajneesh Bhardwaj  * kfd_criu_process_priv_data
118936988070SRajneesh Bhardwaj  * kfd_criu_device_priv_data
119036988070SRajneesh Bhardwaj  * kfd_criu_bo_priv_data
119136988070SRajneesh Bhardwaj  * kfd_criu_queue_priv_data
119236988070SRajneesh Bhardwaj  * kfd_criu_event_priv_data
119336988070SRajneesh Bhardwaj  * kfd_criu_svm_range_priv_data
119436988070SRajneesh Bhardwaj  */
119536988070SRajneesh Bhardwaj 
119636988070SRajneesh Bhardwaj #define KFD_CRIU_PRIV_VERSION 1
119736988070SRajneesh Bhardwaj 
119836988070SRajneesh Bhardwaj struct kfd_criu_process_priv_data {
119936988070SRajneesh Bhardwaj 	uint32_t version;
12004717fe3dSRajneesh Bhardwaj 	uint32_t xnack_mode;
120136988070SRajneesh Bhardwaj };
120236988070SRajneesh Bhardwaj 
120336988070SRajneesh Bhardwaj struct kfd_criu_device_priv_data {
120436988070SRajneesh Bhardwaj 	/* For future use */
120536988070SRajneesh Bhardwaj 	uint64_t reserved;
120636988070SRajneesh Bhardwaj };
120736988070SRajneesh Bhardwaj 
120836988070SRajneesh Bhardwaj struct kfd_criu_bo_priv_data {
12095ccbb057SRajneesh Bhardwaj 	uint64_t user_addr;
12105ccbb057SRajneesh Bhardwaj 	uint32_t idr_handle;
12115ccbb057SRajneesh Bhardwaj 	uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
121236988070SRajneesh Bhardwaj };
121336988070SRajneesh Bhardwaj 
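/*
 * Example (illustrative sketch only): a restore path is expected to reject
 * checkpoints whose private-data version does not match the running
 * driver. The surrounding helper and its arguments are hypothetical; only
 * the structure layout and KFD_CRIU_PRIV_VERSION come from this header.
 *
 *	struct kfd_criu_process_priv_data pdata;
 *
 *	if (copy_from_user(&pdata, user_priv_data, sizeof(pdata)))
 *		return -EFAULT;
 *	if (pdata.version != KFD_CRIU_PRIV_VERSION)
 *		return -EINVAL;	// checkpoint from an incompatible KFD
 */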
121436988070SRajneesh Bhardwaj /*
1215626f7b31SDavid Yat Sin  * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data,
1216626f7b31SDavid Yat Sin  * kfd_criu_svm_range_priv_data are the object type
1217626f7b31SDavid Yat Sin  */
1218626f7b31SDavid Yat Sin enum kfd_criu_object_type {
1219626f7b31SDavid Yat Sin 	KFD_CRIU_OBJECT_TYPE_QUEUE,
1220626f7b31SDavid Yat Sin 	KFD_CRIU_OBJECT_TYPE_EVENT,
1221626f7b31SDavid Yat Sin 	KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
1222626f7b31SDavid Yat Sin };
1223626f7b31SDavid Yat Sin 
122436988070SRajneesh Bhardwaj struct kfd_criu_svm_range_priv_data {
122536988070SRajneesh Bhardwaj 	uint32_t object_type;
122608a987a8SRajneesh Bhardwaj 	uint64_t start_addr;
122708a987a8SRajneesh Bhardwaj 	uint64_t size;
122808a987a8SRajneesh Bhardwaj 	/* Variable length array of attributes */
1229d5c83156SChangcheng Deng 	struct kfd_ioctl_svm_attribute attrs[];
123036988070SRajneesh Bhardwaj };
123136988070SRajneesh Bhardwaj 
123236988070SRajneesh Bhardwaj struct kfd_criu_queue_priv_data {
123336988070SRajneesh Bhardwaj 	uint32_t object_type;
1234626f7b31SDavid Yat Sin 	uint64_t q_address;
1235626f7b31SDavid Yat Sin 	uint64_t q_size;
1236626f7b31SDavid Yat Sin 	uint64_t read_ptr_addr;
1237626f7b31SDavid Yat Sin 	uint64_t write_ptr_addr;
1238626f7b31SDavid Yat Sin 	uint64_t doorbell_off;
1239626f7b31SDavid Yat Sin 	uint64_t eop_ring_buffer_address;
1240626f7b31SDavid Yat Sin 	uint64_t ctx_save_restore_area_address;
1241626f7b31SDavid Yat Sin 	uint32_t gpu_id;
1242626f7b31SDavid Yat Sin 	uint32_t type;
1243626f7b31SDavid Yat Sin 	uint32_t format;
1244626f7b31SDavid Yat Sin 	uint32_t q_id;
1245626f7b31SDavid Yat Sin 	uint32_t priority;
1246626f7b31SDavid Yat Sin 	uint32_t q_percent;
1247626f7b31SDavid Yat Sin 	uint32_t doorbell_id;
1248747eea07SDavid Yat Sin 	uint32_t gws;
1249626f7b31SDavid Yat Sin 	uint32_t sdma_id;
1250626f7b31SDavid Yat Sin 	uint32_t eop_ring_buffer_size;
1251626f7b31SDavid Yat Sin 	uint32_t ctx_save_restore_area_size;
1252626f7b31SDavid Yat Sin 	uint32_t ctl_stack_size;
1253626f7b31SDavid Yat Sin 	uint32_t mqd_size;
125436988070SRajneesh Bhardwaj };
125536988070SRajneesh Bhardwaj 
125636988070SRajneesh Bhardwaj struct kfd_criu_event_priv_data {
125736988070SRajneesh Bhardwaj 	uint32_t object_type;
125840e8a766SDavid Yat Sin 	uint64_t user_handle;
125940e8a766SDavid Yat Sin 	uint32_t event_id;
126040e8a766SDavid Yat Sin 	uint32_t auto_reset;
126140e8a766SDavid Yat Sin 	uint32_t type;
126240e8a766SDavid Yat Sin 	uint32_t signaled;
126340e8a766SDavid Yat Sin 
126440e8a766SDavid Yat Sin 	union {
126540e8a766SDavid Yat Sin 		struct kfd_hsa_memory_exception_data memory_exception_data;
126640e8a766SDavid Yat Sin 		struct kfd_hsa_hw_exception_data hw_exception_data;
126740e8a766SDavid Yat Sin 	};
126836988070SRajneesh Bhardwaj };
126936988070SRajneesh Bhardwaj 
1270626f7b31SDavid Yat Sin int kfd_process_get_queue_info(struct kfd_process *p,
1271626f7b31SDavid Yat Sin 			       uint32_t *num_queues,
1272626f7b31SDavid Yat Sin 			       uint64_t *priv_data_sizes);
1273626f7b31SDavid Yat Sin 
1274626f7b31SDavid Yat Sin int kfd_criu_checkpoint_queues(struct kfd_process *p,
1275626f7b31SDavid Yat Sin 			       uint8_t __user *user_priv_data,
1276626f7b31SDavid Yat Sin 			       uint64_t *priv_data_offset);
1277626f7b31SDavid Yat Sin 
1278626f7b31SDavid Yat Sin int kfd_criu_restore_queue(struct kfd_process *p,
1279626f7b31SDavid Yat Sin 			   uint8_t __user *user_priv_data,
1280626f7b31SDavid Yat Sin 			   uint64_t *priv_data_offset,
1281626f7b31SDavid Yat Sin 			   uint64_t max_priv_data_size);
128240e8a766SDavid Yat Sin 
128340e8a766SDavid Yat Sin int kfd_criu_checkpoint_events(struct kfd_process *p,
128440e8a766SDavid Yat Sin 			       uint8_t __user *user_priv_data,
128540e8a766SDavid Yat Sin 			       uint64_t *priv_data_offset);
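/*
 * Example (illustrative sketch only): because every variable-size CRIU
 * object begins with a uint32_t object_type, a restore loop can peek at
 * the discriminator before picking the matching kfd_criu_restore_*
 * helper. The glue below is hypothetical, not the upstream restore path.
 *
 *	uint32_t object_type;
 *
 *	if (copy_from_user(&object_type, user_priv_data + *priv_data_offset,
 *			   sizeof(object_type)))
 *		return -EFAULT;
 *	switch (object_type) {
 *	case KFD_CRIU_OBJECT_TYPE_QUEUE:
 *		ret = kfd_criu_restore_queue(p, user_priv_data,
 *					     priv_data_offset,
 *					     max_priv_data_size);
 *		break;
 *	case KFD_CRIU_OBJECT_TYPE_EVENT:
 *		ret = kfd_criu_restore_event(devkfd, p, user_priv_data,
 *					     priv_data_offset,
 *					     max_priv_data_size);
 *		break;
 *	default:
 *		ret = -EINVAL;
 *	}
 */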
128640e8a766SDavid Yat Sin 
128740e8a766SDavid Yat Sin int kfd_criu_restore_event(struct file *devkfd,
128840e8a766SDavid Yat Sin 			   struct kfd_process *p,
128940e8a766SDavid Yat Sin 			   uint8_t __user *user_priv_data,
129040e8a766SDavid Yat Sin 			   uint64_t *priv_data_offset,
129140e8a766SDavid Yat Sin 			   uint64_t max_priv_data_size);
129236988070SRajneesh Bhardwaj /* CRIU - End */
129336988070SRajneesh Bhardwaj 
1294ed6e6a34SBen Goz /* Queue Context Management */
1295e88a614cSEdward O'Callaghan int init_queue(struct queue **q, const struct queue_properties *properties);
1296ed6e6a34SBen Goz void uninit_queue(struct queue *q);
129745102048SBen Goz void print_queue_properties(struct queue_properties *q);
1298ed6e6a34SBen Goz void print_queue(struct queue *q);
1299ed6e6a34SBen Goz 
13004b8f589bSBen Goz struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
13018dc1db31SMukul Joshi 		struct kfd_node *dev);
1302ee04955aSFelix Kuehling struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
13038dc1db31SMukul Joshi 		struct kfd_node *dev);
13044b8f589bSBen Goz struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
13058dc1db31SMukul Joshi 		struct kfd_node *dev);
1306ee04955aSFelix Kuehling struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
13078dc1db31SMukul Joshi 		struct kfd_node *dev);
1308b91d43ddSFelix Kuehling struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
13098dc1db31SMukul Joshi 		struct kfd_node *dev);
131014328aa5SPhilip Cox struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
13118dc1db31SMukul Joshi 		struct kfd_node *dev);
1312cc009e61SMukul Joshi struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
13138dc1db31SMukul Joshi 		struct kfd_node *dev);
13148dc1db31SMukul Joshi struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
131564c7f8cfSBen Goz void device_queue_manager_uninit(struct device_queue_manager *dqm);
13168dc1db31SMukul Joshi struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
1317241f24f8SBen Goz 					enum kfd_queue_type type);
1318c2a77fdeSFelix Kuehling void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
131903e5b167STao Zhou int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
1320241f24f8SBen Goz 
132145102048SBen Goz /* Process Queue Manager */
132245102048SBen Goz struct process_queue_node {
132345102048SBen Goz 	struct queue *q;
132445102048SBen Goz 	struct kernel_queue *kq;
132545102048SBen Goz 	struct list_head process_queue_list;
132645102048SBen Goz };
132745102048SBen Goz 
13289fd3f1bfSFelix Kuehling void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
13299fd3f1bfSFelix Kuehling void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
133045102048SBen Goz int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
133145102048SBen Goz void pqm_uninit(struct process_queue_manager *pqm);
133245102048SBen Goz int pqm_create_queue(struct process_queue_manager *pqm,
13338dc1db31SMukul Joshi 			    struct kfd_node *dev,
133445102048SBen Goz 			    struct file *f,
133545102048SBen Goz 			    struct queue_properties *properties,
1336e47a8b52SYong Zhao 			    unsigned int *qid,
1337e77a541fSGraham Sider 			    struct amdgpu_bo *wptr_bo,
13388668dfc3SDavid Yat Sin 			    const struct kfd_criu_queue_priv_data *q_data,
133942c6c482SDavid Yat Sin 			    const void *restore_mqd,
13403a9822d7SDavid Yat Sin 			    const void *restore_ctl_stack,
1341e47a8b52SYong Zhao 			    uint32_t *p_doorbell_offset_in_process);
134245102048SBen Goz int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
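/*
 * Example (illustrative sketch only): a non-CRIU queue creation boils down
 * to a pqm_create_queue()/pqm_destroy_queue() pair. The scaffolding below
 * is hypothetical; the NULL arguments are assumed valid for the normal
 * (non-restore) path, covering wptr_bo and the CRIU-restore parameters.
 *
 *	unsigned int qid;
 *	uint32_t doorbell_off;
 *	int err;
 *
 *	err = pqm_create_queue(&p->pqm, dev, filep, &properties, &qid,
 *			       NULL, NULL, NULL, NULL, &doorbell_off);
 *	if (err)
 *		return err;
 *	...
 *	pqm_destroy_queue(&p->pqm, qid);
 */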
13437c695a2cSLang Yu int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
134445102048SBen Goz 			    struct queue_properties *p);
13457c695a2cSLang Yu int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
13467c695a2cSLang Yu 			struct mqd_update_info *minfo);
1347eb82da1dSOak Zeng int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
1348eb82da1dSOak Zeng 			void *gws);
1349fbeb661bSYair Shachar struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
1350fbeb661bSYair Shachar 						unsigned int qid);
13515bb4b78bSOak Zeng struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
13525bb4b78bSOak Zeng 						unsigned int qid);
13535df099e8SJay Cornwall int pqm_get_wave_state(struct process_queue_manager *pqm,
13545df099e8SJay Cornwall 		       unsigned int qid,
13555df099e8SJay Cornwall 		       void __user *ctl_stack,
13565df099e8SJay Cornwall 		       u32 *ctl_stack_used_size,
13575df099e8SJay Cornwall 		       u32 *save_area_used_size);
1358*b17bd5dbSJonathan Kim int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
1359*b17bd5dbSJonathan Kim 			   uint64_t exception_clear_mask,
1360*b17bd5dbSJonathan Kim 			   void __user *buf,
1361*b17bd5dbSJonathan Kim 			   int *num_qss_entries,
1362*b17bd5dbSJonathan Kim 			   uint32_t *entry_size);
136345102048SBen Goz 
1364b010affeSQu Huang int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
1365b010affeSQu Huang 			      uint64_t fence_value,
13668c72c3d7SYong Zhao 			      unsigned int timeout_ms);
1367788bf83dSYair Shachar 
136842c6c482SDavid Yat Sin int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
136942c6c482SDavid Yat Sin 				  unsigned int qid,
13703a9822d7SDavid Yat Sin 				  u32 *mqd_size,
13713a9822d7SDavid Yat Sin 				  u32 *ctl_stack_size);
1372ed6e6a34SBen Goz /* Packet Manager */
1373ed6e6a34SBen Goz 
137464c7f8cfSBen Goz #define KFD_FENCE_COMPLETED (100)
137564c7f8cfSBen Goz #define KFD_FENCE_INIT   (10)
1376241f24f8SBen Goz 
1377ed6e6a34SBen Goz struct packet_manager {
1378ed6e6a34SBen Goz 	struct device_queue_manager *dqm;
1379ed6e6a34SBen Goz 	struct kernel_queue *priv_queue;
1380ed6e6a34SBen Goz 	struct mutex lock;
1381ed6e6a34SBen Goz 	bool allocated;
1382ed6e6a34SBen Goz 	struct kfd_mem_obj *ib_buffer_obj;
1383851a645eSFelix Kuehling 	unsigned int ib_size_bytes;
1384819ec5acSFelix Kuehling 	bool is_over_subscription;
1385f6e27ff1SFelix Kuehling 
1386f6e27ff1SFelix Kuehling 	const struct packet_manager_funcs *pmf;
1387ed6e6a34SBen Goz };
1388ed6e6a34SBen Goz 
1389f6e27ff1SFelix Kuehling struct packet_manager_funcs {
1390f6e27ff1SFelix Kuehling 	/* Support ASIC-specific packet formats for PM4 packets */
1391f6e27ff1SFelix Kuehling 	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
1392f6e27ff1SFelix Kuehling 			struct qcm_process_device *qpd);
1393f6e27ff1SFelix Kuehling 	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
1394f6e27ff1SFelix Kuehling 			uint64_t ib, size_t ib_size_in_dwords, bool chain);
1395f6e27ff1SFelix Kuehling 	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
1396f6e27ff1SFelix Kuehling 			struct scheduling_resources *res);
1397f6e27ff1SFelix Kuehling 	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
1398f6e27ff1SFelix Kuehling 			struct queue *q, bool is_static);
1399f6e27ff1SFelix Kuehling 	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
1400f6e27ff1SFelix Kuehling 			enum kfd_unmap_queues_filter mode,
1401d2cb0b21SJonathan Kim 			uint32_t filter_param, bool reset);
14027cee6a68SJonathan Kim 	int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
14037cee6a68SJonathan Kim 			uint32_t grace_period);
1404f6e27ff1SFelix Kuehling 	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
1405b010affeSQu Huang 			uint64_t fence_address, uint64_t fence_value);
1406f6e27ff1SFelix Kuehling 	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
1407f6e27ff1SFelix Kuehling 
1408f6e27ff1SFelix Kuehling 	/* Packet sizes */
1409f6e27ff1SFelix Kuehling 	int map_process_size;
1410f6e27ff1SFelix Kuehling 	int runlist_size;
1411f6e27ff1SFelix Kuehling 	int set_resources_size;
1412f6e27ff1SFelix Kuehling 	int map_queues_size;
1413f6e27ff1SFelix Kuehling 	int unmap_queues_size;
14147cee6a68SJonathan Kim 	int set_grace_period_size;
1415f6e27ff1SFelix Kuehling 	int query_status_size;
1416f6e27ff1SFelix Kuehling 	int release_mem_size;
1417f6e27ff1SFelix Kuehling };
1418f6e27ff1SFelix Kuehling 
1419f6e27ff1SFelix Kuehling extern const struct packet_manager_funcs kfd_vi_pm_funcs;
1420454150b1SFelix Kuehling extern const struct packet_manager_funcs kfd_v9_pm_funcs;
1421fd6a440eSJonathan Kim extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;
1422f6e27ff1SFelix Kuehling 
142364c7f8cfSBen Goz int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
1424c2a77fdeSFelix Kuehling void pm_uninit(struct packet_manager *pm, bool hanging);
142564c7f8cfSBen Goz int pm_send_set_resources(struct packet_manager *pm,
142664c7f8cfSBen Goz 				struct scheduling_resources *res);
142764c7f8cfSBen Goz int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
142864c7f8cfSBen Goz int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
1429b010affeSQu Huang 				uint64_t fence_value);
143064c7f8cfSBen Goz 
1431d2cb0b21SJonathan Kim int pm_send_unmap_queue(struct packet_manager *pm,
14327da2bcf8SYong Zhao 			enum kfd_unmap_queues_filter mode,
1433d2cb0b21SJonathan Kim 			uint32_t filter_param, bool reset);
143464c7f8cfSBen Goz 
1435241f24f8SBen Goz void pm_release_ib(struct packet_manager *pm);
1436241f24f8SBen Goz 
14377cee6a68SJonathan Kim int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);
14387cee6a68SJonathan Kim 
1439454150b1SFelix Kuehling /* Following PM funcs can be shared among VI and AI */
1440454150b1SFelix Kuehling unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
144114328aa5SPhilip Cox 
144219f6d2a6SOded Gabbay uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
144319f6d2a6SOded Gabbay 
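/*
 * Example (illustrative sketch only): how the HWS fence handshake above is
 * assumed to fit together. The caller seeds the fence with KFD_FENCE_INIT,
 * asks the scheduler to write KFD_FENCE_COMPLETED via a query-status
 * packet, then polls with amdkfd_fence_wait_timeout(). The packet_mgr
 * member name and the 100 ms timeout are assumptions, not upstream facts.
 *
 *	*fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(&dqm->packet_mgr, fence_gpu_addr,
 *			     KFD_FENCE_COMPLETED);
 *	if (amdkfd_fence_wait_timeout(fence_addr, KFD_FENCE_COMPLETED, 100))
 *		pr_err("HWS did not respond\n");	// hypothetical handling
 */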
1444f3a39818SAndrew Lewycky /* Events */
1445f3a39818SAndrew Lewycky extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
1446ca750681SFelix Kuehling extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
144712fb1ad7SJonathan Kim extern const struct kfd_event_interrupt_class event_interrupt_class_v10;
1448cc009e61SMukul Joshi extern const struct kfd_event_interrupt_class event_interrupt_class_v11;
1449ca750681SFelix Kuehling 
1450930c5ff4SAlexey Skidanov extern const struct kfd_device_global_init_class device_global_init_class_cik;
1451f3a39818SAndrew Lewycky 
1452c3eb12dfSFelix Kuehling int kfd_event_init_process(struct kfd_process *p);
1453f3a39818SAndrew Lewycky void kfd_event_free_process(struct kfd_process *p);
1454f3a39818SAndrew Lewycky int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
1455f3a39818SAndrew Lewycky int kfd_wait_on_events(struct kfd_process *p,
145659d3e8beSAlexey Skidanov 		       uint32_t num_events, void __user *data,
1457bea9a56aSFelix Kuehling 		       bool all, uint32_t *user_timeout_ms,
1458fdf0c833SFelix Kuehling 		       uint32_t *wait_result);
1459c7b6bac9SFenghua Yu void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
1460f3a39818SAndrew Lewycky 				uint32_t valid_id_bits);
14618dc1db31SMukul Joshi void kfd_signal_iommu_event(struct kfd_node *dev,
1462c7b6bac9SFenghua Yu 			    u32 pasid, unsigned long address,
146359d3e8beSAlexey Skidanov 			    bool is_write_requested, bool is_execute_requested);
1464c7b6bac9SFenghua Yu void kfd_signal_hw_exception_event(u32 pasid);
1465f3a39818SAndrew Lewycky int kfd_set_event(struct kfd_process *p, uint32_t event_id);
1466f3a39818SAndrew Lewycky int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
146740e8a766SDavid Yat Sin int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);
146840e8a766SDavid Yat Sin 
1469f3a39818SAndrew Lewycky int kfd_event_create(struct file *devkfd, struct kfd_process *p,
1470f3a39818SAndrew Lewycky 		     uint32_t event_type, bool auto_reset, uint32_t node_id,
1471f3a39818SAndrew Lewycky 		     uint32_t *event_id, uint32_t *event_trigger_data,
1472f3a39818SAndrew Lewycky 		     uint64_t *event_page_offset, uint32_t *event_slot_index);
147340e8a766SDavid Yat Sin 
147440e8a766SDavid Yat Sin int kfd_get_num_events(struct kfd_process *p);
1475f3a39818SAndrew Lewycky int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
1476f3a39818SAndrew Lewycky 
14778dc1db31SMukul Joshi void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
1478c2d2588cSJonathan Kim 			       struct kfd_vm_fault_info *info,
1479c2d2588cSJonathan Kim 			       struct kfd_hsa_memory_exception_data *data);
14802640c3faSshaoyunl 
14818dc1db31SMukul Joshi void kfd_signal_reset_event(struct kfd_node *dev);
1482e42051d2SShaoyun Liu 
14838dc1db31SMukul Joshi void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);
1484e2b1f9f5SDennis Li 
14853543b055SEric Huang void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
1486403575c4SFelix Kuehling 
1487459ccca5SLang Yu static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
1488459ccca5SLang Yu {
148975dda67cSPhilip Yang 	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
149075dda67cSPhilip Yang 	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
149175dda67cSPhilip Yang 	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
1492459ccca5SLang Yu 	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
1493459ccca5SLang Yu }
1494459ccca5SLang Yu 
1495c2d2588cSJonathan Kim int kfd_send_exception_to_runtime(struct kfd_process *p,
1496c2d2588cSJonathan Kim 				  unsigned int queue_id,
1497c2d2588cSJonathan Kim 				  uint64_t error_reason);
1498e42051d2SShaoyun Liu bool kfd_is_locked(void);
1499e42051d2SShaoyun Liu 
1500f756e631SHarish Kasiviswanathan /* Compute profile */
15018dc1db31SMukul Joshi void kfd_inc_compute_active(struct kfd_node *dev);
15028dc1db31SMukul Joshi void kfd_dec_compute_active(struct kfd_node *dev);
1503f756e631SHarish Kasiviswanathan 
15046b855f7bSHarish Kasiviswanathan /* Cgroup Support */
15056b855f7bSHarish Kasiviswanathan /* Check with device cgroup if @kfd device is accessible */
15068dc1db31SMukul Joshi static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
15076b855f7bSHarish Kasiviswanathan {
1508eec8fd02SOdin Ugedal #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
1509d69a3b76SMukul Joshi 	struct drm_device *ddev = adev_to_drm(kfd->adev);
15106b855f7bSHarish Kasiviswanathan 
151199c7b309SLorenz Brun 	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
15126b855f7bSHarish Kasiviswanathan 					  ddev->render->index,
15136b855f7bSHarish Kasiviswanathan 					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
15146b855f7bSHarish Kasiviswanathan #else
15156b855f7bSHarish Kasiviswanathan 	return 0;
15166b855f7bSHarish Kasiviswanathan #endif
15176b855f7bSHarish Kasiviswanathan }
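/*
 * Example (illustrative sketch only): ioctl and mmap entry points are
 * expected to gate device access on the cgroup check above before doing
 * any real work. The surrounding handler and variables are hypothetical.
 *
 *	err = kfd_devcgroup_check_permission(dev);
 *	if (err)
 *		return err;	// this process may not touch the render node
 */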
15186b855f7bSHarish Kasiviswanathan 
151974c5b85dSMukul Joshi static inline bool kfd_is_first_node(struct kfd_node *node)
152074c5b85dSMukul Joshi {
152174c5b85dSMukul Joshi 	return (node == node->kfd->nodes[0]);
152274c5b85dSMukul Joshi }
152374c5b85dSMukul Joshi 
1524851a645eSFelix Kuehling /* Debugfs */
1525851a645eSFelix Kuehling #if defined(CONFIG_DEBUG_FS)
1526851a645eSFelix Kuehling 
1527851a645eSFelix Kuehling void kfd_debugfs_init(void);
1528851a645eSFelix Kuehling void kfd_debugfs_fini(void);
1529851a645eSFelix Kuehling int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
1530851a645eSFelix Kuehling int pqm_debugfs_mqds(struct seq_file *m, void *data);
1531851a645eSFelix Kuehling int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
1532851a645eSFelix Kuehling int dqm_debugfs_hqds(struct seq_file *m, void *data);
1533851a645eSFelix Kuehling int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
1534851a645eSFelix Kuehling int pm_debugfs_runlist(struct seq_file *m, void *data);
1535851a645eSFelix Kuehling 
15368dc1db31SMukul Joshi int kfd_debugfs_hang_hws(struct kfd_node *dev);
1537a29ec470SShaoyun Liu int pm_debugfs_hang_hws(struct packet_manager *pm);
15384f942aaeSOak Zeng int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
1539a29ec470SShaoyun Liu 
1540851a645eSFelix Kuehling #else
1541851a645eSFelix Kuehling 
1542851a645eSFelix Kuehling static inline void kfd_debugfs_init(void) {}
1543851a645eSFelix Kuehling static inline void kfd_debugfs_fini(void) {}
1544851a645eSFelix Kuehling 
1545851a645eSFelix Kuehling #endif
1546851a645eSFelix Kuehling 
15474a488a7aSOded Gabbay #endif