/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */

#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>

struct pci_dev;
struct amdgpu_device;

#define KGD_MAX_QUEUES 128

struct kfd_dev;
struct kgd_mem;

enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
	KFD_PREEMPT_TYPE_WAVEFRONT_SAVE
};

struct kfd_vm_fault_info {
	uint64_t page_addr;
	uint32_t vmid;
	uint32_t mc_id;
	uint32_t status;
	bool prot_valid;
	bool prot_read;
	bool prot_write;
	bool prot_exec;
};

struct kfd_cu_info {
	uint32_t num_shader_engines;
	uint32_t num_shader_arrays_per_engine;
	uint32_t num_cu_per_sh;
	uint32_t cu_active_number;
	uint32_t cu_ao_mask;
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;
	uint32_t cu_bitmap[4][4];
};

/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
	uint64_t local_mem_size_private;
	uint64_t local_mem_size_public;
	uint32_t vram_width;
	uint32_t mem_clk_max;
};

enum kgd_memory_pool {
	KGD_POOL_SYSTEM_CACHEABLE = 1,
	KGD_POOL_SYSTEM_WRITECOMBINE = 2,
	KGD_POOL_FRAMEBUFFER = 3,
};

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy, also known as command
 * processor (CP) scheduling. In this mode the firmware schedules the user
 * mode queues as well as kernel queues such as the HIQ and DIQ.
 * The HIQ is a special queue that dispatches the configuration and the list
 * of currently running user mode queues to the CP.
 * The DIQ is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this scheduling mode oversubscription of user mode queues is enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above, but with the
 * oversubscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver
 * programs the command processor registers directly and sets up the queues
 * "manually". This mode is used *ONLY* for debugging purposes.
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
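/*
 * Illustrative sketch (not part of the upstream interface): a caller that
 * needs to branch on whether the firmware scheduler is active could use a
 * helper like the one below. The helper name is hypothetical.
 */
static inline bool kfd_sched_policy_uses_hws(enum kfd_sched_policy policy)
{
	/* Both HWS variants rely on the CP firmware scheduler; only
	 * KFD_SCHED_POLICY_NO_HWS programs the queues manually.
	 */
	return policy == KFD_SCHED_POLICY_HWS ||
	       policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION;
}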
struct kgd2kfd_shared_resources {
	/* Bit n == 1 means VMID n is available for KFD. */
	unsigned int compute_vmid_bitmap;

	/* number of pipes per mec */
	uint32_t num_pipe_per_mec;

	/* number of queues per pipe */
	uint32_t num_queue_per_pipe;

	/* Bit n == 1 means Queue n is available for KFD */
	DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);

	/* SDMA doorbell assignments (SOC15 and later chips only). Only
	 * specific doorbells are routed to each SDMA engine. Others
	 * are routed to IH and VCN. They are not usable by the CP.
	 */
	uint32_t *sdma_doorbell_idx;

	/* From SOC15 onward, the range of doorbell indices that is not
	 * usable for CP queues.
	 */
	uint32_t non_cp_doorbells_start;
	uint32_t non_cp_doorbells_end;

	/* Base address of doorbell aperture. */
	phys_addr_t doorbell_physical_address;

	/* Size in bytes of doorbell aperture. */
	size_t doorbell_aperture_size;

	/* Number of bytes at start of aperture reserved for KGD. */
	size_t doorbell_start_offset;

	/* GPUVM address space size in bytes */
	uint64_t gpuvm_size;

	/* Minor device number of the render node */
	int drm_render_minor;

	bool enable_mes;
};
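/*
 * Illustrative sketch (not part of the upstream interface): how a consumer
 * of kgd2kfd_shared_resources might map a (pipe, queue) pair onto
 * cp_queue_bitmap to test whether KFD may use that queue. The helper name
 * and the pipe-major bit numbering are assumptions for illustration.
 */
static inline bool kfd_example_queue_available(
		const struct kgd2kfd_shared_resources *res,
		uint32_t pipe, uint32_t queue)
{
	/* Queues are numbered pipe-major: bit = pipe * queues-per-pipe + queue */
	unsigned int bit = pipe * res->num_queue_per_pipe + queue;

	if (bit >= KGD_MAX_QUEUES)
		return false;

	return test_bit(bit, res->cp_queue_bitmap);
}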
struct tile_config {
	uint32_t *tile_config_ptr;
	uint32_t *macro_tile_config_ptr;
	uint32_t num_tile_configs;
	uint32_t num_macro_tile_configs;

	uint32_t gb_addr_config;
	uint32_t num_banks;
	uint32_t num_ranks;
};

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096

/**
 * struct kfd2kgd_calls
 *
 * @program_sh_mem_settings: A function that should initiate the memory
 * properties such as main aperture memory type (cached / non cached) and
 * secondary aperture base address, size and memory type.
 * This function is used only for no cp scheduling mode.
 *
 * @set_pasid_vmid_mapping: Exposes a pasid/vmid pair to the H/W.
 * Only used for no cp scheduling mode.
 *
 * @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
 * scheduling mode.
 *
 * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
 * Used only for no HWS mode.
 *
 * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
 * The array is allocated with kmalloc and must be freed with kfree by the
 * caller.
 *
 * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value
 * pairs. The array is allocated with kmalloc and must be freed with kfree
 * by the caller.
 *
 * @hqd_is_occupied: Checks if a hqd slot is occupied.
 *
 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
 *
 * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
 *
 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
 * SDMA hqd slot.
 *
 * @set_scratch_backing_va: Sets the VA for scratch backing memory of a VMID.
 * Only used for no cp scheduling mode.
 *
 * @set_vm_context_page_table_base: Program page table base for a VMID.
 *
 * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
 * IH ring entry. This function allows the KFD ISR to get the VMID
 * from the fault status register as early as possible.
 *
 * @get_cu_occupancy: Function pointer that returns to the caller the number
 * of wavefronts that are in flight for all of the queues of a process, as
 * identified by its pasid. Note that the value returned by this function is
 * a snapshot of the current moment and does not guarantee any minimum number
 * of waves in flight. This function is defined only for devices of the GFX9
 * family and later; callers must take care not to invoke it on GFX8 and
 * earlier devices, where it is not defined.
 *
 * This structure contains function pointers to services that the kgd driver
 * provides to the amdkfd driver.
 */
struct kfd2kgd_calls {
	/* Register access functions */
	void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid,
			uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
			uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases,
			uint32_t inst);

	int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid,
			unsigned int vmid, uint32_t inst);

	int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id,
			uint32_t inst);

	int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm, uint32_t inst);

	int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off, uint32_t inst);

	int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);

	int (*hqd_dump)(struct amdgpu_device *adev,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst);

	int (*hqd_sdma_dump)(struct amdgpu_device *adev,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);

	bool (*hqd_is_occupied)(struct amdgpu_device *adev,
				uint64_t queue_address, uint32_t pipe_id,
				uint32_t queue_id, uint32_t inst);

	int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd,
			   enum kfd_preempt_type reset_type,
			   unsigned int timeout, uint32_t pipe_id,
			   uint32_t queue_id, uint32_t inst);

	bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd);

	int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd,
				unsigned int timeout);

	int (*wave_control_execute)(struct amdgpu_device *adev,
				    uint32_t gfx_index_val,
				    uint32_t sq_cmd, uint32_t inst);
	bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev,
						uint8_t vmid,
						uint16_t *p_pasid);

	/* No longer needed from GFXv9 onward. The scratch base address is
	 * passed to the shader by the CP. It's the user mode driver's
	 * responsibility.
	 */
	void (*set_scratch_backing_va)(struct amdgpu_device *adev,
				       uint64_t va, uint32_t vmid);

	void (*set_vm_context_page_table_base)(struct amdgpu_device *adev,
					       uint32_t vmid, uint64_t page_table_base);
	uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev);

	uint32_t (*enable_debug_trap)(struct amdgpu_device *adev,
				      bool restore_dbg_registers,
				      uint32_t vmid);
	uint32_t (*disable_debug_trap)(struct amdgpu_device *adev,
				       bool keep_trap_enabled,
				       uint32_t vmid);
	int (*validate_trap_override_request)(struct amdgpu_device *adev,
					      uint32_t trap_override,
					      uint32_t *trap_mask_supported);
	uint32_t (*set_wave_launch_trap_override)(struct amdgpu_device *adev,
						  uint32_t vmid,
						  uint32_t trap_override,
						  uint32_t trap_mask_bits,
						  uint32_t trap_mask_request,
						  uint32_t *trap_mask_prev,
						  uint32_t kfd_dbg_trap_cntl_prev);
	uint32_t (*set_wave_launch_mode)(struct amdgpu_device *adev,
					 uint8_t wave_launch_mode,
					 uint32_t vmid);
	uint32_t (*set_address_watch)(struct amdgpu_device *adev,
				      uint64_t watch_address,
				      uint32_t watch_address_mask,
				      uint32_t watch_id,
				      uint32_t watch_mode,
				      uint32_t debug_vmid);
	uint32_t (*clear_address_watch)(struct amdgpu_device *adev,
					uint32_t watch_id);
	void (*get_iq_wait_times)(struct amdgpu_device *adev,
				  uint32_t *wait_times);
	void (*build_grace_period_packet_info)(struct amdgpu_device *adev,
					       uint32_t wait_times,
					       uint32_t grace_period,
					       uint32_t *reg_offset,
					       uint32_t *reg_data);
	void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
				 int *wave_cnt, int *max_waves_per_cu,
				 uint32_t inst);
	void (*program_trap_handler_settings)(struct amdgpu_device *adev,
					      uint32_t vmid, uint64_t tba_addr,
					      uint64_t tma_addr, uint32_t inst);
};
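/*
 * Illustrative sketch (not part of the upstream interface): how amdkfd
 * might consume the hqd_dump() hook. As documented above, the dump array
 * is allocated with kmalloc by the callee and must be freed with kfree by
 * the caller. The helper name is hypothetical, and <linux/slab.h> and
 * <linux/printk.h> are assumed to be available in the compilation unit.
 */
static inline int kfd_example_dump_hqd(struct amdgpu_device *adev,
				       const struct kfd2kgd_calls *f2g,
				       uint32_t pipe_id, uint32_t queue_id,
				       uint32_t inst)
{
	uint32_t (*dump)[2];	/* array of {offset, value} register pairs */
	uint32_t n_regs, i;
	int r;

	r = f2g->hqd_dump(adev, pipe_id, queue_id, &dump, &n_regs, inst);
	if (r)
		return r;

	for (i = 0; i < n_regs; i++)
		pr_debug("reg 0x%x = 0x%x\n", dump[i][0], dump[i][1]);

	kfree(dump);	/* the caller owns the kmalloc'ed array */
	return 0;
}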
#endif	/* KGD_KFD_INTERFACE_H_INCLUDED */