/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)

#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)(a) << 59)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)

#define AMDGPU_MTYPE_NC		0
#define AMDGPU_MTYPE_CC		2

#define AMDGPU_PTE_DEFAULT_ATC	(AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* max number of VMHUBs */
#define AMDGPU_MAX_VMHUBS	2
#define AMDGPU_GFXHUB		0
#define AMDGPU_MMHUB		1

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE		(1ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX		0
#define AMDGPU_VM_CONTEXT_COMPUTE	1
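/*
 * Example: composing a PTE flags word from the bits above for a valid,
 * CPU-snooped, read/write system page. A minimal sketch only; the helper
 * name is illustrative and not part of the driver API.
 */
static inline uint64_t amdgpu_vm_example_rw_pte_flags(void)
{
	return AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED |
	       AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;
}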
/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base	base;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt		*entries;
};

/* provided by hw blocks that can write PTEs, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
		       struct dma_fence *exclusive);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};
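/*
 * Example: the calling sequence the update backends above expect
 * (map_table, then prepare, then one or more update calls, then commit).
 * A minimal sketch, assuming the page-table BO is already reserved; the
 * helper name and the constant values are illustrative only.
 */
static inline int
amdgpu_vm_example_write_one_pte(const struct amdgpu_vm_update_funcs *funcs,
				struct amdgpu_vm_update_params *p,
				struct amdgpu_bo *table,
				struct dma_fence **fence)
{
	int r;

	/* make the page table accessible to the chosen backend */
	r = funcs->map_table(table);
	if (r)
		return r;

	/* set up the job / sync state for this update */
	r = funcs->prepare(p, NULL, NULL);
	if (r)
		return r;

	/* write a single valid PTE at offset 0, no address increment */
	r = funcs->update(p, table, 0, 0, 1, 0, AMDGPU_PTE_VALID);
	if (r)
		return r;

	/* submit the work and hand back the resulting fence */
	return funcs->commit(p, fence);
}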
struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* BOs that need a validation */
	struct list_head	evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;
	spinlock_t		invalidated_lock;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_vm_pt	root;
	struct dma_fence	*last_update;

	/* Scheduler entity for page table updates */
	struct drm_sched_entity	entity;

	unsigned int		pasid;
	/* dedicated to vm */
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info	*process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info	task_info;

	/* Stores the positions of a group of BOs for a bulk LRU move */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* marks whether the bulk move can be done */
	bool			bulk_moveable;
};
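/*
 * Example: how a VM context selects between CPU and SDMA page-table
 * updates via the AMDGPU_VM_USE_CPU_FOR_* bits above. A minimal sketch,
 * assuming vm_update_mode follows the bit layout documented on
 * amdgpu_vm_manager below; the helper name is illustrative.
 */
static inline bool amdgpu_vm_example_use_cpu(int vm_update_mode,
					     int vm_context)
{
	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
		return !!(vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE);
	return !!(vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX);
}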
struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_sched_rq			*vm_pte_rqs[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_rqs;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct idr				pasid_idr;
	spinlock_t				pasid_lock;

	/* counter of memory mapped through XGMI */
	uint32_t				xgmi_map_counter;
	struct mutex				lock_pstate;
};

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) \
	((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) \
	((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) \
	((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
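/*
 * Example: how the pasid_idr above is meant to be used from a page fault
 * handler. A minimal sketch in comment form, since struct amdgpu_device
 * is still incomplete at this point in the header; the variable names
 * are illustrative:
 *
 *	struct amdgpu_vm *vm;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
 *	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
 *	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 */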
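/*
 * Example: emitting a linear run of PTEs with the amdgpu_vm_set_pte_pde()
 * wrapper above from an IB build path. A minimal sketch in comment form;
 * "adev", "ib" and the address values are illustrative:
 *
 *	// write "count" PTEs starting at GPU address "pe", mapping
 *	// "addr" onwards with a 4K stride per entry:
 *	amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, 4096,
 *			      AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
 *			      AMDGPU_PTE_WRITEABLE);
 */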
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
		    bool need_pipe_sync);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
			     struct amdgpu_task_info *task_info);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);

#endif /* __AMDGPU_VM_H__ */