1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_trace.h"
40 #include "amdgpu_amdkfd.h"
41 #include "amdgpu_gmc.h"
42 #include "amdgpu_xgmi.h"
43 #include "amdgpu_dma_buf.h"
44 #include "amdgpu_res_cursor.h"
45 #include "kfd_svm.h"
46 
47 /**
48  * DOC: GPUVM
49  *
50  * GPUVM is the MMU functionality provided on the GPU.
51  * GPUVM is similar to the legacy GART on older asics, however
52  * rather than there being a single global GART table
53  * for the entire GPU, there can be multiple GPUVM page tables active
54  * at any given time.  The GPUVM page tables can contain a mix of
55  * VRAM pages and system pages (both memory and MMIO) and system pages
56  * can be mapped as snooped (cached system pages) or unsnooped
57  * (uncached system pages).
58  *
59  * Each active GPUVM has an ID associated with it and there is a page table
60  * linked with each VMID.  When executing a command buffer,
61  * the kernel tells the engine what VMID to use for that command
62  * buffer.  VMIDs are allocated dynamically as commands are submitted.
63  * The userspace drivers maintain their own address space and the kernel
64  * sets up their page tables accordingly when they submit their
65  * command buffers and a VMID is assigned.
66  * The hardware supports up to 16 active GPUVMs at any given time.
67  *
68  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
69  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
70  * as other features such as encryption and caching attributes.
71  *
72  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
73  * addition to an aperture managed by a page table, VMID 0 also has
74  * several other apertures.  There is an aperture for direct access to VRAM
75  * and there is a legacy AGP aperture which just forwards accesses directly
76  * to the matching system physical addresses (or IOVAs when an IOMMU is
77  * present).  These apertures provide direct access to these memories without
78  * incurring the overhead of a page table.  VMID 0 is used by the kernel
79  * driver for tasks like memory management.
80  *
81  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
82  * For user applications, each application can have its own unique GPUVM
83  * address space.  The application manages the address space and the kernel
84  * driver manages the GPUVM page tables for each process.  If a GPU client
85  * accesses an invalid page, it will generate a GPU page fault, similar to
86  * accessing an invalid page on a CPU.
87  */
88 
89 #define START(node) ((node)->start)
90 #define LAST(node) ((node)->last)
91 
92 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
93 		     START, LAST, static, amdgpu_vm_it)
94 
95 #undef START
96 #undef LAST
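
/*
 * A minimal sketch (not from the driver) of the helpers generated by
 * INTERVAL_TREE_DEFINE() above; "vm", "mapping", "start", "last" and
 * handle_mapping() are placeholders:
 *
 *	amdgpu_vm_it_insert(mapping, &vm->va);
 *	for (mapping = amdgpu_vm_it_iter_first(&vm->va, start, last); mapping;
 *	     mapping = amdgpu_vm_it_iter_next(mapping, start, last))
 *		handle_mapping(mapping);
 *	amdgpu_vm_it_remove(mapping, &vm->va);
 */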
97 
98 /**
99  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
100  */
101 struct amdgpu_prt_cb {
102 
103 	/**
104 	 * @adev: amdgpu device
105 	 */
106 	struct amdgpu_device *adev;
107 
108 	/**
109 	 * @cb: callback
110 	 */
111 	struct dma_fence_cb cb;
112 };
113 
114 /**
115  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
116  */
117 struct amdgpu_vm_tlb_seq_struct {
118 	/**
119 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
120 	 */
121 	struct amdgpu_vm *vm;
122 
123 	/**
124 	 * @cb: callback
125 	 */
126 	struct dma_fence_cb cb;
127 };
128 
129 /**
130  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
131  *
132  * @adev: amdgpu_device pointer
133  * @vm: amdgpu_vm pointer
134  * @pasid: the pasid the VM is using on this GPU
135  *
136  * Set the pasid this VM is using on this GPU, can also be used to remove the
137  * pasid by passing in zero.
138  *
139  */
140 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
141 			u32 pasid)
142 {
143 	int r;
144 
145 	if (vm->pasid == pasid)
146 		return 0;
147 
148 	if (vm->pasid) {
149 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
150 		if (r < 0)
151 			return r;
152 
153 		vm->pasid = 0;
154 	}
155 
156 	if (pasid) {
157 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
158 					GFP_KERNEL));
159 		if (r < 0)
160 			return r;
161 
162 		vm->pasid = pasid;
163 	}
164 
165 
166 	return 0;
167 }
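
/*
 * A minimal usage sketch (not from the driver); "adev", "vm" and "pasid" are
 * placeholders supplied by the caller.  Passing zero removes the mapping
 * again:
 *
 *	r = amdgpu_vm_set_pasid(adev, vm, pasid);
 *	if (r)
 *		return r;
 *	...
 *	r = amdgpu_vm_set_pasid(adev, vm, 0);
 */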
168 
169 /**
170  * amdgpu_vm_bo_evicted - vm_bo is evicted
171  *
172  * @vm_bo: vm_bo which is evicted
173  *
174  * State for PDs/PTs and per VM BOs which are not at the location they should
175  * be.
176  */
177 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
178 {
179 	struct amdgpu_vm *vm = vm_bo->vm;
180 	struct amdgpu_bo *bo = vm_bo->bo;
181 
182 	vm_bo->moved = true;
183 	spin_lock(&vm_bo->vm->status_lock);
184 	if (bo->tbo.type == ttm_bo_type_kernel)
185 		list_move(&vm_bo->vm_status, &vm->evicted);
186 	else
187 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
188 	spin_unlock(&vm_bo->vm->status_lock);
189 }
190 /**
191  * amdgpu_vm_bo_moved - vm_bo is moved
192  *
193  * @vm_bo: vm_bo which is moved
194  *
195  * State for per VM BOs which are moved, but that change is not yet reflected
196  * in the page tables.
197  */
198 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
199 {
200 	spin_lock(&vm_bo->vm->status_lock);
201 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
202 	spin_unlock(&vm_bo->vm->status_lock);
203 }
204 
205 /**
206  * amdgpu_vm_bo_idle - vm_bo is idle
207  *
208  * @vm_bo: vm_bo which is now idle
209  *
210  * State for PDs/PTs and per VM BOs which have gone through the state machine
211  * and are now idle.
212  */
213 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
214 {
215 	spin_lock(&vm_bo->vm->status_lock);
216 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
217 	spin_unlock(&vm_bo->vm->status_lock);
218 	vm_bo->moved = false;
219 }
220 
221 /**
222  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
223  *
224  * @vm_bo: vm_bo which is now invalidated
225  *
226  * State for normal BOs which are invalidated and that change is not yet reflected
227  * in the PTs.
228  */
229 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
230 {
231 	spin_lock(&vm_bo->vm->status_lock);
232 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
233 	spin_unlock(&vm_bo->vm->status_lock);
234 }
235 
236 /**
237  * amdgpu_vm_bo_relocated - vm_bo is relocated
238  *
239  * @vm_bo: vm_bo which is relocated
240  *
241  * State for PDs/PTs which need to update their parent PD.
242  * For the root PD, just move to idle state.
243  */
244 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
245 {
246 	if (vm_bo->bo->parent) {
247 		spin_lock(&vm_bo->vm->status_lock);
248 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
249 		spin_unlock(&vm_bo->vm->status_lock);
250 	} else {
251 		amdgpu_vm_bo_idle(vm_bo);
252 	}
253 }
254 
255 /**
256  * amdgpu_vm_bo_done - vm_bo is done
257  *
258  * @vm_bo: vm_bo which is now done
259  *
260  * State for normal BOs which are invalidated and that change has been updated
261  * in the PTs.
262  */
263 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
264 {
265 	spin_lock(&vm_bo->vm->status_lock);
266 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
267 	spin_unlock(&vm_bo->vm->status_lock);
268 }
269 
270 /**
271  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
272  * @vm: the VM which state machine to reset
273  *
274  * Move all vm_bo objects in the VM into a state where they will be updated
275  * again during validation.
276  */
277 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
278 {
279 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
280 
281 	spin_lock(&vm->status_lock);
282 	list_splice_init(&vm->done, &vm->invalidated);
283 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
284 		vm_bo->moved = true;
285 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
286 		struct amdgpu_bo *bo = vm_bo->bo;
287 
288 		vm_bo->moved = true;
289 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
290 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
291 		else if (bo->parent)
292 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
293 	}
294 	spin_unlock(&vm->status_lock);
295 }
296 
297 /**
298  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
299  *
300  * @base: base structure for tracking BO usage in a VM
301  * @vm: vm to which bo is to be added
302  * @bo: amdgpu buffer object
303  *
304  * Initialize a bo_va_base structure and add it to the appropriate lists
305  *
306  */
307 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
308 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
309 {
310 	base->vm = vm;
311 	base->bo = bo;
312 	base->next = NULL;
313 	INIT_LIST_HEAD(&base->vm_status);
314 
315 	if (!bo)
316 		return;
317 	base->next = bo->vm_bo;
318 	bo->vm_bo = base;
319 
320 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
321 		return;
322 
323 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
324 
325 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
326 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
327 		amdgpu_vm_bo_relocated(base);
328 	else
329 		amdgpu_vm_bo_idle(base);
330 
331 	if (bo->preferred_domains &
332 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
333 		return;
334 
335 	/*
336 	 * We checked all the prerequisites, but it looks like this per VM BO
337 	 * is currently evicted. Add the BO to the evicted list to make sure it
338 	 * is validated on next VM use to avoid faults.
339 	 */
340 	amdgpu_vm_bo_evicted(base);
341 }
342 
343 /**
344  * amdgpu_vm_lock_pd - lock PD in drm_exec
345  *
346  * @vm: vm providing the BOs
347  * @exec: drm execution context
348  * @num_fences: number of extra fences to reserve
349  *
350  * Lock the VM root PD in the DRM execution context.
351  */
352 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
353 		      unsigned int num_fences)
354 {
355 	/* We need at least two fences for the VM PD/PT updates */
356 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
357 				    2 + num_fences);
358 }
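
/*
 * A usage sketch (not from the driver): amdgpu_vm_lock_pd() is meant to be
 * called from inside a drm_exec locking loop; the exact drm_exec_init()
 * signature differs between kernel versions, so it is omitted here:
 *
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			goto error;
 *	}
 */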
359 
360 /**
361  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
362  *
363  * @adev: amdgpu device pointer
364  * @vm: vm providing the BOs
365  *
366  * Move all BOs to the end of LRU and remember their positions to put them
367  * together.
368  */
369 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
370 				struct amdgpu_vm *vm)
371 {
372 	spin_lock(&adev->mman.bdev.lru_lock);
373 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
374 	spin_unlock(&adev->mman.bdev.lru_lock);
375 }
376 
377 /* Create scheduler entities for page table updates */
378 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
379 				   struct amdgpu_vm *vm)
380 {
381 	int r;
382 
383 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
384 				  adev->vm_manager.vm_pte_scheds,
385 				  adev->vm_manager.vm_pte_num_scheds, NULL);
386 	if (r)
387 		goto error;
388 
389 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
390 				     adev->vm_manager.vm_pte_scheds,
391 				     adev->vm_manager.vm_pte_num_scheds, NULL);
392 
393 error:
394 	drm_sched_entity_destroy(&vm->immediate);
395 	return r;
396 }
397 
398 /* Destroy the entities for page table updates again */
399 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
400 {
401 	drm_sched_entity_destroy(&vm->immediate);
402 	drm_sched_entity_destroy(&vm->delayed);
403 }
404 
405 /**
406  * amdgpu_vm_generation - return the page table re-generation counter
407  * @adev: the amdgpu_device
408  * @vm: optional VM to check, might be NULL
409  *
410  * Returns a page table re-generation token to allow checking if submissions
411  * are still valid to use this VM. The VM parameter might be NULL in which case
412  * just the VRAM lost counter will be used.
413  */
414 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
415 {
416 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
417 
418 	if (!vm)
419 		return result;
420 
421 	result += lower_32_bits(vm->generation);
422 	/* Add one if the page tables will be re-generated on next CS */
423 	if (drm_sched_entity_error(&vm->delayed))
424 		++result;
425 
426 	return result;
427 }
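
/*
 * Illustration of the token built above (values are made up): with
 * vram_lost_counter == 2, vm->generation == 5 and no error on the delayed
 * entity the token is 0x0000000200000005; a VRAM loss or a page table
 * re-generation changes the token, so comparing a stored token against a
 * fresh amdgpu_vm_generation() call tells whether a submission is stale.
 */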
428 
429 /**
430  * amdgpu_vm_validate_pt_bos - validate the page table BOs
431  *
432  * @adev: amdgpu device pointer
433  * @vm: vm providing the BOs
434  * @validate: callback to do the validation
435  * @param: parameter for the validation callback
436  *
437  * Validate the page table BOs on command submission if necessary.
438  *
439  * Returns:
440  * Validation result.
441  */
442 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
443 			      int (*validate)(void *p, struct amdgpu_bo *bo),
444 			      void *param)
445 {
446 	uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
447 	struct amdgpu_vm_bo_base *bo_base;
448 	struct amdgpu_bo *shadow;
449 	struct amdgpu_bo *bo;
450 	int r;
451 
452 	if (vm->generation != new_vm_generation) {
453 		vm->generation = new_vm_generation;
454 		amdgpu_vm_bo_reset_state_machine(vm);
455 		amdgpu_vm_fini_entities(vm);
456 		r = amdgpu_vm_init_entities(adev, vm);
457 		if (r)
458 			return r;
459 	}
460 
461 	spin_lock(&vm->status_lock);
462 	while (!list_empty(&vm->evicted)) {
463 		bo_base = list_first_entry(&vm->evicted,
464 					   struct amdgpu_vm_bo_base,
465 					   vm_status);
466 		spin_unlock(&vm->status_lock);
467 
468 		bo = bo_base->bo;
469 		shadow = amdgpu_bo_shadowed(bo);
470 
471 		r = validate(param, bo);
472 		if (r)
473 			return r;
474 		if (shadow) {
475 			r = validate(param, shadow);
476 			if (r)
477 				return r;
478 		}
479 
480 		if (bo->tbo.type != ttm_bo_type_kernel) {
481 			amdgpu_vm_bo_moved(bo_base);
482 		} else {
483 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
484 			amdgpu_vm_bo_relocated(bo_base);
485 		}
486 		spin_lock(&vm->status_lock);
487 	}
488 	spin_unlock(&vm->status_lock);
489 
490 	amdgpu_vm_eviction_lock(vm);
491 	vm->evicting = false;
492 	amdgpu_vm_eviction_unlock(vm);
493 
494 	return 0;
495 }
496 
497 /**
498  * amdgpu_vm_ready - check VM is ready for updates
499  *
500  * @vm: VM to check
501  *
502  * Check if all VM PDs/PTs are ready for updates
503  *
504  * Returns:
505  * True if VM is not evicting and no BOs are left on the evicted list.
506  */
507 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
508 {
509 	bool empty;
510 	bool ret;
511 
512 	amdgpu_vm_eviction_lock(vm);
513 	ret = !vm->evicting;
514 	amdgpu_vm_eviction_unlock(vm);
515 
516 	spin_lock(&vm->status_lock);
517 	empty = list_empty(&vm->evicted);
518 	spin_unlock(&vm->status_lock);
519 
520 	return ret && empty;
521 }
522 
523 /**
524  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
525  *
526  * @adev: amdgpu_device pointer
527  */
528 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
529 {
530 	const struct amdgpu_ip_block *ip_block;
531 	bool has_compute_vm_bug;
532 	struct amdgpu_ring *ring;
533 	int i;
534 
535 	has_compute_vm_bug = false;
536 
537 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
538 	if (ip_block) {
539 		/* Compute has a VM bug for GFX version < 7.
540 		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
541 		if (ip_block->version->major <= 7)
542 			has_compute_vm_bug = true;
543 		else if (ip_block->version->major == 8)
544 			if (adev->gfx.mec_fw_version < 673)
545 				has_compute_vm_bug = true;
546 	}
547 
548 	for (i = 0; i < adev->num_rings; i++) {
549 		ring = adev->rings[i];
550 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
551 			/* only compute rings */
552 			ring->has_compute_vm_bug = has_compute_vm_bug;
553 		else
554 			ring->has_compute_vm_bug = false;
555 	}
556 }
557 
558 /**
559  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
560  *
561  * @ring: ring on which the job will be submitted
562  * @job: job to submit
563  *
564  * Returns:
565  * True if sync is needed.
566  */
567 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
568 				  struct amdgpu_job *job)
569 {
570 	struct amdgpu_device *adev = ring->adev;
571 	unsigned vmhub = ring->vm_hub;
572 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
573 
574 	if (job->vmid == 0)
575 		return false;
576 
577 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
578 		return true;
579 
580 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
581 		return true;
582 
583 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
584 		return true;
585 
586 	return false;
587 }
588 
589 /**
590  * amdgpu_vm_flush - hardware flush the vm
591  *
592  * @ring: ring to use for flush
593  * @job:  related job
594  * @need_pipe_sync: is pipe sync needed
595  *
596  * Emit a VM flush when it is necessary.
597  *
598  * Returns:
599  * 0 on success, errno otherwise.
600  */
601 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
602 		    bool need_pipe_sync)
603 {
604 	struct amdgpu_device *adev = ring->adev;
605 	unsigned vmhub = ring->vm_hub;
606 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
607 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
608 	bool spm_update_needed = job->spm_update_needed;
609 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
610 		job->gds_switch_needed;
611 	bool vm_flush_needed = job->vm_needs_flush;
612 	struct dma_fence *fence = NULL;
613 	bool pasid_mapping_needed = false;
614 	unsigned patch_offset = 0;
615 	int r;
616 
617 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
618 		gds_switch_needed = true;
619 		vm_flush_needed = true;
620 		pasid_mapping_needed = true;
621 		spm_update_needed = true;
622 	}
623 
624 	mutex_lock(&id_mgr->lock);
625 	if (id->pasid != job->pasid || !id->pasid_mapping ||
626 	    !dma_fence_is_signaled(id->pasid_mapping))
627 		pasid_mapping_needed = true;
628 	mutex_unlock(&id_mgr->lock);
629 
630 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
631 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
632 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
633 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
634 		ring->funcs->emit_wreg;
635 
636 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
637 		return 0;
638 
639 	amdgpu_ring_ib_begin(ring);
640 	if (ring->funcs->init_cond_exec)
641 		patch_offset = amdgpu_ring_init_cond_exec(ring);
642 
643 	if (need_pipe_sync)
644 		amdgpu_ring_emit_pipeline_sync(ring);
645 
646 	if (vm_flush_needed) {
647 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
648 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
649 	}
650 
651 	if (pasid_mapping_needed)
652 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
653 
654 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
655 		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
656 
657 	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
658 	    gds_switch_needed) {
659 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
660 					    job->gds_size, job->gws_base,
661 					    job->gws_size, job->oa_base,
662 					    job->oa_size);
663 	}
664 
665 	if (vm_flush_needed || pasid_mapping_needed) {
666 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
667 		if (r)
668 			return r;
669 	}
670 
671 	if (vm_flush_needed) {
672 		mutex_lock(&id_mgr->lock);
673 		dma_fence_put(id->last_flush);
674 		id->last_flush = dma_fence_get(fence);
675 		id->current_gpu_reset_count =
676 			atomic_read(&adev->gpu_reset_counter);
677 		mutex_unlock(&id_mgr->lock);
678 	}
679 
680 	if (pasid_mapping_needed) {
681 		mutex_lock(&id_mgr->lock);
682 		id->pasid = job->pasid;
683 		dma_fence_put(id->pasid_mapping);
684 		id->pasid_mapping = dma_fence_get(fence);
685 		mutex_unlock(&id_mgr->lock);
686 	}
687 	dma_fence_put(fence);
688 
689 	if (ring->funcs->patch_cond_exec)
690 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
691 
692 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
693 	if (ring->funcs->emit_switch_buffer) {
694 		amdgpu_ring_emit_switch_buffer(ring);
695 		amdgpu_ring_emit_switch_buffer(ring);
696 	}
697 	amdgpu_ring_ib_end(ring);
698 	return 0;
699 }
700 
701 /**
702  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
703  *
704  * @vm: requested vm
705  * @bo: requested buffer object
706  *
707  * Find @bo inside the requested vm.
708  * Search inside the @bo's vm list for the requested vm.
709  * Returns the found bo_va or NULL if none is found
710  *
711  * Object has to be reserved!
712  *
713  * Returns:
714  * Found bo_va or NULL.
715  */
716 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
717 				       struct amdgpu_bo *bo)
718 {
719 	struct amdgpu_vm_bo_base *base;
720 
721 	for (base = bo->vm_bo; base; base = base->next) {
722 		if (base->vm != vm)
723 			continue;
724 
725 		return container_of(base, struct amdgpu_bo_va, base);
726 	}
727 	return NULL;
728 }
729 
730 /**
731  * amdgpu_vm_map_gart - Resolve gart mapping of addr
732  *
733  * @pages_addr: optional DMA address to use for lookup
734  * @addr: the unmapped addr
735  *
736  * Look up the physical address of the page that the pte resolves
737  * to.
738  *
739  * Returns:
740  * The pointer for the page table entry.
741  */
742 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
743 {
744 	uint64_t result;
745 
746 	/* page table offset */
747 	result = pages_addr[addr >> PAGE_SHIFT];
748 
749 	/* in case cpu page size != gpu page size */
750 	result |= addr & (~PAGE_MASK);
751 
752 	result &= 0xFFFFFFFFFFFFF000ULL;
753 
754 	return result;
755 }
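
/*
 * Worked example for the masking above (assumed sizes): with 64K CPU pages
 * (PAGE_SHIFT == 16) and 4K GPU pages, addr == 0x23456 looks up
 * pages_addr[0x2] and ORs in 0x3456, so bits 15..12 select the 4K GPU page
 * inside the 64K CPU page while the final mask drops bits 11..0 again.
 */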
756 
757 /**
758  * amdgpu_vm_update_pdes - make sure that all directories are valid
759  *
760  * @adev: amdgpu_device pointer
761  * @vm: requested vm
762  * @immediate: submit immediately to the paging queue
763  *
764  * Makes sure all directories are up to date.
765  *
766  * Returns:
767  * 0 for success, error for failure.
768  */
769 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
770 			  struct amdgpu_vm *vm, bool immediate)
771 {
772 	struct amdgpu_vm_update_params params;
773 	struct amdgpu_vm_bo_base *entry;
774 	bool flush_tlb_needed = false;
775 	LIST_HEAD(relocated);
776 	int r, idx;
777 
778 	spin_lock(&vm->status_lock);
779 	list_splice_init(&vm->relocated, &relocated);
780 	spin_unlock(&vm->status_lock);
781 
782 	if (list_empty(&relocated))
783 		return 0;
784 
785 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
786 		return -ENODEV;
787 
788 	memset(&params, 0, sizeof(params));
789 	params.adev = adev;
790 	params.vm = vm;
791 	params.immediate = immediate;
792 
793 	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
794 	if (r)
795 		goto error;
796 
797 	list_for_each_entry(entry, &relocated, vm_status) {
798 		/* vm_flush_needed after updating moved PDEs */
799 		flush_tlb_needed |= entry->moved;
800 
801 		r = amdgpu_vm_pde_update(&params, entry);
802 		if (r)
803 			goto error;
804 	}
805 
806 	r = vm->update_funcs->commit(&params, &vm->last_update);
807 	if (r)
808 		goto error;
809 
810 	if (flush_tlb_needed)
811 		atomic64_inc(&vm->tlb_seq);
812 
813 	while (!list_empty(&relocated)) {
814 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
815 					 vm_status);
816 		amdgpu_vm_bo_idle(entry);
817 	}
818 
819 error:
820 	drm_dev_exit(idx);
821 	return r;
822 }
823 
824 /**
825  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
826  * @fence: unused
827  * @cb: the callback structure
828  *
829  * Increments the tlb sequence to make sure that future CS execute a VM flush.
830  */
831 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
832 				 struct dma_fence_cb *cb)
833 {
834 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
835 
836 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
837 	atomic64_inc(&tlb_cb->vm->tlb_seq);
838 	kfree(tlb_cb);
839 }
840 
841 /**
842  * amdgpu_vm_update_range - update a range in the vm page table
843  *
844  * @adev: amdgpu_device pointer to use for commands
845  * @vm: the VM to update the range
846  * @immediate: immediate submission in a page fault
847  * @unlocked: unlocked invalidation during MM callback
848  * @flush_tlb: trigger tlb invalidation after update completed
849  * @resv: fences we need to sync to
850  * @start: start of mapped range
851  * @last: last mapped entry
852  * @flags: flags for the entries
853  * @offset: offset into nodes and pages_addr
854  * @vram_base: base for vram mappings
855  * @res: ttm_resource to map
856  * @pages_addr: DMA addresses to use for mapping
857  * @fence: optional resulting fence
858  *
859  * Fill in the page table entries between @start and @last.
860  *
861  * Returns:
862  * 0 for success, negative error code for failure.
863  */
864 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
865 			   bool immediate, bool unlocked, bool flush_tlb,
866 			   struct dma_resv *resv, uint64_t start, uint64_t last,
867 			   uint64_t flags, uint64_t offset, uint64_t vram_base,
868 			   struct ttm_resource *res, dma_addr_t *pages_addr,
869 			   struct dma_fence **fence)
870 {
871 	struct amdgpu_vm_update_params params;
872 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
873 	struct amdgpu_res_cursor cursor;
874 	enum amdgpu_sync_mode sync_mode;
875 	int r, idx;
876 
877 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
878 		return -ENODEV;
879 
880 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
881 	if (!tlb_cb) {
882 		r = -ENOMEM;
883 		goto error_unlock;
884 	}
885 
886 	/* On Vega20 with XGMI, PTEs can get inadvertently cached in the L2
887 	 * texture cache, so flush the TLB heavy-weight unconditionally.
888 	 */
889 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
890 		     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);
891 
892 	/*
893 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
894 	 */
895 	flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);
896 
897 	memset(&params, 0, sizeof(params));
898 	params.adev = adev;
899 	params.vm = vm;
900 	params.immediate = immediate;
901 	params.pages_addr = pages_addr;
902 	params.unlocked = unlocked;
903 
904 	/* Implicitly sync to command submissions in the same VM before
905 	 * unmapping. Sync to moving fences before mapping.
906 	 */
907 	if (!(flags & AMDGPU_PTE_VALID))
908 		sync_mode = AMDGPU_SYNC_EQ_OWNER;
909 	else
910 		sync_mode = AMDGPU_SYNC_EXPLICIT;
911 
912 	amdgpu_vm_eviction_lock(vm);
913 	if (vm->evicting) {
914 		r = -EBUSY;
915 		goto error_free;
916 	}
917 
918 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
919 		struct dma_fence *tmp = dma_fence_get_stub();
920 
921 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
922 		swap(vm->last_unlocked, tmp);
923 		dma_fence_put(tmp);
924 	}
925 
926 	r = vm->update_funcs->prepare(&params, resv, sync_mode);
927 	if (r)
928 		goto error_free;
929 
930 	amdgpu_res_first(pages_addr ? NULL : res, offset,
931 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
932 	while (cursor.remaining) {
933 		uint64_t tmp, num_entries, addr;
934 
935 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
936 		if (pages_addr) {
937 			bool contiguous = true;
938 
939 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
940 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
941 				uint64_t count;
942 
943 				contiguous = pages_addr[pfn + 1] ==
944 					pages_addr[pfn] + PAGE_SIZE;
945 
946 				tmp = num_entries /
947 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
948 				for (count = 2; count < tmp; ++count) {
949 					uint64_t idx = pfn + count;
950 
951 					if (contiguous != (pages_addr[idx] ==
952 					    pages_addr[idx - 1] + PAGE_SIZE))
953 						break;
954 				}
955 				if (!contiguous)
956 					count--;
957 				num_entries = count *
958 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
959 			}
960 
961 			if (!contiguous) {
962 				addr = cursor.start;
963 				params.pages_addr = pages_addr;
964 			} else {
965 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
966 				params.pages_addr = NULL;
967 			}
968 
969 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
970 			addr = vram_base + cursor.start;
971 		} else {
972 			addr = 0;
973 		}
974 
975 		tmp = start + num_entries;
976 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
977 		if (r)
978 			goto error_free;
979 
980 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
981 		start = tmp;
982 	}
983 
984 	r = vm->update_funcs->commit(&params, fence);
985 
986 	if (flush_tlb || params.table_freed) {
987 		tlb_cb->vm = vm;
988 		if (fence && *fence &&
989 		    !dma_fence_add_callback(*fence, &tlb_cb->cb,
990 					   amdgpu_vm_tlb_seq_cb)) {
991 			dma_fence_put(vm->last_tlb_flush);
992 			vm->last_tlb_flush = dma_fence_get(*fence);
993 		} else {
994 			amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
995 		}
996 		tlb_cb = NULL;
997 	}
998 
999 error_free:
1000 	kfree(tlb_cb);
1001 
1002 error_unlock:
1003 	amdgpu_vm_eviction_unlock(vm);
1004 	drm_dev_exit(idx);
1005 	return r;
1006 }
1007 
1008 static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
1009 				    struct amdgpu_mem_stats *stats)
1010 {
1011 	struct amdgpu_vm *vm = bo_va->base.vm;
1012 	struct amdgpu_bo *bo = bo_va->base.bo;
1013 
1014 	if (!bo)
1015 		return;
1016 
1017 	/*
1018 	 * For now ignore BOs which are currently locked and potentially
1019 	 * changing their location.
1020 	 */
1021 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
1022 	    !dma_resv_trylock(bo->tbo.base.resv))
1023 		return;
1024 
1025 	amdgpu_bo_get_memory(bo, stats);
1026 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
1027 	    dma_resv_unlock(bo->tbo.base.resv);
1028 }
1029 
1030 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1031 			  struct amdgpu_mem_stats *stats)
1032 {
1033 	struct amdgpu_bo_va *bo_va, *tmp;
1034 
1035 	spin_lock(&vm->status_lock);
1036 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
1037 		amdgpu_vm_bo_get_memory(bo_va, stats);
1038 
1039 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
1040 		amdgpu_vm_bo_get_memory(bo_va, stats);
1041 
1042 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
1043 		amdgpu_vm_bo_get_memory(bo_va, stats);
1044 
1045 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
1046 		amdgpu_vm_bo_get_memory(bo_va, stats);
1047 
1048 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
1049 		amdgpu_vm_bo_get_memory(bo_va, stats);
1050 
1051 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
1052 		amdgpu_vm_bo_get_memory(bo_va, stats);
1053 	spin_unlock(&vm->status_lock);
1054 }
1055 
1056 /**
1057  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1058  *
1059  * @adev: amdgpu_device pointer
1060  * @bo_va: requested BO and VM object
1061  * @clear: if true clear the entries
1062  *
1063  * Fill in the page table entries for @bo_va.
1064  *
1065  * Returns:
1066  * 0 for success, -EINVAL for failure.
1067  */
1068 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1069 			bool clear)
1070 {
1071 	struct amdgpu_bo *bo = bo_va->base.bo;
1072 	struct amdgpu_vm *vm = bo_va->base.vm;
1073 	struct amdgpu_bo_va_mapping *mapping;
1074 	dma_addr_t *pages_addr = NULL;
1075 	struct ttm_resource *mem;
1076 	struct dma_fence **last_update;
1077 	bool flush_tlb = clear;
1078 	struct dma_resv *resv;
1079 	uint64_t vram_base;
1080 	uint64_t flags;
1081 	int r;
1082 
1083 	if (clear || !bo) {
1084 		mem = NULL;
1085 		resv = vm->root.bo->tbo.base.resv;
1086 	} else {
1087 		struct drm_gem_object *obj = &bo->tbo.base;
1088 
1089 		resv = bo->tbo.base.resv;
1090 		if (obj->import_attach && bo_va->is_xgmi) {
1091 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1092 			struct drm_gem_object *gobj = dma_buf->priv;
1093 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1094 
1095 			if (abo->tbo.resource &&
1096 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1097 				bo = gem_to_amdgpu_bo(gobj);
1098 		}
1099 		mem = bo->tbo.resource;
1100 		if (mem && (mem->mem_type == TTM_PL_TT ||
1101 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1102 			pages_addr = bo->tbo.ttm->dma_address;
1103 	}
1104 
1105 	if (bo) {
1106 		struct amdgpu_device *bo_adev;
1107 
1108 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1109 
1110 		if (amdgpu_bo_encrypted(bo))
1111 			flags |= AMDGPU_PTE_TMZ;
1112 
1113 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1114 		vram_base = bo_adev->vm_manager.vram_base_offset;
1115 	} else {
1116 		flags = 0x0;
1117 		vram_base = 0;
1118 	}
1119 
1120 	if (clear || (bo && bo->tbo.base.resv ==
1121 		      vm->root.bo->tbo.base.resv))
1122 		last_update = &vm->last_update;
1123 	else
1124 		last_update = &bo_va->last_pt_update;
1125 
1126 	if (!clear && bo_va->base.moved) {
1127 		flush_tlb = true;
1128 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1129 
1130 	} else if (bo_va->cleared != clear) {
1131 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1132 	}
1133 
1134 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1135 		uint64_t update_flags = flags;
1136 
1137 		/* Normally bo_va->flags only contains the READABLE and WRITEABLE
1138 		 * bits, but filter them here just in case.
1139 		 */
1140 		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1141 			update_flags &= ~AMDGPU_PTE_READABLE;
1142 		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1143 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1144 
1145 		/* Apply ASIC specific mapping flags */
1146 		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1147 
1148 		trace_amdgpu_vm_bo_update(mapping);
1149 
1150 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1151 					   resv, mapping->start, mapping->last,
1152 					   update_flags, mapping->offset,
1153 					   vram_base, mem, pages_addr,
1154 					   last_update);
1155 		if (r)
1156 			return r;
1157 	}
1158 
1159 	/* If the BO is not in its preferred location add it back to
1160 	 * the evicted list so that it gets validated again on the
1161 	 * next command submission.
1162 	 */
1163 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1164 		uint32_t mem_type = bo->tbo.resource->mem_type;
1165 
1166 		if (!(bo->preferred_domains &
1167 		      amdgpu_mem_type_to_domain(mem_type)))
1168 			amdgpu_vm_bo_evicted(&bo_va->base);
1169 		else
1170 			amdgpu_vm_bo_idle(&bo_va->base);
1171 	} else {
1172 		amdgpu_vm_bo_done(&bo_va->base);
1173 	}
1174 
1175 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1176 	bo_va->cleared = clear;
1177 	bo_va->base.moved = false;
1178 
1179 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1180 		list_for_each_entry(mapping, &bo_va->valids, list)
1181 			trace_amdgpu_vm_bo_mapping(mapping);
1182 	}
1183 
1184 	return 0;
1185 }
1186 
1187 /**
1188  * amdgpu_vm_update_prt_state - update the global PRT state
1189  *
1190  * @adev: amdgpu_device pointer
1191  */
1192 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1193 {
1194 	unsigned long flags;
1195 	bool enable;
1196 
1197 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1198 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1199 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1200 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1201 }
1202 
1203 /**
1204  * amdgpu_vm_prt_get - add a PRT user
1205  *
1206  * @adev: amdgpu_device pointer
1207  */
1208 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1209 {
1210 	if (!adev->gmc.gmc_funcs->set_prt)
1211 		return;
1212 
1213 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1214 		amdgpu_vm_update_prt_state(adev);
1215 }
1216 
1217 /**
1218  * amdgpu_vm_prt_put - drop a PRT user
1219  *
1220  * @adev: amdgpu_device pointer
1221  */
1222 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1223 {
1224 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1225 		amdgpu_vm_update_prt_state(adev);
1226 }
1227 
1228 /**
1229  * amdgpu_vm_prt_cb - callback for updating the PRT status
1230  *
1231  * @fence: fence for the callback
1232  * @_cb: the callback function
1233  */
1234 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1235 {
1236 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1237 
1238 	amdgpu_vm_prt_put(cb->adev);
1239 	kfree(cb);
1240 }
1241 
1242 /**
1243  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1244  *
1245  * @adev: amdgpu_device pointer
1246  * @fence: fence for the callback
1247  */
1248 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1249 				 struct dma_fence *fence)
1250 {
1251 	struct amdgpu_prt_cb *cb;
1252 
1253 	if (!adev->gmc.gmc_funcs->set_prt)
1254 		return;
1255 
1256 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1257 	if (!cb) {
1258 		/* Last resort when we are OOM */
1259 		if (fence)
1260 			dma_fence_wait(fence, false);
1261 
1262 		amdgpu_vm_prt_put(adev);
1263 	} else {
1264 		cb->adev = adev;
1265 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1266 						     amdgpu_vm_prt_cb))
1267 			amdgpu_vm_prt_cb(fence, &cb->cb);
1268 	}
1269 }
1270 
1271 /**
1272  * amdgpu_vm_free_mapping - free a mapping
1273  *
1274  * @adev: amdgpu_device pointer
1275  * @vm: requested vm
1276  * @mapping: mapping to be freed
1277  * @fence: fence of the unmap operation
1278  *
1279  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1280  */
1281 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1282 				   struct amdgpu_vm *vm,
1283 				   struct amdgpu_bo_va_mapping *mapping,
1284 				   struct dma_fence *fence)
1285 {
1286 	if (mapping->flags & AMDGPU_PTE_PRT)
1287 		amdgpu_vm_add_prt_cb(adev, fence);
1288 	kfree(mapping);
1289 }
1290 
1291 /**
1292  * amdgpu_vm_prt_fini - finish all prt mappings
1293  *
1294  * @adev: amdgpu_device pointer
1295  * @vm: requested vm
1296  *
1297  * Register a cleanup callback to disable PRT support after VM dies.
1298  */
1299 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1300 {
1301 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1302 	struct dma_resv_iter cursor;
1303 	struct dma_fence *fence;
1304 
1305 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1306 		/* Add a callback for each fence in the reservation object */
1307 		amdgpu_vm_prt_get(adev);
1308 		amdgpu_vm_add_prt_cb(adev, fence);
1309 	}
1310 }
1311 
1312 /**
1313  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1314  *
1315  * @adev: amdgpu_device pointer
1316  * @vm: requested vm
1317  * @fence: optional resulting fence (unchanged if no work needed to be done
1318  * or if an error occurred)
1319  *
1320  * Make sure all freed BOs are cleared in the PT.
1321  * PTs have to be reserved and mutex must be locked!
1322  *
1323  * Returns:
1324  * 0 for success.
1325  *
1326  */
1327 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1328 			  struct amdgpu_vm *vm,
1329 			  struct dma_fence **fence)
1330 {
1331 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1332 	struct amdgpu_bo_va_mapping *mapping;
1333 	uint64_t init_pte_value = 0;
1334 	struct dma_fence *f = NULL;
1335 	int r;
1336 
1337 	while (!list_empty(&vm->freed)) {
1338 		mapping = list_first_entry(&vm->freed,
1339 			struct amdgpu_bo_va_mapping, list);
1340 		list_del(&mapping->list);
1341 
1342 		if (vm->pte_support_ats &&
1343 		    mapping->start < AMDGPU_GMC_HOLE_START)
1344 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1345 
1346 		r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
1347 					   mapping->start, mapping->last,
1348 					   init_pte_value, 0, 0, NULL, NULL,
1349 					   &f);
1350 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1351 		if (r) {
1352 			dma_fence_put(f);
1353 			return r;
1354 		}
1355 	}
1356 
1357 	if (fence && f) {
1358 		dma_fence_put(*fence);
1359 		*fence = f;
1360 	} else {
1361 		dma_fence_put(f);
1362 	}
1363 
1364 	return 0;
1365 
1366 }
1367 
1368 /**
1369  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1370  *
1371  * @adev: amdgpu_device pointer
1372  * @vm: requested vm
1373  *
1374  * Make sure all BOs which are moved are updated in the PTs.
1375  *
1376  * Returns:
1377  * 0 for success.
1378  *
1379  * PTs have to be reserved!
1380  */
1381 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1382 			   struct amdgpu_vm *vm)
1383 {
1384 	struct amdgpu_bo_va *bo_va;
1385 	struct dma_resv *resv;
1386 	bool clear;
1387 	int r;
1388 
1389 	spin_lock(&vm->status_lock);
1390 	while (!list_empty(&vm->moved)) {
1391 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1392 					 base.vm_status);
1393 		spin_unlock(&vm->status_lock);
1394 
1395 		/* Per VM BOs never need to be cleared in the page tables */
1396 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1397 		if (r)
1398 			return r;
1399 		spin_lock(&vm->status_lock);
1400 	}
1401 
1402 	while (!list_empty(&vm->invalidated)) {
1403 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1404 					 base.vm_status);
1405 		resv = bo_va->base.bo->tbo.base.resv;
1406 		spin_unlock(&vm->status_lock);
1407 
1408 		/* Try to reserve the BO to avoid clearing its ptes */
1409 		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
1410 			clear = false;
1411 		/* Somebody else is using the BO right now */
1412 		else
1413 			clear = true;
1414 
1415 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1416 		if (r)
1417 			return r;
1418 
1419 		if (!clear)
1420 			dma_resv_unlock(resv);
1421 		spin_lock(&vm->status_lock);
1422 	}
1423 	spin_unlock(&vm->status_lock);
1424 
1425 	return 0;
1426 }
1427 
1428 /**
1429  * amdgpu_vm_bo_add - add a bo to a specific vm
1430  *
1431  * @adev: amdgpu_device pointer
1432  * @vm: requested vm
1433  * @bo: amdgpu buffer object
1434  *
1435  * Add @bo into the requested vm.
1436  * Add @bo to the list of bos associated with the vm
1437  *
1438  * Returns:
1439  * Newly added bo_va or NULL for failure
1440  *
1441  * Object has to be reserved!
1442  */
1443 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1444 				      struct amdgpu_vm *vm,
1445 				      struct amdgpu_bo *bo)
1446 {
1447 	struct amdgpu_bo_va *bo_va;
1448 
1449 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1450 	if (bo_va == NULL) {
1451 		return NULL;
1452 	}
1453 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1454 
1455 	bo_va->ref_count = 1;
1456 	bo_va->last_pt_update = dma_fence_get_stub();
1457 	INIT_LIST_HEAD(&bo_va->valids);
1458 	INIT_LIST_HEAD(&bo_va->invalids);
1459 
1460 	if (!bo)
1461 		return bo_va;
1462 
1463 	dma_resv_assert_held(bo->tbo.base.resv);
1464 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1465 		bo_va->is_xgmi = true;
1466 		/* Power up XGMI if it can be potentially used */
1467 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1468 	}
1469 
1470 	return bo_va;
1471 }
1472 
1473 
1474 /**
1475  * amdgpu_vm_bo_insert_map - insert a new mapping
1476  *
1477  * @adev: amdgpu_device pointer
1478  * @bo_va: bo_va to store the address
1479  * @mapping: the mapping to insert
1480  *
1481  * Insert a new mapping into all structures.
1482  */
1483 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1484 				    struct amdgpu_bo_va *bo_va,
1485 				    struct amdgpu_bo_va_mapping *mapping)
1486 {
1487 	struct amdgpu_vm *vm = bo_va->base.vm;
1488 	struct amdgpu_bo *bo = bo_va->base.bo;
1489 
1490 	mapping->bo_va = bo_va;
1491 	list_add(&mapping->list, &bo_va->invalids);
1492 	amdgpu_vm_it_insert(mapping, &vm->va);
1493 
1494 	if (mapping->flags & AMDGPU_PTE_PRT)
1495 		amdgpu_vm_prt_get(adev);
1496 
1497 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1498 	    !bo_va->base.moved) {
1499 		amdgpu_vm_bo_moved(&bo_va->base);
1500 	}
1501 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1502 }
1503 
1504 /* Validate operation parameters to prevent potential abuse */
1505 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1506 					  struct amdgpu_bo *bo,
1507 					  uint64_t saddr,
1508 					  uint64_t offset,
1509 					  uint64_t size)
1510 {
1511 	uint64_t tmp, lpfn;
1512 
1513 	if (saddr & AMDGPU_GPU_PAGE_MASK
1514 	    || offset & AMDGPU_GPU_PAGE_MASK
1515 	    || size & AMDGPU_GPU_PAGE_MASK)
1516 		return -EINVAL;
1517 
1518 	if (check_add_overflow(saddr, size, &tmp)
1519 	    || check_add_overflow(offset, size, &tmp)
1520 	    || size == 0 /* which also leads to end < begin */)
1521 		return -EINVAL;
1522 
1523 	/* make sure object fit at this offset */
1524 	if (bo && offset + size > amdgpu_bo_size(bo))
1525 		return -EINVAL;
1526 
1527 	/* Ensure last pfn not exceed max_pfn */
1528 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1529 	if (lpfn >= adev->vm_manager.max_pfn)
1530 		return -EINVAL;
1531 
1532 	return 0;
1533 }
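
/*
 * Illustrative examples (not from the driver) of what the checks above
 * reject: a start address that is not GPU page aligned such as 0x1001, a
 * zero size, an offset + size that overflows or runs past amdgpu_bo_size(bo),
 * and a range whose last page would reach adev->vm_manager.max_pfn.
 */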
1534 
1535 /**
1536  * amdgpu_vm_bo_map - map bo inside a vm
1537  *
1538  * @adev: amdgpu_device pointer
1539  * @bo_va: bo_va to store the address
1540  * @saddr: where to map the BO
1541  * @offset: requested offset in the BO
1542  * @size: BO size in bytes
1543  * @flags: attributes of pages (read/write/valid/etc.)
1544  *
1545  * Add a mapping of the BO at the specified addr into the VM.
1546  *
1547  * Returns:
1548  * 0 for success, error for failure.
1549  *
1550  * Object has to be reserved and unreserved outside!
1551  */
1552 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1553 		     struct amdgpu_bo_va *bo_va,
1554 		     uint64_t saddr, uint64_t offset,
1555 		     uint64_t size, uint64_t flags)
1556 {
1557 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1558 	struct amdgpu_bo *bo = bo_va->base.bo;
1559 	struct amdgpu_vm *vm = bo_va->base.vm;
1560 	uint64_t eaddr;
1561 	int r;
1562 
1563 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1564 	if (r)
1565 		return r;
1566 
1567 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1568 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1569 
1570 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1571 	if (tmp) {
1572 		/* bo and tmp overlap, invalid addr */
1573 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1574 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1575 			tmp->start, tmp->last + 1);
1576 		return -EINVAL;
1577 	}
1578 
1579 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1580 	if (!mapping)
1581 		return -ENOMEM;
1582 
1583 	mapping->start = saddr;
1584 	mapping->last = eaddr;
1585 	mapping->offset = offset;
1586 	mapping->flags = flags;
1587 
1588 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1589 
1590 	return 0;
1591 }
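
/*
 * A minimal usage sketch (not from the driver); the BO must be reserved and
 * "bo_va" obtained via amdgpu_vm_bo_add() or amdgpu_vm_bo_find(), "gpu_addr"
 * is a placeholder:
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	if (!r)
 *		r = amdgpu_vm_bo_update(adev, bo_va, false);
 */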
1592 
1593 /**
1594  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1595  *
1596  * @adev: amdgpu_device pointer
1597  * @bo_va: bo_va to store the address
1598  * @saddr: where to map the BO
1599  * @offset: requested offset in the BO
1600  * @size: BO size in bytes
1601  * @flags: attributes of pages (read/write/valid/etc.)
1602  *
1603  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1604  * mappings as we do so.
1605  *
1606  * Returns:
1607  * 0 for success, error for failure.
1608  *
1609  * Object has to be reserved and unreserved outside!
1610  */
1611 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1612 			     struct amdgpu_bo_va *bo_va,
1613 			     uint64_t saddr, uint64_t offset,
1614 			     uint64_t size, uint64_t flags)
1615 {
1616 	struct amdgpu_bo_va_mapping *mapping;
1617 	struct amdgpu_bo *bo = bo_va->base.bo;
1618 	uint64_t eaddr;
1619 	int r;
1620 
1621 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1622 	if (r)
1623 		return r;
1624 
1625 	/* Allocate all the needed memory */
1626 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1627 	if (!mapping)
1628 		return -ENOMEM;
1629 
1630 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1631 	if (r) {
1632 		kfree(mapping);
1633 		return r;
1634 	}
1635 
1636 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1637 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1638 
1639 	mapping->start = saddr;
1640 	mapping->last = eaddr;
1641 	mapping->offset = offset;
1642 	mapping->flags = flags;
1643 
1644 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1645 
1646 	return 0;
1647 }
1648 
1649 /**
1650  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1651  *
1652  * @adev: amdgpu_device pointer
1653  * @bo_va: bo_va to remove the address from
1654  * @saddr: where the BO is mapped
1655  *
1656  * Remove a mapping of the BO at the specified addr from the VM.
1657  *
1658  * Returns:
1659  * 0 for success, error for failure.
1660  *
1661  * Object has to be reserved and unreserved outside!
1662  */
1663 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1664 		       struct amdgpu_bo_va *bo_va,
1665 		       uint64_t saddr)
1666 {
1667 	struct amdgpu_bo_va_mapping *mapping;
1668 	struct amdgpu_vm *vm = bo_va->base.vm;
1669 	bool valid = true;
1670 
1671 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1672 
1673 	list_for_each_entry(mapping, &bo_va->valids, list) {
1674 		if (mapping->start == saddr)
1675 			break;
1676 	}
1677 
1678 	if (&mapping->list == &bo_va->valids) {
1679 		valid = false;
1680 
1681 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1682 			if (mapping->start == saddr)
1683 				break;
1684 		}
1685 
1686 		if (&mapping->list == &bo_va->invalids)
1687 			return -ENOENT;
1688 	}
1689 
1690 	list_del(&mapping->list);
1691 	amdgpu_vm_it_remove(mapping, &vm->va);
1692 	mapping->bo_va = NULL;
1693 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1694 
1695 	if (valid)
1696 		list_add(&mapping->list, &vm->freed);
1697 	else
1698 		amdgpu_vm_free_mapping(adev, vm, mapping,
1699 				       bo_va->last_pt_update);
1700 
1701 	return 0;
1702 }
1703 
1704 /**
1705  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1706  *
1707  * @adev: amdgpu_device pointer
1708  * @vm: VM structure to use
1709  * @saddr: start of the range
1710  * @size: size of the range
1711  *
1712  * Remove all mappings in a range, split them as appropriate.
1713  *
1714  * Returns:
1715  * 0 for success, error for failure.
1716  */
1717 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1718 				struct amdgpu_vm *vm,
1719 				uint64_t saddr, uint64_t size)
1720 {
1721 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1722 	LIST_HEAD(removed);
1723 	uint64_t eaddr;
1724 	int r;
1725 
1726 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
1727 	if (r)
1728 		return r;
1729 
1730 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1731 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1732 
1733 	/* Allocate all the needed memory */
1734 	before = kzalloc(sizeof(*before), GFP_KERNEL);
1735 	if (!before)
1736 		return -ENOMEM;
1737 	INIT_LIST_HEAD(&before->list);
1738 
1739 	after = kzalloc(sizeof(*after), GFP_KERNEL);
1740 	if (!after) {
1741 		kfree(before);
1742 		return -ENOMEM;
1743 	}
1744 	INIT_LIST_HEAD(&after->list);
1745 
1746 	/* Now gather all removed mappings */
1747 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1748 	while (tmp) {
1749 		/* Remember mapping split at the start */
1750 		if (tmp->start < saddr) {
1751 			before->start = tmp->start;
1752 			before->last = saddr - 1;
1753 			before->offset = tmp->offset;
1754 			before->flags = tmp->flags;
1755 			before->bo_va = tmp->bo_va;
1756 			list_add(&before->list, &tmp->bo_va->invalids);
1757 		}
1758 
1759 		/* Remember mapping split at the end */
1760 		if (tmp->last > eaddr) {
1761 			after->start = eaddr + 1;
1762 			after->last = tmp->last;
1763 			after->offset = tmp->offset;
1764 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1765 			after->flags = tmp->flags;
1766 			after->bo_va = tmp->bo_va;
1767 			list_add(&after->list, &tmp->bo_va->invalids);
1768 		}
1769 
1770 		list_del(&tmp->list);
1771 		list_add(&tmp->list, &removed);
1772 
1773 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1774 	}
1775 
1776 	/* And free them up */
1777 	list_for_each_entry_safe(tmp, next, &removed, list) {
1778 		amdgpu_vm_it_remove(tmp, &vm->va);
1779 		list_del(&tmp->list);
1780 
1781 		if (tmp->start < saddr)
1782 			tmp->start = saddr;
1783 		if (tmp->last > eaddr)
1784 			tmp->last = eaddr;
1785 
1786 		tmp->bo_va = NULL;
1787 		list_add(&tmp->list, &vm->freed);
1788 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
1789 	}
1790 
1791 	/* Insert partial mapping before the range */
1792 	if (!list_empty(&before->list)) {
1793 		struct amdgpu_bo *bo = before->bo_va->base.bo;
1794 
1795 		amdgpu_vm_it_insert(before, &vm->va);
1796 		if (before->flags & AMDGPU_PTE_PRT)
1797 			amdgpu_vm_prt_get(adev);
1798 
1799 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1800 		    !before->bo_va->base.moved)
1801 			amdgpu_vm_bo_moved(&before->bo_va->base);
1802 	} else {
1803 		kfree(before);
1804 	}
1805 
1806 	/* Insert partial mapping after the range */
1807 	if (!list_empty(&after->list)) {
1808 		struct amdgpu_bo *bo = after->bo_va->base.bo;
1809 
1810 		amdgpu_vm_it_insert(after, &vm->va);
1811 		if (after->flags & AMDGPU_PTE_PRT)
1812 			amdgpu_vm_prt_get(adev);
1813 
1814 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1815 		    !after->bo_va->base.moved)
1816 			amdgpu_vm_bo_moved(&after->bo_va->base);
1817 	} else {
1818 		kfree(after);
1819 	}
1820 
1821 	return 0;
1822 }
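
/*
 * Worked example of the split logic above, with hypothetical numbers in
 * GPU page units (i.e. after the division by AMDGPU_GPU_PAGE_SIZE):
 * clearing [0x2000, 0x5fff] out of an existing mapping [0x1000, 0x7fff]
 * produces
 *
 *   before = [0x1000, 0x1fff], offset unchanged
 *   after  = [0x6000, 0x7fff], offset advanced by
 *            (0x6000 - 0x1000) << PAGE_SHIFT bytes
 *
 * while the overlapped middle is clamped to [0x2000, 0x5fff] and moved
 * to vm->freed for later page table cleanup.
 */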
1823 
1824 /**
1825  * amdgpu_vm_bo_lookup_mapping - find mapping by address
1826  *
1827  * @vm: the requested VM
1828  * @addr: the address
1829  *
1830  * Find a mapping by its address.
1831  *
1832  * Returns:
1833  * The amdgpu_bo_va_mapping matching addr, or NULL.
1834  *
1835  */
1836 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
1837 							 uint64_t addr)
1838 {
1839 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
1840 }
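
/*
 * Illustrative sketch, not part of the driver: the interval tree is keyed
 * in GPU page units, so byte addresses must be converted first (matching
 * the division done in amdgpu_vm_bo_unmap() above). "example_lookup_bo"
 * is a made-up helper name.
 */
static __maybe_unused struct amdgpu_bo *
example_lookup_bo(struct amdgpu_vm *vm, uint64_t byte_addr)
{
	struct amdgpu_bo_va_mapping *mapping;

	mapping = amdgpu_vm_bo_lookup_mapping(vm,
					      byte_addr / AMDGPU_GPU_PAGE_SIZE);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return NULL;

	return mapping->bo_va->base.bo;
}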
1841 
1842 /**
1843  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
1844  *
1845  * @vm: the requested vm
1846  * @ticket: CS ticket
1847  *
1848  * Trace all mappings of BOs reserved during a command submission.
1849  */
1850 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
1851 {
1852 	struct amdgpu_bo_va_mapping *mapping;
1853 
1854 	if (!trace_amdgpu_vm_bo_cs_enabled())
1855 		return;
1856 
1857 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
1858 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
1859 		if (mapping->bo_va && mapping->bo_va->base.bo) {
1860 			struct amdgpu_bo *bo;
1861 
1862 			bo = mapping->bo_va->base.bo;
1863 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
1864 			    ticket)
1865 				continue;
1866 		}
1867 
1868 		trace_amdgpu_vm_bo_cs(mapping);
1869 	}
1870 }
1871 
1872 /**
1873  * amdgpu_vm_bo_del - remove a bo from a specific vm
1874  *
1875  * @adev: amdgpu_device pointer
1876  * @bo_va: requested bo_va
1877  *
1878  * Remove @bo_va->bo from the requested vm.
1879  *
1880  * Object has to be reserved!
1881  */
1882 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
1883 		      struct amdgpu_bo_va *bo_va)
1884 {
1885 	struct amdgpu_bo_va_mapping *mapping, *next;
1886 	struct amdgpu_bo *bo = bo_va->base.bo;
1887 	struct amdgpu_vm *vm = bo_va->base.vm;
1888 	struct amdgpu_vm_bo_base **base;
1889 
1890 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
1891 
1892 	if (bo) {
1893 		dma_resv_assert_held(bo->tbo.base.resv);
1894 		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
1895 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
1896 
1897 		for (base = &bo_va->base.bo->vm_bo; *base;
1898 		     base = &(*base)->next) {
1899 			if (*base != &bo_va->base)
1900 				continue;
1901 
1902 			*base = bo_va->base.next;
1903 			break;
1904 		}
1905 	}
1906 
1907 	spin_lock(&vm->status_lock);
1908 	list_del(&bo_va->base.vm_status);
1909 	spin_unlock(&vm->status_lock);
1910 
1911 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1912 		list_del(&mapping->list);
1913 		amdgpu_vm_it_remove(mapping, &vm->va);
1914 		mapping->bo_va = NULL;
1915 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1916 		list_add(&mapping->list, &vm->freed);
1917 	}
1918 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1919 		list_del(&mapping->list);
1920 		amdgpu_vm_it_remove(mapping, &vm->va);
1921 		amdgpu_vm_free_mapping(adev, vm, mapping,
1922 				       bo_va->last_pt_update);
1923 	}
1924 
1925 	dma_fence_put(bo_va->last_pt_update);
1926 
1927 	if (bo && bo_va->is_xgmi)
1928 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
1929 
1930 	kfree(bo_va);
1931 }
1932 
1933 /**
1934  * amdgpu_vm_evictable - check if we can evict a VM
1935  *
1936  * @bo: A page table of the VM.
1937  *
1938  * Check if it is possible to evict a VM.
1939  */
1940 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
1941 {
1942 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
1943 
1944 	/* Page tables of a destroyed VM can go away immediately */
1945 	if (!bo_base || !bo_base->vm)
1946 		return true;
1947 
1948 	/* Don't evict VM page tables while they are busy */
1949 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
1950 		return false;
1951 
1952 	/* Try to block ongoing updates */
1953 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
1954 		return false;
1955 
1956 	/* Don't evict VM page tables while they are updated */
1957 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
1958 		amdgpu_vm_eviction_unlock(bo_base->vm);
1959 		return false;
1960 	}
1961 
1962 	bo_base->vm->evicting = true;
1963 	amdgpu_vm_eviction_unlock(bo_base->vm);
1964 	return true;
1965 }
1966 
1967 /**
1968  * amdgpu_vm_bo_invalidate - mark the bo as invalid
1969  *
1970  * @adev: amdgpu_device pointer
1971  * @bo: amdgpu buffer object
1972  * @evicted: is the BO evicted
1973  *
1974  * Mark @bo as invalid.
1975  */
1976 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1977 			     struct amdgpu_bo *bo, bool evicted)
1978 {
1979 	struct amdgpu_vm_bo_base *bo_base;
1980 
1981 	/* shadow bo doesn't have bo base, its validation needs its parent */
1982 	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
1983 		bo = bo->parent;
1984 
1985 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
1986 		struct amdgpu_vm *vm = bo_base->vm;
1987 
1988 		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1989 			amdgpu_vm_bo_evicted(bo_base);
1990 			continue;
1991 		}
1992 
1993 		if (bo_base->moved)
1994 			continue;
1995 		bo_base->moved = true;
1996 
1997 		if (bo->tbo.type == ttm_bo_type_kernel)
1998 			amdgpu_vm_bo_relocated(bo_base);
1999 		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
2000 			amdgpu_vm_bo_moved(bo_base);
2001 		else
2002 			amdgpu_vm_bo_invalidated(bo_base);
2003 	}
2004 }
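
/*
 * Summary of the transitions picked by amdgpu_vm_bo_invalidate() above:
 *
 *   evicted     - per-VM BO whose backing store was evicted
 *   relocated   - page table BO (ttm_bo_type_kernel) that moved
 *   moved       - other per-VM BO (shares the root reservation) that moved
 *   invalidated - independently reserved BO that moved
 */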
2005 
2006 /**
2007  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2008  *
2009  * @vm_size: VM size
2010  *
2011  * Returns:
2012  * VM page table size as a power of two
2013  */
2014 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2015 {
2016 	/* Total bits covered by PD + PTs */
2017 	unsigned bits = ilog2(vm_size) + 18;
2018 
2019 	/* Make sure the PD is 4K in size for up to 8GB of address space.
2020 	 * Above that, split equally between PD and PTs. */
2021 	if (vm_size <= 8)
2022 		return (bits - 9);
2023 	else
2024 		return ((bits + 3) / 2);
2025 }
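
/*
 * Worked example for the math above: with vm_size = 8 (GB),
 * bits = ilog2(8) + 18 = 21 and the result is 21 - 9 = 12 bits per PT,
 * leaving 9 bits for the PD (512 entries * 8 bytes = one 4K page). With
 * vm_size = 64, bits = 24 and the result is (24 + 3) / 2 = 13, splitting
 * the remaining bits roughly evenly between PD and PTs.
 */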
2026 
2027 /**
2028  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2029  *
2030  * @adev: amdgpu_device pointer
2031  * @min_vm_size: the minimum vm size in GB if it's set auto
2032  * @fragment_size_default: Default PTE fragment size
2033  * @max_level: max VMPT level
2034  * @max_bits: max address space size in bits
2035  *
2036  */
2037 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2038 			   uint32_t fragment_size_default, unsigned max_level,
2039 			   unsigned max_bits)
2040 {
2041 	unsigned int max_size = 1 << (max_bits - 30);
2042 	unsigned int vm_size;
2043 	uint64_t tmp;
2044 
2045 	/* adjust vm size first */
2046 	if (amdgpu_vm_size != -1) {
2047 		vm_size = amdgpu_vm_size;
2048 		if (vm_size > max_size) {
2049 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2050 				 amdgpu_vm_size, max_size);
2051 			vm_size = max_size;
2052 		}
2053 	} else {
2054 		struct sysinfo si;
2055 		unsigned int phys_ram_gb;
2056 
2057 		/* Optimal VM size depends on the amount of physical
2058 		 * RAM available. Underlying requirements and
2059 		 * assumptions:
2060 		 *
2061 		 *  - Need to map system memory and VRAM from all GPUs
2062 		 *     - VRAM from other GPUs not known here
2063 		 *     - Assume VRAM <= system memory
2064 		 *  - On GFX8 and older, VM space can be segmented for
2065 		 *    different MTYPEs
2066 		 *  - Need to allow room for fragmentation, guard pages etc.
2067 		 *
2068 		 * This adds up to a rough guess of system memory x3.
2069 		 * Round up to power of two to maximize the available
2070 		 * VM size with the given page table size.
2071 		 */
2072 		si_meminfo(&si);
2073 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2074 			       (1 << 30) - 1) >> 30;
2075 		vm_size = roundup_pow_of_two(
2076 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2077 	}
2078 
2079 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2080 
2081 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2082 	if (amdgpu_vm_block_size != -1)
2083 		tmp >>= amdgpu_vm_block_size - 9;
2084 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2085 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2086 	switch (adev->vm_manager.num_level) {
2087 	case 3:
2088 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2089 		break;
2090 	case 2:
2091 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2092 		break;
2093 	case 1:
2094 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2095 		break;
2096 	default:
2097 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2098 	}
2099 	/* block size depends on vm size and hw setup */
2100 	if (amdgpu_vm_block_size != -1)
2101 		adev->vm_manager.block_size =
2102 			min((unsigned)amdgpu_vm_block_size, max_bits
2103 			    - AMDGPU_GPU_PAGE_SHIFT
2104 			    - 9 * adev->vm_manager.num_level);
2105 	else if (adev->vm_manager.num_level > 1)
2106 		adev->vm_manager.block_size = 9;
2107 	else
2108 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2109 
2110 	if (amdgpu_vm_fragment_size == -1)
2111 		adev->vm_manager.fragment_size = fragment_size_default;
2112 	else
2113 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2114 
2115 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2116 		 vm_size, adev->vm_manager.num_level + 1,
2117 		 adev->vm_manager.block_size,
2118 		 adev->vm_manager.fragment_size);
2119 }
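
/*
 * Worked example for the sizing above, assuming default module parameters
 * and a hypothetical vm_size of 256 GB: max_pfn = 256 << 18 = 2^26 pages
 * of 4K each. Then tmp = roundup_pow_of_two(2^26) = 2^26,
 * fls64(2^26) - 1 = 26 bits of page index and DIV_ROUND_UP(26, 9) - 1 = 2,
 * so num_level = min(max_level, 2); with max_level >= 2 this selects
 * AMDGPU_VM_PDB1 as the root level and a block_size of 9 (512-entry PTs).
 */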
2120 
2121 /**
2122  * amdgpu_vm_wait_idle - wait for the VM to become idle
2123  *
2124  * @vm: VM object to wait for
2125  * @timeout: timeout to wait for VM to become idle
2126  */
2127 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2128 {
2129 	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2130 					DMA_RESV_USAGE_BOOKKEEP,
2131 					true, timeout);
2132 	if (timeout <= 0)
2133 		return timeout;
2134 
2135 	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2136 }
2137 
2138 /**
2139  * amdgpu_vm_init - initialize a vm instance
2140  *
2141  * @adev: amdgpu_device pointer
2142  * @vm: requested vm
2143  * @xcp_id: GPU partition selection id
2144  *
2145  * Init @vm fields.
2146  *
2147  * Returns:
2148  * 0 for success, error for failure.
2149  */
2150 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2151 		   int32_t xcp_id)
2152 {
2153 	struct amdgpu_bo *root_bo;
2154 	struct amdgpu_bo_vm *root;
2155 	int r, i;
2156 
2157 	vm->va = RB_ROOT_CACHED;
2158 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2159 		vm->reserved_vmid[i] = false;
2160 	INIT_LIST_HEAD(&vm->evicted);
2161 	INIT_LIST_HEAD(&vm->relocated);
2162 	INIT_LIST_HEAD(&vm->moved);
2163 	INIT_LIST_HEAD(&vm->idle);
2164 	INIT_LIST_HEAD(&vm->invalidated);
2165 	spin_lock_init(&vm->status_lock);
2166 	INIT_LIST_HEAD(&vm->freed);
2167 	INIT_LIST_HEAD(&vm->done);
2168 	INIT_LIST_HEAD(&vm->pt_freed);
2169 	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2170 	INIT_KFIFO(vm->faults);
2171 
2172 	r = amdgpu_vm_init_entities(adev, vm);
2173 	if (r)
2174 		return r;
2175 
2176 	vm->pte_support_ats = false;
2177 	vm->is_compute_context = false;
2178 
2179 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2180 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2181 
2182 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2183 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2184 	WARN_ONCE((vm->use_cpu_for_update &&
2185 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2186 		  "CPU update of VM recommended only for large BAR system\n");
2187 
2188 	if (vm->use_cpu_for_update)
2189 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2190 	else
2191 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2192 
2193 	vm->last_update = dma_fence_get_stub();
2194 	vm->last_unlocked = dma_fence_get_stub();
2195 	vm->last_tlb_flush = dma_fence_get_stub();
2196 	vm->generation = amdgpu_vm_generation(adev, NULL);
2197 
2198 	mutex_init(&vm->eviction_lock);
2199 	vm->evicting = false;
2200 
2201 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2202 				false, &root, xcp_id);
2203 	if (r)
2204 		goto error_free_delayed;
2205 
2206 	root_bo = amdgpu_bo_ref(&root->bo);
2207 	r = amdgpu_bo_reserve(root_bo, true);
2208 	if (r) {
2209 		amdgpu_bo_unref(&root->shadow);
2210 		amdgpu_bo_unref(&root_bo);
2211 		goto error_free_delayed;
2212 	}
2213 
2214 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2215 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2216 	if (r)
2217 		goto error_free_root;
2218 
2219 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2220 	if (r)
2221 		goto error_free_root;
2222 
2223 	amdgpu_bo_unreserve(vm->root.bo);
2224 	amdgpu_bo_unref(&root_bo);
2225 
2226 	return 0;
2227 
2228 error_free_root:
2229 	amdgpu_vm_pt_free_root(adev, vm);
2230 	amdgpu_bo_unreserve(vm->root.bo);
2231 	amdgpu_bo_unref(&root_bo);
2232 
2233 error_free_delayed:
2234 	dma_fence_put(vm->last_tlb_flush);
2235 	dma_fence_put(vm->last_unlocked);
2236 	amdgpu_vm_fini_entities(vm);
2237 
2238 	return r;
2239 }
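
/*
 * Minimal lifecycle sketch, illustrative only (the real caller is the
 * driver's file-open path, which embeds the VM in its fpriv structure):
 * every successful amdgpu_vm_init() must be balanced by amdgpu_vm_fini().
 * "example_vm_lifecycle" and the xcp_id of 0 are assumptions.
 */
static int __maybe_unused example_vm_lifecycle(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm;
	int r;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	r = amdgpu_vm_init(adev, vm, 0);
	if (!r) {
		/* ... create bo_vas, map them, submit work ... */
		amdgpu_vm_fini(adev, vm);
	}

	kfree(vm);
	return r;
}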
2240 
2241 /**
2242  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2243  *
2244  * @adev: amdgpu_device pointer
2245  * @vm: requested vm
2246  *
2247  * This only works on GFX VMs that don't have any BOs added and no
2248  * page tables allocated yet.
2249  *
2250  * Changes the following VM parameters:
2251  * - use_cpu_for_update
2252  * - pte_support_ats
2253  *
2254  * Reinitializes the page directory to reflect the changed ATS
2255  * setting.
2256  *
2257  * Returns:
2258  * 0 for success, -errno for errors.
2259  */
2260 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2261 {
2262 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2263 	int r;
2264 
2265 	r = amdgpu_bo_reserve(vm->root.bo, true);
2266 	if (r)
2267 		return r;
2268 
2269 	/* Check if PD needs to be reinitialized and do it before
2270 	 * changing any other state, in case it fails.
2271 	 */
2272 	if (pte_support_ats != vm->pte_support_ats) {
2273 		/* Sanity checks */
2274 		if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
2275 			r = -EINVAL;
2276 			goto unreserve_bo;
2277 		}
2278 
2279 		vm->pte_support_ats = pte_support_ats;
2280 		r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
2281 				       false);
2282 		if (r)
2283 			goto unreserve_bo;
2284 	}
2285 
2286 	/* Update VM state */
2287 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2288 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2289 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2290 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2291 	WARN_ONCE((vm->use_cpu_for_update &&
2292 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2293 		  "CPU update of VM recommended only for large BAR system\n");
2294 
2295 	if (vm->use_cpu_for_update) {
2296 		/* Sync with last SDMA update/clear before switching to CPU */
2297 		r = amdgpu_bo_sync_wait(vm->root.bo,
2298 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2299 		if (r)
2300 			goto unreserve_bo;
2301 
2302 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2303 		r = amdgpu_vm_pt_map_tables(adev, vm);
2304 		if (r)
2305 			goto unreserve_bo;
2306 
2307 	} else {
2308 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2309 	}
2310 
2311 	dma_fence_put(vm->last_update);
2312 	vm->last_update = dma_fence_get_stub();
2313 	vm->is_compute_context = true;
2314 
2315 	/* Free the shadow bo for compute VM */
2316 	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2317 
2320 unreserve_bo:
2321 	amdgpu_bo_unreserve(vm->root.bo);
2322 	return r;
2323 }
2324 
2325 /**
2326  * amdgpu_vm_release_compute - release a compute vm
2327  * @adev: amdgpu_device pointer
2328  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2329  *
2330  * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2331  * compute pasid from the vm. Compute should stop using the vm after this call.
2332  */
2333 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2334 {
2335 	amdgpu_vm_set_pasid(adev, vm, 0);
2336 	vm->is_compute_context = false;
2337 }
2338 
2339 /**
2340  * amdgpu_vm_fini - tear down a vm instance
2341  *
2342  * @adev: amdgpu_device pointer
2343  * @vm: requested vm
2344  *
2345  * Tear down @vm.
2346  * Unbind the VM and remove all bos from the vm bo list
2347  */
2348 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2349 {
2350 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2351 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2352 	struct amdgpu_bo *root;
2353 	unsigned long flags;
2354 	int i;
2355 
2356 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2357 
2358 	flush_work(&vm->pt_free_work);
2359 
2360 	root = amdgpu_bo_ref(vm->root.bo);
2361 	amdgpu_bo_reserve(root, true);
2362 	amdgpu_vm_set_pasid(adev, vm, 0);
2363 	dma_fence_wait(vm->last_unlocked, false);
2364 	dma_fence_put(vm->last_unlocked);
2365 	dma_fence_wait(vm->last_tlb_flush, false);
2366 	/* Make sure that all fence callbacks have completed */
2367 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2368 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2369 	dma_fence_put(vm->last_tlb_flush);
2370 
2371 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2372 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2373 			amdgpu_vm_prt_fini(adev, vm);
2374 			prt_fini_needed = false;
2375 		}
2376 
2377 		list_del(&mapping->list);
2378 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2379 	}
2380 
2381 	amdgpu_vm_pt_free_root(adev, vm);
2382 	amdgpu_bo_unreserve(root);
2383 	amdgpu_bo_unref(&root);
2384 	WARN_ON(vm->root.bo);
2385 
2386 	amdgpu_vm_fini_entities(vm);
2387 
2388 	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
2389 		dev_err(adev->dev, "still active bo inside vm\n");
2391 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2392 					     &vm->va.rb_root, rb) {
2393 		/* Don't remove the mapping here, we don't want to trigger a
2394 		 * rebalance and the tree is about to be destroyed anyway.
2395 		 */
2396 		list_del(&mapping->list);
2397 		kfree(mapping);
2398 	}
2399 
2400 	dma_fence_put(vm->last_update);
2401 
2402 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2403 		if (vm->reserved_vmid[i]) {
2404 			amdgpu_vmid_free_reserved(adev, i);
2405 			vm->reserved_vmid[i] = false;
2406 		}
2407 	}
2409 }
2410 
2411 /**
2412  * amdgpu_vm_manager_init - init the VM manager
2413  *
2414  * @adev: amdgpu_device pointer
2415  *
2416  * Initialize the VM manager structures
2417  */
2418 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2419 {
2420 	unsigned i;
2421 
2422 	/* Concurrent flushes are only possible starting with Vega10 and
2423 	 * are broken on Navi10 and Navi14.
2424 	 */
2425 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2426 					      adev->asic_type == CHIP_NAVI10 ||
2427 					      adev->asic_type == CHIP_NAVI14);
2428 	amdgpu_vmid_mgr_init(adev);
2429 
2430 	adev->vm_manager.fence_context =
2431 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2432 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2433 		adev->vm_manager.seqno[i] = 0;
2434 
2435 	spin_lock_init(&adev->vm_manager.prt_lock);
2436 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2437 
2438 	/* If not overridden by the user, compute VM tables are updated by the
2439 	 * CPU only on large BAR systems by default.
2440 	 */
2441 #ifdef CONFIG_X86_64
2442 	if (amdgpu_vm_update_mode == -1) {
2443 		/* For ASICs with VF MMIO access protection,
2444 		 * avoid using the CPU for VM table updates.
2445 		 */
2446 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2447 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2448 			adev->vm_manager.vm_update_mode =
2449 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2450 		else
2451 			adev->vm_manager.vm_update_mode = 0;
2452 	} else
2453 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2454 #else
2455 	adev->vm_manager.vm_update_mode = 0;
2456 #endif
2457 
2458 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2459 }
2460 
2461 /**
2462  * amdgpu_vm_manager_fini - cleanup VM manager
2463  *
2464  * @adev: amdgpu_device pointer
2465  *
2466  * Cleanup the VM manager and free resources.
2467  */
2468 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2469 {
2470 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2471 	xa_destroy(&adev->vm_manager.pasids);
2472 
2473 	amdgpu_vmid_mgr_fini(adev);
2474 }
2475 
2476 /**
2477  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2478  *
2479  * @dev: drm device pointer
2480  * @data: drm_amdgpu_vm
2481  * @filp: drm file pointer
2482  *
2483  * Returns:
2484  * 0 for success, -errno for errors.
2485  */
2486 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2487 {
2488 	union drm_amdgpu_vm *args = data;
2489 	struct amdgpu_device *adev = drm_to_adev(dev);
2490 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2491 
2492 	/* No valid flags defined yet */
2493 	if (args->in.flags)
2494 		return -EINVAL;
2495 
2496 	switch (args->in.op) {
2497 	case AMDGPU_VM_OP_RESERVE_VMID:
2498 		/* We only need to reserve a VMID from the gfxhub */
2499 		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2500 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2501 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2502 		}
2503 
2504 		break;
2505 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2506 		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2507 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2508 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2509 		}
2510 		break;
2511 	default:
2512 		return -EINVAL;
2513 	}
2514 
2515 	return 0;
2516 }
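
/*
 * Userspace view, for illustration only (this cannot live in kernel
 * code): the ioctl above is reached through the DRM fd with the UAPI
 * union from amdgpu_drm.h, e.g.
 *
 *	union drm_amdgpu_vm args = {
 *		.in = { .op = AMDGPU_VM_OP_RESERVE_VMID, .flags = 0 },
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args);
 *
 * Error handling and fd setup are omitted.
 */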
2517 
2518 /**
2519  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2520  *
2521  * @adev: drm device pointer
2522  * @pasid: PASID identifier for VM
2523  * @task_info: task_info to fill.
2524  */
2525 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
2526 			 struct amdgpu_task_info *task_info)
2527 {
2528 	struct amdgpu_vm *vm;
2529 	unsigned long flags;
2530 
2531 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2532 
2533 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2534 	if (vm)
2535 		*task_info = vm->task_info;
2536 
2537 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2538 }
2539 
2540 /**
2541  * amdgpu_vm_set_task_info - Sets VMs task info.
2542  *
2543  * @vm: vm for which to set the info
2544  */
2545 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2546 {
2547 	if (vm->task_info.pid)
2548 		return;
2549 
2550 	vm->task_info.pid = current->pid;
2551 	get_task_comm(vm->task_info.task_name, current);
2552 
2553 	if (current->group_leader->mm != current->mm)
2554 		return;
2555 
2556 	vm->task_info.tgid = current->group_leader->pid;
2557 	get_task_comm(vm->task_info.process_name, current->group_leader);
2558 }
2559 
2560 /**
2561  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2562  * @adev: amdgpu device pointer
2563  * @pasid: PASID of the VM
2564  * @vmid: VMID, only used for GFX 9.4.3.
2565  * @node_id: Node_id received in IH cookie. Only applicable for
2566  *           GFX 9.4.3.
2567  * @addr: Address of the fault
2568  * @write_fault: true if write fault, false if read fault
2569  *
2570  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2571  * shouldn't be reported any more.
2572  */
2573 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2574 			    u32 vmid, u32 node_id, uint64_t addr,
2575 			    bool write_fault)
2576 {
2577 	bool is_compute_context = false;
2578 	struct amdgpu_bo *root;
2579 	unsigned long irqflags;
2580 	uint64_t value, flags;
2581 	struct amdgpu_vm *vm;
2582 	int r;
2583 
2584 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2585 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2586 	if (vm) {
2587 		root = amdgpu_bo_ref(vm->root.bo);
2588 		is_compute_context = vm->is_compute_context;
2589 	} else {
2590 		root = NULL;
2591 	}
2592 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2593 
2594 	if (!root)
2595 		return false;
2596 
2597 	addr /= AMDGPU_GPU_PAGE_SIZE;
2598 
2599 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2600 	    node_id, addr, write_fault)) {
2601 		amdgpu_bo_unref(&root);
2602 		return true;
2603 	}
2604 
2605 	r = amdgpu_bo_reserve(root, true);
2606 	if (r)
2607 		goto error_unref;
2608 
2609 	/* Double check that the VM still exists */
2610 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2611 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2612 	if (vm && vm->root.bo != root)
2613 		vm = NULL;
2614 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2615 	if (!vm)
2616 		goto error_unlock;
2617 
2618 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2619 		AMDGPU_PTE_SYSTEM;
2620 
2621 	if (is_compute_context) {
2622 		/* Intentionally setting invalid PTE flag
2623 		 * combination to force a no-retry-fault
2624 		 */
2625 		flags = AMDGPU_VM_NORETRY_FLAGS;
2626 		value = 0;
2627 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2628 		/* Redirect the access to the dummy page */
2629 		value = adev->dummy_page_addr;
2630 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2631 			AMDGPU_PTE_WRITEABLE;
2632 
2633 	} else {
2634 		/* Let the hw retry silently on the PTE */
2635 		value = 0;
2636 	}
2637 
2638 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2639 	if (r) {
2640 		pr_debug("failed %d to reserve fence slot\n", r);
2641 		goto error_unlock;
2642 	}
2643 
2644 	r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
2645 				   addr, flags, value, 0, NULL, NULL, NULL);
2646 	if (r)
2647 		goto error_unlock;
2648 
2649 	r = amdgpu_vm_update_pdes(adev, vm, true);
2650 
2651 error_unlock:
2652 	amdgpu_bo_unreserve(root);
2653 	if (r < 0)
2654 		DRM_ERROR("Can't handle page fault (%d)\n", r);
2655 
2656 error_unref:
2657 	amdgpu_bo_unref(&root);
2658 
2659 	return false;
2660 }
2661 
2662 #if defined(CONFIG_DEBUG_FS)
2663 /**
2664  * amdgpu_debugfs_vm_bo_info - print BO info for the VM
2665  *
2666  * @vm: Requested VM for printing BO info
2667  * @m: debugfs file
2668  *
2669  * Print BO information in debugfs file for the VM
2670  */
2671 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2672 {
2673 	struct amdgpu_bo_va *bo_va, *tmp;
2674 	u64 total_idle = 0;
2675 	u64 total_evicted = 0;
2676 	u64 total_relocated = 0;
2677 	u64 total_moved = 0;
2678 	u64 total_invalidated = 0;
2679 	u64 total_done = 0;
2680 	unsigned int total_idle_objs = 0;
2681 	unsigned int total_evicted_objs = 0;
2682 	unsigned int total_relocated_objs = 0;
2683 	unsigned int total_moved_objs = 0;
2684 	unsigned int total_invalidated_objs = 0;
2685 	unsigned int total_done_objs = 0;
2686 	unsigned int id = 0;
2687 
2688 	spin_lock(&vm->status_lock);
2689 	seq_puts(m, "\tIdle BOs:\n");
2690 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2691 		if (!bo_va->base.bo)
2692 			continue;
2693 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2694 	}
2695 	total_idle_objs = id;
2696 	id = 0;
2697 
2698 	seq_puts(m, "\tEvicted BOs:\n");
2699 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2700 		if (!bo_va->base.bo)
2701 			continue;
2702 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2703 	}
2704 	total_evicted_objs = id;
2705 	id = 0;
2706 
2707 	seq_puts(m, "\tRelocated BOs:\n");
2708 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2709 		if (!bo_va->base.bo)
2710 			continue;
2711 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2712 	}
2713 	total_relocated_objs = id;
2714 	id = 0;
2715 
2716 	seq_puts(m, "\tMoved BOs:\n");
2717 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2718 		if (!bo_va->base.bo)
2719 			continue;
2720 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2721 	}
2722 	total_moved_objs = id;
2723 	id = 0;
2724 
2725 	seq_puts(m, "\tInvalidated BOs:\n");
2726 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2727 		if (!bo_va->base.bo)
2728 			continue;
2729 		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2730 	}
2731 	total_invalidated_objs = id;
2732 	id = 0;
2733 
2734 	seq_puts(m, "\tDone BOs:\n");
2735 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2736 		if (!bo_va->base.bo)
2737 			continue;
2738 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2739 	}
2740 	spin_unlock(&vm->status_lock);
2741 	total_done_objs = id;
2742 
2743 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
2744 		   total_idle_objs);
2745 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
2746 		   total_evicted_objs);
2747 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
2748 		   total_relocated_objs);
2749 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
2750 		   total_moved_objs);
2751 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2752 		   total_invalidated_objs);
2753 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
2754 		   total_done_objs);
2755 }
2756 #endif
2757