1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/dma-fence-array.h>
30 #include <linux/interval_tree_generic.h>
31 #include <linux/idr.h>
32 #include <linux/dma-buf.h>
33 
34 #include <drm/amdgpu_drm.h>
35 #include <drm/drm_drv.h>
36 #include <drm/ttm/ttm_tt.h>
37 #include <drm/drm_exec.h>
38 #include "amdgpu.h"
39 #include "amdgpu_trace.h"
40 #include "amdgpu_amdkfd.h"
41 #include "amdgpu_gmc.h"
42 #include "amdgpu_xgmi.h"
43 #include "amdgpu_dma_buf.h"
44 #include "amdgpu_res_cursor.h"
45 #include "kfd_svm.h"
46 
47 /**
48  * DOC: GPUVM
49  *
50  * GPUVM is the MMU functionality provided on the GPU.
51  * GPUVM is similar to the legacy GART on older ASICs; however,
52  * rather than there being a single global GART table
53  * for the entire GPU, there can be multiple GPUVM page tables active
54  * at any given time.  The GPUVM page tables can contain a mix of
55  * VRAM pages and system pages (both memory and MMIO) and system pages
56  * can be mapped as snooped (cached system pages) or unsnooped
57  * (uncached system pages).
58  *
59  * Each active GPUVM has an ID associated with it and there is a page table
60  * linked with each VMID.  When executing a command buffer,
61  * the kernel tells the engine what VMID to use for that command
62  * buffer.  VMIDs are allocated dynamically as commands are submitted.
63  * The userspace drivers maintain their own address space and the kernel
64  * sets up their page tables accordingly when they submit their
65  * command buffers and a VMID is assigned.
66  * The hardware supports up to 16 active GPUVMs at any given time.
67  *
68  * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
69  * on the ASIC family.  GPUVM supports RWX attributes on each page as well
70  * as other features such as encryption and caching attributes.
71  *
72  * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
73  * addition to an aperture managed by a page table, VMID 0 also has
74  * several other apertures.  There is an aperture for direct access to VRAM
75  * and there is a legacy AGP aperture which just forwards accesses directly
76  * to the matching system physical addresses (or IOVAs when an IOMMU is
77  * present).  These apertures provide direct access to these memories without
78  * incurring the overhead of a page table.  VMID 0 is used by the kernel
79  * driver for tasks like memory management.
80  *
81  * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
82  * For user applications, each application can have their own unique GPUVM
83  * address space.  The application manages the address space and the kernel
84  * driver manages the GPUVM page tables for each process.  If a GPU client
85  * accesses an invalid page, it will generate a GPU page fault, similar to
86  * accessing an invalid page on a CPU.
87  */
88 
89 #define START(node) ((node)->start)
90 #define LAST(node) ((node)->last)
91 
92 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
93 		     START, LAST, static, amdgpu_vm_it)
94 
95 #undef START
96 #undef LAST
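
/*
 * INTERVAL_TREE_DEFINE() above generates the static helpers
 * amdgpu_vm_it_insert(), amdgpu_vm_it_remove(), amdgpu_vm_it_iter_first()
 * and amdgpu_vm_it_iter_next() used throughout this file to track mappings
 * by GPU address range.  A minimal iteration sketch (illustrative only, not
 * compiled here; handle_mapping() is a hypothetical helper):
 *
 *	struct amdgpu_bo_va_mapping *m;
 *
 *	for (m = amdgpu_vm_it_iter_first(&vm->va, start, last); m;
 *	     m = amdgpu_vm_it_iter_next(m, start, last))
 *		handle_mapping(m);
 */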
97 
98 /**
99  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
100  */
101 struct amdgpu_prt_cb {
102 
103 	/**
104 	 * @adev: amdgpu device
105 	 */
106 	struct amdgpu_device *adev;
107 
108 	/**
109 	 * @cb: callback
110 	 */
111 	struct dma_fence_cb cb;
112 };
113 
114 /**
115  * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
116  */
117 struct amdgpu_vm_tlb_seq_struct {
118 	/**
119 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
120 	 */
121 	struct amdgpu_vm *vm;
122 
123 	/**
124 	 * @cb: callback
125 	 */
126 	struct dma_fence_cb cb;
127 };
128 
129 /**
130  * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
131  *
132  * @adev: amdgpu_device pointer
133  * @vm: amdgpu_vm pointer
134  * @pasid: the pasid the VM is using on this GPU
135  *
136  * Set the pasid this VM is using on this GPU, can also be used to remove the
137  * pasid by passing in zero.
138  *
139  */
140 int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
141 			u32 pasid)
142 {
143 	int r;
144 
145 	if (vm->pasid == pasid)
146 		return 0;
147 
148 	if (vm->pasid) {
149 		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
150 		if (r < 0)
151 			return r;
152 
153 		vm->pasid = 0;
154 	}
155 
156 	if (pasid) {
157 		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
158 					GFP_KERNEL));
159 		if (r < 0)
160 			return r;
161 
162 		vm->pasid = pasid;
163 	}
164 
165 
166 	return 0;
167 }
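
/*
 * Illustrative usage sketch (not part of the driver, error handling elided):
 * bind a PASID to the VM, and later remove the binding again by passing zero:
 *
 *	r = amdgpu_vm_set_pasid(adev, vm, pasid);
 *	...
 *	r = amdgpu_vm_set_pasid(adev, vm, 0);
 */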
168 
169 /**
170  * amdgpu_vm_bo_evicted - vm_bo is evicted
171  *
172  * @vm_bo: vm_bo which is evicted
173  *
174  * State for PDs/PTs and per VM BOs which are not at the location they should
175  * be.
176  */
177 static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
178 {
179 	struct amdgpu_vm *vm = vm_bo->vm;
180 	struct amdgpu_bo *bo = vm_bo->bo;
181 
182 	vm_bo->moved = true;
183 	spin_lock(&vm_bo->vm->status_lock);
184 	if (bo->tbo.type == ttm_bo_type_kernel)
185 		list_move(&vm_bo->vm_status, &vm->evicted);
186 	else
187 		list_move_tail(&vm_bo->vm_status, &vm->evicted);
188 	spin_unlock(&vm_bo->vm->status_lock);
189 }
190 /**
191  * amdgpu_vm_bo_moved - vm_bo is moved
192  *
193  * @vm_bo: vm_bo which is moved
194  *
195  * State for per VM BOs which are moved, but that change is not yet reflected
196  * in the page tables.
197  */
198 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
199 {
200 	spin_lock(&vm_bo->vm->status_lock);
201 	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
202 	spin_unlock(&vm_bo->vm->status_lock);
203 }
204 
205 /**
206  * amdgpu_vm_bo_idle - vm_bo is idle
207  *
208  * @vm_bo: vm_bo which is now idle
209  *
210  * State for PDs/PTs and per VM BOs which have gone through the state machine
211  * and are now idle.
212  */
213 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
214 {
215 	spin_lock(&vm_bo->vm->status_lock);
216 	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
217 	spin_unlock(&vm_bo->vm->status_lock);
218 	vm_bo->moved = false;
219 }
220 
221 /**
222  * amdgpu_vm_bo_invalidated - vm_bo is invalidated
223  *
224  * @vm_bo: vm_bo which is now invalidated
225  *
226  * State for normal BOs which are invalidated and that change not yet reflected
227  * in the PTs.
228  */
229 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
230 {
231 	spin_lock(&vm_bo->vm->status_lock);
232 	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
233 	spin_unlock(&vm_bo->vm->status_lock);
234 }
235 
236 /**
237  * amdgpu_vm_bo_relocated - vm_bo is relocated
238  *
239  * @vm_bo: vm_bo which is relocated
240  *
241  * State for PDs/PTs which need to update their parent PD.
242  * For the root PD, just move to idle state.
243  */
244 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
245 {
246 	if (vm_bo->bo->parent) {
247 		spin_lock(&vm_bo->vm->status_lock);
248 		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
249 		spin_unlock(&vm_bo->vm->status_lock);
250 	} else {
251 		amdgpu_vm_bo_idle(vm_bo);
252 	}
253 }
254 
255 /**
256  * amdgpu_vm_bo_done - vm_bo is done
257  *
258  * @vm_bo: vm_bo which is now done
259  *
260  * State for normal BOs which are invalidated and that change has been updated
261  * in the PTs.
262  */
263 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
264 {
265 	spin_lock(&vm_bo->vm->status_lock);
266 	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
267 	spin_unlock(&vm_bo->vm->status_lock);
268 }
269 
270 /**
271  * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
272  * @vm: the VM which state machine to reset
273  *
274  * Move all vm_bo objects in the VM into a state where they will be updated
275  * again during validation.
276  */
277 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
278 {
279 	struct amdgpu_vm_bo_base *vm_bo, *tmp;
280 
281 	spin_lock(&vm->status_lock);
282 	list_splice_init(&vm->done, &vm->invalidated);
283 	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
284 		vm_bo->moved = true;
285 	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
286 		struct amdgpu_bo *bo = vm_bo->bo;
287 
288 		vm_bo->moved = true;
289 		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
290 			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
291 		else if (bo->parent)
292 			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
293 	}
294 	spin_unlock(&vm->status_lock);
295 }
296 
297 /**
298  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
299  *
300  * @base: base structure for tracking BO usage in a VM
301  * @vm: vm to which bo is to be added
302  * @bo: amdgpu buffer object
303  *
304  * Initialize a bo_va_base structure and add it to the appropriate lists
305  *
306  */
307 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
308 			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
309 {
310 	base->vm = vm;
311 	base->bo = bo;
312 	base->next = NULL;
313 	INIT_LIST_HEAD(&base->vm_status);
314 
315 	if (!bo)
316 		return;
317 	base->next = bo->vm_bo;
318 	bo->vm_bo = base;
319 
320 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
321 		return;
322 
323 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
324 
325 	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
326 	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
327 		amdgpu_vm_bo_relocated(base);
328 	else
329 		amdgpu_vm_bo_idle(base);
330 
331 	if (bo->preferred_domains &
332 	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
333 		return;
334 
335 	/*
336 	 * we checked all the prerequisites, but it looks like this per vm bo
337 	 * is currently evicted. add the bo to the evicted list to make sure it
338 	 * is validated on next vm use to avoid fault.
339 	 */
340 	amdgpu_vm_bo_evicted(base);
341 }
342 
343 /**
344  * amdgpu_vm_lock_pd - lock PD in drm_exec
345  *
346  * @vm: vm providing the BOs
347  * @exec: drm execution context
348  * @num_fences: number of extra fences to reserve
349  *
350  * Lock the VM root PD in the DRM execution context.
351  */
352 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
353 		      unsigned int num_fences)
354 {
355 	/* We need at least two fences for the VM PD/PT updates */
356 	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
357 				    2 + num_fences);
358 }
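
/*
 * amdgpu_vm_lock_pd() is meant to be called from a drm_exec locking loop.
 * A rough sketch under the usual retry pattern (drm_exec setup, the fence
 * count and error handling depend on the caller and are only hinted at here):
 *
 *	drm_exec_until_all_locked(&exec) {
 *		r = amdgpu_vm_lock_pd(vm, &exec, num_fences);
 *		drm_exec_retry_on_contention(&exec);
 *		if (unlikely(r))
 *			goto error;
 *	}
 */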
359 
360 /**
361  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
362  *
363  * @adev: amdgpu device pointer
364  * @vm: vm providing the BOs
365  *
366  * Move all BOs to the end of LRU and remember their positions to put them
367  * together.
368  */
369 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
370 				struct amdgpu_vm *vm)
371 {
372 	spin_lock(&adev->mman.bdev.lru_lock);
373 	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
374 	spin_unlock(&adev->mman.bdev.lru_lock);
375 }
376 
377 /* Create scheduler entities for page table updates */
378 static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
379 				   struct amdgpu_vm *vm)
380 {
381 	int r;
382 
383 	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
384 				  adev->vm_manager.vm_pte_scheds,
385 				  adev->vm_manager.vm_pte_num_scheds, NULL);
386 	if (r)
387 		goto error;
388 
389 	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
390 				     adev->vm_manager.vm_pte_scheds,
391 				     adev->vm_manager.vm_pte_num_scheds, NULL);
392 
393 error:
394 	drm_sched_entity_destroy(&vm->immediate);
395 	return r;
396 }
397 
398 /* Destroy the entities for page table updates again */
399 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
400 {
401 	drm_sched_entity_destroy(&vm->immediate);
402 	drm_sched_entity_destroy(&vm->delayed);
403 }
404 
405 /**
406  * amdgpu_vm_generation - return the page table re-generation counter
407  * @adev: the amdgpu_device
408  * @vm: optional VM to check, might be NULL
409  *
410  * Returns a page table re-generation token to allow checking if submissions
411  * are still valid to use this VM. The VM parameter might be NULL in which case
412  * just the VRAM lost counter will be used.
413  */
414 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
415 {
416 	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
417 
418 	if (!vm)
419 		return result;
420 
421 	result += vm->generation;
422 	/* Add one if the page tables will be re-generated on next CS */
423 	if (drm_sched_entity_error(&vm->delayed))
424 		++result;
425 
426 	return result;
427 }
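
/*
 * The returned token packs the VRAM lost counter into the upper 32 bits and
 * the per VM generation into the lower 32 bits, so callers can detect stale
 * work with a plain compare.  Illustrative sketch only:
 *
 *	uint64_t gen = amdgpu_vm_generation(adev, vm);
 *	...
 *	bool stale = amdgpu_vm_generation(adev, vm) != gen;
 */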
428 
429 /**
430  * amdgpu_vm_validate_pt_bos - validate the page table BOs
431  *
432  * @adev: amdgpu device pointer
433  * @vm: vm providing the BOs
434  * @validate: callback to do the validation
435  * @param: parameter for the validation callback
436  *
437  * Validate the page table BOs on command submission if necessary.
438  *
439  * Returns:
440  * Validation result.
441  */
442 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
443 			      int (*validate)(void *p, struct amdgpu_bo *bo),
444 			      void *param)
445 {
446 	struct amdgpu_vm_bo_base *bo_base;
447 	struct amdgpu_bo *shadow;
448 	struct amdgpu_bo *bo;
449 	int r;
450 
451 	if (drm_sched_entity_error(&vm->delayed)) {
452 		++vm->generation;
453 		amdgpu_vm_bo_reset_state_machine(vm);
454 		amdgpu_vm_fini_entities(vm);
455 		r = amdgpu_vm_init_entities(adev, vm);
456 		if (r)
457 			return r;
458 	}
459 
460 	spin_lock(&vm->status_lock);
461 	while (!list_empty(&vm->evicted)) {
462 		bo_base = list_first_entry(&vm->evicted,
463 					   struct amdgpu_vm_bo_base,
464 					   vm_status);
465 		spin_unlock(&vm->status_lock);
466 
467 		bo = bo_base->bo;
468 		shadow = amdgpu_bo_shadowed(bo);
469 
470 		r = validate(param, bo);
471 		if (r)
472 			return r;
473 		if (shadow) {
474 			r = validate(param, shadow);
475 			if (r)
476 				return r;
477 		}
478 
479 		if (bo->tbo.type != ttm_bo_type_kernel) {
480 			amdgpu_vm_bo_moved(bo_base);
481 		} else {
482 			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
483 			amdgpu_vm_bo_relocated(bo_base);
484 		}
485 		spin_lock(&vm->status_lock);
486 	}
487 	spin_unlock(&vm->status_lock);
488 
489 	amdgpu_vm_eviction_lock(vm);
490 	vm->evicting = false;
491 	amdgpu_vm_eviction_unlock(vm);
492 
493 	return 0;
494 }
495 
496 /**
497  * amdgpu_vm_ready - check VM is ready for updates
498  *
499  * @vm: VM to check
500  *
501  * Check if all VM PDs/PTs are ready for updates
502  *
503  * Returns:
504  * True if VM is not evicting and there are no evicted PDs/PTs left.
505  */
506 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
507 {
508 	bool empty;
509 	bool ret;
510 
511 	amdgpu_vm_eviction_lock(vm);
512 	ret = !vm->evicting;
513 	amdgpu_vm_eviction_unlock(vm);
514 
515 	spin_lock(&vm->status_lock);
516 	empty = list_empty(&vm->evicted);
517 	spin_unlock(&vm->status_lock);
518 
519 	return ret && empty;
520 }
521 
522 /**
523  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
524  *
525  * @adev: amdgpu_device pointer
526  */
527 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
528 {
529 	const struct amdgpu_ip_block *ip_block;
530 	bool has_compute_vm_bug;
531 	struct amdgpu_ring *ring;
532 	int i;
533 
534 	has_compute_vm_bug = false;
535 
536 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
537 	if (ip_block) {
538 		/* Compute has a VM bug for GFX version < 7.
539 		   Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
540 		if (ip_block->version->major <= 7)
541 			has_compute_vm_bug = true;
542 		else if (ip_block->version->major == 8)
543 			if (adev->gfx.mec_fw_version < 673)
544 				has_compute_vm_bug = true;
545 	}
546 
547 	for (i = 0; i < adev->num_rings; i++) {
548 		ring = adev->rings[i];
549 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
550 			/* only compute rings */
551 			ring->has_compute_vm_bug = has_compute_vm_bug;
552 		else
553 			ring->has_compute_vm_bug = false;
554 	}
555 }
556 
557 /**
558  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
559  *
560  * @ring: ring on which the job will be submitted
561  * @job: job to submit
562  *
563  * Returns:
564  * True if sync is needed.
565  */
566 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
567 				  struct amdgpu_job *job)
568 {
569 	struct amdgpu_device *adev = ring->adev;
570 	unsigned vmhub = ring->vm_hub;
571 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
572 
573 	if (job->vmid == 0)
574 		return false;
575 
576 	if (job->vm_needs_flush || ring->has_compute_vm_bug)
577 		return true;
578 
579 	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
580 		return true;
581 
582 	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
583 		return true;
584 
585 	return false;
586 }
587 
588 /**
589  * amdgpu_vm_flush - hardware flush the vm
590  *
591  * @ring: ring to use for flush
592  * @job:  related job
593  * @need_pipe_sync: is pipe sync needed
594  *
595  * Emit a VM flush when it is necessary.
596  *
597  * Returns:
598  * 0 on success, errno otherwise.
599  */
600 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
601 		    bool need_pipe_sync)
602 {
603 	struct amdgpu_device *adev = ring->adev;
604 	unsigned vmhub = ring->vm_hub;
605 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
606 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
607 	bool spm_update_needed = job->spm_update_needed;
608 	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
609 		job->gds_switch_needed;
610 	bool vm_flush_needed = job->vm_needs_flush;
611 	struct dma_fence *fence = NULL;
612 	bool pasid_mapping_needed = false;
613 	unsigned patch_offset = 0;
614 	int r;
615 
616 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
617 		gds_switch_needed = true;
618 		vm_flush_needed = true;
619 		pasid_mapping_needed = true;
620 		spm_update_needed = true;
621 	}
622 
623 	mutex_lock(&id_mgr->lock);
624 	if (id->pasid != job->pasid || !id->pasid_mapping ||
625 	    !dma_fence_is_signaled(id->pasid_mapping))
626 		pasid_mapping_needed = true;
627 	mutex_unlock(&id_mgr->lock);
628 
629 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
630 	vm_flush_needed &= !!ring->funcs->emit_vm_flush  &&
631 			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
632 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
633 		ring->funcs->emit_wreg;
634 
635 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
636 		return 0;
637 
638 	amdgpu_ring_ib_begin(ring);
639 	if (ring->funcs->init_cond_exec)
640 		patch_offset = amdgpu_ring_init_cond_exec(ring);
641 
642 	if (need_pipe_sync)
643 		amdgpu_ring_emit_pipeline_sync(ring);
644 
645 	if (vm_flush_needed) {
646 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
647 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
648 	}
649 
650 	if (pasid_mapping_needed)
651 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
652 
653 	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
654 		adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
655 
656 	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
657 	    gds_switch_needed) {
658 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
659 					    job->gds_size, job->gws_base,
660 					    job->gws_size, job->oa_base,
661 					    job->oa_size);
662 	}
663 
664 	if (vm_flush_needed || pasid_mapping_needed) {
665 		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
666 		if (r)
667 			return r;
668 	}
669 
670 	if (vm_flush_needed) {
671 		mutex_lock(&id_mgr->lock);
672 		dma_fence_put(id->last_flush);
673 		id->last_flush = dma_fence_get(fence);
674 		id->current_gpu_reset_count =
675 			atomic_read(&adev->gpu_reset_counter);
676 		mutex_unlock(&id_mgr->lock);
677 	}
678 
679 	if (pasid_mapping_needed) {
680 		mutex_lock(&id_mgr->lock);
681 		id->pasid = job->pasid;
682 		dma_fence_put(id->pasid_mapping);
683 		id->pasid_mapping = dma_fence_get(fence);
684 		mutex_unlock(&id_mgr->lock);
685 	}
686 	dma_fence_put(fence);
687 
688 	if (ring->funcs->patch_cond_exec)
689 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
690 
691 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
692 	if (ring->funcs->emit_switch_buffer) {
693 		amdgpu_ring_emit_switch_buffer(ring);
694 		amdgpu_ring_emit_switch_buffer(ring);
695 	}
696 	amdgpu_ring_ib_end(ring);
697 	return 0;
698 }
699 
700 /**
701  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
702  *
703  * @vm: requested vm
704  * @bo: requested buffer object
705  *
706  * Find @bo inside the requested vm.
707  * Search inside the @bo's vm list for the requested vm.
708  * Returns the found bo_va or NULL if none is found
709  *
710  * Object has to be reserved!
711  *
712  * Returns:
713  * Found bo_va or NULL.
714  */
715 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
716 				       struct amdgpu_bo *bo)
717 {
718 	struct amdgpu_vm_bo_base *base;
719 
720 	for (base = bo->vm_bo; base; base = base->next) {
721 		if (base->vm != vm)
722 			continue;
723 
724 		return container_of(base, struct amdgpu_bo_va, base);
725 	}
726 	return NULL;
727 }
728 
729 /**
730  * amdgpu_vm_map_gart - Resolve gart mapping of addr
731  *
732  * @pages_addr: optional DMA address to use for lookup
733  * @addr: the unmapped addr
734  *
735  * Look up the physical address of the page that the pte resolves
736  * to.
737  *
738  * Returns:
739  * The pointer for the page table entry.
740  */
741 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
742 {
743 	uint64_t result;
744 
745 	/* page table offset */
746 	result = pages_addr[addr >> PAGE_SHIFT];
747 
748 	/* in case cpu page size != gpu page size */
749 	result |= addr & (~PAGE_MASK);
750 
751 	result &= 0xFFFFFFFFFFFFF000ULL;
752 
753 	return result;
754 }
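
/*
 * Worked example of the lookup above (illustrative values): with 64K CPU
 * pages (PAGE_SHIFT == 16) and 4K GPU pages, addr >> 16 selects the DMA
 * address of the backing CPU page, the low 16 bits of addr are kept as the
 * offset inside that page, and the final mask rounds the result down to a
 * 4K aligned GPU page address.
 */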
755 
756 /**
757  * amdgpu_vm_update_pdes - make sure that all directories are valid
758  *
759  * @adev: amdgpu_device pointer
760  * @vm: requested vm
761  * @immediate: submit immediately to the paging queue
762  *
763  * Makes sure all directories are up to date.
764  *
765  * Returns:
766  * 0 for success, error for failure.
767  */
768 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
769 			  struct amdgpu_vm *vm, bool immediate)
770 {
771 	struct amdgpu_vm_update_params params;
772 	struct amdgpu_vm_bo_base *entry;
773 	bool flush_tlb_needed = false;
774 	LIST_HEAD(relocated);
775 	int r, idx;
776 
777 	spin_lock(&vm->status_lock);
778 	list_splice_init(&vm->relocated, &relocated);
779 	spin_unlock(&vm->status_lock);
780 
781 	if (list_empty(&relocated))
782 		return 0;
783 
784 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
785 		return -ENODEV;
786 
787 	memset(&params, 0, sizeof(params));
788 	params.adev = adev;
789 	params.vm = vm;
790 	params.immediate = immediate;
791 
792 	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
793 	if (r)
794 		goto error;
795 
796 	list_for_each_entry(entry, &relocated, vm_status) {
797 		/* vm_flush_needed after updating moved PDEs */
798 		flush_tlb_needed |= entry->moved;
799 
800 		r = amdgpu_vm_pde_update(&params, entry);
801 		if (r)
802 			goto error;
803 	}
804 
805 	r = vm->update_funcs->commit(&params, &vm->last_update);
806 	if (r)
807 		goto error;
808 
809 	if (flush_tlb_needed)
810 		atomic64_inc(&vm->tlb_seq);
811 
812 	while (!list_empty(&relocated)) {
813 		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
814 					 vm_status);
815 		amdgpu_vm_bo_idle(entry);
816 	}
817 
818 error:
819 	drm_dev_exit(idx);
820 	return r;
821 }
822 
823 /**
824  * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
825  * @fence: unused
826  * @cb: the callback structure
827  *
828  * Increments the tlb sequence to make sure that future CS execute a VM flush.
829  */
830 static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
831 				 struct dma_fence_cb *cb)
832 {
833 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
834 
835 	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
836 	atomic64_inc(&tlb_cb->vm->tlb_seq);
837 	kfree(tlb_cb);
838 }
839 
840 /**
841  * amdgpu_vm_update_range - update a range in the vm page table
842  *
843  * @adev: amdgpu_device pointer to use for commands
844  * @vm: the VM to update the range
845  * @immediate: immediate submission in a page fault
846  * @unlocked: unlocked invalidation during MM callback
847  * @flush_tlb: trigger tlb invalidation after update completed
848  * @resv: fences we need to sync to
849  * @start: start of mapped range
850  * @last: last mapped entry
851  * @flags: flags for the entries
852  * @offset: offset into nodes and pages_addr
853  * @vram_base: base for vram mappings
854  * @res: ttm_resource to map
855  * @pages_addr: DMA addresses to use for mapping
856  * @fence: optional resulting fence
857  *
858  * Fill in the page table entries between @start and @last.
859  *
860  * Returns:
861  * 0 for success, negative error code for failure.
862  */
863 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
864 			   bool immediate, bool unlocked, bool flush_tlb,
865 			   struct dma_resv *resv, uint64_t start, uint64_t last,
866 			   uint64_t flags, uint64_t offset, uint64_t vram_base,
867 			   struct ttm_resource *res, dma_addr_t *pages_addr,
868 			   struct dma_fence **fence)
869 {
870 	struct amdgpu_vm_update_params params;
871 	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
872 	struct amdgpu_res_cursor cursor;
873 	enum amdgpu_sync_mode sync_mode;
874 	int r, idx;
875 
876 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
877 		return -ENODEV;
878 
879 	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
880 	if (!tlb_cb) {
881 		r = -ENOMEM;
882 		goto error_unlock;
883 	}
884 
885 	/* On Vega20 with XGMI, PTEs get inadvertently cached in the L2 texture
886 	 * cache, so always use a heavy-weight TLB flush.
887 	 */
888 	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
889 		     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);
890 
891 	/*
892 	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
893 	 */
894 	flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0);
895 
896 	memset(&params, 0, sizeof(params));
897 	params.adev = adev;
898 	params.vm = vm;
899 	params.immediate = immediate;
900 	params.pages_addr = pages_addr;
901 	params.unlocked = unlocked;
902 
903 	/* Implicitly sync to command submissions in the same VM before
904 	 * unmapping. Sync to moving fences before mapping.
905 	 */
906 	if (!(flags & AMDGPU_PTE_VALID))
907 		sync_mode = AMDGPU_SYNC_EQ_OWNER;
908 	else
909 		sync_mode = AMDGPU_SYNC_EXPLICIT;
910 
911 	amdgpu_vm_eviction_lock(vm);
912 	if (vm->evicting) {
913 		r = -EBUSY;
914 		goto error_free;
915 	}
916 
917 	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
918 		struct dma_fence *tmp = dma_fence_get_stub();
919 
920 		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
921 		swap(vm->last_unlocked, tmp);
922 		dma_fence_put(tmp);
923 	}
924 
925 	r = vm->update_funcs->prepare(&params, resv, sync_mode);
926 	if (r)
927 		goto error_free;
928 
929 	amdgpu_res_first(pages_addr ? NULL : res, offset,
930 			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
931 	while (cursor.remaining) {
932 		uint64_t tmp, num_entries, addr;
933 
934 		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
935 		if (pages_addr) {
936 			bool contiguous = true;
937 
938 			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
939 				uint64_t pfn = cursor.start >> PAGE_SHIFT;
940 				uint64_t count;
941 
942 				contiguous = pages_addr[pfn + 1] ==
943 					pages_addr[pfn] + PAGE_SIZE;
944 
945 				tmp = num_entries /
946 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
947 				for (count = 2; count < tmp; ++count) {
948 					uint64_t idx = pfn + count;
949 
950 					if (contiguous != (pages_addr[idx] ==
951 					    pages_addr[idx - 1] + PAGE_SIZE))
952 						break;
953 				}
954 				if (!contiguous)
955 					count--;
956 				num_entries = count *
957 					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
958 			}
959 
960 			if (!contiguous) {
961 				addr = cursor.start;
962 				params.pages_addr = pages_addr;
963 			} else {
964 				addr = pages_addr[cursor.start >> PAGE_SHIFT];
965 				params.pages_addr = NULL;
966 			}
967 
968 		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
969 			addr = vram_base + cursor.start;
970 		} else {
971 			addr = 0;
972 		}
973 
974 		tmp = start + num_entries;
975 		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
976 		if (r)
977 			goto error_free;
978 
979 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
980 		start = tmp;
981 	}
982 
983 	r = vm->update_funcs->commit(&params, fence);
984 
985 	if (flush_tlb || params.table_freed) {
986 		tlb_cb->vm = vm;
987 		if (fence && *fence &&
988 		    !dma_fence_add_callback(*fence, &tlb_cb->cb,
989 					   amdgpu_vm_tlb_seq_cb)) {
990 			dma_fence_put(vm->last_tlb_flush);
991 			vm->last_tlb_flush = dma_fence_get(*fence);
992 		} else {
993 			amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
994 		}
995 		tlb_cb = NULL;
996 	}
997 
998 error_free:
999 	kfree(tlb_cb);
1000 
1001 error_unlock:
1002 	amdgpu_vm_eviction_unlock(vm);
1003 	drm_dev_exit(idx);
1004 	return r;
1005 }
1006 
1007 static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
1008 				    struct amdgpu_mem_stats *stats)
1009 {
1010 	struct amdgpu_vm *vm = bo_va->base.vm;
1011 	struct amdgpu_bo *bo = bo_va->base.bo;
1012 
1013 	if (!bo)
1014 		return;
1015 
1016 	/*
1017 	 * For now ignore BOs which are currently locked and potentially
1018 	 * changing their location.
1019 	 */
1020 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
1021 	    !dma_resv_trylock(bo->tbo.base.resv))
1022 		return;
1023 
1024 	amdgpu_bo_get_memory(bo, stats);
1025 	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
1026 	    dma_resv_unlock(bo->tbo.base.resv);
1027 }
1028 
1029 void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
1030 			  struct amdgpu_mem_stats *stats)
1031 {
1032 	struct amdgpu_bo_va *bo_va, *tmp;
1033 
1034 	spin_lock(&vm->status_lock);
1035 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
1036 		amdgpu_vm_bo_get_memory(bo_va, stats);
1037 
1038 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
1039 		amdgpu_vm_bo_get_memory(bo_va, stats);
1040 
1041 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
1042 		amdgpu_vm_bo_get_memory(bo_va, stats);
1043 
1044 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
1045 		amdgpu_vm_bo_get_memory(bo_va, stats);
1046 
1047 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
1048 		amdgpu_vm_bo_get_memory(bo_va, stats);
1049 
1050 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
1051 		amdgpu_vm_bo_get_memory(bo_va, stats);
1052 	spin_unlock(&vm->status_lock);
1053 }
1054 
1055 /**
1056  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1057  *
1058  * @adev: amdgpu_device pointer
1059  * @bo_va: requested BO and VM object
1060  * @clear: if true clear the entries
1061  *
1062  * Fill in the page table entries for @bo_va.
1063  *
1064  * Returns:
1065  * 0 for success, negative error code for failure.
1066  */
1067 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1068 			bool clear)
1069 {
1070 	struct amdgpu_bo *bo = bo_va->base.bo;
1071 	struct amdgpu_vm *vm = bo_va->base.vm;
1072 	struct amdgpu_bo_va_mapping *mapping;
1073 	dma_addr_t *pages_addr = NULL;
1074 	struct ttm_resource *mem;
1075 	struct dma_fence **last_update;
1076 	bool flush_tlb = clear;
1077 	struct dma_resv *resv;
1078 	uint64_t vram_base;
1079 	uint64_t flags;
1080 	int r;
1081 
1082 	if (clear || !bo) {
1083 		mem = NULL;
1084 		resv = vm->root.bo->tbo.base.resv;
1085 	} else {
1086 		struct drm_gem_object *obj = &bo->tbo.base;
1087 
1088 		resv = bo->tbo.base.resv;
1089 		if (obj->import_attach && bo_va->is_xgmi) {
1090 			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1091 			struct drm_gem_object *gobj = dma_buf->priv;
1092 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
1093 
1094 			if (abo->tbo.resource &&
1095 			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
1096 				bo = gem_to_amdgpu_bo(gobj);
1097 		}
1098 		mem = bo->tbo.resource;
1099 		if (mem && (mem->mem_type == TTM_PL_TT ||
1100 			    mem->mem_type == AMDGPU_PL_PREEMPT))
1101 			pages_addr = bo->tbo.ttm->dma_address;
1102 	}
1103 
1104 	if (bo) {
1105 		struct amdgpu_device *bo_adev;
1106 
1107 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1108 
1109 		if (amdgpu_bo_encrypted(bo))
1110 			flags |= AMDGPU_PTE_TMZ;
1111 
1112 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1113 		vram_base = bo_adev->vm_manager.vram_base_offset;
1114 	} else {
1115 		flags = 0x0;
1116 		vram_base = 0;
1117 	}
1118 
1119 	if (clear || (bo && bo->tbo.base.resv ==
1120 		      vm->root.bo->tbo.base.resv))
1121 		last_update = &vm->last_update;
1122 	else
1123 		last_update = &bo_va->last_pt_update;
1124 
1125 	if (!clear && bo_va->base.moved) {
1126 		flush_tlb = true;
1127 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1128 
1129 	} else if (bo_va->cleared != clear) {
1130 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1131 	}
1132 
1133 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1134 		uint64_t update_flags = flags;
1135 
1136 		/* Normally bo_va->flags only contains the READABLE and WRITEABLE
1137 		 * bits here, but filter the flags again just in case.
1138 		 */
1139 		if (!(mapping->flags & AMDGPU_PTE_READABLE))
1140 			update_flags &= ~AMDGPU_PTE_READABLE;
1141 		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1142 			update_flags &= ~AMDGPU_PTE_WRITEABLE;
1143 
1144 		/* Apply ASIC specific mapping flags */
1145 		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
1146 
1147 		trace_amdgpu_vm_bo_update(mapping);
1148 
1149 		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
1150 					   resv, mapping->start, mapping->last,
1151 					   update_flags, mapping->offset,
1152 					   vram_base, mem, pages_addr,
1153 					   last_update);
1154 		if (r)
1155 			return r;
1156 	}
1157 
1158 	/* If the BO is not in its preferred location add it back to
1159 	 * the evicted list so that it gets validated again on the
1160 	 * next command submission.
1161 	 */
1162 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1163 		uint32_t mem_type = bo->tbo.resource->mem_type;
1164 
1165 		if (!(bo->preferred_domains &
1166 		      amdgpu_mem_type_to_domain(mem_type)))
1167 			amdgpu_vm_bo_evicted(&bo_va->base);
1168 		else
1169 			amdgpu_vm_bo_idle(&bo_va->base);
1170 	} else {
1171 		amdgpu_vm_bo_done(&bo_va->base);
1172 	}
1173 
1174 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1175 	bo_va->cleared = clear;
1176 	bo_va->base.moved = false;
1177 
1178 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1179 		list_for_each_entry(mapping, &bo_va->valids, list)
1180 			trace_amdgpu_vm_bo_mapping(mapping);
1181 	}
1182 
1183 	return 0;
1184 }
1185 
1186 /**
1187  * amdgpu_vm_update_prt_state - update the global PRT state
1188  *
1189  * @adev: amdgpu_device pointer
1190  */
1191 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1192 {
1193 	unsigned long flags;
1194 	bool enable;
1195 
1196 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1197 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1198 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1199 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1200 }
1201 
1202 /**
1203  * amdgpu_vm_prt_get - add a PRT user
1204  *
1205  * @adev: amdgpu_device pointer
1206  */
1207 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1208 {
1209 	if (!adev->gmc.gmc_funcs->set_prt)
1210 		return;
1211 
1212 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1213 		amdgpu_vm_update_prt_state(adev);
1214 }
1215 
1216 /**
1217  * amdgpu_vm_prt_put - drop a PRT user
1218  *
1219  * @adev: amdgpu_device pointer
1220  */
1221 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1222 {
1223 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1224 		amdgpu_vm_update_prt_state(adev);
1225 }
1226 
1227 /**
1228  * amdgpu_vm_prt_cb - callback for updating the PRT status
1229  *
1230  * @fence: fence for the callback
1231  * @_cb: the callback function
1232  */
1233 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1234 {
1235 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1236 
1237 	amdgpu_vm_prt_put(cb->adev);
1238 	kfree(cb);
1239 }
1240 
1241 /**
1242  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1243  *
1244  * @adev: amdgpu_device pointer
1245  * @fence: fence for the callback
1246  */
1247 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1248 				 struct dma_fence *fence)
1249 {
1250 	struct amdgpu_prt_cb *cb;
1251 
1252 	if (!adev->gmc.gmc_funcs->set_prt)
1253 		return;
1254 
1255 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1256 	if (!cb) {
1257 		/* Last resort when we are OOM */
1258 		if (fence)
1259 			dma_fence_wait(fence, false);
1260 
1261 		amdgpu_vm_prt_put(adev);
1262 	} else {
1263 		cb->adev = adev;
1264 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1265 						     amdgpu_vm_prt_cb))
1266 			amdgpu_vm_prt_cb(fence, &cb->cb);
1267 	}
1268 }
1269 
1270 /**
1271  * amdgpu_vm_free_mapping - free a mapping
1272  *
1273  * @adev: amdgpu_device pointer
1274  * @vm: requested vm
1275  * @mapping: mapping to be freed
1276  * @fence: fence of the unmap operation
1277  *
1278  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1279  */
1280 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1281 				   struct amdgpu_vm *vm,
1282 				   struct amdgpu_bo_va_mapping *mapping,
1283 				   struct dma_fence *fence)
1284 {
1285 	if (mapping->flags & AMDGPU_PTE_PRT)
1286 		amdgpu_vm_add_prt_cb(adev, fence);
1287 	kfree(mapping);
1288 }
1289 
1290 /**
1291  * amdgpu_vm_prt_fini - finish all prt mappings
1292  *
1293  * @adev: amdgpu_device pointer
1294  * @vm: requested vm
1295  *
1296  * Register a cleanup callback to disable PRT support after VM dies.
1297  */
1298 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1299 {
1300 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1301 	struct dma_resv_iter cursor;
1302 	struct dma_fence *fence;
1303 
1304 	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
1305 		/* Add a callback for each fence in the reservation object */
1306 		amdgpu_vm_prt_get(adev);
1307 		amdgpu_vm_add_prt_cb(adev, fence);
1308 	}
1309 }
1310 
1311 /**
1312  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1313  *
1314  * @adev: amdgpu_device pointer
1315  * @vm: requested vm
1316  * @fence: optional resulting fence (unchanged if no work needed to be done
1317  * or if an error occurred)
1318  *
1319  * Make sure all freed BOs are cleared in the PT.
1320  * PTs have to be reserved and mutex must be locked!
1321  *
1322  * Returns:
1323  * 0 for success.
1324  *
1325  */
1326 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1327 			  struct amdgpu_vm *vm,
1328 			  struct dma_fence **fence)
1329 {
1330 	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1331 	struct amdgpu_bo_va_mapping *mapping;
1332 	uint64_t init_pte_value = 0;
1333 	struct dma_fence *f = NULL;
1334 	int r;
1335 
1336 	while (!list_empty(&vm->freed)) {
1337 		mapping = list_first_entry(&vm->freed,
1338 			struct amdgpu_bo_va_mapping, list);
1339 		list_del(&mapping->list);
1340 
1341 		if (vm->pte_support_ats &&
1342 		    mapping->start < AMDGPU_GMC_HOLE_START)
1343 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1344 
1345 		r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
1346 					   mapping->start, mapping->last,
1347 					   init_pte_value, 0, 0, NULL, NULL,
1348 					   &f);
1349 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1350 		if (r) {
1351 			dma_fence_put(f);
1352 			return r;
1353 		}
1354 	}
1355 
1356 	if (fence && f) {
1357 		dma_fence_put(*fence);
1358 		*fence = f;
1359 	} else {
1360 		dma_fence_put(f);
1361 	}
1362 
1363 	return 0;
1364 
1365 }
1366 
1367 /**
1368  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1369  *
1370  * @adev: amdgpu_device pointer
1371  * @vm: requested vm
1372  *
1373  * Make sure all BOs which are moved are updated in the PTs.
1374  *
1375  * Returns:
1376  * 0 for success.
1377  *
1378  * PTs have to be reserved!
1379  */
1380 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1381 			   struct amdgpu_vm *vm)
1382 {
1383 	struct amdgpu_bo_va *bo_va;
1384 	struct dma_resv *resv;
1385 	bool clear;
1386 	int r;
1387 
1388 	spin_lock(&vm->status_lock);
1389 	while (!list_empty(&vm->moved)) {
1390 		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1391 					 base.vm_status);
1392 		spin_unlock(&vm->status_lock);
1393 
1394 		/* Per VM BOs never need to be cleared in the page tables */
1395 		r = amdgpu_vm_bo_update(adev, bo_va, false);
1396 		if (r)
1397 			return r;
1398 		spin_lock(&vm->status_lock);
1399 	}
1400 
1401 	while (!list_empty(&vm->invalidated)) {
1402 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1403 					 base.vm_status);
1404 		resv = bo_va->base.bo->tbo.base.resv;
1405 		spin_unlock(&vm->status_lock);
1406 
1407 		/* Try to reserve the BO to avoid clearing its ptes */
1408 		if (!amdgpu_vm_debug && dma_resv_trylock(resv))
1409 			clear = false;
1410 		/* Somebody else is using the BO right now */
1411 		else
1412 			clear = true;
1413 
1414 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1415 		if (r)
1416 			return r;
1417 
1418 		if (!clear)
1419 			dma_resv_unlock(resv);
1420 		spin_lock(&vm->status_lock);
1421 	}
1422 	spin_unlock(&vm->status_lock);
1423 
1424 	return 0;
1425 }
1426 
1427 /**
1428  * amdgpu_vm_bo_add - add a bo to a specific vm
1429  *
1430  * @adev: amdgpu_device pointer
1431  * @vm: requested vm
1432  * @bo: amdgpu buffer object
1433  *
1434  * Add @bo into the requested vm.
1435  * Add @bo to the list of bos associated with the vm
1436  *
1437  * Returns:
1438  * Newly added bo_va or NULL for failure
1439  *
1440  * Object has to be reserved!
1441  */
1442 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1443 				      struct amdgpu_vm *vm,
1444 				      struct amdgpu_bo *bo)
1445 {
1446 	struct amdgpu_bo_va *bo_va;
1447 
1448 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1449 	if (bo_va == NULL) {
1450 		return NULL;
1451 	}
1452 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1453 
1454 	bo_va->ref_count = 1;
1455 	bo_va->last_pt_update = dma_fence_get_stub();
1456 	INIT_LIST_HEAD(&bo_va->valids);
1457 	INIT_LIST_HEAD(&bo_va->invalids);
1458 
1459 	if (!bo)
1460 		return bo_va;
1461 
1462 	dma_resv_assert_held(bo->tbo.base.resv);
1463 	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
1464 		bo_va->is_xgmi = true;
1465 		/* Power up XGMI if it can be potentially used */
1466 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
1467 	}
1468 
1469 	return bo_va;
1470 }
1471 
1472 
1473 /**
1474  * amdgpu_vm_bo_insert_map - insert a new mapping
1475  *
1476  * @adev: amdgpu_device pointer
1477  * @bo_va: bo_va to store the address
1478  * @mapping: the mapping to insert
1479  *
1480  * Insert a new mapping into all structures.
1481  */
1482 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1483 				    struct amdgpu_bo_va *bo_va,
1484 				    struct amdgpu_bo_va_mapping *mapping)
1485 {
1486 	struct amdgpu_vm *vm = bo_va->base.vm;
1487 	struct amdgpu_bo *bo = bo_va->base.bo;
1488 
1489 	mapping->bo_va = bo_va;
1490 	list_add(&mapping->list, &bo_va->invalids);
1491 	amdgpu_vm_it_insert(mapping, &vm->va);
1492 
1493 	if (mapping->flags & AMDGPU_PTE_PRT)
1494 		amdgpu_vm_prt_get(adev);
1495 
1496 	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1497 	    !bo_va->base.moved) {
1498 		amdgpu_vm_bo_moved(&bo_va->base);
1499 	}
1500 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1501 }
1502 
1503 /* Validate operation parameters to prevent potential abuse */
1504 static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
1505 					  struct amdgpu_bo *bo,
1506 					  uint64_t saddr,
1507 					  uint64_t offset,
1508 					  uint64_t size)
1509 {
1510 	uint64_t tmp, lpfn;
1511 
1512 	if (saddr & AMDGPU_GPU_PAGE_MASK
1513 	    || offset & AMDGPU_GPU_PAGE_MASK
1514 	    || size & AMDGPU_GPU_PAGE_MASK)
1515 		return -EINVAL;
1516 
1517 	if (check_add_overflow(saddr, size, &tmp)
1518 	    || check_add_overflow(offset, size, &tmp)
1519 	    || size == 0 /* which also leads to end < begin */)
1520 		return -EINVAL;
1521 
1522 	/* make sure object fit at this offset */
1523 	if (bo && offset + size > amdgpu_bo_size(bo))
1524 		return -EINVAL;
1525 
1526 	/* Ensure last pfn not exceed max_pfn */
1527 	lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1528 	if (lpfn >= adev->vm_manager.max_pfn)
1529 		return -EINVAL;
1530 
1531 	return 0;
1532 }
1533 
1534 /**
1535  * amdgpu_vm_bo_map - map bo inside a vm
1536  *
1537  * @adev: amdgpu_device pointer
1538  * @bo_va: bo_va to store the address
1539  * @saddr: where to map the BO
1540  * @offset: requested offset in the BO
1541  * @size: BO size in bytes
1542  * @flags: attributes of pages (read/write/valid/etc.)
1543  *
1544  * Add a mapping of the BO at the specified addr into the VM.
1545  *
1546  * Returns:
1547  * 0 for success, error for failure.
1548  *
1549  * Object has to be reserved and unreserved outside!
1550  */
1551 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1552 		     struct amdgpu_bo_va *bo_va,
1553 		     uint64_t saddr, uint64_t offset,
1554 		     uint64_t size, uint64_t flags)
1555 {
1556 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1557 	struct amdgpu_bo *bo = bo_va->base.bo;
1558 	struct amdgpu_vm *vm = bo_va->base.vm;
1559 	uint64_t eaddr;
1560 	int r;
1561 
1562 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1563 	if (r)
1564 		return r;
1565 
1566 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1567 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1568 
1569 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1570 	if (tmp) {
1571 		/* bo and tmp overlap, invalid addr */
1572 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1573 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1574 			tmp->start, tmp->last + 1);
1575 		return -EINVAL;
1576 	}
1577 
1578 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1579 	if (!mapping)
1580 		return -ENOMEM;
1581 
1582 	mapping->start = saddr;
1583 	mapping->last = eaddr;
1584 	mapping->offset = offset;
1585 	mapping->flags = flags;
1586 
1587 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1588 
1589 	return 0;
1590 }
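
/*
 * Illustrative sketch (hypothetical addresses, reservation and error handling
 * elided): map the first 1 MiB of a BO read/write at GPU VA 0x100000.
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */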
1591 
1592 /**
1593  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1594  *
1595  * @adev: amdgpu_device pointer
1596  * @bo_va: bo_va to store the address
1597  * @saddr: where to map the BO
1598  * @offset: requested offset in the BO
1599  * @size: BO size in bytes
1600  * @flags: attributes of pages (read/write/valid/etc.)
1601  *
1602  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1603  * mappings as we do so.
1604  *
1605  * Returns:
1606  * 0 for success, error for failure.
1607  *
1608  * Object has to be reserved and unreserved outside!
1609  */
1610 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1611 			     struct amdgpu_bo_va *bo_va,
1612 			     uint64_t saddr, uint64_t offset,
1613 			     uint64_t size, uint64_t flags)
1614 {
1615 	struct amdgpu_bo_va_mapping *mapping;
1616 	struct amdgpu_bo *bo = bo_va->base.bo;
1617 	uint64_t eaddr;
1618 	int r;
1619 
1620 	r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
1621 	if (r)
1622 		return r;
1623 
1624 	/* Allocate all the needed memory */
1625 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1626 	if (!mapping)
1627 		return -ENOMEM;
1628 
1629 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1630 	if (r) {
1631 		kfree(mapping);
1632 		return r;
1633 	}
1634 
1635 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1636 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1637 
1638 	mapping->start = saddr;
1639 	mapping->last = eaddr;
1640 	mapping->offset = offset;
1641 	mapping->flags = flags;
1642 
1643 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1644 
1645 	return 0;
1646 }
1647 
1648 /**
1649  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1650  *
1651  * @adev: amdgpu_device pointer
1652  * @bo_va: bo_va to remove the address from
1653  * @saddr: where the BO is mapped
1654  *
1655  * Remove a mapping of the BO at the specified addr from the VM.
1656  *
1657  * Returns:
1658  * 0 for success, error for failure.
1659  *
1660  * Object has to be reserved and unreserved outside!
1661  */
1662 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1663 		       struct amdgpu_bo_va *bo_va,
1664 		       uint64_t saddr)
1665 {
1666 	struct amdgpu_bo_va_mapping *mapping;
1667 	struct amdgpu_vm *vm = bo_va->base.vm;
1668 	bool valid = true;
1669 
1670 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1671 
1672 	list_for_each_entry(mapping, &bo_va->valids, list) {
1673 		if (mapping->start == saddr)
1674 			break;
1675 	}
1676 
1677 	if (&mapping->list == &bo_va->valids) {
1678 		valid = false;
1679 
1680 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1681 			if (mapping->start == saddr)
1682 				break;
1683 		}
1684 
1685 		if (&mapping->list == &bo_va->invalids)
1686 			return -ENOENT;
1687 	}
1688 
1689 	list_del(&mapping->list);
1690 	amdgpu_vm_it_remove(mapping, &vm->va);
1691 	mapping->bo_va = NULL;
1692 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1693 
1694 	if (valid)
1695 		list_add(&mapping->list, &vm->freed);
1696 	else
1697 		amdgpu_vm_free_mapping(adev, vm, mapping,
1698 				       bo_va->last_pt_update);
1699 
1700 	return 0;
1701 }
1702 
1703 /**
1704  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1705  *
1706  * @adev: amdgpu_device pointer
1707  * @vm: VM structure to use
1708  * @saddr: start of the range
1709  * @size: size of the range
1710  *
1711  * Remove all mappings in a range, split them as appropriate.
1712  *
1713  * Returns:
1714  * 0 for success, error for failure.
1715  */
1716 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
1717 				struct amdgpu_vm *vm,
1718 				uint64_t saddr, uint64_t size)
1719 {
1720 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
1721 	LIST_HEAD(removed);
1722 	uint64_t eaddr;
1723 	int r;
1724 
1725 	r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
1726 	if (r)
1727 		return r;
1728 
1729 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1730 	eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1731 
1732 	/* Allocate all the needed memory */
1733 	before = kzalloc(sizeof(*before), GFP_KERNEL);
1734 	if (!before)
1735 		return -ENOMEM;
1736 	INIT_LIST_HEAD(&before->list);
1737 
1738 	after = kzalloc(sizeof(*after), GFP_KERNEL);
1739 	if (!after) {
1740 		kfree(before);
1741 		return -ENOMEM;
1742 	}
1743 	INIT_LIST_HEAD(&after->list);
1744 
1745 	/* Now gather all removed mappings */
1746 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1747 	while (tmp) {
1748 		/* Remember mapping split at the start */
1749 		if (tmp->start < saddr) {
1750 			before->start = tmp->start;
1751 			before->last = saddr - 1;
1752 			before->offset = tmp->offset;
1753 			before->flags = tmp->flags;
1754 			before->bo_va = tmp->bo_va;
1755 			list_add(&before->list, &tmp->bo_va->invalids);
1756 		}
1757 
1758 		/* Remember mapping split at the end */
1759 		if (tmp->last > eaddr) {
1760 			after->start = eaddr + 1;
1761 			after->last = tmp->last;
1762 			after->offset = tmp->offset;
1763 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
1764 			after->flags = tmp->flags;
1765 			after->bo_va = tmp->bo_va;
1766 			list_add(&after->list, &tmp->bo_va->invalids);
1767 		}
1768 
1769 		list_del(&tmp->list);
1770 		list_add(&tmp->list, &removed);
1771 
1772 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
1773 	}
1774 
1775 	/* And free them up */
1776 	list_for_each_entry_safe(tmp, next, &removed, list) {
1777 		amdgpu_vm_it_remove(tmp, &vm->va);
1778 		list_del(&tmp->list);
1779 
1780 		if (tmp->start < saddr)
1781 		    tmp->start = saddr;
1782 		if (tmp->last > eaddr)
1783 		    tmp->last = eaddr;
1784 
1785 		tmp->bo_va = NULL;
1786 		list_add(&tmp->list, &vm->freed);
1787 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
1788 	}
1789 
1790 	/* Insert partial mapping before the range */
1791 	if (!list_empty(&before->list)) {
1792 		struct amdgpu_bo *bo = before->bo_va->base.bo;
1793 
1794 		amdgpu_vm_it_insert(before, &vm->va);
1795 		if (before->flags & AMDGPU_PTE_PRT)
1796 			amdgpu_vm_prt_get(adev);
1797 
1798 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1799 		    !before->bo_va->base.moved)
1800 			amdgpu_vm_bo_moved(&before->bo_va->base);
1801 	} else {
1802 		kfree(before);
1803 	}
1804 
1805 	/* Insert partial mapping after the range */
1806 	if (!list_empty(&after->list)) {
1807 		struct amdgpu_bo *bo = after->bo_va->base.bo;
1808 
1809 		amdgpu_vm_it_insert(after, &vm->va);
1810 		if (after->flags & AMDGPU_PTE_PRT)
1811 			amdgpu_vm_prt_get(adev);
1812 
1813 		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
1814 		    !after->bo_va->base.moved)
1815 			amdgpu_vm_bo_moved(&after->bo_va->base);
1816 	} else {
1817 		kfree(after);
1818 	}
1819 
1820 	return 0;
1821 }
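
/*
 * Worked example for the range clearing above (addresses are in GPU page
 * units, i.e. after the division by AMDGPU_GPU_PAGE_SIZE, and purely
 * illustrative): with an existing mapping covering [0x100, 0x1ff] and a
 * clear request for [0x140, 0x17f], the loop creates a "before" remainder
 * [0x100, 0x13f] (keeping the original offset) and an "after" remainder
 * [0x180, 0x1ff] (offset advanced by 0x80 pages), both inheriting the
 * original flags, while the overlapped part is clipped to [0x140, 0x17f]
 * and moved to vm->freed for later PTE invalidation.
 */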
1822 
1823 /**
1824  * amdgpu_vm_bo_lookup_mapping - find mapping by address
1825  *
1826  * @vm: the requested VM
1827  * @addr: the address
1828  *
1829  * Find a mapping by its address.
1830  *
1831  * Returns:
1832  * The amdgpu_bo_va_mapping matching @addr, or NULL if no mapping was found.
1833  *
1834  */
1835 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
1836 							 uint64_t addr)
1837 {
1838 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
1839 }
1840 
1841 /**
1842  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
1843  *
1844  * @vm: the requested vm
1845  * @ticket: CS ticket
1846  *
1847  * Trace all mappings of BOs reserved during a command submission.
1848  */
1849 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
1850 {
1851 	struct amdgpu_bo_va_mapping *mapping;
1852 
1853 	if (!trace_amdgpu_vm_bo_cs_enabled())
1854 		return;
1855 
1856 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
1857 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
1858 		if (mapping->bo_va && mapping->bo_va->base.bo) {
1859 			struct amdgpu_bo *bo;
1860 
1861 			bo = mapping->bo_va->base.bo;
1862 			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
1863 			    ticket)
1864 				continue;
1865 		}
1866 
1867 		trace_amdgpu_vm_bo_cs(mapping);
1868 	}
1869 }
1870 
1871 /**
1872  * amdgpu_vm_bo_del - remove a bo from a specific vm
1873  *
1874  * @adev: amdgpu_device pointer
1875  * @bo_va: requested bo_va
1876  *
1877  * Remove @bo_va->bo from the requested vm.
1878  *
1879  * Object has to be reserved!
1880  */
1881 void amdgpu_vm_bo_del(struct amdgpu_device *adev,
1882 		      struct amdgpu_bo_va *bo_va)
1883 {
1884 	struct amdgpu_bo_va_mapping *mapping, *next;
1885 	struct amdgpu_bo *bo = bo_va->base.bo;
1886 	struct amdgpu_vm *vm = bo_va->base.vm;
1887 	struct amdgpu_vm_bo_base **base;
1888 
1889 	dma_resv_assert_held(vm->root.bo->tbo.base.resv);
1890 
1891 	if (bo) {
1892 		dma_resv_assert_held(bo->tbo.base.resv);
1893 		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
1894 			ttm_bo_set_bulk_move(&bo->tbo, NULL);
1895 
1896 		for (base = &bo_va->base.bo->vm_bo; *base;
1897 		     base = &(*base)->next) {
1898 			if (*base != &bo_va->base)
1899 				continue;
1900 
1901 			*base = bo_va->base.next;
1902 			break;
1903 		}
1904 	}
1905 
1906 	spin_lock(&vm->status_lock);
1907 	list_del(&bo_va->base.vm_status);
1908 	spin_unlock(&vm->status_lock);
1909 
1910 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1911 		list_del(&mapping->list);
1912 		amdgpu_vm_it_remove(mapping, &vm->va);
1913 		mapping->bo_va = NULL;
1914 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1915 		list_add(&mapping->list, &vm->freed);
1916 	}
1917 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1918 		list_del(&mapping->list);
1919 		amdgpu_vm_it_remove(mapping, &vm->va);
1920 		amdgpu_vm_free_mapping(adev, vm, mapping,
1921 				       bo_va->last_pt_update);
1922 	}
1923 
1924 	dma_fence_put(bo_va->last_pt_update);
1925 
1926 	if (bo && bo_va->is_xgmi)
1927 		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
1928 
1929 	kfree(bo_va);
1930 }
1931 
1932 /**
1933  * amdgpu_vm_evictable - check if we can evict a VM
1934  *
1935  * @bo: A page table of the VM.
1936  *
1937  * Check if it is possible to evict a VM.
1938  */
1939 bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
1940 {
1941 	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
1942 
1943 	/* Page tables of a destroyed VM can go away immediately */
1944 	if (!bo_base || !bo_base->vm)
1945 		return true;
1946 
1947 	/* Don't evict VM page tables while they are busy */
1948 	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
1949 		return false;
1950 
1951 	/* Try to block ongoing updates */
1952 	if (!amdgpu_vm_eviction_trylock(bo_base->vm))
1953 		return false;
1954 
1955 	/* Don't evict VM page tables while they are updated */
1956 	if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
1957 		amdgpu_vm_eviction_unlock(bo_base->vm);
1958 		return false;
1959 	}
1960 
1961 	bo_base->vm->evicting = true;
1962 	amdgpu_vm_eviction_unlock(bo_base->vm);
1963 	return true;
1964 }
1965 
1966 /**
1967  * amdgpu_vm_bo_invalidate - mark the bo as invalid
1968  *
1969  * @adev: amdgpu_device pointer
1970  * @bo: amdgpu buffer object
1971  * @evicted: is the BO evicted
1972  *
1973  * Mark @bo as invalid.
1974  */
1975 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1976 			     struct amdgpu_bo *bo, bool evicted)
1977 {
1978 	struct amdgpu_vm_bo_base *bo_base;
1979 
1980 	/* shadow bo doesn't have bo base, its validation needs its parent */
1981 	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
1982 		bo = bo->parent;
1983 
1984 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
1985 		struct amdgpu_vm *vm = bo_base->vm;
1986 
1987 		if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
1988 			amdgpu_vm_bo_evicted(bo_base);
1989 			continue;
1990 		}
1991 
1992 		if (bo_base->moved)
1993 			continue;
1994 		bo_base->moved = true;
1995 
1996 		if (bo->tbo.type == ttm_bo_type_kernel)
1997 			amdgpu_vm_bo_relocated(bo_base);
1998 		else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
1999 			amdgpu_vm_bo_moved(bo_base);
2000 		else
2001 			amdgpu_vm_bo_invalidated(bo_base);
2002 	}
2003 }
2004 
2005 /**
2006  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2007  *
2008  * @vm_size: VM size
2009  *
2010  * Returns:
2011  * VM page table size as a power of two
2012  */
2013 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2014 {
2015 	/* Total bits covered by PD + PTs */
2016 	unsigned bits = ilog2(vm_size) + 18;
2017 
2018 	/* Make sure the PD is 4K in size up to 8GB address space.
2019 	 * Above that split equally between PD and PTs. */
2020 	if (vm_size <= 8)
2021 		return (bits - 9);
2022 	else
2023 		return ((bits + 3) / 2);
2024 }
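
/*
 * Worked example for amdgpu_vm_get_block_size() (illustrative numbers):
 * for vm_size = 256 GB, bits = ilog2(256) + 18 = 26 and the result is
 * (26 + 3) / 2 = 14, i.e. each PT covers 2^14 pages and the PD covers the
 * remaining 12 bits.  For vm_size = 8 GB, bits = 21 and the result is
 * 21 - 9 = 12, which leaves 2^9 = 512 PD entries, i.e. a single 4K page
 * directory.
 */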
2025 
2026 /**
2027  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2028  *
2029  * @adev: amdgpu_device pointer
2030  * @min_vm_size: the minimum vm size in GB if it's set to auto
2031  * @fragment_size_default: Default PTE fragment size
2032  * @max_level: max VMPT level
2033  * @max_bits: max address space size in bits
2034  *
2035  */
2036 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2037 			   uint32_t fragment_size_default, unsigned max_level,
2038 			   unsigned max_bits)
2039 {
2040 	unsigned int max_size = 1 << (max_bits - 30);
2041 	unsigned int vm_size;
2042 	uint64_t tmp;
2043 
2044 	/* adjust vm size first */
2045 	if (amdgpu_vm_size != -1) {
2046 		vm_size = amdgpu_vm_size;
2047 		if (vm_size > max_size) {
2048 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2049 				 amdgpu_vm_size, max_size);
2050 			vm_size = max_size;
2051 		}
2052 	} else {
2053 		struct sysinfo si;
2054 		unsigned int phys_ram_gb;
2055 
2056 		/* Optimal VM size depends on the amount of physical
2057 		 * RAM available. Underlying requirements and
2058 		 * assumptions:
2059 		 *
2060 		 *  - Need to map system memory and VRAM from all GPUs
2061 		 *     - VRAM from other GPUs not known here
2062 		 *     - Assume VRAM <= system memory
2063 		 *  - On GFX8 and older, VM space can be segmented for
2064 		 *    different MTYPEs
2065 		 *  - Need to allow room for fragmentation, guard pages etc.
2066 		 *
2067 		 * This adds up to a rough guess of system memory x3.
2068 		 * Round up to power of two to maximize the available
2069 		 * VM size with the given page table size.
2070 		 */
2071 		si_meminfo(&si);
2072 		phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2073 			       (1 << 30) - 1) >> 30;
2074 		vm_size = roundup_pow_of_two(
2075 			min(max(phys_ram_gb * 3, min_vm_size), max_size));
2076 	}
2077 
2078 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2079 
2080 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2081 	if (amdgpu_vm_block_size != -1)
2082 		tmp >>= amdgpu_vm_block_size - 9;
2083 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2084 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2085 	switch (adev->vm_manager.num_level) {
2086 	case 3:
2087 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2088 		break;
2089 	case 2:
2090 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2091 		break;
2092 	case 1:
2093 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2094 		break;
2095 	default:
2096 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2097 	}
2098 	/* block size depends on vm size and hw setup */
2099 	if (amdgpu_vm_block_size != -1)
2100 		adev->vm_manager.block_size =
2101 			min((unsigned)amdgpu_vm_block_size, max_bits
2102 			    - AMDGPU_GPU_PAGE_SHIFT
2103 			    - 9 * adev->vm_manager.num_level);
2104 	else if (adev->vm_manager.num_level > 1)
2105 		adev->vm_manager.block_size = 9;
2106 	else
2107 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2108 
2109 	if (amdgpu_vm_fragment_size == -1)
2110 		adev->vm_manager.fragment_size = fragment_size_default;
2111 	else
2112 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2113 
2114 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2115 		 vm_size, adev->vm_manager.num_level + 1,
2116 		 adev->vm_manager.block_size,
2117 		 adev->vm_manager.fragment_size);
2118 }
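
/*
 * Worked example (assumed, GFX9-like parameters, for illustration only):
 * with max_bits = 48 and min_vm_size = 256 * 1024 GB, max_size is
 * 1 << (48 - 30) = 262144 GB and vm_size resolves to 262144 GB as well.
 * That gives max_pfn = 262144 << 18 = 2^36 pages, so
 * tmp = DIV_ROUND_UP(fls64(2^36) - 1, 9) - 1 = 3, i.e. three page table
 * levels below the root (root_level = AMDGPU_VM_PDB2) and, since
 * num_level > 1, a default block_size of 9 bits (512 PTEs per table).
 */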
2119 
2120 /**
2121  * amdgpu_vm_wait_idle - wait for the VM to become idle
2122  *
2123  * @vm: VM object to wait for
2124  * @timeout: timeout to wait for VM to become idle
2125  */
2126 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
2127 {
2128 	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2129 					DMA_RESV_USAGE_BOOKKEEP,
2130 					true, timeout);
2131 	if (timeout <= 0)
2132 		return timeout;
2133 
2134 	return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2135 }
2136 
2137 /**
2138  * amdgpu_vm_init - initialize a vm instance
2139  *
2140  * @adev: amdgpu_device pointer
2141  * @vm: requested vm
2142  * @xcp_id: GPU partition selection id
2143  *
2144  * Init @vm fields.
2145  *
2146  * Returns:
2147  * 0 for success, error for failure.
2148  */
2149 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2150 		   int32_t xcp_id)
2151 {
2152 	struct amdgpu_bo *root_bo;
2153 	struct amdgpu_bo_vm *root;
2154 	int r, i;
2155 
2156 	vm->va = RB_ROOT_CACHED;
2157 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2158 		vm->reserved_vmid[i] = false;
2159 	INIT_LIST_HEAD(&vm->evicted);
2160 	INIT_LIST_HEAD(&vm->relocated);
2161 	INIT_LIST_HEAD(&vm->moved);
2162 	INIT_LIST_HEAD(&vm->idle);
2163 	INIT_LIST_HEAD(&vm->invalidated);
2164 	spin_lock_init(&vm->status_lock);
2165 	INIT_LIST_HEAD(&vm->freed);
2166 	INIT_LIST_HEAD(&vm->done);
2167 	INIT_LIST_HEAD(&vm->pt_freed);
2168 	INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
2169 	INIT_KFIFO(vm->faults);
2170 
2171 	r = amdgpu_vm_init_entities(adev, vm);
2172 	if (r)
2173 		return r;
2174 
2175 	vm->pte_support_ats = false;
2176 	vm->is_compute_context = false;
2177 
2178 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2179 				    AMDGPU_VM_USE_CPU_FOR_GFX);
2180 
2181 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2182 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2183 	WARN_ONCE((vm->use_cpu_for_update &&
2184 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2185 		  "CPU update of VM recommended only for large BAR system\n");
2186 
2187 	if (vm->use_cpu_for_update)
2188 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2189 	else
2190 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2191 
2192 	vm->last_update = dma_fence_get_stub();
2193 	vm->last_unlocked = dma_fence_get_stub();
2194 	vm->last_tlb_flush = dma_fence_get_stub();
2195 	vm->generation = 0;
2196 
2197 	mutex_init(&vm->eviction_lock);
2198 	vm->evicting = false;
2199 
2200 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2201 				false, &root, xcp_id);
2202 	if (r)
2203 		goto error_free_delayed;
2204 
2205 	root_bo = amdgpu_bo_ref(&root->bo);
2206 	r = amdgpu_bo_reserve(root_bo, true);
2207 	if (r) {
2208 		amdgpu_bo_unref(&root->shadow);
2209 		amdgpu_bo_unref(&root_bo);
2210 		goto error_free_delayed;
2211 	}
2212 
2213 	amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2214 	r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2215 	if (r)
2216 		goto error_free_root;
2217 
2218 	r = amdgpu_vm_pt_clear(adev, vm, root, false);
2219 	if (r)
2220 		goto error_free_root;
2221 
2222 	amdgpu_bo_unreserve(vm->root.bo);
2223 	amdgpu_bo_unref(&root_bo);
2224 
2225 	return 0;
2226 
2227 error_free_root:
2228 	amdgpu_vm_pt_free_root(adev, vm);
2229 	amdgpu_bo_unreserve(vm->root.bo);
2230 	amdgpu_bo_unref(&root_bo);
2231 
2232 error_free_delayed:
2233 	dma_fence_put(vm->last_tlb_flush);
2234 	dma_fence_put(vm->last_unlocked);
2235 	amdgpu_vm_fini_entities(vm);
2236 
2237 	return r;
2238 }
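
/*
 * Illustrative lifecycle sketch (assumption: a per-file private structure
 * fpriv with an embedded struct amdgpu_vm and a previously allocated
 * pasid; error handling trimmed).  This roughly mirrors how the driver
 * open/close paths pair amdgpu_vm_init() with amdgpu_vm_fini():
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
 *	if (r)
 *		return r;
 *	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */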
2239 
2240 /**
2241  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2242  *
2243  * @adev: amdgpu_device pointer
2244  * @vm: requested vm
2245  *
2246  * This only works on GFX VMs that don't have any BOs added and no
2247  * page tables allocated yet.
2248  *
2249  * Changes the following VM parameters:
2250  * - use_cpu_for_update
2251  * - pte_support_ats
2252  *
2253  * Reinitializes the page directory to reflect the changed ATS
2254  * setting.
2255  *
2256  * Returns:
2257  * 0 for success, -errno for errors.
2258  */
2259 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2260 {
2261 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2262 	int r;
2263 
2264 	r = amdgpu_bo_reserve(vm->root.bo, true);
2265 	if (r)
2266 		return r;
2267 
2268 	/* Check if PD needs to be reinitialized and do it before
2269 	 * changing any other state, in case it fails.
2270 	 */
2271 	if (pte_support_ats != vm->pte_support_ats) {
2272 		/* Sanity checks */
2273 		if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
2274 			r = -EINVAL;
2275 			goto unreserve_bo;
2276 		}
2277 
2278 		vm->pte_support_ats = pte_support_ats;
2279 		r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
2280 				       false);
2281 		if (r)
2282 			goto unreserve_bo;
2283 	}
2284 
2285 	/* Update VM state */
2286 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2287 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2288 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2289 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2290 	WARN_ONCE((vm->use_cpu_for_update &&
2291 		   !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2292 		  "CPU update of VM recommended only for large BAR system\n");
2293 
2294 	if (vm->use_cpu_for_update) {
2295 		/* Sync with last SDMA update/clear before switching to CPU */
2296 		r = amdgpu_bo_sync_wait(vm->root.bo,
2297 					AMDGPU_FENCE_OWNER_UNDEFINED, true);
2298 		if (r)
2299 			goto unreserve_bo;
2300 
2301 		vm->update_funcs = &amdgpu_vm_cpu_funcs;
2302 		r = amdgpu_vm_pt_map_tables(adev, vm);
2303 		if (r)
2304 			goto unreserve_bo;
2305 
2306 	} else {
2307 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
2308 	}
2309 
2310 	dma_fence_put(vm->last_update);
2311 	vm->last_update = dma_fence_get_stub();
2312 	vm->is_compute_context = true;
2313 
2314 	/* Free the shadow bo for compute VM */
2315 	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2316 
2317 	goto unreserve_bo;
2318 
2319 unreserve_bo:
2320 	amdgpu_bo_unreserve(vm->root.bo);
2321 	return r;
2322 }
2323 
2324 /**
2325  * amdgpu_vm_release_compute - release a compute vm
2326  * @adev: amdgpu_device pointer
2327  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2328  *
2329  * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2330  * compute pasid from the vm. Compute should stop using the vm after this call.
2331  */
2332 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2333 {
2334 	amdgpu_vm_set_pasid(adev, vm, 0);
2335 	vm->is_compute_context = false;
2336 }
2337 
2338 /**
2339  * amdgpu_vm_fini - tear down a vm instance
2340  *
2341  * @adev: amdgpu_device pointer
2342  * @vm: requested vm
2343  *
2344  * Tear down @vm.
2345  * Unbind the VM and remove all bos from the vm bo list
2346  */
2347 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2348 {
2349 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2350 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2351 	struct amdgpu_bo *root;
2352 	unsigned long flags;
2353 	int i;
2354 
2355 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2356 
2357 	flush_work(&vm->pt_free_work);
2358 
2359 	root = amdgpu_bo_ref(vm->root.bo);
2360 	amdgpu_bo_reserve(root, true);
2361 	amdgpu_vm_set_pasid(adev, vm, 0);
2362 	dma_fence_wait(vm->last_unlocked, false);
2363 	dma_fence_put(vm->last_unlocked);
2364 	dma_fence_wait(vm->last_tlb_flush, false);
2365 	/* Make sure that all fence callbacks have completed */
2366 	spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2367 	spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2368 	dma_fence_put(vm->last_tlb_flush);
2369 
2370 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2371 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2372 			amdgpu_vm_prt_fini(adev, vm);
2373 			prt_fini_needed = false;
2374 		}
2375 
2376 		list_del(&mapping->list);
2377 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2378 	}
2379 
2380 	amdgpu_vm_pt_free_root(adev, vm);
2381 	amdgpu_bo_unreserve(root);
2382 	amdgpu_bo_unref(&root);
2383 	WARN_ON(vm->root.bo);
2384 
2385 	amdgpu_vm_fini_entities(vm);
2386 
2387 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2388 		dev_err(adev->dev, "still active bo inside vm\n");
2389 	}
2390 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2391 					     &vm->va.rb_root, rb) {
2392 		/* Don't remove the mapping here, we don't want to trigger a
2393 		 * rebalance and the tree is about to be destroyed anyway.
2394 		 */
2395 		list_del(&mapping->list);
2396 		kfree(mapping);
2397 	}
2398 
2399 	dma_fence_put(vm->last_update);
2400 
2401 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
2402 		if (vm->reserved_vmid[i]) {
2403 			amdgpu_vmid_free_reserved(adev, i);
2404 			vm->reserved_vmid[i] = false;
2405 		}
2406 	}
2407 
2408 }
2409 
2410 /**
2411  * amdgpu_vm_manager_init - init the VM manager
2412  *
2413  * @adev: amdgpu_device pointer
2414  *
2415  * Initialize the VM manager structures
2416  */
2417 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2418 {
2419 	unsigned i;
2420 
2421 	/* Concurrent flushes are only possible starting with Vega10 and
2422 	 * are broken on Navi10 and Navi14.
2423 	 */
2424 	adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2425 					      adev->asic_type == CHIP_NAVI10 ||
2426 					      adev->asic_type == CHIP_NAVI14);
2427 	amdgpu_vmid_mgr_init(adev);
2428 
2429 	adev->vm_manager.fence_context =
2430 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2431 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2432 		adev->vm_manager.seqno[i] = 0;
2433 
2434 	spin_lock_init(&adev->vm_manager.prt_lock);
2435 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2436 
2437 	/* Unless overridden by the user, compute VM page tables are updated
2438 	 * by the CPU only on large BAR systems.
2439 	 */
2440 #ifdef CONFIG_X86_64
2441 	if (amdgpu_vm_update_mode == -1) {
2442 		/* For asic with VF MMIO access protection
2443 		 * avoid using CPU for VM table updates
2444 		 */
2445 		if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2446 		    !amdgpu_sriov_vf_mmio_access_protection(adev))
2447 			adev->vm_manager.vm_update_mode =
2448 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2449 		else
2450 			adev->vm_manager.vm_update_mode = 0;
2451 	} else
2452 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2453 #else
2454 	adev->vm_manager.vm_update_mode = 0;
2455 #endif
2456 
2457 	xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2458 }
2459 
2460 /**
2461  * amdgpu_vm_manager_fini - cleanup VM manager
2462  *
2463  * @adev: amdgpu_device pointer
2464  *
2465  * Cleanup the VM manager and free resources.
2466  */
2467 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2468 {
2469 	WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2470 	xa_destroy(&adev->vm_manager.pasids);
2471 
2472 	amdgpu_vmid_mgr_fini(adev);
2473 }
2474 
2475 /**
2476  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2477  *
2478  * @dev: drm device pointer
2479  * @data: drm_amdgpu_vm
2480  * @filp: drm file pointer
2481  *
2482  * Returns:
2483  * 0 for success, -errno for errors.
2484  */
2485 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2486 {
2487 	union drm_amdgpu_vm *args = data;
2488 	struct amdgpu_device *adev = drm_to_adev(dev);
2489 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2490 
2491 	/* No valid flags defined yet */
2492 	if (args->in.flags)
2493 		return -EINVAL;
2494 
2495 	switch (args->in.op) {
2496 	case AMDGPU_VM_OP_RESERVE_VMID:
2497 		/* We only need to reserve a VMID from the gfxhub */
2498 		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2499 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
2500 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2501 		}
2502 
2503 		break;
2504 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2505 		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2506 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
2507 			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2508 		}
2509 		break;
2510 	default:
2511 		return -EINVAL;
2512 	}
2513 
2514 	return 0;
2515 }
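
/*
 * Userspace-side sketch for the ioctl above (illustrative; assumes an open
 * render node fd and libdrm's drmCommandWriteRead() helper): reserving and
 * later releasing a GFX hub VMID.
 *
 *	union drm_amdgpu_vm args = {};
 *
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	args.in.flags = 0;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 *	...
 *	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */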
2516 
2517 /**
2518  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2519  *
2520  * @adev: drm device pointer
2521  * @pasid: PASID identifier for VM
2522  * @task_info: task_info to fill.
2523  */
2524 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
2525 			 struct amdgpu_task_info *task_info)
2526 {
2527 	struct amdgpu_vm *vm;
2528 	unsigned long flags;
2529 
2530 	xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2531 
2532 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2533 	if (vm)
2534 		*task_info = vm->task_info;
2535 
2536 	xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2537 }
2538 
2539 /**
2540  * amdgpu_vm_set_task_info - Sets VMs task info.
2541  *
2542  * @vm: vm for which to set the info
2543  */
2544 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2545 {
2546 	if (vm->task_info.pid)
2547 		return;
2548 
2549 	vm->task_info.pid = current->pid;
2550 	get_task_comm(vm->task_info.task_name, current);
2551 
2552 	if (current->group_leader->mm != current->mm)
2553 		return;
2554 
2555 	vm->task_info.tgid = current->group_leader->pid;
2556 	get_task_comm(vm->task_info.process_name, current->group_leader);
2557 }
2558 
2559 /**
2560  * amdgpu_vm_handle_fault - graceful handling of VM faults.
2561  * @adev: amdgpu device pointer
2562  * @pasid: PASID of the VM
2563  * @vmid: VMID, only used for GFX 9.4.3.
2564  * @node_id: Node_id received in IH cookie. Only applicable for
2565  *           GFX 9.4.3.
2566  * @addr: Address of the fault
2567  * @write_fault: true if write fault, false if read fault
2568  *
2569  * Try to gracefully handle a VM fault. Return true if the fault was handled and
2570  * shouldn't be reported any more.
2571  */
2572 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
2573 			    u32 vmid, u32 node_id, uint64_t addr,
2574 			    bool write_fault)
2575 {
2576 	bool is_compute_context = false;
2577 	struct amdgpu_bo *root;
2578 	unsigned long irqflags;
2579 	uint64_t value, flags;
2580 	struct amdgpu_vm *vm;
2581 	int r;
2582 
2583 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2584 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2585 	if (vm) {
2586 		root = amdgpu_bo_ref(vm->root.bo);
2587 		is_compute_context = vm->is_compute_context;
2588 	} else {
2589 		root = NULL;
2590 	}
2591 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2592 
2593 	if (!root)
2594 		return false;
2595 
2596 	addr /= AMDGPU_GPU_PAGE_SIZE;
2597 
2598 	if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
2599 	    node_id, addr, write_fault)) {
2600 		amdgpu_bo_unref(&root);
2601 		return true;
2602 	}
2603 
2604 	r = amdgpu_bo_reserve(root, true);
2605 	if (r)
2606 		goto error_unref;
2607 
2608 	/* Double check that the VM still exists */
2609 	xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2610 	vm = xa_load(&adev->vm_manager.pasids, pasid);
2611 	if (vm && vm->root.bo != root)
2612 		vm = NULL;
2613 	xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2614 	if (!vm)
2615 		goto error_unlock;
2616 
2617 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
2618 		AMDGPU_PTE_SYSTEM;
2619 
2620 	if (is_compute_context) {
2621 		/* Intentionally setting invalid PTE flag
2622 		 * combination to force a no-retry-fault
2623 		 */
2624 		flags = AMDGPU_VM_NORETRY_FLAGS;
2625 		value = 0;
2626 	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
2627 		/* Redirect the access to the dummy page */
2628 		value = adev->dummy_page_addr;
2629 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
2630 			AMDGPU_PTE_WRITEABLE;
2631 
2632 	} else {
2633 		/* Let the hw retry silently on the PTE */
2634 		value = 0;
2635 	}
2636 
2637 	r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2638 	if (r) {
2639 		pr_debug("failed %d to reserve fence slot\n", r);
2640 		goto error_unlock;
2641 	}
2642 
2643 	r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
2644 				   addr, flags, value, 0, NULL, NULL, NULL);
2645 	if (r)
2646 		goto error_unlock;
2647 
2648 	r = amdgpu_vm_update_pdes(adev, vm, true);
2649 
2650 error_unlock:
2651 	amdgpu_bo_unreserve(root);
2652 	if (r < 0)
2653 		DRM_ERROR("Can't handle page fault (%d)\n", r);
2654 
2655 error_unref:
2656 	amdgpu_bo_unref(&root);
2657 
2658 	return false;
2659 }
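
/*
 * Illustrative caller sketch (hypothetical local names; assumes an IH
 * fault path that has already decoded pasid, vmid, node_id, the faulting
 * address and the write/read direction from the IV entry): a GMC
 * interrupt handler would typically try this helper first and only dump
 * the fault when it returns false.
 *
 *	if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
 *				   addr, write_fault))
 *		return 1;
 *	// otherwise fall through to the existing fault reporting
 */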
2660 
2661 #if defined(CONFIG_DEBUG_FS)
2662 /**
2663  * amdgpu_debugfs_vm_bo_info - print BO info for the VM
2664  *
2665  * @vm: Requested VM for printing BO info
2666  * @m: debugfs file
2667  *
2668  * Print BO information in debugfs file for the VM
2669  */
2670 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2671 {
2672 	struct amdgpu_bo_va *bo_va, *tmp;
2673 	u64 total_idle = 0;
2674 	u64 total_evicted = 0;
2675 	u64 total_relocated = 0;
2676 	u64 total_moved = 0;
2677 	u64 total_invalidated = 0;
2678 	u64 total_done = 0;
2679 	unsigned int total_idle_objs = 0;
2680 	unsigned int total_evicted_objs = 0;
2681 	unsigned int total_relocated_objs = 0;
2682 	unsigned int total_moved_objs = 0;
2683 	unsigned int total_invalidated_objs = 0;
2684 	unsigned int total_done_objs = 0;
2685 	unsigned int id = 0;
2686 
2687 	spin_lock(&vm->status_lock);
2688 	seq_puts(m, "\tIdle BOs:\n");
2689 	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2690 		if (!bo_va->base.bo)
2691 			continue;
2692 		total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2693 	}
2694 	total_idle_objs = id;
2695 	id = 0;
2696 
2697 	seq_puts(m, "\tEvicted BOs:\n");
2698 	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2699 		if (!bo_va->base.bo)
2700 			continue;
2701 		total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2702 	}
2703 	total_evicted_objs = id;
2704 	id = 0;
2705 
2706 	seq_puts(m, "\tRelocated BOs:\n");
2707 	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2708 		if (!bo_va->base.bo)
2709 			continue;
2710 		total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2711 	}
2712 	total_relocated_objs = id;
2713 	id = 0;
2714 
2715 	seq_puts(m, "\tMoved BOs:\n");
2716 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2717 		if (!bo_va->base.bo)
2718 			continue;
2719 		total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2720 	}
2721 	total_moved_objs = id;
2722 	id = 0;
2723 
2724 	seq_puts(m, "\tInvalidated BOs:\n");
2725 	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2726 		if (!bo_va->base.bo)
2727 			continue;
2728 		total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2729 	}
2730 	total_invalidated_objs = id;
2731 	id = 0;
2732 
2733 	seq_puts(m, "\tDone BOs:\n");
2734 	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2735 		if (!bo_va->base.bo)
2736 			continue;
2737 		total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2738 	}
2739 	spin_unlock(&vm->status_lock);
2740 	total_done_objs = id;
2741 
2742 	seq_printf(m, "\tTotal idle size:        %12lld\tobjs:\t%d\n", total_idle,
2743 		   total_idle_objs);
2744 	seq_printf(m, "\tTotal evicted size:     %12lld\tobjs:\t%d\n", total_evicted,
2745 		   total_evicted_objs);
2746 	seq_printf(m, "\tTotal relocated size:   %12lld\tobjs:\t%d\n", total_relocated,
2747 		   total_relocated_objs);
2748 	seq_printf(m, "\tTotal moved size:       %12lld\tobjs:\t%d\n", total_moved,
2749 		   total_moved_objs);
2750 	seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2751 		   total_invalidated_objs);
2752 	seq_printf(m, "\tTotal done size:        %12lld\tobjs:\t%d\n", total_done,
2753 		   total_done_objs);
2754 }
2755 #endif
2756