1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
37 
38 /**
39  * DOC: GPUVM
40  *
 * GPUVM is similar to the legacy GART on older ASICs; however,
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
48  * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
55  * Cayman/Trinity support up to 8 active VMs at any given time;
56  * SI supports 16.
57  */
58 
59 #define START(node) ((node)->start)
60 #define LAST(node) ((node)->last)
61 
62 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
63 		     START, LAST, static, amdgpu_vm_it)
64 
65 #undef START
66 #undef LAST
67 
68 /**
69  * struct amdgpu_pte_update_params - Local structure
70  *
71  * Encapsulate some VM table update parameters to reduce
72  * the number of function parameters
73  *
74  */
75 struct amdgpu_pte_update_params {
76 
77 	/**
78 	 * @adev: amdgpu device we do this update for
79 	 */
80 	struct amdgpu_device *adev;
81 
82 	/**
83 	 * @vm: optional amdgpu_vm we do this update for
84 	 */
85 	struct amdgpu_vm *vm;
86 
87 	/**
88 	 * @src: address where to copy page table entries from
89 	 */
90 	uint64_t src;
91 
92 	/**
93 	 * @ib: indirect buffer to fill with commands
94 	 */
95 	struct amdgpu_ib *ib;
96 
97 	/**
98 	 * @func: Function which actually does the update
99 	 */
100 	void (*func)(struct amdgpu_pte_update_params *params,
101 		     struct amdgpu_bo *bo, uint64_t pe,
102 		     uint64_t addr, unsigned count, uint32_t incr,
103 		     uint64_t flags);
104 	/**
105 	 * @pages_addr:
106 	 *
107 	 * DMA addresses to use for mapping, used during VM update by CPU
108 	 */
109 	dma_addr_t *pages_addr;
110 
111 	/**
112 	 * @kptr:
113 	 *
114 	 * Kernel pointer of PD/PT BO that needs to be updated,
115 	 * used during VM update by CPU
116 	 */
117 	void *kptr;
118 };
119 
120 /**
121  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
122  */
123 struct amdgpu_prt_cb {
124 
125 	/**
126 	 * @adev: amdgpu device
127 	 */
128 	struct amdgpu_device *adev;
129 
130 	/**
131 	 * @cb: callback
132 	 */
133 	struct dma_fence_cb cb;
134 };
135 
136 /**
137  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
138  *
139  * @base: base structure for tracking BO usage in a VM
140  * @vm: vm to which bo is to be added
141  * @bo: amdgpu buffer object
142  *
 * Initialize an amdgpu_vm_bo_base structure and add it to the appropriate lists.
144  *
145  */
146 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
147 				   struct amdgpu_vm *vm,
148 				   struct amdgpu_bo *bo)
149 {
150 	base->vm = vm;
151 	base->bo = bo;
152 	INIT_LIST_HEAD(&base->bo_list);
153 	INIT_LIST_HEAD(&base->vm_status);
154 
155 	if (!bo)
156 		return;
157 	list_add_tail(&base->bo_list, &bo->va);
158 
159 	if (bo->tbo.type == ttm_bo_type_kernel)
160 		list_move(&base->vm_status, &vm->relocated);
161 
162 	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
163 		return;
164 
165 	if (bo->preferred_domains &
166 	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
167 		return;
168 
169 	/*
170 	 * we checked all the prerequisites, but it looks like this per vm bo
171 	 * is currently evicted. add the bo to the evicted list to make sure it
172 	 * is validated on next vm use to avoid fault.
173 	 * */
174 	list_move_tail(&base->vm_status, &vm->evicted);
175 }
176 
177 /**
178  * amdgpu_vm_level_shift - return the addr shift for each level
179  *
180  * @adev: amdgpu_device pointer
181  * @level: VMPT level
182  *
183  * Returns:
184  * The number of bits the pfn needs to be right shifted for a level.
185  */
186 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
187 				      unsigned level)
188 {
189 	unsigned shift = 0xff;
190 
191 	switch (level) {
192 	case AMDGPU_VM_PDB2:
193 	case AMDGPU_VM_PDB1:
194 	case AMDGPU_VM_PDB0:
195 		shift = 9 * (AMDGPU_VM_PDB0 - level) +
196 			adev->vm_manager.block_size;
197 		break;
198 	case AMDGPU_VM_PTB:
199 		shift = 0;
200 		break;
201 	default:
202 		dev_err(adev->dev, "the level%d isn't supported.\n", level);
203 	}
204 
205 	return shift;
206 }
207 
208 /**
209  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
210  *
211  * @adev: amdgpu_device pointer
212  * @level: VMPT level
213  *
214  * Returns:
215  * The number of entries in a page directory or page table.
216  */
217 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
218 				      unsigned level)
219 {
220 	unsigned shift = amdgpu_vm_level_shift(adev,
221 					       adev->vm_manager.root_level);
222 
223 	if (level == adev->vm_manager.root_level)
224 		/* For the root directory */
225 		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
226 	else if (level != AMDGPU_VM_PTB)
227 		/* Everything in between */
228 		return 512;
229 	else
230 		/* For the page tables on the leaves */
231 		return AMDGPU_VM_PTE_COUNT(adev);
232 }
233 
234 /**
235  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
236  *
237  * @adev: amdgpu_device pointer
238  * @level: VMPT level
239  *
240  * Returns:
241  * The size of the BO for a page directory or page table in bytes.
242  */
243 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
244 {
245 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
246 }
247 
248 /**
249  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
250  *
251  * @vm: vm providing the BOs
252  * @validated: head of validation list
253  * @entry: entry to add
254  *
255  * Add the page directory to the list of BOs to
256  * validate for command submission.
257  */
258 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
259 			 struct list_head *validated,
260 			 struct amdgpu_bo_list_entry *entry)
261 {
262 	entry->robj = vm->root.base.bo;
263 	entry->priority = 0;
264 	entry->tv.bo = &entry->robj->tbo;
265 	entry->tv.shared = true;
266 	entry->user_pages = NULL;
267 	list_add(&entry->tv.head, validated);
268 }
269 
270 /**
271  * amdgpu_vm_validate_pt_bos - validate the page table BOs
272  *
273  * @adev: amdgpu device pointer
274  * @vm: vm providing the BOs
275  * @validate: callback to do the validation
276  * @param: parameter for the validation callback
277  *
 * Validate the page table BOs on command submission if necessary.
279  *
280  * Returns:
281  * Validation result.
282  */
283 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
284 			      int (*validate)(void *p, struct amdgpu_bo *bo),
285 			      void *param)
286 {
287 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
288 	struct amdgpu_vm_bo_base *bo_base, *tmp;
289 	int r = 0;
290 
291 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
292 		struct amdgpu_bo *bo = bo_base->bo;
293 
294 		if (bo->parent) {
295 			r = validate(param, bo);
296 			if (r)
297 				break;
298 
299 			spin_lock(&glob->lru_lock);
300 			ttm_bo_move_to_lru_tail(&bo->tbo);
301 			if (bo->shadow)
302 				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
303 			spin_unlock(&glob->lru_lock);
304 		}
305 
306 		if (bo->tbo.type != ttm_bo_type_kernel) {
307 			spin_lock(&vm->moved_lock);
308 			list_move(&bo_base->vm_status, &vm->moved);
309 			spin_unlock(&vm->moved_lock);
310 		} else {
311 			list_move(&bo_base->vm_status, &vm->relocated);
312 		}
313 	}
314 
315 	spin_lock(&glob->lru_lock);
316 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
317 		struct amdgpu_bo *bo = bo_base->bo;
318 
319 		if (!bo->parent)
320 			continue;
321 
322 		ttm_bo_move_to_lru_tail(&bo->tbo);
323 		if (bo->shadow)
324 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
325 	}
326 	spin_unlock(&glob->lru_lock);
327 
328 	return r;
329 }
330 
331 /**
332  * amdgpu_vm_ready - check VM is ready for updates
333  *
334  * @vm: VM to check
335  *
336  * Check if all VM PDs/PTs are ready for updates
337  *
338  * Returns:
339  * True if eviction list is empty.
340  */
341 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
342 {
343 	return list_empty(&vm->evicted);
344 }
345 
346 /**
347  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
348  *
349  * @adev: amdgpu_device pointer
350  * @vm: VM to clear BO from
351  * @bo: BO to clear
352  * @level: level this BO is at
353  * @pte_support_ats: indicate ATS support from PTE
354  *
355  * Root PD needs to be reserved when calling this.
356  *
357  * Returns:
358  * 0 on success, errno otherwise.
359  */
360 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
361 			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
362 			      unsigned level, bool pte_support_ats)
363 {
364 	struct ttm_operation_ctx ctx = { true, false };
365 	struct dma_fence *fence = NULL;
366 	unsigned entries, ats_entries;
367 	struct amdgpu_ring *ring;
368 	struct amdgpu_job *job;
369 	uint64_t addr;
370 	int r;
371 
372 	addr = amdgpu_bo_gpu_offset(bo);
373 	entries = amdgpu_bo_size(bo) / 8;
374 
375 	if (pte_support_ats) {
376 		if (level == adev->vm_manager.root_level) {
377 			ats_entries = amdgpu_vm_level_shift(adev, level);
378 			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
379 			ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
380 			ats_entries = min(ats_entries, entries);
381 			entries -= ats_entries;
382 		} else {
383 			ats_entries = entries;
384 			entries = 0;
385 		}
386 	} else {
387 		ats_entries = 0;
388 	}
389 
390 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
391 
392 	r = reservation_object_reserve_shared(bo->tbo.resv);
393 	if (r)
394 		return r;
395 
396 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
397 	if (r)
398 		goto error;
399 
400 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
401 	if (r)
402 		goto error;
403 
404 	if (ats_entries) {
405 		uint64_t ats_value;
406 
407 		ats_value = AMDGPU_PTE_DEFAULT_ATC;
408 		if (level != AMDGPU_VM_PTB)
409 			ats_value |= AMDGPU_PDE_PTE;
410 
411 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
412 				      ats_entries, 0, ats_value);
413 		addr += ats_entries * 8;
414 	}
415 
416 	if (entries)
417 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
418 				      entries, 0, 0);
419 
420 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
421 
422 	WARN_ON(job->ibs[0].length_dw > 64);
423 	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
424 			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
425 	if (r)
426 		goto error_free;
427 
428 	r = amdgpu_job_submit(job, ring, &vm->entity,
429 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
430 	if (r)
431 		goto error_free;
432 
433 	amdgpu_bo_fence(bo, fence, true);
434 	dma_fence_put(fence);
435 
436 	if (bo->shadow)
437 		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
438 					  level, pte_support_ats);
439 
440 	return 0;
441 
442 error_free:
443 	amdgpu_job_free(job);
444 
445 error:
446 	return r;
447 }
448 
449 /**
450  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
451  *
452  * @adev: amdgpu_device pointer
453  * @vm: requested vm
454  * @parent: parent PT
455  * @saddr: start of the address range
456  * @eaddr: end of the address range
457  * @level: VMPT level
458  * @ats: indicate ATS support from PTE
459  *
460  * Make sure the page directories and page tables are allocated
461  *
462  * Returns:
463  * 0 on success, errno otherwise.
464  */
465 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
466 				  struct amdgpu_vm *vm,
467 				  struct amdgpu_vm_pt *parent,
468 				  uint64_t saddr, uint64_t eaddr,
469 				  unsigned level, bool ats)
470 {
471 	unsigned shift = amdgpu_vm_level_shift(adev, level);
472 	unsigned pt_idx, from, to;
473 	u64 flags;
474 	int r;
475 
476 	if (!parent->entries) {
477 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
478 
479 		parent->entries = kvmalloc_array(num_entries,
480 						   sizeof(struct amdgpu_vm_pt),
481 						   GFP_KERNEL | __GFP_ZERO);
482 		if (!parent->entries)
483 			return -ENOMEM;
		memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
485 	}
486 
487 	from = saddr >> shift;
488 	to = eaddr >> shift;
489 	if (from >= amdgpu_vm_num_entries(adev, level) ||
490 	    to >= amdgpu_vm_num_entries(adev, level))
491 		return -EINVAL;
492 
493 	++level;
494 	saddr = saddr & ((1 << shift) - 1);
495 	eaddr = eaddr & ((1 << shift) - 1);
496 
497 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
498 	if (vm->use_cpu_for_update)
499 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
500 	else
501 		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
502 				AMDGPU_GEM_CREATE_SHADOW);
503 
504 	/* walk over the address space and allocate the page tables */
505 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
506 		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
507 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
508 		struct amdgpu_bo *pt;
509 
510 		if (!entry->base.bo) {
511 			struct amdgpu_bo_param bp;
512 
513 			memset(&bp, 0, sizeof(bp));
514 			bp.size = amdgpu_vm_bo_size(adev, level);
515 			bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
516 			bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
517 			bp.flags = flags;
518 			bp.type = ttm_bo_type_kernel;
519 			bp.resv = resv;
520 			r = amdgpu_bo_create(adev, &bp, &pt);
521 			if (r)
522 				return r;
523 
524 			r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
525 			if (r) {
526 				amdgpu_bo_unref(&pt->shadow);
527 				amdgpu_bo_unref(&pt);
528 				return r;
529 			}
530 
531 			if (vm->use_cpu_for_update) {
532 				r = amdgpu_bo_kmap(pt, NULL);
533 				if (r) {
534 					amdgpu_bo_unref(&pt->shadow);
535 					amdgpu_bo_unref(&pt);
536 					return r;
537 				}
538 			}
539 
			/* Keep a reference to the root directory to avoid
			 * freeing them up in the wrong order.
			 */
543 			pt->parent = amdgpu_bo_ref(parent->base.bo);
544 
545 			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
546 		}
547 
548 		if (level < AMDGPU_VM_PTB) {
549 			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
550 			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
551 				((1 << shift) - 1);
552 			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
553 						   sub_eaddr, level, ats);
554 			if (r)
555 				return r;
556 		}
557 	}
558 
559 	return 0;
560 }
561 
562 /**
563  * amdgpu_vm_alloc_pts - Allocate page tables.
564  *
565  * @adev: amdgpu_device pointer
566  * @vm: VM to allocate page tables for
567  * @saddr: Start address which needs to be allocated
568  * @size: Size from start address we need.
569  *
570  * Make sure the page tables are allocated.
571  *
572  * Returns:
573  * 0 on success, errno otherwise.
574  */
575 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
576 			struct amdgpu_vm *vm,
577 			uint64_t saddr, uint64_t size)
578 {
579 	uint64_t eaddr;
580 	bool ats = false;
581 
582 	/* validate the parameters */
583 	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
584 		return -EINVAL;
585 
586 	eaddr = saddr + size - 1;
587 
588 	if (vm->pte_support_ats)
589 		ats = saddr < AMDGPU_VA_HOLE_START;
590 
591 	saddr /= AMDGPU_GPU_PAGE_SIZE;
592 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
593 
594 	if (eaddr >= adev->vm_manager.max_pfn) {
595 		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
596 			eaddr, adev->vm_manager.max_pfn);
597 		return -EINVAL;
598 	}
599 
600 	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
601 				      adev->vm_manager.root_level, ats);
602 }
603 
604 /**
605  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
606  *
607  * @adev: amdgpu_device pointer
608  */
609 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
610 {
611 	const struct amdgpu_ip_block *ip_block;
612 	bool has_compute_vm_bug;
613 	struct amdgpu_ring *ring;
614 	int i;
615 
616 	has_compute_vm_bug = false;
617 
618 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
619 	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
622 		if (ip_block->version->major <= 7)
623 			has_compute_vm_bug = true;
624 		else if (ip_block->version->major == 8)
625 			if (adev->gfx.mec_fw_version < 673)
626 				has_compute_vm_bug = true;
627 	}
628 
629 	for (i = 0; i < adev->num_rings; i++) {
630 		ring = adev->rings[i];
631 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
632 			/* only compute rings */
633 			ring->has_compute_vm_bug = has_compute_vm_bug;
634 		else
635 			ring->has_compute_vm_bug = false;
636 	}
637 }
638 
639 /**
640  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
641  *
642  * @ring: ring on which the job will be submitted
643  * @job: job to submit
644  *
645  * Returns:
646  * True if sync is needed.
647  */
648 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
649 				  struct amdgpu_job *job)
650 {
651 	struct amdgpu_device *adev = ring->adev;
652 	unsigned vmhub = ring->funcs->vmhub;
653 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
654 	struct amdgpu_vmid *id;
655 	bool gds_switch_needed;
656 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
657 
658 	if (job->vmid == 0)
659 		return false;
660 	id = &id_mgr->ids[job->vmid];
661 	gds_switch_needed = ring->funcs->emit_gds_switch && (
662 		id->gds_base != job->gds_base ||
663 		id->gds_size != job->gds_size ||
664 		id->gws_base != job->gws_base ||
665 		id->gws_size != job->gws_size ||
666 		id->oa_base != job->oa_base ||
667 		id->oa_size != job->oa_size);
668 
669 	if (amdgpu_vmid_had_gpu_reset(adev, id))
670 		return true;
671 
672 	return vm_flush_needed || gds_switch_needed;
673 }
674 
675 /**
676  * amdgpu_vm_flush - hardware flush the vm
677  *
678  * @ring: ring to use for flush
679  * @job:  related job
680  * @need_pipe_sync: is pipe sync needed
681  *
682  * Emit a VM flush when it is necessary.
683  *
684  * Returns:
685  * 0 on success, errno otherwise.
686  */
687 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
688 {
689 	struct amdgpu_device *adev = ring->adev;
690 	unsigned vmhub = ring->funcs->vmhub;
691 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
692 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
693 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
694 		id->gds_base != job->gds_base ||
695 		id->gds_size != job->gds_size ||
696 		id->gws_base != job->gws_base ||
697 		id->gws_size != job->gws_size ||
698 		id->oa_base != job->oa_base ||
699 		id->oa_size != job->oa_size);
700 	bool vm_flush_needed = job->vm_needs_flush;
701 	bool pasid_mapping_needed = id->pasid != job->pasid ||
702 		!id->pasid_mapping ||
703 		!dma_fence_is_signaled(id->pasid_mapping);
704 	struct dma_fence *fence = NULL;
705 	unsigned patch_offset = 0;
706 	int r;
707 
708 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
709 		gds_switch_needed = true;
710 		vm_flush_needed = true;
711 		pasid_mapping_needed = true;
712 	}
713 
714 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
715 	vm_flush_needed &= !!ring->funcs->emit_vm_flush;
716 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
717 		ring->funcs->emit_wreg;
718 
719 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
720 		return 0;
721 
722 	if (ring->funcs->init_cond_exec)
723 		patch_offset = amdgpu_ring_init_cond_exec(ring);
724 
725 	if (need_pipe_sync)
726 		amdgpu_ring_emit_pipeline_sync(ring);
727 
728 	if (vm_flush_needed) {
729 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
730 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
731 	}
732 
733 	if (pasid_mapping_needed)
734 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
735 
736 	if (vm_flush_needed || pasid_mapping_needed) {
737 		r = amdgpu_fence_emit(ring, &fence, 0);
738 		if (r)
739 			return r;
740 	}
741 
742 	if (vm_flush_needed) {
743 		mutex_lock(&id_mgr->lock);
744 		dma_fence_put(id->last_flush);
745 		id->last_flush = dma_fence_get(fence);
746 		id->current_gpu_reset_count =
747 			atomic_read(&adev->gpu_reset_counter);
748 		mutex_unlock(&id_mgr->lock);
749 	}
750 
751 	if (pasid_mapping_needed) {
752 		id->pasid = job->pasid;
753 		dma_fence_put(id->pasid_mapping);
754 		id->pasid_mapping = dma_fence_get(fence);
755 	}
756 	dma_fence_put(fence);
757 
758 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
759 		id->gds_base = job->gds_base;
760 		id->gds_size = job->gds_size;
761 		id->gws_base = job->gws_base;
762 		id->gws_size = job->gws_size;
763 		id->oa_base = job->oa_base;
764 		id->oa_size = job->oa_size;
765 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
766 					    job->gds_size, job->gws_base,
767 					    job->gws_size, job->oa_base,
768 					    job->oa_size);
769 	}
770 
771 	if (ring->funcs->patch_cond_exec)
772 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
773 
774 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
775 	if (ring->funcs->emit_switch_buffer) {
776 		amdgpu_ring_emit_switch_buffer(ring);
777 		amdgpu_ring_emit_switch_buffer(ring);
778 	}
779 	return 0;
780 }
781 
782 /**
783  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
784  *
785  * @vm: requested vm
786  * @bo: requested buffer object
787  *
788  * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
790  * Returns the found bo_va or NULL if none is found
791  *
792  * Object has to be reserved!
793  *
794  * Returns:
795  * Found bo_va or NULL.
796  */
797 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
798 				       struct amdgpu_bo *bo)
799 {
800 	struct amdgpu_bo_va *bo_va;
801 
802 	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
803 		if (bo_va->base.vm == vm) {
804 			return bo_va;
805 		}
806 	}
807 	return NULL;
808 }
809 
810 /**
811  * amdgpu_vm_do_set_ptes - helper to call the right asic function
812  *
813  * @params: see amdgpu_pte_update_params definition
814  * @bo: PD/PT to update
815  * @pe: addr of the page entry
816  * @addr: dst addr to write into pe
817  * @count: number of page entries to update
818  * @incr: increase next addr by incr bytes
819  * @flags: hw access flags
820  *
821  * Traces the parameters and calls the right asic functions
822  * to setup the page table using the DMA.
823  */
824 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
825 				  struct amdgpu_bo *bo,
826 				  uint64_t pe, uint64_t addr,
827 				  unsigned count, uint32_t incr,
828 				  uint64_t flags)
829 {
830 	pe += amdgpu_bo_gpu_offset(bo);
831 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
832 
833 	if (count < 3) {
834 		amdgpu_vm_write_pte(params->adev, params->ib, pe,
835 				    addr | flags, count, incr);
836 
837 	} else {
838 		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
839 				      count, incr, flags);
840 	}
841 }
842 
843 /**
844  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
845  *
846  * @params: see amdgpu_pte_update_params definition
847  * @bo: PD/PT to update
848  * @pe: addr of the page entry
849  * @addr: dst addr to write into pe
850  * @count: number of page entries to update
851  * @incr: increase next addr by incr bytes
852  * @flags: hw access flags
853  *
854  * Traces the parameters and calls the DMA function to copy the PTEs.
855  */
856 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
857 				   struct amdgpu_bo *bo,
858 				   uint64_t pe, uint64_t addr,
859 				   unsigned count, uint32_t incr,
860 				   uint64_t flags)
861 {
862 	uint64_t src = (params->src + (addr >> 12) * 8);
863 
864 	pe += amdgpu_bo_gpu_offset(bo);
865 	trace_amdgpu_vm_copy_ptes(pe, src, count);
866 
867 	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
868 }
869 
870 /**
871  * amdgpu_vm_map_gart - Resolve gart mapping of addr
872  *
873  * @pages_addr: optional DMA address to use for lookup
874  * @addr: the unmapped addr
875  *
876  * Look up the physical address of the page that the pte resolves
877  * to.
878  *
879  * Returns:
880  * The pointer for the page table entry.
881  */
882 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
883 {
884 	uint64_t result;
885 
886 	/* page table offset */
887 	result = pages_addr[addr >> PAGE_SHIFT];
888 
	/* in case cpu page size != gpu page size */
890 	result |= addr & (~PAGE_MASK);
891 
892 	result &= 0xFFFFFFFFFFFFF000ULL;
893 
894 	return result;
895 }
896 
897 /**
898  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
899  *
900  * @params: see amdgpu_pte_update_params definition
901  * @bo: PD/PT to update
902  * @pe: kmap addr of the page entry
903  * @addr: dst addr to write into pe
904  * @count: number of page entries to update
905  * @incr: increase next addr by incr bytes
906  * @flags: hw access flags
907  *
908  * Write count number of PT/PD entries directly.
909  */
910 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
911 				   struct amdgpu_bo *bo,
912 				   uint64_t pe, uint64_t addr,
913 				   unsigned count, uint32_t incr,
914 				   uint64_t flags)
915 {
916 	unsigned int i;
917 	uint64_t value;
918 
919 	pe += (unsigned long)amdgpu_bo_kptr(bo);
920 
921 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
922 
923 	for (i = 0; i < count; i++) {
924 		value = params->pages_addr ?
925 			amdgpu_vm_map_gart(params->pages_addr, addr) :
926 			addr;
927 		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
928 				       i, value, flags);
929 		addr += incr;
930 	}
931 }
932 
933 
934 /**
935  * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
936  *
937  * @adev: amdgpu_device pointer
938  * @vm: related vm
939  * @owner: fence owner
940  *
941  * Returns:
942  * 0 on success, errno otherwise.
943  */
944 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
945 			     void *owner)
946 {
947 	struct amdgpu_sync sync;
948 	int r;
949 
950 	amdgpu_sync_create(&sync);
951 	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
952 	r = amdgpu_sync_wait(&sync, true);
953 	amdgpu_sync_free(&sync);
954 
955 	return r;
956 }
957 
958 /*
959  * amdgpu_vm_update_pde - update a single level in the hierarchy
960  *
961  * @param: parameters for the update
962  * @vm: requested vm
963  * @parent: parent directory
964  * @entry: entry to update
965  *
966  * Makes sure the requested entry in parent is up to date.
967  */
968 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
969 				 struct amdgpu_vm *vm,
970 				 struct amdgpu_vm_pt *parent,
971 				 struct amdgpu_vm_pt *entry)
972 {
973 	struct amdgpu_bo *bo = parent->base.bo, *pbo;
974 	uint64_t pde, pt, flags;
975 	unsigned level;
976 
977 	/* Don't update huge pages here */
978 	if (entry->huge)
979 		return;
980 
981 	for (level = 0, pbo = bo->parent; pbo; ++level)
982 		pbo = pbo->parent;
983 
984 	level += params->adev->vm_manager.root_level;
985 	pt = amdgpu_bo_gpu_offset(entry->base.bo);
986 	flags = AMDGPU_PTE_VALID;
987 	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
988 	pde = (entry - parent->entries) * 8;
989 	if (bo->shadow)
990 		params->func(params, bo->shadow, pde, pt, 1, 0, flags);
991 	params->func(params, bo, pde, pt, 1, 0, flags);
992 }
993 
994 /*
995  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
996  *
997  * @adev: amdgpu_device pointer
998  * @vm: related vm
999  * @parent: parent PD
1000  * @level: VMPT level
1001  *
 * Mark all PD levels as invalid after an error.
1003  */
1004 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
1005 				       struct amdgpu_vm *vm,
1006 				       struct amdgpu_vm_pt *parent,
1007 				       unsigned level)
1008 {
1009 	unsigned pt_idx, num_entries;
1010 
1011 	/*
1012 	 * Recurse into the subdirectories. This recursion is harmless because
1013 	 * we only have a maximum of 5 layers.
1014 	 */
1015 	num_entries = amdgpu_vm_num_entries(adev, level);
1016 	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1017 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1018 
1019 		if (!entry->base.bo)
1020 			continue;
1021 
1022 		if (!entry->base.moved)
1023 			list_move(&entry->base.vm_status, &vm->relocated);
1024 		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1025 	}
1026 }
1027 
1028 /*
1029  * amdgpu_vm_update_directories - make sure that all directories are valid
1030  *
1031  * @adev: amdgpu_device pointer
1032  * @vm: requested vm
1033  *
1034  * Makes sure all directories are up to date.
1035  *
1036  * Returns:
1037  * 0 for success, error for failure.
1038  */
1039 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1040 				 struct amdgpu_vm *vm)
1041 {
1042 	struct amdgpu_pte_update_params params;
1043 	struct amdgpu_job *job;
1044 	unsigned ndw = 0;
1045 	int r = 0;
1046 
1047 	if (list_empty(&vm->relocated))
1048 		return 0;
1049 
1050 restart:
1051 	memset(&params, 0, sizeof(params));
1052 	params.adev = adev;
1053 
1054 	if (vm->use_cpu_for_update) {
1055 		struct amdgpu_vm_bo_base *bo_base;
1056 
1057 		list_for_each_entry(bo_base, &vm->relocated, vm_status) {
1058 			r = amdgpu_bo_kmap(bo_base->bo, NULL);
1059 			if (unlikely(r))
1060 				return r;
1061 		}
1062 
1063 		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1064 		if (unlikely(r))
1065 			return r;
1066 
1067 		params.func = amdgpu_vm_cpu_set_ptes;
1068 	} else {
1069 		ndw = 512 * 8;
1070 		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1071 		if (r)
1072 			return r;
1073 
1074 		params.ib = &job->ibs[0];
1075 		params.func = amdgpu_vm_do_set_ptes;
1076 	}
1077 
1078 	while (!list_empty(&vm->relocated)) {
1079 		struct amdgpu_vm_bo_base *bo_base, *parent;
1080 		struct amdgpu_vm_pt *pt, *entry;
1081 		struct amdgpu_bo *bo;
1082 
1083 		bo_base = list_first_entry(&vm->relocated,
1084 					   struct amdgpu_vm_bo_base,
1085 					   vm_status);
1086 		bo_base->moved = false;
1087 		list_del_init(&bo_base->vm_status);
1088 
1089 		bo = bo_base->bo->parent;
1090 		if (!bo)
1091 			continue;
1092 
1093 		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
1094 					  bo_list);
1095 		pt = container_of(parent, struct amdgpu_vm_pt, base);
1096 		entry = container_of(bo_base, struct amdgpu_vm_pt, base);
1097 
1098 		amdgpu_vm_update_pde(&params, vm, pt, entry);
1099 
1100 		if (!vm->use_cpu_for_update &&
1101 		    (ndw - params.ib->length_dw) < 32)
1102 			break;
1103 	}
1104 
1105 	if (vm->use_cpu_for_update) {
1106 		/* Flush HDP */
1107 		mb();
1108 		amdgpu_asic_flush_hdp(adev, NULL);
1109 	} else if (params.ib->length_dw == 0) {
1110 		amdgpu_job_free(job);
1111 	} else {
1112 		struct amdgpu_bo *root = vm->root.base.bo;
1113 		struct amdgpu_ring *ring;
1114 		struct dma_fence *fence;
1115 
1116 		ring = container_of(vm->entity.sched, struct amdgpu_ring,
1117 				    sched);
1118 
1119 		amdgpu_ring_pad_ib(ring, params.ib);
1120 		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1121 				 AMDGPU_FENCE_OWNER_VM, false);
1122 		WARN_ON(params.ib->length_dw > ndw);
1123 		r = amdgpu_job_submit(job, ring, &vm->entity,
1124 				      AMDGPU_FENCE_OWNER_VM, &fence);
1125 		if (r)
1126 			goto error;
1127 
1128 		amdgpu_bo_fence(root, fence, true);
1129 		dma_fence_put(vm->last_update);
1130 		vm->last_update = fence;
1131 	}
1132 
1133 	if (!list_empty(&vm->relocated))
1134 		goto restart;
1135 
1136 	return 0;
1137 
1138 error:
1139 	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1140 				   adev->vm_manager.root_level);
1141 	amdgpu_job_free(job);
1142 	return r;
1143 }
1144 
1145 /**
 * amdgpu_vm_get_entry - find the entry for an address
1147  *
1148  * @p: see amdgpu_pte_update_params definition
1149  * @addr: virtual address in question
1150  * @entry: resulting entry or NULL
1151  * @parent: parent entry
1152  *
 * Find the vm_pt entry and its parent for the given address.
1154  */
1155 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1156 			 struct amdgpu_vm_pt **entry,
1157 			 struct amdgpu_vm_pt **parent)
1158 {
1159 	unsigned level = p->adev->vm_manager.root_level;
1160 
1161 	*parent = NULL;
1162 	*entry = &p->vm->root;
1163 	while ((*entry)->entries) {
1164 		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1165 
1166 		*parent = *entry;
1167 		*entry = &(*entry)->entries[addr >> shift];
1168 		addr &= (1ULL << shift) - 1;
1169 	}
1170 
1171 	if (level != AMDGPU_VM_PTB)
1172 		*entry = NULL;
1173 }
1174 
1175 /**
1176  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1177  *
1178  * @p: see amdgpu_pte_update_params definition
1179  * @entry: vm_pt entry to check
1180  * @parent: parent entry
1181  * @nptes: number of PTEs updated with this operation
1182  * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
1184  *
1185  * Check if we can update the PD with a huge page.
1186  */
1187 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1188 					struct amdgpu_vm_pt *entry,
1189 					struct amdgpu_vm_pt *parent,
1190 					unsigned nptes, uint64_t dst,
1191 					uint64_t flags)
1192 {
1193 	uint64_t pde;
1194 
	/* In the case of a mixed PT the PDE must point to it */
1196 	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1197 	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1198 		/* Set the huge page flag to stop scanning at this PDE */
1199 		flags |= AMDGPU_PDE_PTE;
1200 	}
1201 
1202 	if (!(flags & AMDGPU_PDE_PTE)) {
1203 		if (entry->huge) {
1204 			/* Add the entry to the relocated list to update it. */
1205 			entry->huge = false;
1206 			list_move(&entry->base.vm_status, &p->vm->relocated);
1207 		}
1208 		return;
1209 	}
1210 
1211 	entry->huge = true;
1212 	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1213 
1214 	pde = (entry - parent->entries) * 8;
1215 	if (parent->base.bo->shadow)
1216 		p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1217 	p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1218 }
1219 
1220 /**
1221  * amdgpu_vm_update_ptes - make sure that page tables are valid
1222  *
1223  * @params: see amdgpu_pte_update_params definition
1224  * @start: start of GPU address range
1225  * @end: end of GPU address range
1226  * @dst: destination address to map to, the next dst inside the function
1227  * @flags: mapping flags
1228  *
1229  * Update the page tables in the range @start - @end.
1230  *
1231  * Returns:
1232  * 0 for success, -EINVAL for failure.
1233  */
1234 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1235 				  uint64_t start, uint64_t end,
1236 				  uint64_t dst, uint64_t flags)
1237 {
1238 	struct amdgpu_device *adev = params->adev;
1239 	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1240 
1241 	uint64_t addr, pe_start;
1242 	struct amdgpu_bo *pt;
1243 	unsigned nptes;
1244 
1245 	/* walk over the address space and update the page tables */
1246 	for (addr = start; addr < end; addr += nptes,
1247 	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1248 		struct amdgpu_vm_pt *entry, *parent;
1249 
1250 		amdgpu_vm_get_entry(params, addr, &entry, &parent);
1251 		if (!entry)
1252 			return -ENOENT;
1253 
1254 		if ((addr & ~mask) == (end & ~mask))
1255 			nptes = end - addr;
1256 		else
1257 			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1258 
1259 		amdgpu_vm_handle_huge_pages(params, entry, parent,
1260 					    nptes, dst, flags);
1261 		/* We don't need to update PTEs for huge pages */
1262 		if (entry->huge)
1263 			continue;
1264 
1265 		pt = entry->base.bo;
1266 		pe_start = (addr & mask) * 8;
1267 		if (pt->shadow)
1268 			params->func(params, pt->shadow, pe_start, dst, nptes,
1269 				     AMDGPU_GPU_PAGE_SIZE, flags);
1270 		params->func(params, pt, pe_start, dst, nptes,
1271 			     AMDGPU_GPU_PAGE_SIZE, flags);
1272 	}
1273 
1274 	return 0;
1275 }
1276 
1277 /*
1278  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1279  *
1280  * @params: see amdgpu_pte_update_params definition
1281  * @vm: requested vm
1282  * @start: first PTE to handle
1283  * @end: last PTE to handle
1284  * @dst: addr those PTEs should point to
1285  * @flags: hw mapping flags
1286  *
1287  * Returns:
1288  * 0 for success, -EINVAL for failure.
1289  */
1290 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1291 				uint64_t start, uint64_t end,
1292 				uint64_t dst, uint64_t flags)
1293 {
1294 	/**
1295 	 * The MC L1 TLB supports variable sized pages, based on a fragment
1296 	 * field in the PTE. When this field is set to a non-zero value, page
1297 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1298 	 * flags are considered valid for all PTEs within the fragment range
1299 	 * and corresponding mappings are assumed to be physically contiguous.
1300 	 *
1301 	 * The L1 TLB can store a single PTE for the whole fragment,
1302 	 * significantly increasing the space available for translation
1303 	 * caching. This leads to large improvements in throughput when the
1304 	 * TLB is under pressure.
1305 	 *
1306 	 * The L2 TLB distributes small and large fragments into two
1307 	 * asymmetric partitions. The large fragment cache is significantly
1308 	 * larger. Thus, we try to use large fragments wherever possible.
1309 	 * Userspace can support this by aligning virtual base address and
1310 	 * allocation size to the fragment size.
1311 	 */
1312 	unsigned max_frag = params->adev->vm_manager.fragment_size;
1313 	int r;
1314 
	/* system pages are not contiguous */
1316 	if (params->src || !(flags & AMDGPU_PTE_VALID))
1317 		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1318 
1319 	while (start != end) {
1320 		uint64_t frag_flags, frag_end;
1321 		unsigned frag;
1322 
1323 		/* This intentionally wraps around if no bit is set */
1324 		frag = min((unsigned)ffs(start) - 1,
1325 			   (unsigned)fls64(end - start) - 1);
1326 		if (frag >= max_frag) {
1327 			frag_flags = AMDGPU_PTE_FRAG(max_frag);
1328 			frag_end = end & ~((1ULL << max_frag) - 1);
1329 		} else {
1330 			frag_flags = AMDGPU_PTE_FRAG(frag);
1331 			frag_end = start + (1 << frag);
1332 		}
1333 
1334 		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1335 					  flags | frag_flags);
1336 		if (r)
1337 			return r;
1338 
1339 		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1340 		start = frag_end;
1341 	}
1342 
1343 	return 0;
1344 }
1345 
1346 /**
1347  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1348  *
1349  * @adev: amdgpu_device pointer
1350  * @exclusive: fence we need to sync to
1351  * @pages_addr: DMA addresses to use for mapping
1352  * @vm: requested vm
1353  * @start: start of mapped range
1354  * @last: last mapped entry
1355  * @flags: flags for the entries
1356  * @addr: addr to set the area to
1357  * @fence: optional resulting fence
1358  *
1359  * Fill in the page table entries between @start and @last.
1360  *
1361  * Returns:
1362  * 0 for success, -EINVAL for failure.
1363  */
1364 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1365 				       struct dma_fence *exclusive,
1366 				       dma_addr_t *pages_addr,
1367 				       struct amdgpu_vm *vm,
1368 				       uint64_t start, uint64_t last,
1369 				       uint64_t flags, uint64_t addr,
1370 				       struct dma_fence **fence)
1371 {
1372 	struct amdgpu_ring *ring;
1373 	void *owner = AMDGPU_FENCE_OWNER_VM;
1374 	unsigned nptes, ncmds, ndw;
1375 	struct amdgpu_job *job;
1376 	struct amdgpu_pte_update_params params;
1377 	struct dma_fence *f = NULL;
1378 	int r;
1379 
1380 	memset(&params, 0, sizeof(params));
1381 	params.adev = adev;
1382 	params.vm = vm;
1383 
1384 	/* sync to everything on unmapping */
1385 	if (!(flags & AMDGPU_PTE_VALID))
1386 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1387 
1388 	if (vm->use_cpu_for_update) {
		/* params.src is used as a flag to indicate system memory */
1390 		if (pages_addr)
1391 			params.src = ~0;
1392 
1393 		/* Wait for PT BOs to be free. PTs share the same resv. object
1394 		 * as the root PD BO
1395 		 */
1396 		r = amdgpu_vm_wait_pd(adev, vm, owner);
1397 		if (unlikely(r))
1398 			return r;
1399 
1400 		params.func = amdgpu_vm_cpu_set_ptes;
1401 		params.pages_addr = pages_addr;
1402 		return amdgpu_vm_frag_ptes(&params, start, last + 1,
1403 					   addr, flags);
1404 	}
1405 
1406 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1407 
1408 	nptes = last - start + 1;
1409 
1410 	/*
1411 	 * reserve space for two commands every (1 << BLOCK_SIZE)
1412 	 *  entries or 2k dwords (whatever is smaller)
1413          *
1414          * The second command is for the shadow pagetables.
1415 	 */
1416 	if (vm->root.base.bo->shadow)
1417 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1418 	else
1419 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1420 
1421 	/* padding, etc. */
1422 	ndw = 64;
1423 
1424 	if (pages_addr) {
1425 		/* copy commands needed */
1426 		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1427 
1428 		/* and also PTEs */
1429 		ndw += nptes * 2;
1430 
1431 		params.func = amdgpu_vm_do_copy_ptes;
1432 
1433 	} else {
1434 		/* set page commands needed */
1435 		ndw += ncmds * 10;
1436 
1437 		/* extra commands for begin/end fragments */
		if (vm->root.base.bo->shadow)
			ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
		else
			ndw += 2 * 10 * adev->vm_manager.fragment_size;
1442 
1443 		params.func = amdgpu_vm_do_set_ptes;
1444 	}
1445 
1446 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1447 	if (r)
1448 		return r;
1449 
1450 	params.ib = &job->ibs[0];
1451 
1452 	if (pages_addr) {
1453 		uint64_t *pte;
1454 		unsigned i;
1455 
1456 		/* Put the PTEs at the end of the IB. */
1457 		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
1459 		params.src = job->ibs->gpu_addr + i * 4;
1460 
1461 		for (i = 0; i < nptes; ++i) {
1462 			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1463 						    AMDGPU_GPU_PAGE_SIZE);
1464 			pte[i] |= flags;
1465 		}
1466 		addr = 0;
1467 	}
1468 
1469 	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1470 	if (r)
1471 		goto error_free;
1472 
1473 	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1474 			     owner, false);
1475 	if (r)
1476 		goto error_free;
1477 
1478 	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1479 	if (r)
1480 		goto error_free;
1481 
1482 	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1483 	if (r)
1484 		goto error_free;
1485 
1486 	amdgpu_ring_pad_ib(ring, params.ib);
1487 	WARN_ON(params.ib->length_dw > ndw);
1488 	r = amdgpu_job_submit(job, ring, &vm->entity,
1489 			      AMDGPU_FENCE_OWNER_VM, &f);
1490 	if (r)
1491 		goto error_free;
1492 
1493 	amdgpu_bo_fence(vm->root.base.bo, f, true);
1494 	dma_fence_put(*fence);
1495 	*fence = f;
1496 	return 0;
1497 
1498 error_free:
1499 	amdgpu_job_free(job);
1500 	return r;
1501 }
1502 
1503 /**
1504  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1505  *
1506  * @adev: amdgpu_device pointer
1507  * @exclusive: fence we need to sync to
1508  * @pages_addr: DMA addresses to use for mapping
1509  * @vm: requested vm
1510  * @mapping: mapped range and flags to use for the update
1511  * @flags: HW flags for the mapping
1512  * @nodes: array of drm_mm_nodes with the MC addresses
1513  * @fence: optional resulting fence
1514  *
1515  * Split the mapping into smaller chunks so that each update fits
1516  * into a SDMA IB.
1517  *
1518  * Returns:
1519  * 0 for success, -EINVAL for failure.
1520  */
1521 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1522 				      struct dma_fence *exclusive,
1523 				      dma_addr_t *pages_addr,
1524 				      struct amdgpu_vm *vm,
1525 				      struct amdgpu_bo_va_mapping *mapping,
1526 				      uint64_t flags,
1527 				      struct drm_mm_node *nodes,
1528 				      struct dma_fence **fence)
1529 {
1530 	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1531 	uint64_t pfn, start = mapping->start;
1532 	int r;
1533 
	/* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
	 * but we filter the flags here first just in case.
	 */
1537 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1538 		flags &= ~AMDGPU_PTE_READABLE;
1539 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1540 		flags &= ~AMDGPU_PTE_WRITEABLE;
1541 
1542 	flags &= ~AMDGPU_PTE_EXECUTABLE;
1543 	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1544 
1545 	flags &= ~AMDGPU_PTE_MTYPE_MASK;
1546 	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1547 
1548 	if ((mapping->flags & AMDGPU_PTE_PRT) &&
1549 	    (adev->asic_type >= CHIP_VEGA10)) {
1550 		flags |= AMDGPU_PTE_PRT;
1551 		flags &= ~AMDGPU_PTE_VALID;
1552 	}
1553 
1554 	trace_amdgpu_vm_bo_update(mapping);
1555 
1556 	pfn = mapping->offset >> PAGE_SHIFT;
1557 	if (nodes) {
1558 		while (pfn >= nodes->size) {
1559 			pfn -= nodes->size;
1560 			++nodes;
1561 		}
1562 	}
1563 
1564 	do {
1565 		dma_addr_t *dma_addr = NULL;
1566 		uint64_t max_entries;
1567 		uint64_t addr, last;
1568 
1569 		if (nodes) {
1570 			addr = nodes->start << PAGE_SHIFT;
1571 			max_entries = (nodes->size - pfn) *
1572 				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1573 		} else {
1574 			addr = 0;
1575 			max_entries = S64_MAX;
1576 		}
1577 
1578 		if (pages_addr) {
1579 			uint64_t count;
1580 
1581 			max_entries = min(max_entries, 16ull * 1024ull);
1582 			for (count = 1;
1583 			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1584 			     ++count) {
1585 				uint64_t idx = pfn + count;
1586 
1587 				if (pages_addr[idx] !=
1588 				    (pages_addr[idx - 1] + PAGE_SIZE))
1589 					break;
1590 			}
1591 
1592 			if (count < min_linear_pages) {
1593 				addr = pfn << PAGE_SHIFT;
1594 				dma_addr = pages_addr;
1595 			} else {
1596 				addr = pages_addr[pfn];
1597 				max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1598 			}
1599 
1600 		} else if (flags & AMDGPU_PTE_VALID) {
1601 			addr += adev->vm_manager.vram_base_offset;
1602 			addr += pfn << PAGE_SHIFT;
1603 		}
1604 
1605 		last = min((uint64_t)mapping->last, start + max_entries - 1);
1606 		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1607 						start, last, flags, addr,
1608 						fence);
1609 		if (r)
1610 			return r;
1611 
1612 		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1613 		if (nodes && nodes->size == pfn) {
1614 			pfn = 0;
1615 			++nodes;
1616 		}
1617 		start = last + 1;
1618 
1619 	} while (unlikely(start != mapping->last + 1));
1620 
1621 	return 0;
1622 }
1623 
1624 /**
1625  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1626  *
1627  * @adev: amdgpu_device pointer
1628  * @bo_va: requested BO and VM object
1629  * @clear: if true clear the entries
1630  *
1631  * Fill in the page table entries for @bo_va.
1632  *
1633  * Returns:
1634  * 0 for success, -EINVAL for failure.
1635  */
1636 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1637 			struct amdgpu_bo_va *bo_va,
1638 			bool clear)
1639 {
1640 	struct amdgpu_bo *bo = bo_va->base.bo;
1641 	struct amdgpu_vm *vm = bo_va->base.vm;
1642 	struct amdgpu_bo_va_mapping *mapping;
1643 	dma_addr_t *pages_addr = NULL;
1644 	struct ttm_mem_reg *mem;
1645 	struct drm_mm_node *nodes;
1646 	struct dma_fence *exclusive, **last_update;
1647 	uint64_t flags;
1648 	int r;
1649 
1650 	if (clear || !bo_va->base.bo) {
1651 		mem = NULL;
1652 		nodes = NULL;
1653 		exclusive = NULL;
1654 	} else {
1655 		struct ttm_dma_tt *ttm;
1656 
1657 		mem = &bo_va->base.bo->tbo.mem;
1658 		nodes = mem->mm_node;
1659 		if (mem->mem_type == TTM_PL_TT) {
1660 			ttm = container_of(bo_va->base.bo->tbo.ttm,
1661 					   struct ttm_dma_tt, ttm);
1662 			pages_addr = ttm->dma_address;
1663 		}
1664 		exclusive = reservation_object_get_excl(bo->tbo.resv);
1665 	}
1666 
1667 	if (bo)
1668 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1669 	else
1670 		flags = 0x0;
1671 
1672 	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1673 		last_update = &vm->last_update;
1674 	else
1675 		last_update = &bo_va->last_pt_update;
1676 
1677 	if (!clear && bo_va->base.moved) {
1678 		bo_va->base.moved = false;
1679 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1680 
1681 	} else if (bo_va->cleared != clear) {
1682 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1683 	}
1684 
1685 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1686 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1687 					       mapping, flags, nodes,
1688 					       last_update);
1689 		if (r)
1690 			return r;
1691 	}
1692 
1693 	if (vm->use_cpu_for_update) {
1694 		/* Flush HDP */
1695 		mb();
1696 		amdgpu_asic_flush_hdp(adev, NULL);
1697 	}
1698 
1699 	spin_lock(&vm->moved_lock);
1700 	list_del_init(&bo_va->base.vm_status);
1701 	spin_unlock(&vm->moved_lock);
1702 
1703 	/* If the BO is not in its preferred location add it back to
1704 	 * the evicted list so that it gets validated again on the
1705 	 * next command submission.
1706 	 */
1707 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1708 		uint32_t mem_type = bo->tbo.mem.mem_type;
1709 
1710 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1711 			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1712 		else
1713 			list_add(&bo_va->base.vm_status, &vm->idle);
1714 	}
1715 
1716 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1717 	bo_va->cleared = clear;
1718 
1719 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1720 		list_for_each_entry(mapping, &bo_va->valids, list)
1721 			trace_amdgpu_vm_bo_mapping(mapping);
1722 	}
1723 
1724 	return 0;
1725 }
1726 
1727 /**
1728  * amdgpu_vm_update_prt_state - update the global PRT state
1729  *
1730  * @adev: amdgpu_device pointer
1731  */
1732 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1733 {
1734 	unsigned long flags;
1735 	bool enable;
1736 
1737 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1738 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1739 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1740 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1741 }
1742 
1743 /**
1744  * amdgpu_vm_prt_get - add a PRT user
1745  *
1746  * @adev: amdgpu_device pointer
1747  */
1748 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1749 {
1750 	if (!adev->gmc.gmc_funcs->set_prt)
1751 		return;
1752 
1753 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1754 		amdgpu_vm_update_prt_state(adev);
1755 }
1756 
1757 /**
1758  * amdgpu_vm_prt_put - drop a PRT user
1759  *
1760  * @adev: amdgpu_device pointer
1761  */
1762 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1763 {
1764 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1765 		amdgpu_vm_update_prt_state(adev);
1766 }
1767 
1768 /**
1769  * amdgpu_vm_prt_cb - callback for updating the PRT status
1770  *
1771  * @fence: fence for the callback
1772  * @_cb: the callback function
1773  */
1774 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1775 {
1776 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1777 
1778 	amdgpu_vm_prt_put(cb->adev);
1779 	kfree(cb);
1780 }
1781 
1782 /**
1783  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1784  *
1785  * @adev: amdgpu_device pointer
1786  * @fence: fence for the callback
1787  */
1788 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1789 				 struct dma_fence *fence)
1790 {
1791 	struct amdgpu_prt_cb *cb;
1792 
1793 	if (!adev->gmc.gmc_funcs->set_prt)
1794 		return;
1795 
1796 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1797 	if (!cb) {
1798 		/* Last resort when we are OOM */
1799 		if (fence)
1800 			dma_fence_wait(fence, false);
1801 
1802 		amdgpu_vm_prt_put(adev);
1803 	} else {
1804 		cb->adev = adev;
1805 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1806 						     amdgpu_vm_prt_cb))
1807 			amdgpu_vm_prt_cb(fence, &cb->cb);
1808 	}
1809 }
1810 
1811 /**
1812  * amdgpu_vm_free_mapping - free a mapping
1813  *
1814  * @adev: amdgpu_device pointer
1815  * @vm: requested vm
1816  * @mapping: mapping to be freed
1817  * @fence: fence of the unmap operation
1818  *
1819  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1820  */
1821 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1822 				   struct amdgpu_vm *vm,
1823 				   struct amdgpu_bo_va_mapping *mapping,
1824 				   struct dma_fence *fence)
1825 {
1826 	if (mapping->flags & AMDGPU_PTE_PRT)
1827 		amdgpu_vm_add_prt_cb(adev, fence);
1828 	kfree(mapping);
1829 }
1830 
1831 /**
1832  * amdgpu_vm_prt_fini - finish all prt mappings
1833  *
1834  * @adev: amdgpu_device pointer
1835  * @vm: requested vm
1836  *
1837  * Register a cleanup callback to disable PRT support after VM dies.
1838  */
1839 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1840 {
1841 	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1842 	struct dma_fence *excl, **shared;
1843 	unsigned i, shared_count;
1844 	int r;
1845 
1846 	r = reservation_object_get_fences_rcu(resv, &excl,
1847 					      &shared_count, &shared);
1848 	if (r) {
1849 		/* Not enough memory to grab the fence list, as last resort
1850 		 * block for all the fences to complete.
1851 		 */
1852 		reservation_object_wait_timeout_rcu(resv, true, false,
1853 						    MAX_SCHEDULE_TIMEOUT);
1854 		return;
1855 	}
1856 
1857 	/* Add a callback for each fence in the reservation object */
1858 	amdgpu_vm_prt_get(adev);
1859 	amdgpu_vm_add_prt_cb(adev, excl);
1860 
1861 	for (i = 0; i < shared_count; ++i) {
1862 		amdgpu_vm_prt_get(adev);
1863 		amdgpu_vm_add_prt_cb(adev, shared[i]);
1864 	}
1865 
1866 	kfree(shared);
1867 }
1868 
1869 /**
1870  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1871  *
1872  * @adev: amdgpu_device pointer
1873  * @vm: requested vm
1874  * @fence: optional resulting fence (unchanged if no work needed to be done
1875  * or if an error occurred)
1876  *
1877  * Make sure all freed BOs are cleared in the PT.
1878  * PTs have to be reserved and mutex must be locked!
1879  *
1880  * Returns:
1881  * 0 for success.
1882  *
1883  */
1884 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1885 			  struct amdgpu_vm *vm,
1886 			  struct dma_fence **fence)
1887 {
1888 	struct amdgpu_bo_va_mapping *mapping;
1889 	uint64_t init_pte_value = 0;
1890 	struct dma_fence *f = NULL;
1891 	int r;
1892 
1893 	while (!list_empty(&vm->freed)) {
1894 		mapping = list_first_entry(&vm->freed,
1895 			struct amdgpu_bo_va_mapping, list);
1896 		list_del(&mapping->list);
1897 
1898 		if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1899 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1900 
1901 		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1902 						mapping->start, mapping->last,
1903 						init_pte_value, 0, &f);
1904 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1905 		if (r) {
1906 			dma_fence_put(f);
1907 			return r;
1908 		}
1909 	}
1910 
1911 	if (fence && f) {
1912 		dma_fence_put(*fence);
1913 		*fence = f;
1914 	} else {
1915 		dma_fence_put(f);
1916 	}
1917 
1918 	return 0;
1920 }
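
/*
 * Usage sketch for amdgpu_vm_clear_freed() (a minimal sketch only, assuming
 * the caller has already reserved the page tables; the "fence" variable is
 * purely illustrative):
 *
 *	struct dma_fence *fence = NULL;
 *	int r = amdgpu_vm_clear_freed(adev, vm, &fence);
 *
 *	if (!r && fence) {
 *		... wait for or remember the fence as needed ...
 *		dma_fence_put(fence);
 *	}
 */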
1921 
1922 /**
1923  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1924  *
1925  * @adev: amdgpu_device pointer
1926  * @vm: requested vm
1927  *
1928  * Make sure all BOs which are moved are updated in the PTs.
1929  *
1930  * Returns:
1931  * 0 for success.
1932  *
1933  * PTs have to be reserved!
1934  */
1935 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1936 			   struct amdgpu_vm *vm)
1937 {
1938 	struct amdgpu_bo_va *bo_va, *tmp;
1939 	struct list_head moved;
1940 	bool clear;
1941 	int r;
1942 
1943 	INIT_LIST_HEAD(&moved);
1944 	spin_lock(&vm->moved_lock);
1945 	list_splice_init(&vm->moved, &moved);
1946 	spin_unlock(&vm->moved_lock);
1947 
1948 	list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
1949 		struct reservation_object *resv = bo_va->base.bo->tbo.resv;
1950 
		/* Per VM BOs never need to be cleared in the page tables */
1952 		if (resv == vm->root.base.bo->tbo.resv)
1953 			clear = false;
1954 		/* Try to reserve the BO to avoid clearing its ptes */
1955 		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1956 			clear = false;
1957 		/* Somebody else is using the BO right now */
1958 		else
1959 			clear = true;
1960 
1961 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1962 		if (r) {
1963 			spin_lock(&vm->moved_lock);
1964 			list_splice(&moved, &vm->moved);
1965 			spin_unlock(&vm->moved_lock);
1966 			return r;
1967 		}
1968 
1969 		if (!clear && resv != vm->root.base.bo->tbo.resv)
1970 			reservation_object_unlock(resv);
1971 
1972 	}
1973 
1974 	return 0;
1975 }
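
/*
 * Usage sketch for amdgpu_vm_handle_moved(), e.g. while validating a VM
 * before command submission (a sketch only, assuming the PTs are reserved;
 * error handling abbreviated):
 *
 *	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 *	if (!r)
 *		r = amdgpu_vm_handle_moved(adev, vm);
 */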
1976 
1977 /**
1978  * amdgpu_vm_bo_add - add a bo to a specific vm
1979  *
1980  * @adev: amdgpu_device pointer
1981  * @vm: requested vm
1982  * @bo: amdgpu buffer object
1983  *
 * Add @bo into the requested vm and add it to the list of BOs associated
 * with the vm.
1986  *
1987  * Returns:
1988  * Newly added bo_va or NULL for failure
1989  *
1990  * Object has to be reserved!
1991  */
1992 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1993 				      struct amdgpu_vm *vm,
1994 				      struct amdgpu_bo *bo)
1995 {
1996 	struct amdgpu_bo_va *bo_va;
1997 
1998 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL)
		return NULL;
2002 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2003 
2004 	bo_va->ref_count = 1;
2005 	INIT_LIST_HEAD(&bo_va->valids);
2006 	INIT_LIST_HEAD(&bo_va->invalids);
2007 
2008 	return bo_va;
2009 }
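
/*
 * Usage sketch for amdgpu_vm_bo_add() (assuming @bo is already reserved; the
 * bo_va is later released again with amdgpu_vm_bo_rmv()):
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 */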
2010 
2012 /**
 * amdgpu_vm_bo_insert_map - insert a new mapping
2014  *
2015  * @adev: amdgpu_device pointer
2016  * @bo_va: bo_va to store the address
2017  * @mapping: the mapping to insert
2018  *
2019  * Insert a new mapping into all structures.
2020  */
2021 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2022 				    struct amdgpu_bo_va *bo_va,
2023 				    struct amdgpu_bo_va_mapping *mapping)
2024 {
2025 	struct amdgpu_vm *vm = bo_va->base.vm;
2026 	struct amdgpu_bo *bo = bo_va->base.bo;
2027 
2028 	mapping->bo_va = bo_va;
2029 	list_add(&mapping->list, &bo_va->invalids);
2030 	amdgpu_vm_it_insert(mapping, &vm->va);
2031 
2032 	if (mapping->flags & AMDGPU_PTE_PRT)
2033 		amdgpu_vm_prt_get(adev);
2034 
2035 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2036 	    !bo_va->base.moved) {
2037 		spin_lock(&vm->moved_lock);
2038 		list_move(&bo_va->base.vm_status, &vm->moved);
2039 		spin_unlock(&vm->moved_lock);
2040 	}
2041 	trace_amdgpu_vm_bo_map(bo_va, mapping);
2042 }
2043 
2044 /**
2045  * amdgpu_vm_bo_map - map bo inside a vm
2046  *
2047  * @adev: amdgpu_device pointer
2048  * @bo_va: bo_va to store the address
2049  * @saddr: where to map the BO
2050  * @offset: requested offset in the BO
2051  * @size: BO size in bytes
2052  * @flags: attributes of pages (read/write/valid/etc.)
2053  *
 * Add a mapping of the BO at the specified addr into the VM.
2055  *
2056  * Returns:
2057  * 0 for success, error for failure.
2058  *
2059  * Object has to be reserved and unreserved outside!
2060  */
2061 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2062 		     struct amdgpu_bo_va *bo_va,
2063 		     uint64_t saddr, uint64_t offset,
2064 		     uint64_t size, uint64_t flags)
2065 {
2066 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2067 	struct amdgpu_bo *bo = bo_va->base.bo;
2068 	struct amdgpu_vm *vm = bo_va->base.vm;
2069 	uint64_t eaddr;
2070 
2071 	/* validate the parameters */
2072 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2073 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2074 		return -EINVAL;
2075 
2076 	/* make sure object fit at this offset */
2077 	eaddr = saddr + size - 1;
2078 	if (saddr >= eaddr ||
2079 	    (bo && offset + size > amdgpu_bo_size(bo)))
2080 		return -EINVAL;
2081 
2082 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2083 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2084 
2085 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2086 	if (tmp) {
2087 		/* bo and tmp overlap, invalid addr */
2088 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2089 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2090 			tmp->start, tmp->last + 1);
2091 		return -EINVAL;
2092 	}
2093 
2094 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2095 	if (!mapping)
2096 		return -ENOMEM;
2097 
2098 	mapping->start = saddr;
2099 	mapping->last = eaddr;
2100 	mapping->offset = offset;
2101 	mapping->flags = flags;
2102 
2103 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2104 
2105 	return 0;
2106 }
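
/*
 * Usage sketch for amdgpu_vm_bo_map(): map a whole BO at a GPU-page aligned
 * virtual address. The address and PTE flags below are only an example, the
 * real values depend on the caller:
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x400000, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */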
2107 
2108 /**
2109  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2110  *
2111  * @adev: amdgpu_device pointer
2112  * @bo_va: bo_va to store the address
2113  * @saddr: where to map the BO
2114  * @offset: requested offset in the BO
2115  * @size: BO size in bytes
2116  * @flags: attributes of pages (read/write/valid/etc.)
2117  *
 * Add a mapping of the BO at the specified addr into the VM. Replace existing
2119  * mappings as we do so.
2120  *
2121  * Returns:
2122  * 0 for success, error for failure.
2123  *
2124  * Object has to be reserved and unreserved outside!
2125  */
2126 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2127 			     struct amdgpu_bo_va *bo_va,
2128 			     uint64_t saddr, uint64_t offset,
2129 			     uint64_t size, uint64_t flags)
2130 {
2131 	struct amdgpu_bo_va_mapping *mapping;
2132 	struct amdgpu_bo *bo = bo_va->base.bo;
2133 	uint64_t eaddr;
2134 	int r;
2135 
2136 	/* validate the parameters */
2137 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2138 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2139 		return -EINVAL;
2140 
2141 	/* make sure object fit at this offset */
2142 	eaddr = saddr + size - 1;
2143 	if (saddr >= eaddr ||
2144 	    (bo && offset + size > amdgpu_bo_size(bo)))
2145 		return -EINVAL;
2146 
2147 	/* Allocate all the needed memory */
2148 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2149 	if (!mapping)
2150 		return -ENOMEM;
2151 
2152 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2153 	if (r) {
2154 		kfree(mapping);
2155 		return r;
2156 	}
2157 
2158 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2159 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2160 
2161 	mapping->start = saddr;
2162 	mapping->last = eaddr;
2163 	mapping->offset = offset;
2164 	mapping->flags = flags;
2165 
2166 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2167 
2168 	return 0;
2169 }
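
/*
 * Usage sketch for amdgpu_vm_bo_replace_map(): same parameters as
 * amdgpu_vm_bo_map(), but existing mappings covering the range are clipped
 * or removed first (values illustrative):
 *
 *	r = amdgpu_vm_bo_replace_map(adev, bo_va, 0x400000, 0,
 *				     amdgpu_bo_size(bo), flags);
 */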
2170 
2171 /**
2172  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2173  *
2174  * @adev: amdgpu_device pointer
2175  * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
2177  *
 * Remove a mapping of the BO at the specified addr from the VM.
2179  *
2180  * Returns:
2181  * 0 for success, error for failure.
2182  *
2183  * Object has to be reserved and unreserved outside!
2184  */
2185 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2186 		       struct amdgpu_bo_va *bo_va,
2187 		       uint64_t saddr)
2188 {
2189 	struct amdgpu_bo_va_mapping *mapping;
2190 	struct amdgpu_vm *vm = bo_va->base.vm;
2191 	bool valid = true;
2192 
2193 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2194 
2195 	list_for_each_entry(mapping, &bo_va->valids, list) {
2196 		if (mapping->start == saddr)
2197 			break;
2198 	}
2199 
2200 	if (&mapping->list == &bo_va->valids) {
2201 		valid = false;
2202 
2203 		list_for_each_entry(mapping, &bo_va->invalids, list) {
2204 			if (mapping->start == saddr)
2205 				break;
2206 		}
2207 
2208 		if (&mapping->list == &bo_va->invalids)
2209 			return -ENOENT;
2210 	}
2211 
2212 	list_del(&mapping->list);
2213 	amdgpu_vm_it_remove(mapping, &vm->va);
2214 	mapping->bo_va = NULL;
2215 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2216 
2217 	if (valid)
2218 		list_add(&mapping->list, &vm->freed);
2219 	else
2220 		amdgpu_vm_free_mapping(adev, vm, mapping,
2221 				       bo_va->last_pt_update);
2222 
2223 	return 0;
2224 }
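
/*
 * Usage sketch for amdgpu_vm_bo_unmap(): @saddr must match the start address
 * that was passed to amdgpu_vm_bo_map() earlier (value illustrative):
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, 0x400000);
 */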
2225 
2226 /**
2227  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2228  *
2229  * @adev: amdgpu_device pointer
2230  * @vm: VM structure to use
2231  * @saddr: start of the range
2232  * @size: size of the range
2233  *
 * Remove all mappings in a range, splitting them as appropriate.
2235  *
2236  * Returns:
2237  * 0 for success, error for failure.
2238  */
2239 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2240 				struct amdgpu_vm *vm,
2241 				uint64_t saddr, uint64_t size)
2242 {
2243 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2244 	LIST_HEAD(removed);
2245 	uint64_t eaddr;
2246 
2247 	eaddr = saddr + size - 1;
2248 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2249 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2250 
2251 	/* Allocate all the needed memory */
2252 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2253 	if (!before)
2254 		return -ENOMEM;
2255 	INIT_LIST_HEAD(&before->list);
2256 
2257 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2258 	if (!after) {
2259 		kfree(before);
2260 		return -ENOMEM;
2261 	}
2262 	INIT_LIST_HEAD(&after->list);
2263 
2264 	/* Now gather all removed mappings */
2265 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2266 	while (tmp) {
2267 		/* Remember mapping split at the start */
2268 		if (tmp->start < saddr) {
2269 			before->start = tmp->start;
2270 			before->last = saddr - 1;
2271 			before->offset = tmp->offset;
2272 			before->flags = tmp->flags;
2273 			before->bo_va = tmp->bo_va;
2274 			list_add(&before->list, &tmp->bo_va->invalids);
2275 		}
2276 
2277 		/* Remember mapping split at the end */
2278 		if (tmp->last > eaddr) {
2279 			after->start = eaddr + 1;
2280 			after->last = tmp->last;
2281 			after->offset = tmp->offset;
2282 			after->offset += after->start - tmp->start;
2283 			after->flags = tmp->flags;
2284 			after->bo_va = tmp->bo_va;
2285 			list_add(&after->list, &tmp->bo_va->invalids);
2286 		}
2287 
2288 		list_del(&tmp->list);
2289 		list_add(&tmp->list, &removed);
2290 
2291 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2292 	}
2293 
2294 	/* And free them up */
2295 	list_for_each_entry_safe(tmp, next, &removed, list) {
2296 		amdgpu_vm_it_remove(tmp, &vm->va);
2297 		list_del(&tmp->list);
2298 
		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;
2303 
2304 		tmp->bo_va = NULL;
2305 		list_add(&tmp->list, &vm->freed);
2306 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2307 	}
2308 
2309 	/* Insert partial mapping before the range */
2310 	if (!list_empty(&before->list)) {
2311 		amdgpu_vm_it_insert(before, &vm->va);
2312 		if (before->flags & AMDGPU_PTE_PRT)
2313 			amdgpu_vm_prt_get(adev);
2314 	} else {
2315 		kfree(before);
2316 	}
2317 
2318 	/* Insert partial mapping after the range */
2319 	if (!list_empty(&after->list)) {
2320 		amdgpu_vm_it_insert(after, &vm->va);
2321 		if (after->flags & AMDGPU_PTE_PRT)
2322 			amdgpu_vm_prt_get(adev);
2323 	} else {
2324 		kfree(after);
2325 	}
2326 
2327 	return 0;
2328 }
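
/*
 * Worked example for the splitting above (page indices, purely illustrative):
 * clearing pages 0x1000..0x17ff from a mapping that covers 0x0800..0x1fff
 * moves the clipped middle part to the freed list and re-inserts two
 * remainders, "before" = 0x0800..0x0fff (offset unchanged) and
 * "after" = 0x1800..0x1fff (offset advanced by 0x1000 pages).
 */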
2329 
2330 /**
2331  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2332  *
2333  * @vm: the requested VM
2334  * @addr: the address
2335  *
 * Find a mapping by its address.
2337  *
2338  * Returns:
2339  * The amdgpu_bo_va_mapping matching for addr or NULL
2340  *
2341  */
2342 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2343 							 uint64_t addr)
2344 {
2345 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2346 }
2347 
2348 /**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2350  *
2351  * @adev: amdgpu_device pointer
2352  * @bo_va: requested bo_va
2353  *
2354  * Remove @bo_va->bo from the requested vm.
2355  *
 * Object has to be reserved!
2357  */
2358 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2359 		      struct amdgpu_bo_va *bo_va)
2360 {
2361 	struct amdgpu_bo_va_mapping *mapping, *next;
2362 	struct amdgpu_vm *vm = bo_va->base.vm;
2363 
2364 	list_del(&bo_va->base.bo_list);
2365 
2366 	spin_lock(&vm->moved_lock);
2367 	list_del(&bo_va->base.vm_status);
2368 	spin_unlock(&vm->moved_lock);
2369 
2370 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2371 		list_del(&mapping->list);
2372 		amdgpu_vm_it_remove(mapping, &vm->va);
2373 		mapping->bo_va = NULL;
2374 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2375 		list_add(&mapping->list, &vm->freed);
2376 	}
2377 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2378 		list_del(&mapping->list);
2379 		amdgpu_vm_it_remove(mapping, &vm->va);
2380 		amdgpu_vm_free_mapping(adev, vm, mapping,
2381 				       bo_va->last_pt_update);
2382 	}
2383 
2384 	dma_fence_put(bo_va->last_pt_update);
2385 	kfree(bo_va);
2386 }
2387 
2388 /**
2389  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2390  *
2391  * @adev: amdgpu_device pointer
2392  * @bo: amdgpu buffer object
2393  * @evicted: is the BO evicted
2394  *
2395  * Mark @bo as invalid.
2396  */
2397 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2398 			     struct amdgpu_bo *bo, bool evicted)
2399 {
2400 	struct amdgpu_vm_bo_base *bo_base;
2401 
2402 	/* shadow bo doesn't have bo base, its validation needs its parent */
2403 	if (bo->parent && bo->parent->shadow == bo)
2404 		bo = bo->parent;
2405 
2406 	list_for_each_entry(bo_base, &bo->va, bo_list) {
2407 		struct amdgpu_vm *vm = bo_base->vm;
2408 		bool was_moved = bo_base->moved;
2409 
2410 		bo_base->moved = true;
2411 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2412 			if (bo->tbo.type == ttm_bo_type_kernel)
2413 				list_move(&bo_base->vm_status, &vm->evicted);
2414 			else
2415 				list_move_tail(&bo_base->vm_status,
2416 					       &vm->evicted);
2417 			continue;
2418 		}
2419 
2420 		if (was_moved)
2421 			continue;
2422 
2423 		if (bo->tbo.type == ttm_bo_type_kernel) {
2424 			list_move(&bo_base->vm_status, &vm->relocated);
2425 		} else {
2426 			spin_lock(&bo_base->vm->moved_lock);
2427 			list_move(&bo_base->vm_status, &vm->moved);
2428 			spin_unlock(&bo_base->vm->moved_lock);
2429 		}
2430 	}
2431 }
2432 
2433 /**
2434  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2435  *
2436  * @vm_size: VM size
2437  *
2438  * Returns:
 * VM page table size as a power of two
2440  */
2441 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2442 {
2443 	/* Total bits covered by PD + PTs */
2444 	unsigned bits = ilog2(vm_size) + 18;
2445 
	/* Make sure the PD is 4K in size up to 8GB address space.
	 * Above that split equally between PD and PTs.
	 */
	if (vm_size <= 8)
		return bits - 9;
	else
		return (bits + 3) / 2;
2452 }
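
/*
 * Worked example for amdgpu_vm_get_block_size(): for a 4 GB VM, bits =
 * ilog2(4) + 18 = 20 and the result is 20 - 9 = 11, which keeps the PD at
 * 4K (512 entries). For a 64 GB VM, bits = 24 and the result is
 * (24 + 3) / 2 = 13, splitting the bits roughly evenly between PD and PTs.
 */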
2453 
2454 /**
2455  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2456  *
2457  * @adev: amdgpu_device pointer
 * @vm_size: the default VM size if it's set to auto
2459  * @fragment_size_default: Default PTE fragment size
2460  * @max_level: max VMPT level
2461  * @max_bits: max address space size in bits
2462  *
2463  */
2464 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
2465 			   uint32_t fragment_size_default, unsigned max_level,
2466 			   unsigned max_bits)
2467 {
2468 	uint64_t tmp;
2469 
2470 	/* adjust vm size first */
2471 	if (amdgpu_vm_size != -1) {
2472 		unsigned max_size = 1 << (max_bits - 30);
2473 
2474 		vm_size = amdgpu_vm_size;
2475 		if (vm_size > max_size) {
2476 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2477 				 amdgpu_vm_size, max_size);
2478 			vm_size = max_size;
2479 		}
2480 	}
2481 
2482 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2483 
2484 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2485 	if (amdgpu_vm_block_size != -1)
2486 		tmp >>= amdgpu_vm_block_size - 9;
2487 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2488 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2489 	switch (adev->vm_manager.num_level) {
2490 	case 3:
2491 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2492 		break;
2493 	case 2:
2494 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2495 		break;
2496 	case 1:
2497 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2498 		break;
2499 	default:
2500 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2501 	}
	/* block size depends on vm size and hw setup */
2503 	if (amdgpu_vm_block_size != -1)
2504 		adev->vm_manager.block_size =
2505 			min((unsigned)amdgpu_vm_block_size, max_bits
2506 			    - AMDGPU_GPU_PAGE_SHIFT
2507 			    - 9 * adev->vm_manager.num_level);
2508 	else if (adev->vm_manager.num_level > 1)
2509 		adev->vm_manager.block_size = 9;
2510 	else
2511 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2512 
2513 	if (amdgpu_vm_fragment_size == -1)
2514 		adev->vm_manager.fragment_size = fragment_size_default;
2515 	else
2516 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2517 
2518 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2519 		 vm_size, adev->vm_manager.num_level + 1,
2520 		 adev->vm_manager.block_size,
2521 		 adev->vm_manager.fragment_size);
2522 }
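
/*
 * Worked example for amdgpu_vm_adjust_size(), assuming no module parameter
 * overrides: with vm_size = 256 GB, max_pfn = 256 << 18 = 0x4000000 pages,
 * fls64(0x4000000) - 1 = 26 and DIV_ROUND_UP(26, 9) - 1 = 2, so num_level
 * becomes min(max_level, 2) and the block size defaults to 9 bits.
 */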
2523 
2524 /**
2525  * amdgpu_vm_init - initialize a vm instance
2526  *
2527  * @adev: amdgpu_device pointer
2528  * @vm: requested vm
 * @vm_context: Indicates if it is a GFX or Compute context
2530  * @pasid: Process address space identifier
2531  *
2532  * Init @vm fields.
2533  *
2534  * Returns:
2535  * 0 for success, error for failure.
2536  */
2537 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2538 		   int vm_context, unsigned int pasid)
2539 {
2540 	struct amdgpu_bo_param bp;
2541 	struct amdgpu_bo *root;
2542 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2543 		AMDGPU_VM_PTE_COUNT(adev) * 8);
2544 	unsigned ring_instance;
2545 	struct amdgpu_ring *ring;
2546 	struct drm_sched_rq *rq;
2547 	unsigned long size;
2548 	uint64_t flags;
2549 	int r, i;
2550 
2551 	vm->va = RB_ROOT_CACHED;
2552 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2553 		vm->reserved_vmid[i] = NULL;
2554 	INIT_LIST_HEAD(&vm->evicted);
2555 	INIT_LIST_HEAD(&vm->relocated);
2556 	spin_lock_init(&vm->moved_lock);
2557 	INIT_LIST_HEAD(&vm->moved);
2558 	INIT_LIST_HEAD(&vm->idle);
2559 	INIT_LIST_HEAD(&vm->freed);
2560 
2561 	/* create scheduler entity for page table updates */
2562 
2563 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2564 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
2565 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
2566 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2567 	r = drm_sched_entity_init(&ring->sched, &vm->entity,
2568 				  rq, NULL);
2569 	if (r)
2570 		return r;
2571 
2572 	vm->pte_support_ats = false;
2573 
2574 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2575 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2576 						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2577 
2578 		if (adev->asic_type == CHIP_RAVEN)
2579 			vm->pte_support_ats = true;
2580 	} else {
2581 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2582 						AMDGPU_VM_USE_CPU_FOR_GFX);
2583 	}
2584 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2585 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2587 		  "CPU update of VM recommended only for large BAR system\n");
2588 	vm->last_update = NULL;
2589 
2590 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2591 	if (vm->use_cpu_for_update)
2592 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2593 	else
2594 		flags |= AMDGPU_GEM_CREATE_SHADOW;
2595 
2596 	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2597 	memset(&bp, 0, sizeof(bp));
2598 	bp.size = size;
2599 	bp.byte_align = align;
2600 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
2601 	bp.flags = flags;
2602 	bp.type = ttm_bo_type_kernel;
2603 	bp.resv = NULL;
2604 	r = amdgpu_bo_create(adev, &bp, &root);
2605 	if (r)
2606 		goto error_free_sched_entity;
2607 
2608 	r = amdgpu_bo_reserve(root, true);
2609 	if (r)
2610 		goto error_free_root;
2611 
2612 	r = amdgpu_vm_clear_bo(adev, vm, root,
2613 			       adev->vm_manager.root_level,
2614 			       vm->pte_support_ats);
2615 	if (r)
2616 		goto error_unreserve;
2617 
2618 	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2619 	amdgpu_bo_unreserve(vm->root.base.bo);
2620 
2621 	if (pasid) {
2622 		unsigned long flags;
2623 
2624 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2625 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2626 			      GFP_ATOMIC);
2627 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2628 		if (r < 0)
2629 			goto error_free_root;
2630 
2631 		vm->pasid = pasid;
2632 	}
2633 
2634 	INIT_KFIFO(vm->faults);
2635 	vm->fault_credit = 16;
2636 
2637 	return 0;
2638 
2639 error_unreserve:
2640 	amdgpu_bo_unreserve(vm->root.base.bo);
2641 
2642 error_free_root:
2643 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2644 	amdgpu_bo_unref(&vm->root.base.bo);
2645 	vm->root.base.bo = NULL;
2646 
2647 error_free_sched_entity:
2648 	drm_sched_entity_destroy(&ring->sched, &vm->entity);
2649 
2650 	return r;
2651 }
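
/*
 * Usage sketch for amdgpu_vm_init() (a minimal sketch only; error handling
 * abbreviated, and a PASID of 0 means no PASID is assigned):
 *
 *	r = amdgpu_vm_init(adev, vm, AMDGPU_VM_CONTEXT_GFX, 0);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, vm);
 */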
2652 
2653 /**
2654  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2655  *
2656  * @adev: amdgpu_device pointer
2657  * @vm: requested vm
2658  *
2659  * This only works on GFX VMs that don't have any BOs added and no
2660  * page tables allocated yet.
2661  *
2662  * Changes the following VM parameters:
2663  * - use_cpu_for_update
 * - pte_support_ats
2665  * - pasid (old PASID is released, because compute manages its own PASIDs)
2666  *
2667  * Reinitializes the page directory to reflect the changed ATS
2668  * setting. May leave behind an unused shadow BO for the page
2669  * directory when switching from SDMA updates to CPU updates.
2670  *
2671  * Returns:
2672  * 0 for success, -errno for errors.
2673  */
2674 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2675 {
2676 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2677 	int r;
2678 
2679 	r = amdgpu_bo_reserve(vm->root.base.bo, true);
2680 	if (r)
2681 		return r;
2682 
2683 	/* Sanity checks */
2684 	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2685 		r = -EINVAL;
2686 		goto error;
2687 	}
2688 
2689 	/* Check if PD needs to be reinitialized and do it before
2690 	 * changing any other state, in case it fails.
2691 	 */
2692 	if (pte_support_ats != vm->pte_support_ats) {
2693 		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2694 			       adev->vm_manager.root_level,
2695 			       pte_support_ats);
2696 		if (r)
2697 			goto error;
2698 	}
2699 
2700 	/* Update VM state */
2701 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2702 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2703 	vm->pte_support_ats = pte_support_ats;
2704 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2705 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2707 		  "CPU update of VM recommended only for large BAR system\n");
2708 
2709 	if (vm->pasid) {
2710 		unsigned long flags;
2711 
2712 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2713 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2714 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2715 
2716 		vm->pasid = 0;
2717 	}
2718 
2719 error:
2720 	amdgpu_bo_unreserve(vm->root.base.bo);
2721 	return r;
2722 }
2723 
2724 /**
2725  * amdgpu_vm_free_levels - free PD/PT levels
2726  *
2727  * @adev: amdgpu device structure
2728  * @parent: PD/PT starting level to free
2729  * @level: level of parent structure
2730  *
2731  * Free the page directory or page table level and all sub levels.
2732  */
2733 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2734 				  struct amdgpu_vm_pt *parent,
2735 				  unsigned level)
2736 {
2737 	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2738 
2739 	if (parent->base.bo) {
2740 		list_del(&parent->base.bo_list);
2741 		list_del(&parent->base.vm_status);
2742 		amdgpu_bo_unref(&parent->base.bo->shadow);
2743 		amdgpu_bo_unref(&parent->base.bo);
2744 	}
2745 
2746 	if (parent->entries)
2747 		for (i = 0; i < num_entries; i++)
2748 			amdgpu_vm_free_levels(adev, &parent->entries[i],
2749 					      level + 1);
2750 
2751 	kvfree(parent->entries);
2752 }
2753 
2754 /**
2755  * amdgpu_vm_fini - tear down a vm instance
2756  *
2757  * @adev: amdgpu_device pointer
2758  * @vm: requested vm
2759  *
2760  * Tear down @vm.
2761  * Unbind the VM and remove all bos from the vm bo list
2762  */
2763 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2764 {
2765 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2766 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2767 	struct amdgpu_bo *root;
2768 	u64 fault;
2769 	int i, r;
2770 
2771 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2772 
2773 	/* Clear pending page faults from IH when the VM is destroyed */
2774 	while (kfifo_get(&vm->faults, &fault))
2775 		amdgpu_ih_clear_fault(adev, fault);
2776 
2777 	if (vm->pasid) {
2778 		unsigned long flags;
2779 
2780 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2781 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2782 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2783 	}
2784 
2785 	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
2786 
	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
		dev_err(adev->dev, "still active bo inside vm\n");
2790 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2791 					     &vm->va.rb_root, rb) {
2792 		list_del(&mapping->list);
2793 		amdgpu_vm_it_remove(mapping, &vm->va);
2794 		kfree(mapping);
2795 	}
2796 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2797 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2798 			amdgpu_vm_prt_fini(adev, vm);
2799 			prt_fini_needed = false;
2800 		}
2801 
2802 		list_del(&mapping->list);
2803 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2804 	}
2805 
2806 	root = amdgpu_bo_ref(vm->root.base.bo);
2807 	r = amdgpu_bo_reserve(root, true);
2808 	if (r) {
2809 		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2810 	} else {
2811 		amdgpu_vm_free_levels(adev, &vm->root,
2812 				      adev->vm_manager.root_level);
2813 		amdgpu_bo_unreserve(root);
2814 	}
2815 	amdgpu_bo_unref(&root);
2816 	dma_fence_put(vm->last_update);
2817 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2818 		amdgpu_vmid_free_reserved(adev, vm, i);
2819 }
2820 
2821 /**
2822  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2823  *
2824  * @adev: amdgpu_device pointer
 * @pasid: PASID to identify the VM
2826  *
2827  * This function is expected to be called in interrupt context.
2828  *
2829  * Returns:
2830  * True if there was fault credit, false otherwise
2831  */
2832 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2833 				  unsigned int pasid)
2834 {
2835 	struct amdgpu_vm *vm;
2836 
2837 	spin_lock(&adev->vm_manager.pasid_lock);
2838 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2839 	if (!vm) {
2840 		/* VM not found, can't track fault credit */
2841 		spin_unlock(&adev->vm_manager.pasid_lock);
2842 		return true;
2843 	}
2844 
	/* No lock needed. Only accessed by IRQ handler */
2846 	if (!vm->fault_credit) {
2847 		/* Too many faults in this VM */
2848 		spin_unlock(&adev->vm_manager.pasid_lock);
2849 		return false;
2850 	}
2851 
2852 	vm->fault_credit--;
2853 	spin_unlock(&adev->vm_manager.pasid_lock);
2854 	return true;
2855 }
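
/*
 * Usage sketch for amdgpu_vm_pasid_fault_credit() from an interrupt handler
 * deciding whether a fault should still be processed ("pasid" comes from the
 * IH ring entry):
 *
 *	if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
 *		return false;
 */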
2856 
2857 /**
2858  * amdgpu_vm_manager_init - init the VM manager
2859  *
2860  * @adev: amdgpu_device pointer
2861  *
2862  * Initialize the VM manager structures
2863  */
2864 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2865 {
2866 	unsigned i;
2867 
2868 	amdgpu_vmid_mgr_init(adev);
2869 
2870 	adev->vm_manager.fence_context =
2871 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2872 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2873 		adev->vm_manager.seqno[i] = 0;
2874 
2875 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2876 	spin_lock_init(&adev->vm_manager.prt_lock);
2877 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2878 
	/* If not overridden by the user, compute VM tables will only be
	 * updated by the CPU on large BAR systems.
	 */
2882 #ifdef CONFIG_X86_64
2883 	if (amdgpu_vm_update_mode == -1) {
2884 		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
2885 			adev->vm_manager.vm_update_mode =
2886 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2887 		else
2888 			adev->vm_manager.vm_update_mode = 0;
2889 	} else
2890 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2891 #else
2892 	adev->vm_manager.vm_update_mode = 0;
2893 #endif
2894 
2895 	idr_init(&adev->vm_manager.pasid_idr);
2896 	spin_lock_init(&adev->vm_manager.pasid_lock);
2897 }
2898 
2899 /**
2900  * amdgpu_vm_manager_fini - cleanup VM manager
2901  *
2902  * @adev: amdgpu_device pointer
2903  *
2904  * Cleanup the VM manager and free resources.
2905  */
2906 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2907 {
2908 	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
2909 	idr_destroy(&adev->vm_manager.pasid_idr);
2910 
2911 	amdgpu_vmid_mgr_fini(adev);
2912 }
2913 
2914 /**
2915  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2916  *
2917  * @dev: drm device pointer
2918  * @data: drm_amdgpu_vm
2919  * @filp: drm file pointer
2920  *
2921  * Returns:
2922  * 0 for success, -errno for errors.
2923  */
2924 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2925 {
2926 	union drm_amdgpu_vm *args = data;
2927 	struct amdgpu_device *adev = dev->dev_private;
2928 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2929 	int r;
2930 
2931 	switch (args->in.op) {
2932 	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently, we only need to reserve VMIDs from the gfxhub */
2934 		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2935 		if (r)
2936 			return r;
2937 		break;
2938 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2939 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2940 		break;
2941 	default:
2942 		return -EINVAL;
2943 	}
2944 
2945 	return 0;
2946 }
2947 
2948 /**
2949  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2950  *
 * @adev: amdgpu_device pointer
2952  * @pasid: PASID identifier for VM
2953  * @task_info: task_info to fill.
2954  */
2955 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
2956 			 struct amdgpu_task_info *task_info)
2957 {
2958 	struct amdgpu_vm *vm;
2959 
2960 	spin_lock(&adev->vm_manager.pasid_lock);
2961 
2962 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2963 	if (vm)
2964 		*task_info = vm->task_info;
2965 
2966 	spin_unlock(&adev->vm_manager.pasid_lock);
2967 }
2968 
2969 /**
2970  * amdgpu_vm_set_task_info - Sets VMs task info.
2971  *
2972  * @vm: vm for which to set the info
2973  */
2974 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
2975 {
2976 	if (!vm->task_info.pid) {
2977 		vm->task_info.pid = current->pid;
2978 		get_task_comm(vm->task_info.task_name, current);
2979 
2980 		if (current->group_leader->mm == current->mm) {
2981 			vm->task_info.tgid = current->group_leader->pid;
2982 			get_task_comm(vm->task_info.process_name, current->group_leader);
2983 		}
2984 	}
2985 }
2986