1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 #include "amdgpu_amdkfd.h"
36 #include "amdgpu_gmc.h"
37 
38 /**
39  * DOC: GPUVM
40  *
41  * GPUVM is similar to the legacy gart on older asics, however
42  * rather than there being a single global gart table
43  * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
46  * can be mapped as snooped (cached system pages) or unsnooped
47  * (uncached system pages).
48  * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
51  * buffer.  VMIDs are allocated dynamically as commands are submitted.
52  * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
54  * command buffers and a VMID is assigned.
55  * Cayman/Trinity support up to 8 active VMs at any given time;
56  * SI supports 16.
57  */
58 
59 #define START(node) ((node)->start)
60 #define LAST(node) ((node)->last)
61 
62 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
63 		     START, LAST, static, amdgpu_vm_it)
64 
65 #undef START
66 #undef LAST
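
/*
 * A minimal sketch of how the interval tree defined above is queried for
 * mappings overlapping [saddr, eaddr], as done in amdgpu_vm_bo_map()
 * (assuming vm->va is the tree root):
 *
 *   struct amdgpu_bo_va_mapping *m;
 *
 *   for (m = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); m;
 *        m = amdgpu_vm_it_iter_next(m, saddr, eaddr)) {
 *           ... m->start and m->last overlap the requested range ...
 *   }
 */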
67 
68 /**
69  * struct amdgpu_pte_update_params - Local structure
70  *
71  * Encapsulate some VM table update parameters to reduce
72  * the number of function parameters
73  *
74  */
75 struct amdgpu_pte_update_params {
76 
77 	/**
78 	 * @adev: amdgpu device we do this update for
79 	 */
80 	struct amdgpu_device *adev;
81 
82 	/**
83 	 * @vm: optional amdgpu_vm we do this update for
84 	 */
85 	struct amdgpu_vm *vm;
86 
87 	/**
88 	 * @src: address where to copy page table entries from
89 	 */
90 	uint64_t src;
91 
92 	/**
93 	 * @ib: indirect buffer to fill with commands
94 	 */
95 	struct amdgpu_ib *ib;
96 
97 	/**
98 	 * @func: Function which actually does the update
99 	 */
100 	void (*func)(struct amdgpu_pte_update_params *params,
101 		     struct amdgpu_bo *bo, uint64_t pe,
102 		     uint64_t addr, unsigned count, uint32_t incr,
103 		     uint64_t flags);
104 	/**
105 	 * @pages_addr:
106 	 *
107 	 * DMA addresses to use for mapping, used during VM update by CPU
108 	 */
109 	dma_addr_t *pages_addr;
110 
111 	/**
112 	 * @kptr:
113 	 *
114 	 * Kernel pointer of PD/PT BO that needs to be updated,
115 	 * used during VM update by CPU
116 	 */
117 	void *kptr;
118 };
119 
120 /**
121  * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
122  */
123 struct amdgpu_prt_cb {
124 
125 	/**
126 	 * @adev: amdgpu device
127 	 */
128 	struct amdgpu_device *adev;
129 
130 	/**
131 	 * @cb: callback
132 	 */
133 	struct dma_fence_cb cb;
134 };
135 
136 /**
137  * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
138  *
139  * @base: base structure for tracking BO usage in a VM
140  * @vm: vm to which bo is to be added
141  * @bo: amdgpu buffer object
142  *
143  * Initialize a bo_va_base structure and add it to the appropriate lists
144  *
145  */
146 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
147 				   struct amdgpu_vm *vm,
148 				   struct amdgpu_bo *bo)
149 {
150 	base->vm = vm;
151 	base->bo = bo;
152 	INIT_LIST_HEAD(&base->bo_list);
153 	INIT_LIST_HEAD(&base->vm_status);
154 
155 	if (!bo)
156 		return;
157 	list_add_tail(&base->bo_list, &bo->va);
158 
159 	if (bo->tbo.type == ttm_bo_type_kernel)
160 		list_move(&base->vm_status, &vm->relocated);
161 
162 	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
163 		return;
164 
165 	if (bo->preferred_domains &
166 	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
167 		return;
168 
169 	/*
170 	 * we checked all the prerequisites, but it looks like this per vm bo
171 	 * is currently evicted. add the bo to the evicted list to make sure it
172 	 * is validated on next vm use to avoid fault.
173 	 * */
174 	list_move_tail(&base->vm_status, &vm->evicted);
175 }
176 
177 /**
178  * amdgpu_vm_level_shift - return the addr shift for each level
179  *
180  * @adev: amdgpu_device pointer
181  * @level: VMPT level
182  *
183  * Returns:
184  * The number of bits the pfn needs to be right shifted for a level.
185  */
186 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
187 				      unsigned level)
188 {
189 	unsigned shift = 0xff;
190 
191 	switch (level) {
192 	case AMDGPU_VM_PDB2:
193 	case AMDGPU_VM_PDB1:
194 	case AMDGPU_VM_PDB0:
195 		shift = 9 * (AMDGPU_VM_PDB0 - level) +
196 			adev->vm_manager.block_size;
197 		break;
198 	case AMDGPU_VM_PTB:
199 		shift = 0;
200 		break;
201 	default:
		dev_err(adev->dev, "the level %d isn't supported.\n", level);
203 	}
204 
205 	return shift;
206 }
207 
208 /**
209  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
210  *
211  * @adev: amdgpu_device pointer
212  * @level: VMPT level
213  *
214  * Returns:
215  * The number of entries in a page directory or page table.
216  */
217 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
218 				      unsigned level)
219 {
220 	unsigned shift = amdgpu_vm_level_shift(adev,
221 					       adev->vm_manager.root_level);
222 
223 	if (level == adev->vm_manager.root_level)
224 		/* For the root directory */
225 		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
226 	else if (level != AMDGPU_VM_PTB)
227 		/* Everything in between */
228 		return 512;
229 	else
230 		/* For the page tables on the leaves */
231 		return AMDGPU_VM_PTE_COUNT(adev);
232 }
233 
234 /**
235  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
236  *
237  * @adev: amdgpu_device pointer
238  * @level: VMPT level
239  *
240  * Returns:
241  * The size of the BO for a page directory or page table in bytes.
242  */
243 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
244 {
245 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
246 }
247 
248 /**
249  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
250  *
251  * @vm: vm providing the BOs
252  * @validated: head of validation list
253  * @entry: entry to add
254  *
255  * Add the page directory to the list of BOs to
256  * validate for command submission.
257  */
258 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
259 			 struct list_head *validated,
260 			 struct amdgpu_bo_list_entry *entry)
261 {
262 	entry->robj = vm->root.base.bo;
263 	entry->priority = 0;
264 	entry->tv.bo = &entry->robj->tbo;
265 	entry->tv.shared = true;
266 	entry->user_pages = NULL;
267 	list_add(&entry->tv.head, validated);
268 }
269 
270 /**
271  * amdgpu_vm_validate_pt_bos - validate the page table BOs
272  *
273  * @adev: amdgpu device pointer
274  * @vm: vm providing the BOs
275  * @validate: callback to do the validation
276  * @param: parameter for the validation callback
277  *
 * Validate the page table BOs on command submission if necessary.
279  *
280  * Returns:
281  * Validation result.
282  */
283 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
284 			      int (*validate)(void *p, struct amdgpu_bo *bo),
285 			      void *param)
286 {
287 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
288 	struct amdgpu_vm_bo_base *bo_base, *tmp;
289 	int r = 0;
290 
291 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
292 		struct amdgpu_bo *bo = bo_base->bo;
293 
294 		if (bo->parent) {
295 			r = validate(param, bo);
296 			if (r)
297 				break;
298 
299 			spin_lock(&glob->lru_lock);
300 			ttm_bo_move_to_lru_tail(&bo->tbo);
301 			if (bo->shadow)
302 				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
303 			spin_unlock(&glob->lru_lock);
304 		}
305 
306 		if (bo->tbo.type != ttm_bo_type_kernel) {
307 			spin_lock(&vm->moved_lock);
308 			list_move(&bo_base->vm_status, &vm->moved);
309 			spin_unlock(&vm->moved_lock);
310 		} else {
311 			list_move(&bo_base->vm_status, &vm->relocated);
312 		}
313 	}
314 
315 	spin_lock(&glob->lru_lock);
316 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
317 		struct amdgpu_bo *bo = bo_base->bo;
318 
319 		if (!bo->parent)
320 			continue;
321 
322 		ttm_bo_move_to_lru_tail(&bo->tbo);
323 		if (bo->shadow)
324 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
325 	}
326 	spin_unlock(&glob->lru_lock);
327 
328 	return r;
329 }
330 
331 /**
332  * amdgpu_vm_ready - check VM is ready for updates
333  *
334  * @vm: VM to check
335  *
336  * Check if all VM PDs/PTs are ready for updates
337  *
338  * Returns:
339  * True if eviction list is empty.
340  */
341 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
342 {
343 	return list_empty(&vm->evicted);
344 }
345 
346 /**
347  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
348  *
349  * @adev: amdgpu_device pointer
350  * @vm: VM to clear BO from
351  * @bo: BO to clear
352  * @level: level this BO is at
353  * @pte_support_ats: indicate ATS support from PTE
354  *
355  * Root PD needs to be reserved when calling this.
356  *
357  * Returns:
358  * 0 on success, errno otherwise.
359  */
360 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
361 			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
362 			      unsigned level, bool pte_support_ats)
363 {
364 	struct ttm_operation_ctx ctx = { true, false };
365 	struct dma_fence *fence = NULL;
366 	unsigned entries, ats_entries;
367 	struct amdgpu_ring *ring;
368 	struct amdgpu_job *job;
369 	uint64_t addr;
370 	int r;
371 
372 	addr = amdgpu_bo_gpu_offset(bo);
373 	entries = amdgpu_bo_size(bo) / 8;
374 
375 	if (pte_support_ats) {
376 		if (level == adev->vm_manager.root_level) {
377 			ats_entries = amdgpu_vm_level_shift(adev, level);
378 			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
379 			ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
380 			ats_entries = min(ats_entries, entries);
381 			entries -= ats_entries;
382 		} else {
383 			ats_entries = entries;
384 			entries = 0;
385 		}
386 	} else {
387 		ats_entries = 0;
388 	}
389 
390 	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
391 
392 	r = reservation_object_reserve_shared(bo->tbo.resv);
393 	if (r)
394 		return r;
395 
396 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
397 	if (r)
398 		goto error;
399 
400 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
401 	if (r)
402 		goto error;
403 
404 	if (ats_entries) {
405 		uint64_t ats_value;
406 
407 		ats_value = AMDGPU_PTE_DEFAULT_ATC;
408 		if (level != AMDGPU_VM_PTB)
409 			ats_value |= AMDGPU_PDE_PTE;
410 
411 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
412 				      ats_entries, 0, ats_value);
413 		addr += ats_entries * 8;
414 	}
415 
416 	if (entries)
417 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
418 				      entries, 0, 0);
419 
420 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
421 
422 	WARN_ON(job->ibs[0].length_dw > 64);
423 	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
424 			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
425 	if (r)
426 		goto error_free;
427 
428 	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
429 			      &fence);
430 	if (r)
431 		goto error_free;
432 
433 	amdgpu_bo_fence(bo, fence, true);
434 	dma_fence_put(fence);
435 
436 	if (bo->shadow)
437 		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
438 					  level, pte_support_ats);
439 
440 	return 0;
441 
442 error_free:
443 	amdgpu_job_free(job);
444 
445 error:
446 	return r;
447 }
448 
449 /**
450  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
451  *
452  * @adev: amdgpu_device pointer
453  * @vm: requested vm
454  * @parent: parent PT
455  * @saddr: start of the address range
456  * @eaddr: end of the address range
457  * @level: VMPT level
458  * @ats: indicate ATS support from PTE
459  *
460  * Make sure the page directories and page tables are allocated
461  *
462  * Returns:
463  * 0 on success, errno otherwise.
464  */
465 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
466 				  struct amdgpu_vm *vm,
467 				  struct amdgpu_vm_pt *parent,
468 				  uint64_t saddr, uint64_t eaddr,
469 				  unsigned level, bool ats)
470 {
471 	unsigned shift = amdgpu_vm_level_shift(adev, level);
472 	unsigned pt_idx, from, to;
473 	u64 flags;
474 	int r;
475 
476 	if (!parent->entries) {
477 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
478 
479 		parent->entries = kvmalloc_array(num_entries,
480 						   sizeof(struct amdgpu_vm_pt),
481 						   GFP_KERNEL | __GFP_ZERO);
482 		if (!parent->entries)
483 			return -ENOMEM;
		memset(parent->entries, 0, sizeof(struct amdgpu_vm_pt));
485 	}
486 
487 	from = saddr >> shift;
488 	to = eaddr >> shift;
489 	if (from >= amdgpu_vm_num_entries(adev, level) ||
490 	    to >= amdgpu_vm_num_entries(adev, level))
491 		return -EINVAL;
492 
493 	++level;
494 	saddr = saddr & ((1 << shift) - 1);
495 	eaddr = eaddr & ((1 << shift) - 1);
496 
497 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
498 	if (vm->root.base.bo->shadow)
499 		flags |= AMDGPU_GEM_CREATE_SHADOW;
500 	if (vm->use_cpu_for_update)
501 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
502 	else
503 		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
504 
505 	/* walk over the address space and allocate the page tables */
506 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
507 		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
508 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
509 		struct amdgpu_bo *pt;
510 
511 		if (!entry->base.bo) {
512 			struct amdgpu_bo_param bp;
513 
514 			memset(&bp, 0, sizeof(bp));
515 			bp.size = amdgpu_vm_bo_size(adev, level);
516 			bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
517 			bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
518 			bp.flags = flags;
519 			bp.type = ttm_bo_type_kernel;
520 			bp.resv = resv;
521 			r = amdgpu_bo_create(adev, &bp, &pt);
522 			if (r)
523 				return r;
524 
525 			r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
526 			if (r) {
527 				amdgpu_bo_unref(&pt->shadow);
528 				amdgpu_bo_unref(&pt);
529 				return r;
530 			}
531 
532 			if (vm->use_cpu_for_update) {
533 				r = amdgpu_bo_kmap(pt, NULL);
534 				if (r) {
535 					amdgpu_bo_unref(&pt->shadow);
536 					amdgpu_bo_unref(&pt);
537 					return r;
538 				}
539 			}
540 
			/* Keep a reference to the parent directory to avoid
			 * freeing the page tables in the wrong order.
			 */
544 			pt->parent = amdgpu_bo_ref(parent->base.bo);
545 
546 			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
547 		}
548 
549 		if (level < AMDGPU_VM_PTB) {
550 			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
551 			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
552 				((1 << shift) - 1);
553 			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
554 						   sub_eaddr, level, ats);
555 			if (r)
556 				return r;
557 		}
558 	}
559 
560 	return 0;
561 }
562 
563 /**
564  * amdgpu_vm_alloc_pts - Allocate page tables.
565  *
566  * @adev: amdgpu_device pointer
567  * @vm: VM to allocate page tables for
568  * @saddr: Start address which needs to be allocated
569  * @size: Size from start address we need.
570  *
571  * Make sure the page tables are allocated.
572  *
573  * Returns:
574  * 0 on success, errno otherwise.
575  */
576 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
577 			struct amdgpu_vm *vm,
578 			uint64_t saddr, uint64_t size)
579 {
580 	uint64_t eaddr;
581 	bool ats = false;
582 
583 	/* validate the parameters */
584 	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
585 		return -EINVAL;
586 
587 	eaddr = saddr + size - 1;
588 
589 	if (vm->pte_support_ats)
590 		ats = saddr < AMDGPU_VA_HOLE_START;
591 
592 	saddr /= AMDGPU_GPU_PAGE_SIZE;
593 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
594 
595 	if (eaddr >= adev->vm_manager.max_pfn) {
596 		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
597 			eaddr, adev->vm_manager.max_pfn);
598 		return -EINVAL;
599 	}
600 
601 	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
602 				      adev->vm_manager.root_level, ats);
603 }
604 
605 /**
606  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
607  *
608  * @adev: amdgpu_device pointer
609  */
610 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
611 {
612 	const struct amdgpu_ip_block *ip_block;
613 	bool has_compute_vm_bug;
614 	struct amdgpu_ring *ring;
615 	int i;
616 
617 	has_compute_vm_bug = false;
618 
619 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
620 	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
623 		if (ip_block->version->major <= 7)
624 			has_compute_vm_bug = true;
625 		else if (ip_block->version->major == 8)
626 			if (adev->gfx.mec_fw_version < 673)
627 				has_compute_vm_bug = true;
628 	}
629 
630 	for (i = 0; i < adev->num_rings; i++) {
631 		ring = adev->rings[i];
632 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
633 			/* only compute rings */
634 			ring->has_compute_vm_bug = has_compute_vm_bug;
635 		else
636 			ring->has_compute_vm_bug = false;
637 	}
638 }
639 
640 /**
641  * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
642  *
643  * @ring: ring on which the job will be submitted
644  * @job: job to submit
645  *
646  * Returns:
647  * True if sync is needed.
648  */
649 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
650 				  struct amdgpu_job *job)
651 {
652 	struct amdgpu_device *adev = ring->adev;
653 	unsigned vmhub = ring->funcs->vmhub;
654 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
655 	struct amdgpu_vmid *id;
656 	bool gds_switch_needed;
657 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
658 
659 	if (job->vmid == 0)
660 		return false;
661 	id = &id_mgr->ids[job->vmid];
662 	gds_switch_needed = ring->funcs->emit_gds_switch && (
663 		id->gds_base != job->gds_base ||
664 		id->gds_size != job->gds_size ||
665 		id->gws_base != job->gws_base ||
666 		id->gws_size != job->gws_size ||
667 		id->oa_base != job->oa_base ||
668 		id->oa_size != job->oa_size);
669 
670 	if (amdgpu_vmid_had_gpu_reset(adev, id))
671 		return true;
672 
673 	return vm_flush_needed || gds_switch_needed;
674 }
675 
676 /**
677  * amdgpu_vm_flush - hardware flush the vm
678  *
679  * @ring: ring to use for flush
680  * @job:  related job
681  * @need_pipe_sync: is pipe sync needed
682  *
683  * Emit a VM flush when it is necessary.
684  *
685  * Returns:
686  * 0 on success, errno otherwise.
687  */
688 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
689 {
690 	struct amdgpu_device *adev = ring->adev;
691 	unsigned vmhub = ring->funcs->vmhub;
692 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
693 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
694 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
695 		id->gds_base != job->gds_base ||
696 		id->gds_size != job->gds_size ||
697 		id->gws_base != job->gws_base ||
698 		id->gws_size != job->gws_size ||
699 		id->oa_base != job->oa_base ||
700 		id->oa_size != job->oa_size);
701 	bool vm_flush_needed = job->vm_needs_flush;
702 	bool pasid_mapping_needed = id->pasid != job->pasid ||
703 		!id->pasid_mapping ||
704 		!dma_fence_is_signaled(id->pasid_mapping);
705 	struct dma_fence *fence = NULL;
706 	unsigned patch_offset = 0;
707 	int r;
708 
709 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
710 		gds_switch_needed = true;
711 		vm_flush_needed = true;
712 		pasid_mapping_needed = true;
713 	}
714 
715 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
716 	vm_flush_needed &= !!ring->funcs->emit_vm_flush;
717 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
718 		ring->funcs->emit_wreg;
719 
720 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
721 		return 0;
722 
723 	if (ring->funcs->init_cond_exec)
724 		patch_offset = amdgpu_ring_init_cond_exec(ring);
725 
726 	if (need_pipe_sync)
727 		amdgpu_ring_emit_pipeline_sync(ring);
728 
729 	if (vm_flush_needed) {
730 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
731 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
732 	}
733 
734 	if (pasid_mapping_needed)
735 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
736 
737 	if (vm_flush_needed || pasid_mapping_needed) {
738 		r = amdgpu_fence_emit(ring, &fence, 0);
739 		if (r)
740 			return r;
741 	}
742 
743 	if (vm_flush_needed) {
744 		mutex_lock(&id_mgr->lock);
745 		dma_fence_put(id->last_flush);
746 		id->last_flush = dma_fence_get(fence);
747 		id->current_gpu_reset_count =
748 			atomic_read(&adev->gpu_reset_counter);
749 		mutex_unlock(&id_mgr->lock);
750 	}
751 
752 	if (pasid_mapping_needed) {
753 		id->pasid = job->pasid;
754 		dma_fence_put(id->pasid_mapping);
755 		id->pasid_mapping = dma_fence_get(fence);
756 	}
757 	dma_fence_put(fence);
758 
759 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
760 		id->gds_base = job->gds_base;
761 		id->gds_size = job->gds_size;
762 		id->gws_base = job->gws_base;
763 		id->gws_size = job->gws_size;
764 		id->oa_base = job->oa_base;
765 		id->oa_size = job->oa_size;
766 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
767 					    job->gds_size, job->gws_base,
768 					    job->gws_size, job->oa_base,
769 					    job->oa_size);
770 	}
771 
772 	if (ring->funcs->patch_cond_exec)
773 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
774 
775 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
776 	if (ring->funcs->emit_switch_buffer) {
777 		amdgpu_ring_emit_switch_buffer(ring);
778 		amdgpu_ring_emit_switch_buffer(ring);
779 	}
780 	return 0;
781 }
782 
783 /**
784  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
785  *
786  * @vm: requested vm
787  * @bo: requested buffer object
788  *
789  * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
791  * Returns the found bo_va or NULL if none is found
792  *
793  * Object has to be reserved!
794  *
795  * Returns:
796  * Found bo_va or NULL.
797  */
798 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
799 				       struct amdgpu_bo *bo)
800 {
801 	struct amdgpu_bo_va *bo_va;
802 
803 	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
804 		if (bo_va->base.vm == vm) {
805 			return bo_va;
806 		}
807 	}
808 	return NULL;
809 }
810 
811 /**
812  * amdgpu_vm_do_set_ptes - helper to call the right asic function
813  *
814  * @params: see amdgpu_pte_update_params definition
815  * @bo: PD/PT to update
816  * @pe: addr of the page entry
817  * @addr: dst addr to write into pe
818  * @count: number of page entries to update
819  * @incr: increase next addr by incr bytes
820  * @flags: hw access flags
821  *
822  * Traces the parameters and calls the right asic functions
823  * to setup the page table using the DMA.
824  */
825 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
826 				  struct amdgpu_bo *bo,
827 				  uint64_t pe, uint64_t addr,
828 				  unsigned count, uint32_t incr,
829 				  uint64_t flags)
830 {
831 	pe += amdgpu_bo_gpu_offset(bo);
832 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
833 
834 	if (count < 3) {
835 		amdgpu_vm_write_pte(params->adev, params->ib, pe,
836 				    addr | flags, count, incr);
837 
838 	} else {
839 		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
840 				      count, incr, flags);
841 	}
842 }
843 
844 /**
845  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
846  *
847  * @params: see amdgpu_pte_update_params definition
848  * @bo: PD/PT to update
849  * @pe: addr of the page entry
850  * @addr: dst addr to write into pe
851  * @count: number of page entries to update
852  * @incr: increase next addr by incr bytes
853  * @flags: hw access flags
854  *
855  * Traces the parameters and calls the DMA function to copy the PTEs.
856  */
857 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
858 				   struct amdgpu_bo *bo,
859 				   uint64_t pe, uint64_t addr,
860 				   unsigned count, uint32_t incr,
861 				   uint64_t flags)
862 {
863 	uint64_t src = (params->src + (addr >> 12) * 8);
864 
865 	pe += amdgpu_bo_gpu_offset(bo);
866 	trace_amdgpu_vm_copy_ptes(pe, src, count);
867 
868 	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
869 }
870 
871 /**
872  * amdgpu_vm_map_gart - Resolve gart mapping of addr
873  *
874  * @pages_addr: optional DMA address to use for lookup
875  * @addr: the unmapped addr
876  *
877  * Look up the physical address of the page that the pte resolves
878  * to.
879  *
880  * Returns:
881  * The pointer for the page table entry.
882  */
883 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
884 {
885 	uint64_t result;
886 
887 	/* page table offset */
888 	result = pages_addr[addr >> PAGE_SHIFT];
889 
	/* in case cpu page size != gpu page size */
891 	result |= addr & (~PAGE_MASK);
892 
893 	result &= 0xFFFFFFFFFFFFF000ULL;
894 
895 	return result;
896 }
897 
898 /**
899  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
900  *
901  * @params: see amdgpu_pte_update_params definition
902  * @bo: PD/PT to update
903  * @pe: kmap addr of the page entry
904  * @addr: dst addr to write into pe
905  * @count: number of page entries to update
906  * @incr: increase next addr by incr bytes
907  * @flags: hw access flags
908  *
909  * Write count number of PT/PD entries directly.
910  */
911 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
912 				   struct amdgpu_bo *bo,
913 				   uint64_t pe, uint64_t addr,
914 				   unsigned count, uint32_t incr,
915 				   uint64_t flags)
916 {
917 	unsigned int i;
918 	uint64_t value;
919 
920 	pe += (unsigned long)amdgpu_bo_kptr(bo);
921 
922 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
923 
924 	for (i = 0; i < count; i++) {
925 		value = params->pages_addr ?
926 			amdgpu_vm_map_gart(params->pages_addr, addr) :
927 			addr;
928 		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
929 				       i, value, flags);
930 		addr += incr;
931 	}
932 }
933 
934 
935 /**
936  * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
937  *
938  * @adev: amdgpu_device pointer
939  * @vm: related vm
940  * @owner: fence owner
941  *
942  * Returns:
943  * 0 on success, errno otherwise.
944  */
945 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
946 			     void *owner)
947 {
948 	struct amdgpu_sync sync;
949 	int r;
950 
951 	amdgpu_sync_create(&sync);
952 	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
953 	r = amdgpu_sync_wait(&sync, true);
954 	amdgpu_sync_free(&sync);
955 
956 	return r;
957 }
958 
959 /*
960  * amdgpu_vm_update_pde - update a single level in the hierarchy
961  *
962  * @param: parameters for the update
963  * @vm: requested vm
964  * @parent: parent directory
965  * @entry: entry to update
966  *
967  * Makes sure the requested entry in parent is up to date.
968  */
969 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
970 				 struct amdgpu_vm *vm,
971 				 struct amdgpu_vm_pt *parent,
972 				 struct amdgpu_vm_pt *entry)
973 {
974 	struct amdgpu_bo *bo = parent->base.bo, *pbo;
975 	uint64_t pde, pt, flags;
976 	unsigned level;
977 
978 	/* Don't update huge pages here */
979 	if (entry->huge)
980 		return;
981 
982 	for (level = 0, pbo = bo->parent; pbo; ++level)
983 		pbo = pbo->parent;
984 
985 	level += params->adev->vm_manager.root_level;
986 	pt = amdgpu_bo_gpu_offset(entry->base.bo);
987 	flags = AMDGPU_PTE_VALID;
988 	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
989 	pde = (entry - parent->entries) * 8;
990 	if (bo->shadow)
991 		params->func(params, bo->shadow, pde, pt, 1, 0, flags);
992 	params->func(params, bo, pde, pt, 1, 0, flags);
993 }
994 
995 /*
996  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
997  *
998  * @adev: amdgpu_device pointer
999  * @vm: related vm
1000  * @parent: parent PD
1001  * @level: VMPT level
1002  *
 * Mark all PD levels as invalid after an error.
1004  */
1005 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
1006 				       struct amdgpu_vm *vm,
1007 				       struct amdgpu_vm_pt *parent,
1008 				       unsigned level)
1009 {
1010 	unsigned pt_idx, num_entries;
1011 
1012 	/*
1013 	 * Recurse into the subdirectories. This recursion is harmless because
1014 	 * we only have a maximum of 5 layers.
1015 	 */
1016 	num_entries = amdgpu_vm_num_entries(adev, level);
1017 	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
1018 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
1019 
1020 		if (!entry->base.bo)
1021 			continue;
1022 
1023 		if (!entry->base.moved)
1024 			list_move(&entry->base.vm_status, &vm->relocated);
1025 		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
1026 	}
1027 }
1028 
1029 /*
1030  * amdgpu_vm_update_directories - make sure that all directories are valid
1031  *
1032  * @adev: amdgpu_device pointer
1033  * @vm: requested vm
1034  *
1035  * Makes sure all directories are up to date.
1036  *
1037  * Returns:
1038  * 0 for success, error for failure.
1039  */
1040 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
1041 				 struct amdgpu_vm *vm)
1042 {
1043 	struct amdgpu_pte_update_params params;
1044 	struct amdgpu_job *job;
1045 	unsigned ndw = 0;
1046 	int r = 0;
1047 
1048 	if (list_empty(&vm->relocated))
1049 		return 0;
1050 
1051 restart:
1052 	memset(&params, 0, sizeof(params));
1053 	params.adev = adev;
1054 
1055 	if (vm->use_cpu_for_update) {
1056 		struct amdgpu_vm_bo_base *bo_base;
1057 
1058 		list_for_each_entry(bo_base, &vm->relocated, vm_status) {
1059 			r = amdgpu_bo_kmap(bo_base->bo, NULL);
1060 			if (unlikely(r))
1061 				return r;
1062 		}
1063 
1064 		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
1065 		if (unlikely(r))
1066 			return r;
1067 
1068 		params.func = amdgpu_vm_cpu_set_ptes;
1069 	} else {
1070 		ndw = 512 * 8;
1071 		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1072 		if (r)
1073 			return r;
1074 
1075 		params.ib = &job->ibs[0];
1076 		params.func = amdgpu_vm_do_set_ptes;
1077 	}
1078 
1079 	while (!list_empty(&vm->relocated)) {
1080 		struct amdgpu_vm_bo_base *bo_base, *parent;
1081 		struct amdgpu_vm_pt *pt, *entry;
1082 		struct amdgpu_bo *bo;
1083 
1084 		bo_base = list_first_entry(&vm->relocated,
1085 					   struct amdgpu_vm_bo_base,
1086 					   vm_status);
1087 		bo_base->moved = false;
1088 		list_del_init(&bo_base->vm_status);
1089 
1090 		bo = bo_base->bo->parent;
1091 		if (!bo)
1092 			continue;
1093 
1094 		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
1095 					  bo_list);
1096 		pt = container_of(parent, struct amdgpu_vm_pt, base);
1097 		entry = container_of(bo_base, struct amdgpu_vm_pt, base);
1098 
1099 		amdgpu_vm_update_pde(&params, vm, pt, entry);
1100 
1101 		if (!vm->use_cpu_for_update &&
1102 		    (ndw - params.ib->length_dw) < 32)
1103 			break;
1104 	}
1105 
1106 	if (vm->use_cpu_for_update) {
1107 		/* Flush HDP */
1108 		mb();
1109 		amdgpu_asic_flush_hdp(adev, NULL);
1110 	} else if (params.ib->length_dw == 0) {
1111 		amdgpu_job_free(job);
1112 	} else {
1113 		struct amdgpu_bo *root = vm->root.base.bo;
1114 		struct amdgpu_ring *ring;
1115 		struct dma_fence *fence;
1116 
1117 		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
1118 				    sched);
1119 
1120 		amdgpu_ring_pad_ib(ring, params.ib);
1121 		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
1122 				 AMDGPU_FENCE_OWNER_VM, false);
1123 		WARN_ON(params.ib->length_dw > ndw);
1124 		r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
1125 				      &fence);
1126 		if (r)
1127 			goto error;
1128 
1129 		amdgpu_bo_fence(root, fence, true);
1130 		dma_fence_put(vm->last_update);
1131 		vm->last_update = fence;
1132 	}
1133 
1134 	if (!list_empty(&vm->relocated))
1135 		goto restart;
1136 
1137 	return 0;
1138 
1139 error:
1140 	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1141 				   adev->vm_manager.root_level);
1142 	amdgpu_job_free(job);
1143 	return r;
1144 }
1145 
1146 /**
 * amdgpu_vm_get_entry - find the entry for an address
1148  *
1149  * @p: see amdgpu_pte_update_params definition
1150  * @addr: virtual address in question
1151  * @entry: resulting entry or NULL
1152  * @parent: parent entry
1153  *
 * Find the vm_pt entry and its parent for the given address.
1155  */
1156 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1157 			 struct amdgpu_vm_pt **entry,
1158 			 struct amdgpu_vm_pt **parent)
1159 {
1160 	unsigned level = p->adev->vm_manager.root_level;
1161 
1162 	*parent = NULL;
1163 	*entry = &p->vm->root;
1164 	while ((*entry)->entries) {
1165 		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1166 
1167 		*parent = *entry;
1168 		*entry = &(*entry)->entries[addr >> shift];
1169 		addr &= (1ULL << shift) - 1;
1170 	}
1171 
1172 	if (level != AMDGPU_VM_PTB)
1173 		*entry = NULL;
1174 }
1175 
1176 /**
1177  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1178  *
1179  * @p: see amdgpu_pte_update_params definition
1180  * @entry: vm_pt entry to check
1181  * @parent: parent entry
1182  * @nptes: number of PTEs updated with this operation
1183  * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
1185  *
1186  * Check if we can update the PD with a huge page.
1187  */
1188 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1189 					struct amdgpu_vm_pt *entry,
1190 					struct amdgpu_vm_pt *parent,
1191 					unsigned nptes, uint64_t dst,
1192 					uint64_t flags)
1193 {
1194 	uint64_t pde;
1195 
	/* In the case of a mixed PT the PDE must point to it */
1197 	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1198 	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1199 		/* Set the huge page flag to stop scanning at this PDE */
1200 		flags |= AMDGPU_PDE_PTE;
1201 	}
1202 
1203 	if (!(flags & AMDGPU_PDE_PTE)) {
1204 		if (entry->huge) {
1205 			/* Add the entry to the relocated list to update it. */
1206 			entry->huge = false;
1207 			list_move(&entry->base.vm_status, &p->vm->relocated);
1208 		}
1209 		return;
1210 	}
1211 
1212 	entry->huge = true;
1213 	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1214 
1215 	pde = (entry - parent->entries) * 8;
1216 	if (parent->base.bo->shadow)
1217 		p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1218 	p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1219 }
1220 
1221 /**
1222  * amdgpu_vm_update_ptes - make sure that page tables are valid
1223  *
1224  * @params: see amdgpu_pte_update_params definition
1225  * @start: start of GPU address range
1226  * @end: end of GPU address range
1227  * @dst: destination address to map to, the next dst inside the function
1228  * @flags: mapping flags
1229  *
1230  * Update the page tables in the range @start - @end.
1231  *
1232  * Returns:
1233  * 0 for success, -EINVAL for failure.
1234  */
1235 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1236 				  uint64_t start, uint64_t end,
1237 				  uint64_t dst, uint64_t flags)
1238 {
1239 	struct amdgpu_device *adev = params->adev;
1240 	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1241 
1242 	uint64_t addr, pe_start;
1243 	struct amdgpu_bo *pt;
1244 	unsigned nptes;
1245 
1246 	/* walk over the address space and update the page tables */
1247 	for (addr = start; addr < end; addr += nptes,
1248 	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1249 		struct amdgpu_vm_pt *entry, *parent;
1250 
1251 		amdgpu_vm_get_entry(params, addr, &entry, &parent);
1252 		if (!entry)
1253 			return -ENOENT;
1254 
1255 		if ((addr & ~mask) == (end & ~mask))
1256 			nptes = end - addr;
1257 		else
1258 			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1259 
1260 		amdgpu_vm_handle_huge_pages(params, entry, parent,
1261 					    nptes, dst, flags);
1262 		/* We don't need to update PTEs for huge pages */
1263 		if (entry->huge)
1264 			continue;
1265 
1266 		pt = entry->base.bo;
1267 		pe_start = (addr & mask) * 8;
1268 		if (pt->shadow)
1269 			params->func(params, pt->shadow, pe_start, dst, nptes,
1270 				     AMDGPU_GPU_PAGE_SIZE, flags);
1271 		params->func(params, pt, pe_start, dst, nptes,
1272 			     AMDGPU_GPU_PAGE_SIZE, flags);
1273 	}
1274 
1275 	return 0;
1276 }
1277 
1278 /*
1279  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1280  *
1281  * @params: see amdgpu_pte_update_params definition
1282  * @vm: requested vm
1283  * @start: first PTE to handle
1284  * @end: last PTE to handle
1285  * @dst: addr those PTEs should point to
1286  * @flags: hw mapping flags
1287  *
1288  * Returns:
1289  * 0 for success, -EINVAL for failure.
1290  */
1291 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1292 				uint64_t start, uint64_t end,
1293 				uint64_t dst, uint64_t flags)
1294 {
1295 	/**
1296 	 * The MC L1 TLB supports variable sized pages, based on a fragment
1297 	 * field in the PTE. When this field is set to a non-zero value, page
1298 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1299 	 * flags are considered valid for all PTEs within the fragment range
1300 	 * and corresponding mappings are assumed to be physically contiguous.
1301 	 *
1302 	 * The L1 TLB can store a single PTE for the whole fragment,
1303 	 * significantly increasing the space available for translation
1304 	 * caching. This leads to large improvements in throughput when the
1305 	 * TLB is under pressure.
1306 	 *
1307 	 * The L2 TLB distributes small and large fragments into two
1308 	 * asymmetric partitions. The large fragment cache is significantly
1309 	 * larger. Thus, we try to use large fragments wherever possible.
1310 	 * Userspace can support this by aligning virtual base address and
1311 	 * allocation size to the fragment size.
1312 	 */
1313 	unsigned max_frag = params->adev->vm_manager.fragment_size;
1314 	int r;
1315 
	/* system pages are not physically contiguous */
1317 	if (params->src || !(flags & AMDGPU_PTE_VALID))
1318 		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1319 
1320 	while (start != end) {
1321 		uint64_t frag_flags, frag_end;
1322 		unsigned frag;
1323 
1324 		/* This intentionally wraps around if no bit is set */
1325 		frag = min((unsigned)ffs(start) - 1,
1326 			   (unsigned)fls64(end - start) - 1);
1327 		if (frag >= max_frag) {
1328 			frag_flags = AMDGPU_PTE_FRAG(max_frag);
1329 			frag_end = end & ~((1ULL << max_frag) - 1);
1330 		} else {
1331 			frag_flags = AMDGPU_PTE_FRAG(frag);
1332 			frag_end = start + (1 << frag);
1333 		}
1334 
1335 		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1336 					  flags | frag_flags);
1337 		if (r)
1338 			return r;
1339 
1340 		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1341 		start = frag_end;
1342 	}
1343 
1344 	return 0;
1345 }
1346 
1347 /**
1348  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1349  *
1350  * @adev: amdgpu_device pointer
1351  * @exclusive: fence we need to sync to
1352  * @pages_addr: DMA addresses to use for mapping
1353  * @vm: requested vm
1354  * @start: start of mapped range
1355  * @last: last mapped entry
1356  * @flags: flags for the entries
1357  * @addr: addr to set the area to
1358  * @fence: optional resulting fence
1359  *
1360  * Fill in the page table entries between @start and @last.
1361  *
1362  * Returns:
1363  * 0 for success, -EINVAL for failure.
1364  */
1365 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1366 				       struct dma_fence *exclusive,
1367 				       dma_addr_t *pages_addr,
1368 				       struct amdgpu_vm *vm,
1369 				       uint64_t start, uint64_t last,
1370 				       uint64_t flags, uint64_t addr,
1371 				       struct dma_fence **fence)
1372 {
1373 	struct amdgpu_ring *ring;
1374 	void *owner = AMDGPU_FENCE_OWNER_VM;
1375 	unsigned nptes, ncmds, ndw;
1376 	struct amdgpu_job *job;
1377 	struct amdgpu_pte_update_params params;
1378 	struct dma_fence *f = NULL;
1379 	int r;
1380 
1381 	memset(&params, 0, sizeof(params));
1382 	params.adev = adev;
1383 	params.vm = vm;
1384 
1385 	/* sync to everything on unmapping */
1386 	if (!(flags & AMDGPU_PTE_VALID))
1387 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1388 
1389 	if (vm->use_cpu_for_update) {
		/* params.src is used as a flag to indicate system memory */
1391 		if (pages_addr)
1392 			params.src = ~0;
1393 
1394 		/* Wait for PT BOs to be free. PTs share the same resv. object
1395 		 * as the root PD BO
1396 		 */
1397 		r = amdgpu_vm_wait_pd(adev, vm, owner);
1398 		if (unlikely(r))
1399 			return r;
1400 
1401 		params.func = amdgpu_vm_cpu_set_ptes;
1402 		params.pages_addr = pages_addr;
1403 		return amdgpu_vm_frag_ptes(&params, start, last + 1,
1404 					   addr, flags);
1405 	}
1406 
1407 	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
1408 
1409 	nptes = last - start + 1;
1410 
1411 	/*
1412 	 * reserve space for two commands every (1 << BLOCK_SIZE)
1413 	 *  entries or 2k dwords (whatever is smaller)
1414          *
1415          * The second command is for the shadow pagetables.
1416 	 */
1417 	if (vm->root.base.bo->shadow)
1418 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1419 	else
1420 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1421 
1422 	/* padding, etc. */
1423 	ndw = 64;
1424 
1425 	if (pages_addr) {
1426 		/* copy commands needed */
1427 		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1428 
1429 		/* and also PTEs */
1430 		ndw += nptes * 2;
1431 
1432 		params.func = amdgpu_vm_do_copy_ptes;
1433 
1434 	} else {
1435 		/* set page commands needed */
1436 		ndw += ncmds * 10;
1437 
1438 		/* extra commands for begin/end fragments */
1439 		if (vm->root.base.bo->shadow)
			ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
		else
			ndw += 2 * 10 * adev->vm_manager.fragment_size;
1443 
1444 		params.func = amdgpu_vm_do_set_ptes;
1445 	}
1446 
1447 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1448 	if (r)
1449 		return r;
1450 
1451 	params.ib = &job->ibs[0];
1452 
1453 	if (pages_addr) {
1454 		uint64_t *pte;
1455 		unsigned i;
1456 
1457 		/* Put the PTEs at the end of the IB. */
1458 		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
1460 		params.src = job->ibs->gpu_addr + i * 4;
1461 
1462 		for (i = 0; i < nptes; ++i) {
1463 			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1464 						    AMDGPU_GPU_PAGE_SIZE);
1465 			pte[i] |= flags;
1466 		}
1467 		addr = 0;
1468 	}
1469 
1470 	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1471 	if (r)
1472 		goto error_free;
1473 
1474 	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1475 			     owner, false);
1476 	if (r)
1477 		goto error_free;
1478 
1479 	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1480 	if (r)
1481 		goto error_free;
1482 
1483 	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1484 	if (r)
1485 		goto error_free;
1486 
1487 	amdgpu_ring_pad_ib(ring, params.ib);
1488 	WARN_ON(params.ib->length_dw > ndw);
1489 	r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
1490 	if (r)
1491 		goto error_free;
1492 
1493 	amdgpu_bo_fence(vm->root.base.bo, f, true);
1494 	dma_fence_put(*fence);
1495 	*fence = f;
1496 	return 0;
1497 
1498 error_free:
1499 	amdgpu_job_free(job);
1500 	return r;
1501 }
1502 
1503 /**
1504  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1505  *
1506  * @adev: amdgpu_device pointer
1507  * @exclusive: fence we need to sync to
1508  * @pages_addr: DMA addresses to use for mapping
1509  * @vm: requested vm
1510  * @mapping: mapped range and flags to use for the update
1511  * @flags: HW flags for the mapping
1512  * @nodes: array of drm_mm_nodes with the MC addresses
1513  * @fence: optional resulting fence
1514  *
1515  * Split the mapping into smaller chunks so that each update fits
1516  * into a SDMA IB.
1517  *
1518  * Returns:
1519  * 0 for success, -EINVAL for failure.
1520  */
1521 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1522 				      struct dma_fence *exclusive,
1523 				      dma_addr_t *pages_addr,
1524 				      struct amdgpu_vm *vm,
1525 				      struct amdgpu_bo_va_mapping *mapping,
1526 				      uint64_t flags,
1527 				      struct drm_mm_node *nodes,
1528 				      struct dma_fence **fence)
1529 {
1530 	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1531 	uint64_t pfn, start = mapping->start;
1532 	int r;
1533 
	/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
	 * but filter the flags here first just in case.
	 */
1537 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1538 		flags &= ~AMDGPU_PTE_READABLE;
1539 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1540 		flags &= ~AMDGPU_PTE_WRITEABLE;
1541 
1542 	flags &= ~AMDGPU_PTE_EXECUTABLE;
1543 	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1544 
1545 	flags &= ~AMDGPU_PTE_MTYPE_MASK;
1546 	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1547 
1548 	if ((mapping->flags & AMDGPU_PTE_PRT) &&
1549 	    (adev->asic_type >= CHIP_VEGA10)) {
1550 		flags |= AMDGPU_PTE_PRT;
1551 		flags &= ~AMDGPU_PTE_VALID;
1552 	}
1553 
1554 	trace_amdgpu_vm_bo_update(mapping);
1555 
1556 	pfn = mapping->offset >> PAGE_SHIFT;
1557 	if (nodes) {
1558 		while (pfn >= nodes->size) {
1559 			pfn -= nodes->size;
1560 			++nodes;
1561 		}
1562 	}
1563 
1564 	do {
1565 		dma_addr_t *dma_addr = NULL;
1566 		uint64_t max_entries;
1567 		uint64_t addr, last;
1568 
1569 		if (nodes) {
1570 			addr = nodes->start << PAGE_SHIFT;
1571 			max_entries = (nodes->size - pfn) *
1572 				AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1573 		} else {
1574 			addr = 0;
1575 			max_entries = S64_MAX;
1576 		}
1577 
1578 		if (pages_addr) {
1579 			uint64_t count;
1580 
1581 			max_entries = min(max_entries, 16ull * 1024ull);
1582 			for (count = 1;
1583 			     count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1584 			     ++count) {
1585 				uint64_t idx = pfn + count;
1586 
1587 				if (pages_addr[idx] !=
1588 				    (pages_addr[idx - 1] + PAGE_SIZE))
1589 					break;
1590 			}
1591 
1592 			if (count < min_linear_pages) {
1593 				addr = pfn << PAGE_SHIFT;
1594 				dma_addr = pages_addr;
1595 			} else {
1596 				addr = pages_addr[pfn];
1597 				max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1598 			}
1599 
1600 		} else if (flags & AMDGPU_PTE_VALID) {
1601 			addr += adev->vm_manager.vram_base_offset;
1602 			addr += pfn << PAGE_SHIFT;
1603 		}
1604 
1605 		last = min((uint64_t)mapping->last, start + max_entries - 1);
1606 		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1607 						start, last, flags, addr,
1608 						fence);
1609 		if (r)
1610 			return r;
1611 
1612 		pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
1613 		if (nodes && nodes->size == pfn) {
1614 			pfn = 0;
1615 			++nodes;
1616 		}
1617 		start = last + 1;
1618 
1619 	} while (unlikely(start != mapping->last + 1));
1620 
1621 	return 0;
1622 }
1623 
1624 /**
1625  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1626  *
1627  * @adev: amdgpu_device pointer
1628  * @bo_va: requested BO and VM object
1629  * @clear: if true clear the entries
1630  *
1631  * Fill in the page table entries for @bo_va.
1632  *
1633  * Returns:
1634  * 0 for success, -EINVAL for failure.
1635  */
1636 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1637 			struct amdgpu_bo_va *bo_va,
1638 			bool clear)
1639 {
1640 	struct amdgpu_bo *bo = bo_va->base.bo;
1641 	struct amdgpu_vm *vm = bo_va->base.vm;
1642 	struct amdgpu_bo_va_mapping *mapping;
1643 	dma_addr_t *pages_addr = NULL;
1644 	struct ttm_mem_reg *mem;
1645 	struct drm_mm_node *nodes;
1646 	struct dma_fence *exclusive, **last_update;
1647 	uint64_t flags;
1648 	int r;
1649 
1650 	if (clear || !bo) {
1651 		mem = NULL;
1652 		nodes = NULL;
1653 		exclusive = NULL;
1654 	} else {
1655 		struct ttm_dma_tt *ttm;
1656 
1657 		mem = &bo->tbo.mem;
1658 		nodes = mem->mm_node;
1659 		if (mem->mem_type == TTM_PL_TT) {
1660 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
1661 			pages_addr = ttm->dma_address;
1662 		}
1663 		exclusive = reservation_object_get_excl(bo->tbo.resv);
1664 	}
1665 
1666 	if (bo)
1667 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1668 	else
1669 		flags = 0x0;
1670 
1671 	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1672 		last_update = &vm->last_update;
1673 	else
1674 		last_update = &bo_va->last_pt_update;
1675 
1676 	if (!clear && bo_va->base.moved) {
1677 		bo_va->base.moved = false;
1678 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1679 
1680 	} else if (bo_va->cleared != clear) {
1681 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1682 	}
1683 
1684 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1685 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1686 					       mapping, flags, nodes,
1687 					       last_update);
1688 		if (r)
1689 			return r;
1690 	}
1691 
1692 	if (vm->use_cpu_for_update) {
1693 		/* Flush HDP */
1694 		mb();
1695 		amdgpu_asic_flush_hdp(adev, NULL);
1696 	}
1697 
1698 	spin_lock(&vm->moved_lock);
1699 	list_del_init(&bo_va->base.vm_status);
1700 	spin_unlock(&vm->moved_lock);
1701 
1702 	/* If the BO is not in its preferred location add it back to
1703 	 * the evicted list so that it gets validated again on the
1704 	 * next command submission.
1705 	 */
1706 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1707 		uint32_t mem_type = bo->tbo.mem.mem_type;
1708 
1709 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
1710 			list_add_tail(&bo_va->base.vm_status, &vm->evicted);
1711 		else
1712 			list_add(&bo_va->base.vm_status, &vm->idle);
1713 	}
1714 
1715 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1716 	bo_va->cleared = clear;
1717 
1718 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1719 		list_for_each_entry(mapping, &bo_va->valids, list)
1720 			trace_amdgpu_vm_bo_mapping(mapping);
1721 	}
1722 
1723 	return 0;
1724 }
1725 
1726 /**
1727  * amdgpu_vm_update_prt_state - update the global PRT state
1728  *
1729  * @adev: amdgpu_device pointer
1730  */
1731 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1732 {
1733 	unsigned long flags;
1734 	bool enable;
1735 
1736 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1737 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1738 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1739 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1740 }
1741 
1742 /**
1743  * amdgpu_vm_prt_get - add a PRT user
1744  *
1745  * @adev: amdgpu_device pointer
1746  */
1747 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1748 {
1749 	if (!adev->gmc.gmc_funcs->set_prt)
1750 		return;
1751 
1752 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1753 		amdgpu_vm_update_prt_state(adev);
1754 }
1755 
1756 /**
1757  * amdgpu_vm_prt_put - drop a PRT user
1758  *
1759  * @adev: amdgpu_device pointer
1760  */
1761 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1762 {
1763 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1764 		amdgpu_vm_update_prt_state(adev);
1765 }
1766 
1767 /**
1768  * amdgpu_vm_prt_cb - callback for updating the PRT status
1769  *
1770  * @fence: fence for the callback
1771  * @_cb: the callback function
1772  */
1773 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1774 {
1775 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1776 
1777 	amdgpu_vm_prt_put(cb->adev);
1778 	kfree(cb);
1779 }
1780 
1781 /**
1782  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1783  *
1784  * @adev: amdgpu_device pointer
1785  * @fence: fence for the callback
1786  */
1787 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1788 				 struct dma_fence *fence)
1789 {
1790 	struct amdgpu_prt_cb *cb;
1791 
1792 	if (!adev->gmc.gmc_funcs->set_prt)
1793 		return;
1794 
1795 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1796 	if (!cb) {
1797 		/* Last resort when we are OOM */
1798 		if (fence)
1799 			dma_fence_wait(fence, false);
1800 
1801 		amdgpu_vm_prt_put(adev);
1802 	} else {
1803 		cb->adev = adev;
1804 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1805 						     amdgpu_vm_prt_cb))
1806 			amdgpu_vm_prt_cb(fence, &cb->cb);
1807 	}
1808 }
1809 
1810 /**
1811  * amdgpu_vm_free_mapping - free a mapping
1812  *
1813  * @adev: amdgpu_device pointer
1814  * @vm: requested vm
1815  * @mapping: mapping to be freed
1816  * @fence: fence of the unmap operation
1817  *
1818  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1819  */
1820 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1821 				   struct amdgpu_vm *vm,
1822 				   struct amdgpu_bo_va_mapping *mapping,
1823 				   struct dma_fence *fence)
1824 {
1825 	if (mapping->flags & AMDGPU_PTE_PRT)
1826 		amdgpu_vm_add_prt_cb(adev, fence);
1827 	kfree(mapping);
1828 }
1829 
1830 /**
1831  * amdgpu_vm_prt_fini - finish all prt mappings
1832  *
1833  * @adev: amdgpu_device pointer
1834  * @vm: requested vm
1835  *
1836  * Register a cleanup callback to disable PRT support after VM dies.
1837  */
1838 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1839 {
1840 	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1841 	struct dma_fence *excl, **shared;
1842 	unsigned i, shared_count;
1843 	int r;
1844 
1845 	r = reservation_object_get_fences_rcu(resv, &excl,
1846 					      &shared_count, &shared);
1847 	if (r) {
1848 		/* Not enough memory to grab the fence list, as last resort
1849 		 * block for all the fences to complete.
1850 		 */
1851 		reservation_object_wait_timeout_rcu(resv, true, false,
1852 						    MAX_SCHEDULE_TIMEOUT);
1853 		return;
1854 	}
1855 
1856 	/* Add a callback for each fence in the reservation object */
1857 	amdgpu_vm_prt_get(adev);
1858 	amdgpu_vm_add_prt_cb(adev, excl);
1859 
1860 	for (i = 0; i < shared_count; ++i) {
1861 		amdgpu_vm_prt_get(adev);
1862 		amdgpu_vm_add_prt_cb(adev, shared[i]);
1863 	}
1864 
1865 	kfree(shared);
1866 }
1867 
1868 /**
1869  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1870  *
1871  * @adev: amdgpu_device pointer
1872  * @vm: requested vm
1873  * @fence: optional resulting fence (unchanged if no work needed to be done
1874  * or if an error occurred)
1875  *
1876  * Make sure all freed BOs are cleared in the PT.
1877  * PTs have to be reserved and mutex must be locked!
1878  *
1879  * Returns:
1880  * 0 for success.
1881  *
1882  */
1883 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1884 			  struct amdgpu_vm *vm,
1885 			  struct dma_fence **fence)
1886 {
1887 	struct amdgpu_bo_va_mapping *mapping;
1888 	uint64_t init_pte_value = 0;
1889 	struct dma_fence *f = NULL;
1890 	int r;
1891 
1892 	while (!list_empty(&vm->freed)) {
1893 		mapping = list_first_entry(&vm->freed,
1894 			struct amdgpu_bo_va_mapping, list);
1895 		list_del(&mapping->list);
1896 
1897 		if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1898 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1899 
1900 		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1901 						mapping->start, mapping->last,
1902 						init_pte_value, 0, &f);
1903 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1904 		if (r) {
1905 			dma_fence_put(f);
1906 			return r;
1907 		}
1908 	}
1909 
1910 	if (fence && f) {
1911 		dma_fence_put(*fence);
1912 		*fence = f;
1913 	} else {
1914 		dma_fence_put(f);
1915 	}
1916 
1917 	return 0;
1918 
1919 }
1920 
1921 /**
1922  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1923  *
1924  * @adev: amdgpu_device pointer
1925  * @vm: requested vm
1926  *
1927  * Make sure all BOs which are moved are updated in the PTs.
1928  *
1929  * Returns:
1930  * 0 for success.
1931  *
1932  * PTs have to be reserved!
1933  */
1934 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1935 			   struct amdgpu_vm *vm)
1936 {
1937 	struct amdgpu_bo_va *bo_va, *tmp;
1938 	struct list_head moved;
1939 	bool clear;
1940 	int r;
1941 
1942 	INIT_LIST_HEAD(&moved);
1943 	spin_lock(&vm->moved_lock);
1944 	list_splice_init(&vm->moved, &moved);
1945 	spin_unlock(&vm->moved_lock);
1946 
1947 	list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
1948 		struct reservation_object *resv = bo_va->base.bo->tbo.resv;
1949 
1950 		/* Per VM BOs never need to be cleared in the page tables */
1951 		if (resv == vm->root.base.bo->tbo.resv)
1952 			clear = false;
1953 		/* Try to reserve the BO to avoid clearing its ptes */
1954 		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1955 			clear = false;
1956 		/* Somebody else is using the BO right now */
1957 		else
1958 			clear = true;
1959 
1960 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1961 		if (r) {
1962 			spin_lock(&vm->moved_lock);
1963 			list_splice(&moved, &vm->moved);
1964 			spin_unlock(&vm->moved_lock);
1965 			return r;
1966 		}
1967 
1968 		if (!clear && resv != vm->root.base.bo->tbo.resv)
1969 			reservation_object_unlock(resv);
1970 
1971 	}
1972 
1973 	return 0;
1974 }
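
/*
 * Illustrative sketch only, not driver code: a command submission path that
 * has the relevant BOs and page tables reserved would typically refresh the
 * VM with the two helpers above, roughly like this (the locals are
 * assumptions made for the example):
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_vm_clear_freed(adev, vm, &fence);
 *	if (!r)
 *		r = amdgpu_vm_handle_moved(adev, vm);
 *	// sync the job against @fence as needed, then drop the reference
 *	dma_fence_put(fence);
 */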
1975 
1976 /**
1977  * amdgpu_vm_bo_add - add a bo to a specific vm
1978  *
1979  * @adev: amdgpu_device pointer
1980  * @vm: requested vm
1981  * @bo: amdgpu buffer object
1982  *
1983  * Add @bo into the requested vm.
1984  * Add @bo to the list of BOs associated with the vm.
1985  *
1986  * Returns:
1987  * Newly added bo_va or NULL for failure
1988  *
1989  * Object has to be reserved!
1990  */
1991 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1992 				      struct amdgpu_vm *vm,
1993 				      struct amdgpu_bo *bo)
1994 {
1995 	struct amdgpu_bo_va *bo_va;
1996 
1997 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1998 	if (bo_va == NULL) {
1999 		return NULL;
2000 	}
2001 	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
2002 
2003 	bo_va->ref_count = 1;
2004 	INIT_LIST_HEAD(&bo_va->valids);
2005 	INIT_LIST_HEAD(&bo_va->invalids);
2006 
2007 	return bo_va;
2008 }
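
/*
 * Illustrative sketch only, not driver code: a consumer usually adds the BO
 * to the VM once and then creates one or more mappings for it.  @vm, @bo and
 * @gpu_addr below are assumptions made for the example:
 *
 *	struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *
 *	if (!bo_va)
 *		return -ENOMEM;
 *	r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, amdgpu_bo_size(bo),
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */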
2009 
2010 
2011 /**
2012  * amdgpu_vm_bo_insert_map - insert a new mapping
2013  *
2014  * @adev: amdgpu_device pointer
2015  * @bo_va: bo_va to store the address
2016  * @mapping: the mapping to insert
2017  *
2018  * Insert a new mapping into all structures.
2019  */
2020 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
2021 				    struct amdgpu_bo_va *bo_va,
2022 				    struct amdgpu_bo_va_mapping *mapping)
2023 {
2024 	struct amdgpu_vm *vm = bo_va->base.vm;
2025 	struct amdgpu_bo *bo = bo_va->base.bo;
2026 
2027 	mapping->bo_va = bo_va;
2028 	list_add(&mapping->list, &bo_va->invalids);
2029 	amdgpu_vm_it_insert(mapping, &vm->va);
2030 
2031 	if (mapping->flags & AMDGPU_PTE_PRT)
2032 		amdgpu_vm_prt_get(adev);
2033 
2034 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
2035 	    !bo_va->base.moved) {
2036 		spin_lock(&vm->moved_lock);
2037 		list_move(&bo_va->base.vm_status, &vm->moved);
2038 		spin_unlock(&vm->moved_lock);
2039 	}
2040 	trace_amdgpu_vm_bo_map(bo_va, mapping);
2041 }
2042 
2043 /**
2044  * amdgpu_vm_bo_map - map bo inside a vm
2045  *
2046  * @adev: amdgpu_device pointer
2047  * @bo_va: bo_va to store the address
2048  * @saddr: where to map the BO
2049  * @offset: requested offset in the BO
2050  * @size: BO size in bytes
2051  * @flags: attributes of pages (read/write/valid/etc.)
2052  *
2053  * Add a mapping of the BO at the specified address into the VM.
2054  *
2055  * Returns:
2056  * 0 for success, error for failure.
2057  *
2058  * Object has to be reserved and unreserved outside!
2059  */
2060 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2061 		     struct amdgpu_bo_va *bo_va,
2062 		     uint64_t saddr, uint64_t offset,
2063 		     uint64_t size, uint64_t flags)
2064 {
2065 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2066 	struct amdgpu_bo *bo = bo_va->base.bo;
2067 	struct amdgpu_vm *vm = bo_va->base.vm;
2068 	uint64_t eaddr;
2069 
2070 	/* validate the parameters */
2071 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2072 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2073 		return -EINVAL;
2074 
2075 	/* make sure object fit at this offset */
2076 	eaddr = saddr + size - 1;
2077 	if (saddr >= eaddr ||
2078 	    (bo && offset + size > amdgpu_bo_size(bo)))
2079 		return -EINVAL;
2080 
2081 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2082 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2083 
2084 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2085 	if (tmp) {
2086 		/* bo and tmp overlap, invalid addr */
2087 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
2088 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
2089 			tmp->start, tmp->last + 1);
2090 		return -EINVAL;
2091 	}
2092 
2093 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2094 	if (!mapping)
2095 		return -ENOMEM;
2096 
2097 	mapping->start = saddr;
2098 	mapping->last = eaddr;
2099 	mapping->offset = offset;
2100 	mapping->flags = flags;
2101 
2102 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2103 
2104 	return 0;
2105 }
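
/*
 * Note on the parameter checks above, with assumed numbers: @saddr, @offset
 * and @size all have to be multiples of AMDGPU_GPU_PAGE_SIZE (4K), so
 *
 *	amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x2000, AMDGPU_PTE_READABLE);
 *
 * maps two GPU pages at VA 0x100000, while an unaligned @saddr such as
 * 0x100800 is rejected with -EINVAL before any state is changed.
 */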
2106 
2107 /**
2108  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
2109  *
2110  * @adev: amdgpu_device pointer
2111  * @bo_va: bo_va to store the address
2112  * @saddr: where to map the BO
2113  * @offset: requested offset in the BO
2114  * @size: BO size in bytes
2115  * @flags: attributes of pages (read/write/valid/etc.)
2116  *
2117  * Add a mapping of the BO at the specified address into the VM. Replace existing
2118  * mappings as we do so.
2119  *
2120  * Returns:
2121  * 0 for success, error for failure.
2122  *
2123  * Object has to be reserved and unreserved outside!
2124  */
2125 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
2126 			     struct amdgpu_bo_va *bo_va,
2127 			     uint64_t saddr, uint64_t offset,
2128 			     uint64_t size, uint64_t flags)
2129 {
2130 	struct amdgpu_bo_va_mapping *mapping;
2131 	struct amdgpu_bo *bo = bo_va->base.bo;
2132 	uint64_t eaddr;
2133 	int r;
2134 
2135 	/* validate the parameters */
2136 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
2137 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
2138 		return -EINVAL;
2139 
2140 	/* make sure object fit at this offset */
2141 	eaddr = saddr + size - 1;
2142 	if (saddr >= eaddr ||
2143 	    (bo && offset + size > amdgpu_bo_size(bo)))
2144 		return -EINVAL;
2145 
2146 	/* Allocate all the needed memory */
2147 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
2148 	if (!mapping)
2149 		return -ENOMEM;
2150 
2151 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
2152 	if (r) {
2153 		kfree(mapping);
2154 		return r;
2155 	}
2156 
2157 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2158 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2159 
2160 	mapping->start = saddr;
2161 	mapping->last = eaddr;
2162 	mapping->offset = offset;
2163 	mapping->flags = flags;
2164 
2165 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2166 
2167 	return 0;
2168 }
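
/*
 * Illustrative sketch only, not driver code: unlike amdgpu_vm_bo_map(), the
 * replace variant does not fail on an overlap but clears the conflicting
 * range first, e.g. (@gpu_addr and @bo are assumptions):
 *
 *	r = amdgpu_vm_bo_replace_map(adev, bo_va, gpu_addr, 0,
 *				     amdgpu_bo_size(bo),
 *				     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */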
2169 
2170 /**
2171  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2172  *
2173  * @adev: amdgpu_device pointer
2174  * @bo_va: bo_va to remove the address from
2175  * @saddr: where the BO is mapped
2176  *
2177  * Remove a mapping of the BO at the specified address from the VM.
2178  *
2179  * Returns:
2180  * 0 for success, error for failure.
2181  *
2182  * Object has to be reserved and unreserved outside!
2183  */
2184 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2185 		       struct amdgpu_bo_va *bo_va,
2186 		       uint64_t saddr)
2187 {
2188 	struct amdgpu_bo_va_mapping *mapping;
2189 	struct amdgpu_vm *vm = bo_va->base.vm;
2190 	bool valid = true;
2191 
2192 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2193 
2194 	list_for_each_entry(mapping, &bo_va->valids, list) {
2195 		if (mapping->start == saddr)
2196 			break;
2197 	}
2198 
2199 	if (&mapping->list == &bo_va->valids) {
2200 		valid = false;
2201 
2202 		list_for_each_entry(mapping, &bo_va->invalids, list) {
2203 			if (mapping->start == saddr)
2204 				break;
2205 		}
2206 
2207 		if (&mapping->list == &bo_va->invalids)
2208 			return -ENOENT;
2209 	}
2210 
2211 	list_del(&mapping->list);
2212 	amdgpu_vm_it_remove(mapping, &vm->va);
2213 	mapping->bo_va = NULL;
2214 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2215 
2216 	if (valid)
2217 		list_add(&mapping->list, &vm->freed);
2218 	else
2219 		amdgpu_vm_free_mapping(adev, vm, mapping,
2220 				       bo_va->last_pt_update);
2221 
2222 	return 0;
2223 }
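
/*
 * Illustrative sketch only, not driver code: unmapping only queues the range
 * on &vm->freed; the PTEs are actually invalidated by a later
 * amdgpu_vm_clear_freed() call once the page tables are reserved again:
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr);
 *	if (!r)
 *		r = amdgpu_vm_clear_freed(adev, vm, NULL);
 */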
2224 
2225 /**
2226  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2227  *
2228  * @adev: amdgpu_device pointer
2229  * @vm: VM structure to use
2230  * @saddr: start of the range
2231  * @size: size of the range
2232  *
2233  * Remove all mappings in a range, split them as appropriate.
2234  *
2235  * Returns:
2236  * 0 for success, error for failure.
2237  */
2238 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2239 				struct amdgpu_vm *vm,
2240 				uint64_t saddr, uint64_t size)
2241 {
2242 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2243 	LIST_HEAD(removed);
2244 	uint64_t eaddr;
2245 
2246 	eaddr = saddr + size - 1;
2247 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2248 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2249 
2250 	/* Allocate all the needed memory */
2251 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2252 	if (!before)
2253 		return -ENOMEM;
2254 	INIT_LIST_HEAD(&before->list);
2255 
2256 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2257 	if (!after) {
2258 		kfree(before);
2259 		return -ENOMEM;
2260 	}
2261 	INIT_LIST_HEAD(&after->list);
2262 
2263 	/* Now gather all removed mappings */
2264 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2265 	while (tmp) {
2266 		/* Remember mapping split at the start */
2267 		if (tmp->start < saddr) {
2268 			before->start = tmp->start;
2269 			before->last = saddr - 1;
2270 			before->offset = tmp->offset;
2271 			before->flags = tmp->flags;
2272 			before->bo_va = tmp->bo_va;
2273 			list_add(&before->list, &tmp->bo_va->invalids);
2274 		}
2275 
2276 		/* Remember mapping split at the end */
2277 		if (tmp->last > eaddr) {
2278 			after->start = eaddr + 1;
2279 			after->last = tmp->last;
2280 			after->offset = tmp->offset;
2281 			after->offset += after->start - tmp->start;
2282 			after->flags = tmp->flags;
2283 			after->bo_va = tmp->bo_va;
2284 			list_add(&after->list, &tmp->bo_va->invalids);
2285 		}
2286 
2287 		list_del(&tmp->list);
2288 		list_add(&tmp->list, &removed);
2289 
2290 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2291 	}
2292 
2293 	/* And free them up */
2294 	list_for_each_entry_safe(tmp, next, &removed, list) {
2295 		amdgpu_vm_it_remove(tmp, &vm->va);
2296 		list_del(&tmp->list);
2297 
2298 		if (tmp->start < saddr)
2299 		    tmp->start = saddr;
2300 		if (tmp->last > eaddr)
2301 		    tmp->last = eaddr;
2302 
2303 		tmp->bo_va = NULL;
2304 		list_add(&tmp->list, &vm->freed);
2305 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2306 	}
2307 
2308 	/* Insert partial mapping before the range */
2309 	if (!list_empty(&before->list)) {
2310 		amdgpu_vm_it_insert(before, &vm->va);
2311 		if (before->flags & AMDGPU_PTE_PRT)
2312 			amdgpu_vm_prt_get(adev);
2313 	} else {
2314 		kfree(before);
2315 	}
2316 
2317 	/* Insert partial mapping after the range */
2318 	if (!list_empty(&after->list)) {
2319 		amdgpu_vm_it_insert(after, &vm->va);
2320 		if (after->flags & AMDGPU_PTE_PRT)
2321 			amdgpu_vm_prt_get(adev);
2322 	} else {
2323 		kfree(after);
2324 	}
2325 
2326 	return 0;
2327 }
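
/*
 * Worked example with assumed numbers: if an existing mapping covers GPU
 * pages [0x100, 0x1ff] and the range [0x140, 0x17f] is cleared, the code
 * above keeps a "before" remainder [0x100, 0x13f] and an "after" remainder
 * [0x180, 0x1ff] in the interval tree, while the middle part is trimmed to
 * the cleared range and queued on &vm->freed for later PTE invalidation.
 */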
2328 
2329 /**
2330  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2331  *
2332  * @vm: the requested VM
2333  * @addr: the address
2334  *
2335  * Find a mapping by its address.
2336  *
2337  * Returns:
2338  * The amdgpu_bo_va_mapping matching for addr or NULL
2339  *
2340  */
2341 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2342 							 uint64_t addr)
2343 {
2344 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2345 }
2346 
2347 /**
2348  * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2349  *
2350  * @vm: the requested vm
2351  * @ticket: CS ticket
2352  *
2353  * Trace all mappings of BOs reserved during a command submission.
2354  */
2355 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
2356 {
2357 	struct amdgpu_bo_va_mapping *mapping;
2358 
2359 	if (!trace_amdgpu_vm_bo_cs_enabled())
2360 		return;
2361 
2362 	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2363 	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
2364 		if (mapping->bo_va && mapping->bo_va->base.bo) {
2365 			struct amdgpu_bo *bo;
2366 
2367 			bo = mapping->bo_va->base.bo;
2368 			if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
2369 				continue;
2370 		}
2371 
2372 		trace_amdgpu_vm_bo_cs(mapping);
2373 	}
2374 }
2375 
2376 /**
2377  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2378  *
2379  * @adev: amdgpu_device pointer
2380  * @bo_va: requested bo_va
2381  *
2382  * Remove @bo_va->bo from the requested vm.
2383  *
2384  * Object has to be reserved!
2385  */
2386 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2387 		      struct amdgpu_bo_va *bo_va)
2388 {
2389 	struct amdgpu_bo_va_mapping *mapping, *next;
2390 	struct amdgpu_vm *vm = bo_va->base.vm;
2391 
2392 	list_del(&bo_va->base.bo_list);
2393 
2394 	spin_lock(&vm->moved_lock);
2395 	list_del(&bo_va->base.vm_status);
2396 	spin_unlock(&vm->moved_lock);
2397 
2398 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2399 		list_del(&mapping->list);
2400 		amdgpu_vm_it_remove(mapping, &vm->va);
2401 		mapping->bo_va = NULL;
2402 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2403 		list_add(&mapping->list, &vm->freed);
2404 	}
2405 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2406 		list_del(&mapping->list);
2407 		amdgpu_vm_it_remove(mapping, &vm->va);
2408 		amdgpu_vm_free_mapping(adev, vm, mapping,
2409 				       bo_va->last_pt_update);
2410 	}
2411 
2412 	dma_fence_put(bo_va->last_pt_update);
2413 	kfree(bo_va);
2414 }
2415 
2416 /**
2417  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2418  *
2419  * @adev: amdgpu_device pointer
2420  * @bo: amdgpu buffer object
2421  * @evicted: is the BO evicted
2422  *
2423  * Mark @bo as invalid.
2424  */
2425 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2426 			     struct amdgpu_bo *bo, bool evicted)
2427 {
2428 	struct amdgpu_vm_bo_base *bo_base;
2429 
2430 	/* shadow bo doesn't have bo base, its validation needs its parent */
2431 	if (bo->parent && bo->parent->shadow == bo)
2432 		bo = bo->parent;
2433 
2434 	list_for_each_entry(bo_base, &bo->va, bo_list) {
2435 		struct amdgpu_vm *vm = bo_base->vm;
2436 		bool was_moved = bo_base->moved;
2437 
2438 		bo_base->moved = true;
2439 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2440 			if (bo->tbo.type == ttm_bo_type_kernel)
2441 				list_move(&bo_base->vm_status, &vm->evicted);
2442 			else
2443 				list_move_tail(&bo_base->vm_status,
2444 					       &vm->evicted);
2445 			continue;
2446 		}
2447 
2448 		if (was_moved)
2449 			continue;
2450 
2451 		if (bo->tbo.type == ttm_bo_type_kernel) {
2452 			list_move(&bo_base->vm_status, &vm->relocated);
2453 		} else {
2454 			spin_lock(&bo_base->vm->moved_lock);
2455 			list_move(&bo_base->vm_status, &vm->moved);
2456 			spin_unlock(&bo_base->vm->moved_lock);
2457 		}
2458 	}
2459 }
2460 
2461 /**
2462  * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2463  *
2464  * @vm_size: VM size
2465  *
2466  * Returns:
2467  * VM page table size as a power of two
2468  */
2469 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2470 {
2471 	/* Total bits covered by PD + PTs */
2472 	unsigned bits = ilog2(vm_size) + 18;
2473 
2474 	/* Make sure the PD is 4K in size up to 8GB address space.
2475 	   Above that, split equally between PD and PTs */
2476 	if (vm_size <= 8)
2477 		return (bits - 9);
2478 	else
2479 		return ((bits + 3) / 2);
2480 }
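
/*
 * Worked example with assumed sizes: for vm_size = 8 GB, bits = ilog2(8) +
 * 18 = 21 and the block size is 21 - 9 = 12, which keeps the PD at 4K.  For
 * vm_size = 64 GB, bits = ilog2(64) + 18 = 24 and the result is
 * (24 + 3) / 2 = 13, splitting the bits roughly equally between PD and PTs.
 */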
2481 
2482 /**
2483  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2484  *
2485  * @adev: amdgpu_device pointer
2486  * @vm_size: the default VM size if it is set to auto
2487  * @fragment_size_default: Default PTE fragment size
2488  * @max_level: max VMPT level
2489  * @max_bits: max address space size in bits
2490  *
2491  */
2492 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
2493 			   uint32_t fragment_size_default, unsigned max_level,
2494 			   unsigned max_bits)
2495 {
2496 	uint64_t tmp;
2497 
2498 	/* adjust vm size first */
2499 	if (amdgpu_vm_size != -1) {
2500 		unsigned max_size = 1 << (max_bits - 30);
2501 
2502 		vm_size = amdgpu_vm_size;
2503 		if (vm_size > max_size) {
2504 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2505 				 amdgpu_vm_size, max_size);
2506 			vm_size = max_size;
2507 		}
2508 	}
2509 
2510 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2511 
2512 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2513 	if (amdgpu_vm_block_size != -1)
2514 		tmp >>= amdgpu_vm_block_size - 9;
2515 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2516 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2517 	switch (adev->vm_manager.num_level) {
2518 	case 3:
2519 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2520 		break;
2521 	case 2:
2522 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2523 		break;
2524 	case 1:
2525 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2526 		break;
2527 	default:
2528 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2529 	}
2530 	/* block size depends on vm size and hw setup */
2531 	if (amdgpu_vm_block_size != -1)
2532 		adev->vm_manager.block_size =
2533 			min((unsigned)amdgpu_vm_block_size, max_bits
2534 			    - AMDGPU_GPU_PAGE_SHIFT
2535 			    - 9 * adev->vm_manager.num_level);
2536 	else if (adev->vm_manager.num_level > 1)
2537 		adev->vm_manager.block_size = 9;
2538 	else
2539 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2540 
2541 	if (amdgpu_vm_fragment_size == -1)
2542 		adev->vm_manager.fragment_size = fragment_size_default;
2543 	else
2544 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2545 
2546 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2547 		 vm_size, adev->vm_manager.num_level + 1,
2548 		 adev->vm_manager.block_size,
2549 		 adev->vm_manager.fragment_size);
2550 }
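
/*
 * Worked example with assumed parameters: for vm_size = 256 GB and the
 * module parameters left at their defaults, max_pfn = 256 << 18 = 2^26,
 * fls64(2^26) - 1 = 26 and DIV_ROUND_UP(26, 9) - 1 = 2, so num_level becomes
 * min(max_level, 2) and, with more than one level, block_size defaults to 9.
 */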
2551 
2552 /**
2553  * amdgpu_vm_init - initialize a vm instance
2554  *
2555  * @adev: amdgpu_device pointer
2556  * @vm: requested vm
2557  * @vm_context: Indicates whether it is a GFX or Compute context
2558  * @pasid: Process address space identifier
2559  *
2560  * Init @vm fields.
2561  *
2562  * Returns:
2563  * 0 for success, error for failure.
2564  */
2565 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2566 		   int vm_context, unsigned int pasid)
2567 {
2568 	struct amdgpu_bo_param bp;
2569 	struct amdgpu_bo *root;
2570 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2571 		AMDGPU_VM_PTE_COUNT(adev) * 8);
2572 	unsigned ring_instance;
2573 	struct amdgpu_ring *ring;
2574 	struct drm_sched_rq *rq;
2575 	unsigned long size;
2576 	uint64_t flags;
2577 	int r, i;
2578 
2579 	vm->va = RB_ROOT_CACHED;
2580 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2581 		vm->reserved_vmid[i] = NULL;
2582 	INIT_LIST_HEAD(&vm->evicted);
2583 	INIT_LIST_HEAD(&vm->relocated);
2584 	spin_lock_init(&vm->moved_lock);
2585 	INIT_LIST_HEAD(&vm->moved);
2586 	INIT_LIST_HEAD(&vm->idle);
2587 	INIT_LIST_HEAD(&vm->freed);
2588 
2589 	/* create scheduler entity for page table updates */
2590 
2591 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2592 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
2593 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
2594 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2595 	r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
2596 	if (r)
2597 		return r;
2598 
2599 	vm->pte_support_ats = false;
2600 
2601 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2602 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2603 						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2604 
2605 		if (adev->asic_type == CHIP_RAVEN)
2606 			vm->pte_support_ats = true;
2607 	} else {
2608 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2609 						AMDGPU_VM_USE_CPU_FOR_GFX);
2610 	}
2611 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2612 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2613 	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2614 		  "CPU update of VM recommended only for large BAR system\n");
2615 	vm->last_update = NULL;
2616 
2617 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2618 	if (vm->use_cpu_for_update)
2619 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2620 	else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
2621 		flags |= AMDGPU_GEM_CREATE_SHADOW;
2622 
2623 	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2624 	memset(&bp, 0, sizeof(bp));
2625 	bp.size = size;
2626 	bp.byte_align = align;
2627 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
2628 	bp.flags = flags;
2629 	bp.type = ttm_bo_type_kernel;
2630 	bp.resv = NULL;
2631 	r = amdgpu_bo_create(adev, &bp, &root);
2632 	if (r)
2633 		goto error_free_sched_entity;
2634 
2635 	r = amdgpu_bo_reserve(root, true);
2636 	if (r)
2637 		goto error_free_root;
2638 
2639 	r = amdgpu_vm_clear_bo(adev, vm, root,
2640 			       adev->vm_manager.root_level,
2641 			       vm->pte_support_ats);
2642 	if (r)
2643 		goto error_unreserve;
2644 
2645 	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
2646 	amdgpu_bo_unreserve(vm->root.base.bo);
2647 
2648 	if (pasid) {
2649 		unsigned long flags;
2650 
2651 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2652 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2653 			      GFP_ATOMIC);
2654 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2655 		if (r < 0)
2656 			goto error_free_root;
2657 
2658 		vm->pasid = pasid;
2659 	}
2660 
2661 	INIT_KFIFO(vm->faults);
2662 	vm->fault_credit = 16;
2663 
2664 	return 0;
2665 
2666 error_unreserve:
2667 	amdgpu_bo_unreserve(vm->root.base.bo);
2668 
2669 error_free_root:
2670 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2671 	amdgpu_bo_unref(&vm->root.base.bo);
2672 	vm->root.base.bo = NULL;
2673 
2674 error_free_sched_entity:
2675 	drm_sched_entity_destroy(&vm->entity);
2676 
2677 	return r;
2678 }
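
/*
 * Illustrative sketch only, not driver code: a per-file VM is typically set
 * up once when the file is opened and torn down again on release, along the
 * lines of (@fpriv and @pasid are assumptions):
 *
 *	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_vm_fini(adev, &fpriv->vm);
 */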
2679 
2680 /**
2681  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2682  *
2683  * @adev: amdgpu_device pointer
2684  * @vm: requested vm
2685  *
2686  * This only works on GFX VMs that don't have any BOs added and that
2687  * have no page tables allocated yet.
2688  *
2689  * Changes the following VM parameters:
2690  * - use_cpu_for_update
2691  * - pte_supports_ats
2692  * - pasid (old PASID is released, because compute manages its own PASIDs)
2693  *
2694  * Reinitializes the page directory to reflect the changed ATS
2695  * setting.
2696  *
2697  * Returns:
2698  * 0 for success, -errno for errors.
2699  */
2700 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2701 {
2702 	bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
2703 	int r;
2704 
2705 	r = amdgpu_bo_reserve(vm->root.base.bo, true);
2706 	if (r)
2707 		return r;
2708 
2709 	/* Sanity checks */
2710 	if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
2711 		r = -EINVAL;
2712 		goto error;
2713 	}
2714 
2715 	/* Check if PD needs to be reinitialized and do it before
2716 	 * changing any other state, in case it fails.
2717 	 */
2718 	if (pte_support_ats != vm->pte_support_ats) {
2719 		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2720 			       adev->vm_manager.root_level,
2721 			       pte_support_ats);
2722 		if (r)
2723 			goto error;
2724 	}
2725 
2726 	/* Update VM state */
2727 	vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2728 				    AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2729 	vm->pte_support_ats = pte_support_ats;
2730 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2731 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2732 	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2733 		  "CPU update of VM recommended only for large BAR system\n");
2734 
2735 	if (vm->pasid) {
2736 		unsigned long flags;
2737 
2738 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2739 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2740 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2741 
2742 		vm->pasid = 0;
2743 	}
2744 
2745 	/* Free the shadow bo for compute VM */
2746 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2747 
2748 error:
2749 	amdgpu_bo_unreserve(vm->root.base.bo);
2750 	return r;
2751 }
2752 
2753 /**
2754  * amdgpu_vm_free_levels - free PD/PT levels
2755  *
2756  * @adev: amdgpu device structure
2757  * @parent: PD/PT starting level to free
2758  * @level: level of parent structure
2759  *
2760  * Free the page directory or page table level and all sub levels.
2761  */
2762 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2763 				  struct amdgpu_vm_pt *parent,
2764 				  unsigned level)
2765 {
2766 	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2767 
2768 	if (parent->base.bo) {
2769 		list_del(&parent->base.bo_list);
2770 		list_del(&parent->base.vm_status);
2771 		amdgpu_bo_unref(&parent->base.bo->shadow);
2772 		amdgpu_bo_unref(&parent->base.bo);
2773 	}
2774 
2775 	if (parent->entries)
2776 		for (i = 0; i < num_entries; i++)
2777 			amdgpu_vm_free_levels(adev, &parent->entries[i],
2778 					      level + 1);
2779 
2780 	kvfree(parent->entries);
2781 }
2782 
2783 /**
2784  * amdgpu_vm_fini - tear down a vm instance
2785  *
2786  * @adev: amdgpu_device pointer
2787  * @vm: requested vm
2788  *
2789  * Tear down @vm.
2790  * Unbind the VM and remove all bos from the vm bo list
2791  */
2792 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2793 {
2794 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2795 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2796 	struct amdgpu_bo *root;
2797 	u64 fault;
2798 	int i, r;
2799 
2800 	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
2801 
2802 	/* Clear pending page faults from IH when the VM is destroyed */
2803 	while (kfifo_get(&vm->faults, &fault))
2804 		amdgpu_ih_clear_fault(adev, fault);
2805 
2806 	if (vm->pasid) {
2807 		unsigned long flags;
2808 
2809 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2810 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2811 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2812 	}
2813 
2814 	drm_sched_entity_destroy(&vm->entity);
2815 
2816 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2817 		dev_err(adev->dev, "still active bo inside vm\n");
2818 	}
2819 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2820 					     &vm->va.rb_root, rb) {
2821 		list_del(&mapping->list);
2822 		amdgpu_vm_it_remove(mapping, &vm->va);
2823 		kfree(mapping);
2824 	}
2825 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2826 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2827 			amdgpu_vm_prt_fini(adev, vm);
2828 			prt_fini_needed = false;
2829 		}
2830 
2831 		list_del(&mapping->list);
2832 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2833 	}
2834 
2835 	root = amdgpu_bo_ref(vm->root.base.bo);
2836 	r = amdgpu_bo_reserve(root, true);
2837 	if (r) {
2838 		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2839 	} else {
2840 		amdgpu_vm_free_levels(adev, &vm->root,
2841 				      adev->vm_manager.root_level);
2842 		amdgpu_bo_unreserve(root);
2843 	}
2844 	amdgpu_bo_unref(&root);
2845 	dma_fence_put(vm->last_update);
2846 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2847 		amdgpu_vmid_free_reserved(adev, vm, i);
2848 }
2849 
2850 /**
2851  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2852  *
2853  * @adev: amdgpu_device pointer
2854  * @pasid: PASID to identify the VM
2855  *
2856  * This function is expected to be called in interrupt context.
2857  *
2858  * Returns:
2859  * True if there was fault credit, false otherwise
2860  */
2861 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2862 				  unsigned int pasid)
2863 {
2864 	struct amdgpu_vm *vm;
2865 
2866 	spin_lock(&adev->vm_manager.pasid_lock);
2867 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2868 	if (!vm) {
2869 		/* VM not found, can't track fault credit */
2870 		spin_unlock(&adev->vm_manager.pasid_lock);
2871 		return true;
2872 	}
2873 
2874 	/* No lock needed. Only accessed by the IRQ handler */
2875 	if (!vm->fault_credit) {
2876 		/* Too many faults in this VM */
2877 		spin_unlock(&adev->vm_manager.pasid_lock);
2878 		return false;
2879 	}
2880 
2881 	vm->fault_credit--;
2882 	spin_unlock(&adev->vm_manager.pasid_lock);
2883 	return true;
2884 }
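
/*
 * Illustrative sketch only, not driver code: an interrupt handler can use the
 * credit check to throttle fault processing per VM,
 *
 *	if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
 *		return;
 *
 * and simply drop the fault entry once the VM has run out of credit.
 */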
2885 
2886 /**
2887  * amdgpu_vm_manager_init - init the VM manager
2888  *
2889  * @adev: amdgpu_device pointer
2890  *
2891  * Initialize the VM manager structures
2892  */
2893 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2894 {
2895 	unsigned i;
2896 
2897 	amdgpu_vmid_mgr_init(adev);
2898 
2899 	adev->vm_manager.fence_context =
2900 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2901 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2902 		adev->vm_manager.seqno[i] = 0;
2903 
2904 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2905 	spin_lock_init(&adev->vm_manager.prt_lock);
2906 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2907 
2908 	/* If not overridden by the user, compute VM page tables are updated
2909 	 * by the CPU only on large BAR systems by default.
2910 	 */
2911 #ifdef CONFIG_X86_64
2912 	if (amdgpu_vm_update_mode == -1) {
2913 		if (amdgpu_gmc_vram_full_visible(&adev->gmc))
2914 			adev->vm_manager.vm_update_mode =
2915 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2916 		else
2917 			adev->vm_manager.vm_update_mode = 0;
2918 	} else
2919 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2920 #else
2921 	adev->vm_manager.vm_update_mode = 0;
2922 #endif
2923 
2924 	idr_init(&adev->vm_manager.pasid_idr);
2925 	spin_lock_init(&adev->vm_manager.pasid_lock);
2926 }
2927 
2928 /**
2929  * amdgpu_vm_manager_fini - cleanup VM manager
2930  *
2931  * @adev: amdgpu_device pointer
2932  *
2933  * Cleanup the VM manager and free resources.
2934  */
2935 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2936 {
2937 	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
2938 	idr_destroy(&adev->vm_manager.pasid_idr);
2939 
2940 	amdgpu_vmid_mgr_fini(adev);
2941 }
2942 
2943 /**
2944  * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2945  *
2946  * @dev: drm device pointer
2947  * @data: drm_amdgpu_vm
2948  * @filp: drm file pointer
2949  *
2950  * Returns:
2951  * 0 for success, -errno for errors.
2952  */
2953 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2954 {
2955 	union drm_amdgpu_vm *args = data;
2956 	struct amdgpu_device *adev = dev->dev_private;
2957 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2958 	int r;
2959 
2960 	switch (args->in.op) {
2961 	case AMDGPU_VM_OP_RESERVE_VMID:
2962 		/* currently, we only have a requirement to reserve a VMID from the gfxhub */
2963 		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2964 		if (r)
2965 			return r;
2966 		break;
2967 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2968 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2969 		break;
2970 	default:
2971 		return -EINVAL;
2972 	}
2973 
2974 	return 0;
2975 }
2976 
2977 /**
2978  * amdgpu_vm_get_task_info - Extracts task info for a PASID.
2979  *
2980  * @adev: amdgpu_device pointer
2981  * @pasid: PASID identifier for VM
2982  * @task_info: task_info to fill.
2983  */
2984 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
2985 			 struct amdgpu_task_info *task_info)
2986 {
2987 	struct amdgpu_vm *vm;
2988 
2989 	spin_lock(&adev->vm_manager.pasid_lock);
2990 
2991 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2992 	if (vm)
2993 		*task_info = vm->task_info;
2994 
2995 	spin_unlock(&adev->vm_manager.pasid_lock);
2996 }
2997 
2998 /**
2999  * amdgpu_vm_set_task_info - Sets the VM's task info.
3000  *
3001  * @vm: vm for which to set the info
3002  */
3003 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
3004 {
3005 	if (!vm->task_info.pid) {
3006 		vm->task_info.pid = current->pid;
3007 		get_task_comm(vm->task_info.task_name, current);
3008 
3009 		if (current->group_leader->mm == current->mm) {
3010 			vm->task_info.tgid = current->group_leader->pid;
3011 			get_task_comm(vm->task_info.process_name, current->group_leader);
3012 		}
3013 	}
3014 }
3015