1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <linux/interval_tree_generic.h>
30 #include <linux/idr.h>
31 #include <drm/drmP.h>
32 #include <drm/amdgpu_drm.h>
33 #include "amdgpu.h"
34 #include "amdgpu_trace.h"
35 
36 /*
37  * GPUVM
38  * GPUVM is similar to the legacy gart on older asics, however
39  * rather than there being a single global gart table
40  * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
43  * can be mapped as snooped (cached system pages) or unsnooped
44  * (uncached system pages).
45  * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
48  * buffer.  VMIDs are allocated dynamically as commands are submitted.
49  * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
51  * command buffers and a VMID is assigned.
52  * Cayman/Trinity support up to 8 active VMs at any given time;
53  * SI supports 16.
54  */
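
/*
 * The page tables are organized as a tree: page directories (PDB2, PDB1 and
 * PDB0, depending on how many levels the ASIC and configuration use) point
 * at page table blocks (PTB) which hold the actual PTEs, see
 * amdgpu_vm_level_shift() below.
 */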
55 
56 #define START(node) ((node)->start)
57 #define LAST(node) ((node)->last)
58 
59 INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
60 		     START, LAST, static, amdgpu_vm_it)
61 
62 #undef START
63 #undef LAST
64 
65 /* Local structure. Encapsulate some VM table update parameters to reduce
66  * the number of function parameters
67  */
68 struct amdgpu_pte_update_params {
69 	/* amdgpu device we do this update for */
70 	struct amdgpu_device *adev;
71 	/* optional amdgpu_vm we do this update for */
72 	struct amdgpu_vm *vm;
73 	/* address where to copy page table entries from */
74 	uint64_t src;
75 	/* indirect buffer to fill with commands */
76 	struct amdgpu_ib *ib;
77 	/* Function which actually does the update */
78 	void (*func)(struct amdgpu_pte_update_params *params,
79 		     struct amdgpu_bo *bo, uint64_t pe,
80 		     uint64_t addr, unsigned count, uint32_t incr,
81 		     uint64_t flags);
82 	/* The next two are used during VM update by CPU
83 	 *  DMA addresses to use for mapping
84 	 *  Kernel pointer of PD/PT BO that needs to be updated
85 	 */
86 	dma_addr_t *pages_addr;
87 	void *kptr;
88 };
89 
/* Helper to disable the partially resident texture feature from a fence callback */
91 struct amdgpu_prt_cb {
92 	struct amdgpu_device *adev;
93 	struct dma_fence_cb cb;
94 };
95 
96 /**
97  * amdgpu_vm_level_shift - return the addr shift for each level
98  *
 * @adev: amdgpu_device pointer
 * @level: level in the page table hierarchy
100  *
101  * Returns the number of bits the pfn needs to be right shifted for a level.
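 *
 * For example, assuming a 9 bit block size this evaluates to a shift of 0
 * for the PTB level, 9 for PDB0, 18 for PDB1 and 27 for PDB2.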
102  */
103 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
104 				      unsigned level)
105 {
106 	unsigned shift = 0xff;
107 
108 	switch (level) {
109 	case AMDGPU_VM_PDB2:
110 	case AMDGPU_VM_PDB1:
111 	case AMDGPU_VM_PDB0:
112 		shift = 9 * (AMDGPU_VM_PDB0 - level) +
113 			adev->vm_manager.block_size;
114 		break;
115 	case AMDGPU_VM_PTB:
116 		shift = 0;
117 		break;
118 	default:
		dev_err(adev->dev, "the level %d isn't supported.\n", level);
120 	}
121 
122 	return shift;
123 }
124 
125 /**
126  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
127  *
 * @adev: amdgpu_device pointer
 * @level: level in the page table hierarchy
129  *
130  * Calculate the number of entries in a page directory or page table.
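 *
 * For example, assuming a 48 bit address space with 4KB GPU pages
 * (max_pfn = 1 << 36) and a 27 bit root shift, the root PD gets
 * (1 << 36) >> 27 = 512 entries.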
131  */
132 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
133 				      unsigned level)
134 {
135 	unsigned shift = amdgpu_vm_level_shift(adev,
136 					       adev->vm_manager.root_level);
137 
138 	if (level == adev->vm_manager.root_level)
139 		/* For the root directory */
140 		return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
141 	else if (level != AMDGPU_VM_PTB)
142 		/* Everything in between */
143 		return 512;
144 	else
145 		/* For the page tables on the leaves */
146 		return AMDGPU_VM_PTE_COUNT(adev);
147 }
148 
149 /**
150  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
151  *
 * @adev: amdgpu_device pointer
 * @level: level in the page table hierarchy
153  *
154  * Calculate the size of the BO for a page directory or page table in bytes.
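 *
 * Each entry is 8 bytes, so e.g. a 512 entry directory needs a
 * 4096 byte BO, i.e. exactly one GPU page.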
155  */
156 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
157 {
158 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
159 }
160 
161 /**
162  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
163  *
164  * @vm: vm providing the BOs
165  * @validated: head of validation list
166  * @entry: entry to add
167  *
168  * Add the page directory to the list of BOs to
169  * validate for command submission.
170  */
171 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
172 			 struct list_head *validated,
173 			 struct amdgpu_bo_list_entry *entry)
174 {
175 	entry->robj = vm->root.base.bo;
176 	entry->priority = 0;
177 	entry->tv.bo = &entry->robj->tbo;
178 	entry->tv.shared = true;
179 	entry->user_pages = NULL;
180 	list_add(&entry->tv.head, validated);
181 }
182 
183 /**
184  * amdgpu_vm_validate_pt_bos - validate the page table BOs
185  *
186  * @adev: amdgpu device pointer
187  * @vm: vm providing the BOs
188  * @validate: callback to do the validation
189  * @param: parameter for the validation callback
190  *
 * Validate the page table BOs on command submission if necessary.
192  */
193 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
194 			      int (*validate)(void *p, struct amdgpu_bo *bo),
195 			      void *param)
196 {
197 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
198 	int r;
199 
200 	spin_lock(&vm->status_lock);
201 	while (!list_empty(&vm->evicted)) {
202 		struct amdgpu_vm_bo_base *bo_base;
203 		struct amdgpu_bo *bo;
204 
205 		bo_base = list_first_entry(&vm->evicted,
206 					   struct amdgpu_vm_bo_base,
207 					   vm_status);
208 		spin_unlock(&vm->status_lock);
209 
210 		bo = bo_base->bo;
211 		BUG_ON(!bo);
212 		if (bo->parent) {
213 			r = validate(param, bo);
214 			if (r)
215 				return r;
216 
217 			spin_lock(&glob->lru_lock);
218 			ttm_bo_move_to_lru_tail(&bo->tbo);
219 			if (bo->shadow)
220 				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
221 			spin_unlock(&glob->lru_lock);
222 		}
223 
224 		if (bo->tbo.type == ttm_bo_type_kernel &&
225 		    vm->use_cpu_for_update) {
226 			r = amdgpu_bo_kmap(bo, NULL);
227 			if (r)
228 				return r;
229 		}
230 
231 		spin_lock(&vm->status_lock);
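		/* Page table BOs are allocated as ttm_bo_type_kernel; they go
		 * to the relocated list so their parent directory entries get
		 * updated, while per VM user BOs go to the moved list so
		 * their mappings get updated.
		 */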
232 		if (bo->tbo.type != ttm_bo_type_kernel)
233 			list_move(&bo_base->vm_status, &vm->moved);
234 		else
235 			list_move(&bo_base->vm_status, &vm->relocated);
236 	}
237 	spin_unlock(&vm->status_lock);
238 
239 	return 0;
240 }
241 
242 /**
243  * amdgpu_vm_ready - check VM is ready for updates
244  *
245  * @vm: VM to check
246  *
247  * Check if all VM PDs/PTs are ready for updates
248  */
249 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
250 {
251 	bool ready;
252 
253 	spin_lock(&vm->status_lock);
254 	ready = list_empty(&vm->evicted);
255 	spin_unlock(&vm->status_lock);
256 
257 	return ready;
258 }
259 
260 /**
261  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
262  *
 * @adev: amdgpu_device pointer
 * @vm: VM the BO belongs to
 * @bo: BO to clear
 * @level: level this BO is at
 * @pte_support_ats: whether the entries need the default ATC (ATS) value
266  *
267  * Root PD needs to be reserved when calling this.
268  */
269 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
270 			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
271 			      unsigned level, bool pte_support_ats)
272 {
273 	struct ttm_operation_ctx ctx = { true, false };
274 	struct dma_fence *fence = NULL;
275 	unsigned entries, ats_entries;
276 	struct amdgpu_ring *ring;
277 	struct amdgpu_job *job;
278 	uint64_t addr;
279 	int r;
280 
281 	addr = amdgpu_bo_gpu_offset(bo);
282 	entries = amdgpu_bo_size(bo) / 8;
283 
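	/* When ATS is enabled the entries covering the address range below
	 * AMDGPU_VA_HOLE_START are initialized with the default ATC value
	 * instead of zero. For the root PD only the entries actually
	 * covering that range are counted, lower level tables are filled
	 * with it completely.
	 */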
284 	if (pte_support_ats) {
285 		if (level == adev->vm_manager.root_level) {
286 			ats_entries = amdgpu_vm_level_shift(adev, level);
287 			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
288 			ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
289 			ats_entries = min(ats_entries, entries);
290 			entries -= ats_entries;
291 		} else {
292 			ats_entries = entries;
293 			entries = 0;
294 		}
295 	} else {
296 		ats_entries = 0;
297 	}
298 
299 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
300 
301 	r = reservation_object_reserve_shared(bo->tbo.resv);
302 	if (r)
303 		return r;
304 
305 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
306 	if (r)
307 		goto error;
308 
309 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
310 	if (r)
311 		goto error;
312 
313 	if (ats_entries) {
314 		uint64_t ats_value;
315 
316 		ats_value = AMDGPU_PTE_DEFAULT_ATC;
317 		if (level != AMDGPU_VM_PTB)
318 			ats_value |= AMDGPU_PDE_PTE;
319 
320 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
321 				      ats_entries, 0, ats_value);
322 		addr += ats_entries * 8;
323 	}
324 
325 	if (entries)
326 		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
327 				      entries, 0, 0);
328 
329 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
330 
331 	WARN_ON(job->ibs[0].length_dw > 64);
332 	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
333 			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
334 	if (r)
335 		goto error_free;
336 
337 	r = amdgpu_job_submit(job, ring, &vm->entity,
338 			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
339 	if (r)
340 		goto error_free;
341 
342 	amdgpu_bo_fence(bo, fence, true);
343 	dma_fence_put(fence);
344 
345 	if (bo->shadow)
346 		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
347 					  level, pte_support_ats);
348 
349 	return 0;
350 
351 error_free:
352 	amdgpu_job_free(job);
353 
354 error:
355 	return r;
356 }
357 
358 /**
359  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
360  *
361  * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @parent: parent PT
 * @saddr: start of the address range
 * @eaddr: end of the address range
 * @level: level of @parent in the page table hierarchy
 * @ats: initialize the page tables with the default ATC (ATS) value
365  *
366  * Make sure the page directories and page tables are allocated
367  */
368 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
369 				  struct amdgpu_vm *vm,
370 				  struct amdgpu_vm_pt *parent,
371 				  uint64_t saddr, uint64_t eaddr,
372 				  unsigned level, bool ats)
373 {
374 	unsigned shift = amdgpu_vm_level_shift(adev, level);
375 	unsigned pt_idx, from, to;
376 	u64 flags;
377 	int r;
378 
379 	if (!parent->entries) {
380 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
381 
382 		parent->entries = kvmalloc_array(num_entries,
383 						   sizeof(struct amdgpu_vm_pt),
384 						   GFP_KERNEL | __GFP_ZERO);
385 		if (!parent->entries)
386 			return -ENOMEM;
388 	}
389 
390 	from = saddr >> shift;
391 	to = eaddr >> shift;
392 	if (from >= amdgpu_vm_num_entries(adev, level) ||
393 	    to >= amdgpu_vm_num_entries(adev, level))
394 		return -EINVAL;
395 
396 	++level;
397 	saddr = saddr & ((1 << shift) - 1);
398 	eaddr = eaddr & ((1 << shift) - 1);
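	/* saddr and eaddr are now offsets inside a single entry of this
	 * level; they are passed on when recursing into the children below.
	 */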
399 
400 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
401 	if (vm->use_cpu_for_update)
402 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
403 	else
404 		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
405 				AMDGPU_GEM_CREATE_SHADOW);
406 
407 	/* walk over the address space and allocate the page tables */
408 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
409 		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
410 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
411 		struct amdgpu_bo *pt;
412 
413 		if (!entry->base.bo) {
414 			r = amdgpu_bo_create(adev,
415 					     amdgpu_vm_bo_size(adev, level),
416 					     AMDGPU_GPU_PAGE_SIZE,
417 					     AMDGPU_GEM_DOMAIN_VRAM, flags,
418 					     ttm_bo_type_kernel, resv, &pt);
419 			if (r)
420 				return r;
421 
422 			r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
423 			if (r) {
424 				amdgpu_bo_unref(&pt->shadow);
425 				amdgpu_bo_unref(&pt);
426 				return r;
427 			}
428 
429 			if (vm->use_cpu_for_update) {
430 				r = amdgpu_bo_kmap(pt, NULL);
431 				if (r) {
432 					amdgpu_bo_unref(&pt->shadow);
433 					amdgpu_bo_unref(&pt);
434 					return r;
435 				}
436 			}
437 
			/* Keep a reference to the root directory to avoid
			 * freeing them up in the wrong order.
			 */
441 			pt->parent = amdgpu_bo_ref(parent->base.bo);
442 
443 			entry->base.vm = vm;
444 			entry->base.bo = pt;
445 			list_add_tail(&entry->base.bo_list, &pt->va);
446 			spin_lock(&vm->status_lock);
447 			list_add(&entry->base.vm_status, &vm->relocated);
448 			spin_unlock(&vm->status_lock);
449 		}
450 
451 		if (level < AMDGPU_VM_PTB) {
452 			uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
453 			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
454 				((1 << shift) - 1);
455 			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
456 						   sub_eaddr, level, ats);
457 			if (r)
458 				return r;
459 		}
460 	}
461 
462 	return 0;
463 }
464 
465 /**
466  * amdgpu_vm_alloc_pts - Allocate page tables.
467  *
468  * @adev: amdgpu_device pointer
469  * @vm: VM to allocate page tables for
470  * @saddr: Start address which needs to be allocated
 * @size: Size of the range to allocate, in bytes
472  *
473  * Make sure the page tables are allocated.
474  */
475 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
476 			struct amdgpu_vm *vm,
477 			uint64_t saddr, uint64_t size)
478 {
479 	uint64_t eaddr;
480 	bool ats = false;
481 
482 	/* validate the parameters */
483 	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
484 		return -EINVAL;
485 
486 	eaddr = saddr + size - 1;
487 
488 	if (vm->pte_support_ats)
489 		ats = saddr < AMDGPU_VA_HOLE_START;
490 
491 	saddr /= AMDGPU_GPU_PAGE_SIZE;
492 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
493 
494 	if (eaddr >= adev->vm_manager.max_pfn) {
495 		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
496 			eaddr, adev->vm_manager.max_pfn);
497 		return -EINVAL;
498 	}
499 
500 	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
501 				      adev->vm_manager.root_level, ats);
502 }
503 
504 /**
505  * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
506  *
507  * @adev: amdgpu_device pointer
508  */
509 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
510 {
511 	const struct amdgpu_ip_block *ip_block;
512 	bool has_compute_vm_bug;
513 	struct amdgpu_ring *ring;
514 	int i;
515 
516 	has_compute_vm_bug = false;
517 
518 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
519 	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
522 		if (ip_block->version->major <= 7)
523 			has_compute_vm_bug = true;
524 		else if (ip_block->version->major == 8)
525 			if (adev->gfx.mec_fw_version < 673)
526 				has_compute_vm_bug = true;
527 	}
528 
529 	for (i = 0; i < adev->num_rings; i++) {
530 		ring = adev->rings[i];
531 		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
532 			/* only compute rings */
533 			ring->has_compute_vm_bug = has_compute_vm_bug;
534 		else
535 			ring->has_compute_vm_bug = false;
536 	}
537 }
538 
539 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
540 				  struct amdgpu_job *job)
541 {
542 	struct amdgpu_device *adev = ring->adev;
543 	unsigned vmhub = ring->funcs->vmhub;
544 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
545 	struct amdgpu_vmid *id;
546 	bool gds_switch_needed;
547 	bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
548 
549 	if (job->vmid == 0)
550 		return false;
551 	id = &id_mgr->ids[job->vmid];
552 	gds_switch_needed = ring->funcs->emit_gds_switch && (
553 		id->gds_base != job->gds_base ||
554 		id->gds_size != job->gds_size ||
555 		id->gws_base != job->gws_base ||
556 		id->gws_size != job->gws_size ||
557 		id->oa_base != job->oa_base ||
558 		id->oa_size != job->oa_size);
559 
560 	if (amdgpu_vmid_had_gpu_reset(adev, id))
561 		return true;
562 
563 	return vm_flush_needed || gds_switch_needed;
564 }
565 
566 static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
567 {
568 	return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
569 }
570 
571 /**
572  * amdgpu_vm_flush - hardware flush the vm
573  *
574  * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: is a pipeline sync needed
577  *
578  * Emit a VM flush when it is necessary.
579  */
580 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
581 {
582 	struct amdgpu_device *adev = ring->adev;
583 	unsigned vmhub = ring->funcs->vmhub;
584 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
585 	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
586 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
587 		id->gds_base != job->gds_base ||
588 		id->gds_size != job->gds_size ||
589 		id->gws_base != job->gws_base ||
590 		id->gws_size != job->gws_size ||
591 		id->oa_base != job->oa_base ||
592 		id->oa_size != job->oa_size);
593 	bool vm_flush_needed = job->vm_needs_flush;
594 	bool pasid_mapping_needed = id->pasid != job->pasid ||
595 		!id->pasid_mapping ||
596 		!dma_fence_is_signaled(id->pasid_mapping);
597 	struct dma_fence *fence = NULL;
598 	unsigned patch_offset = 0;
599 	int r;
600 
601 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
602 		gds_switch_needed = true;
603 		vm_flush_needed = true;
604 		pasid_mapping_needed = true;
605 	}
606 
607 	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
608 	vm_flush_needed &= !!ring->funcs->emit_vm_flush;
609 	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
610 		ring->funcs->emit_wreg;
611 
612 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
613 		return 0;
614 
615 	if (ring->funcs->init_cond_exec)
616 		patch_offset = amdgpu_ring_init_cond_exec(ring);
617 
618 	if (need_pipe_sync)
619 		amdgpu_ring_emit_pipeline_sync(ring);
620 
621 	if (vm_flush_needed) {
622 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
623 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
624 	}
625 
626 	if (pasid_mapping_needed)
627 		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
628 
629 	if (vm_flush_needed || pasid_mapping_needed) {
630 		r = amdgpu_fence_emit(ring, &fence);
631 		if (r)
632 			return r;
633 	}
634 
635 	if (vm_flush_needed) {
636 		mutex_lock(&id_mgr->lock);
637 		dma_fence_put(id->last_flush);
638 		id->last_flush = dma_fence_get(fence);
639 		id->current_gpu_reset_count =
640 			atomic_read(&adev->gpu_reset_counter);
641 		mutex_unlock(&id_mgr->lock);
642 	}
643 
644 	if (pasid_mapping_needed) {
645 		id->pasid = job->pasid;
646 		dma_fence_put(id->pasid_mapping);
647 		id->pasid_mapping = dma_fence_get(fence);
648 	}
649 	dma_fence_put(fence);
650 
651 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
652 		id->gds_base = job->gds_base;
653 		id->gds_size = job->gds_size;
654 		id->gws_base = job->gws_base;
655 		id->gws_size = job->gws_size;
656 		id->oa_base = job->oa_base;
657 		id->oa_size = job->oa_size;
658 		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
659 					    job->gds_size, job->gws_base,
660 					    job->gws_size, job->oa_base,
661 					    job->oa_size);
662 	}
663 
664 	if (ring->funcs->patch_cond_exec)
665 		amdgpu_ring_patch_cond_exec(ring, patch_offset);
666 
667 	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
668 	if (ring->funcs->emit_switch_buffer) {
669 		amdgpu_ring_emit_switch_buffer(ring);
670 		amdgpu_ring_emit_switch_buffer(ring);
671 	}
672 	return 0;
673 }
674 
675 /**
676  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
677  *
678  * @vm: requested vm
679  * @bo: requested buffer object
680  *
681  * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm
683  * Returns the found bo_va or NULL if none is found
684  *
685  * Object has to be reserved!
686  */
687 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
688 				       struct amdgpu_bo *bo)
689 {
690 	struct amdgpu_bo_va *bo_va;
691 
692 	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
693 		if (bo_va->base.vm == vm) {
694 			return bo_va;
695 		}
696 	}
697 	return NULL;
698 }
699 
700 /**
701  * amdgpu_vm_do_set_ptes - helper to call the right asic function
702  *
703  * @params: see amdgpu_pte_update_params definition
704  * @bo: PD/PT to update
705  * @pe: addr of the page entry
706  * @addr: dst addr to write into pe
707  * @count: number of page entries to update
708  * @incr: increase next addr by incr bytes
709  * @flags: hw access flags
710  *
711  * Traces the parameters and calls the right asic functions
712  * to setup the page table using the DMA.
713  */
714 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
715 				  struct amdgpu_bo *bo,
716 				  uint64_t pe, uint64_t addr,
717 				  unsigned count, uint32_t incr,
718 				  uint64_t flags)
719 {
720 	pe += amdgpu_bo_gpu_offset(bo);
721 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
722 
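	/* For very small updates it is cheaper to write the entries directly
	 * into the IB than to emit a separate PTE/PDE generation command.
	 */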
723 	if (count < 3) {
724 		amdgpu_vm_write_pte(params->adev, params->ib, pe,
725 				    addr | flags, count, incr);
726 
727 	} else {
728 		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
729 				      count, incr, flags);
730 	}
731 }
732 
733 /**
734  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
735  *
736  * @params: see amdgpu_pte_update_params definition
737  * @bo: PD/PT to update
738  * @pe: addr of the page entry
739  * @addr: dst addr to write into pe
740  * @count: number of page entries to update
741  * @incr: increase next addr by incr bytes
742  * @flags: hw access flags
743  *
744  * Traces the parameters and calls the DMA function to copy the PTEs.
745  */
746 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
747 				   struct amdgpu_bo *bo,
748 				   uint64_t pe, uint64_t addr,
749 				   unsigned count, uint32_t incr,
750 				   uint64_t flags)
751 {
752 	uint64_t src = (params->src + (addr >> 12) * 8);
753 
754 	pe += amdgpu_bo_gpu_offset(bo);
755 	trace_amdgpu_vm_copy_ptes(pe, src, count);
756 
757 	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
758 }
759 
760 /**
761  * amdgpu_vm_map_gart - Resolve gart mapping of addr
762  *
763  * @pages_addr: optional DMA address to use for lookup
764  * @addr: the unmapped addr
765  *
 * Look up the physical address of the page that the pte resolves
 * to and return the value to write into the page table entry.
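 *
 * For example with 64KB CPU pages and 4KB GPU pages the lookup picks the
 * CPU page, the offset bits select the 4KB chunk inside it and the final
 * mask aligns the result to GPU page granularity.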
768  */
769 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
770 {
771 	uint64_t result;
772 
773 	/* page table offset */
774 	result = pages_addr[addr >> PAGE_SHIFT];
775 
776 	/* in case cpu page size != gpu page size*/
777 	result |= addr & (~PAGE_MASK);
778 
779 	result &= 0xFFFFFFFFFFFFF000ULL;
780 
781 	return result;
782 }
783 
784 /**
785  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
786  *
787  * @params: see amdgpu_pte_update_params definition
788  * @bo: PD/PT to update
789  * @pe: kmap addr of the page entry
790  * @addr: dst addr to write into pe
791  * @count: number of page entries to update
792  * @incr: increase next addr by incr bytes
793  * @flags: hw access flags
794  *
795  * Write count number of PT/PD entries directly.
796  */
797 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
798 				   struct amdgpu_bo *bo,
799 				   uint64_t pe, uint64_t addr,
800 				   unsigned count, uint32_t incr,
801 				   uint64_t flags)
802 {
803 	unsigned int i;
804 	uint64_t value;
805 
806 	pe += (unsigned long)amdgpu_bo_kptr(bo);
807 
808 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
809 
810 	for (i = 0; i < count; i++) {
811 		value = params->pages_addr ?
812 			amdgpu_vm_map_gart(params->pages_addr, addr) :
813 			addr;
814 		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
815 				       i, value, flags);
816 		addr += incr;
817 	}
818 }
819 
820 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
821 			     void *owner)
822 {
823 	struct amdgpu_sync sync;
824 	int r;
825 
826 	amdgpu_sync_create(&sync);
827 	amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
828 	r = amdgpu_sync_wait(&sync, true);
829 	amdgpu_sync_free(&sync);
830 
831 	return r;
832 }
833 
834 /*
835  * amdgpu_vm_update_pde - update a single level in the hierarchy
836  *
 * @params: parameters for the update
838  * @vm: requested vm
839  * @parent: parent directory
840  * @entry: entry to update
841  *
842  * Makes sure the requested entry in parent is up to date.
843  */
844 static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
845 				 struct amdgpu_vm *vm,
846 				 struct amdgpu_vm_pt *parent,
847 				 struct amdgpu_vm_pt *entry)
848 {
849 	struct amdgpu_bo *bo = parent->base.bo, *pbo;
850 	uint64_t pde, pt, flags;
851 	unsigned level;
852 
853 	/* Don't update huge pages here */
854 	if (entry->huge)
855 		return;
856 
857 	for (level = 0, pbo = bo->parent; pbo; ++level)
858 		pbo = pbo->parent;
859 
860 	level += params->adev->vm_manager.root_level;
861 	pt = amdgpu_bo_gpu_offset(entry->base.bo);
862 	flags = AMDGPU_PTE_VALID;
863 	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
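	/* One PDE is 8 bytes, so the byte offset inside the PD is simply the
	 * entry index times eight.
	 */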
864 	pde = (entry - parent->entries) * 8;
865 	if (bo->shadow)
866 		params->func(params, bo->shadow, pde, pt, 1, 0, flags);
867 	params->func(params, bo, pde, pt, 1, 0, flags);
868 }
869 
870 /*
871  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
872  *
 * @adev: amdgpu_device pointer
 * @vm: related vm
 * @parent: parent PD
 * @level: level of @parent in the page table hierarchy
 *
 * Mark all PD levels as invalid after an error.
876  */
877 static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
878 				       struct amdgpu_vm *vm,
879 				       struct amdgpu_vm_pt *parent,
880 				       unsigned level)
881 {
882 	unsigned pt_idx, num_entries;
883 
884 	/*
885 	 * Recurse into the subdirectories. This recursion is harmless because
886 	 * we only have a maximum of 5 layers.
887 	 */
888 	num_entries = amdgpu_vm_num_entries(adev, level);
889 	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
890 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
891 
892 		if (!entry->base.bo)
893 			continue;
894 
895 		spin_lock(&vm->status_lock);
896 		if (list_empty(&entry->base.vm_status))
897 			list_add(&entry->base.vm_status, &vm->relocated);
898 		spin_unlock(&vm->status_lock);
899 		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
900 	}
901 }
902 
903 /*
904  * amdgpu_vm_update_directories - make sure that all directories are valid
905  *
906  * @adev: amdgpu_device pointer
907  * @vm: requested vm
908  *
909  * Makes sure all directories are up to date.
910  * Returns 0 for success, error for failure.
911  */
912 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
913 				 struct amdgpu_vm *vm)
914 {
915 	struct amdgpu_pte_update_params params;
916 	struct amdgpu_job *job;
917 	unsigned ndw = 0;
918 	int r = 0;
919 
920 	if (list_empty(&vm->relocated))
921 		return 0;
922 
923 restart:
924 	memset(&params, 0, sizeof(params));
925 	params.adev = adev;
926 
927 	if (vm->use_cpu_for_update) {
928 		r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
929 		if (unlikely(r))
930 			return r;
931 
932 		params.func = amdgpu_vm_cpu_set_ptes;
933 	} else {
934 		ndw = 512 * 8;
935 		r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
936 		if (r)
937 			return r;
938 
939 		params.ib = &job->ibs[0];
940 		params.func = amdgpu_vm_do_set_ptes;
941 	}
942 
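	/* Drain the relocated list. If the SDMA IB runs out of space we break
	 * out, submit what we have and restart from the top to handle the
	 * remaining entries.
	 */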
943 	spin_lock(&vm->status_lock);
944 	while (!list_empty(&vm->relocated)) {
945 		struct amdgpu_vm_bo_base *bo_base, *parent;
946 		struct amdgpu_vm_pt *pt, *entry;
947 		struct amdgpu_bo *bo;
948 
949 		bo_base = list_first_entry(&vm->relocated,
950 					   struct amdgpu_vm_bo_base,
951 					   vm_status);
952 		list_del_init(&bo_base->vm_status);
953 		spin_unlock(&vm->status_lock);
954 
955 		bo = bo_base->bo->parent;
956 		if (!bo) {
957 			spin_lock(&vm->status_lock);
958 			continue;
959 		}
960 
961 		parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
962 					  bo_list);
963 		pt = container_of(parent, struct amdgpu_vm_pt, base);
964 		entry = container_of(bo_base, struct amdgpu_vm_pt, base);
965 
966 		amdgpu_vm_update_pde(&params, vm, pt, entry);
967 
968 		spin_lock(&vm->status_lock);
969 		if (!vm->use_cpu_for_update &&
970 		    (ndw - params.ib->length_dw) < 32)
971 			break;
972 	}
973 	spin_unlock(&vm->status_lock);
974 
975 	if (vm->use_cpu_for_update) {
976 		/* Flush HDP */
977 		mb();
978 		amdgpu_asic_flush_hdp(adev, NULL);
979 	} else if (params.ib->length_dw == 0) {
980 		amdgpu_job_free(job);
981 	} else {
982 		struct amdgpu_bo *root = vm->root.base.bo;
983 		struct amdgpu_ring *ring;
984 		struct dma_fence *fence;
985 
986 		ring = container_of(vm->entity.sched, struct amdgpu_ring,
987 				    sched);
988 
989 		amdgpu_ring_pad_ib(ring, params.ib);
990 		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
991 				 AMDGPU_FENCE_OWNER_VM, false);
992 		WARN_ON(params.ib->length_dw > ndw);
993 		r = amdgpu_job_submit(job, ring, &vm->entity,
994 				      AMDGPU_FENCE_OWNER_VM, &fence);
995 		if (r)
996 			goto error;
997 
998 		amdgpu_bo_fence(root, fence, true);
999 		dma_fence_put(vm->last_update);
1000 		vm->last_update = fence;
1001 	}
1002 
1003 	if (!list_empty(&vm->relocated))
1004 		goto restart;
1005 
1006 	return 0;
1007 
1008 error:
1009 	amdgpu_vm_invalidate_level(adev, vm, &vm->root,
1010 				   adev->vm_manager.root_level);
1011 	amdgpu_job_free(job);
1012 	return r;
1013 }
1014 
1015 /**
 * amdgpu_vm_get_entry - find the entry for an address
1017  *
1018  * @p: see amdgpu_pte_update_params definition
1019  * @addr: virtual address in question
1020  * @entry: resulting entry or NULL
1021  * @parent: parent entry
1022  *
 * Find the vm_pt entry and its parent for the given address.
1024  */
1025 void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
1026 			 struct amdgpu_vm_pt **entry,
1027 			 struct amdgpu_vm_pt **parent)
1028 {
1029 	unsigned level = p->adev->vm_manager.root_level;
1030 
1031 	*parent = NULL;
1032 	*entry = &p->vm->root;
1033 	while ((*entry)->entries) {
1034 		unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
1035 
1036 		*parent = *entry;
1037 		*entry = &(*entry)->entries[addr >> shift];
1038 		addr &= (1ULL << shift) - 1;
1039 	}
1040 
1041 	if (level != AMDGPU_VM_PTB)
1042 		*entry = NULL;
1043 }
1044 
1045 /**
1046  * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
1047  *
1048  * @p: see amdgpu_pte_update_params definition
1049  * @entry: vm_pt entry to check
1050  * @parent: parent entry
1051  * @nptes: number of PTEs updated with this operation
1052  * @dst: destination address where the PTEs should point to
 * @flags: access flags for the PTEs
1054  *
1055  * Check if we can update the PD with a huge page.
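 *
 * If a whole PT worth of address space is mapped to physically contiguous
 * memory (nptes matches AMDGPU_VM_PTE_COUNT and no GART copy is involved),
 * the PDE itself can point at the memory with the huge page flag set and
 * the page table below it is skipped during the walk.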
1056  */
1057 static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
1058 					struct amdgpu_vm_pt *entry,
1059 					struct amdgpu_vm_pt *parent,
1060 					unsigned nptes, uint64_t dst,
1061 					uint64_t flags)
1062 {
1063 	uint64_t pde;
1064 
	/* In the case of a mixed PT the PDE must point to it */
1066 	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
1067 	    nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
1068 		/* Set the huge page flag to stop scanning at this PDE */
1069 		flags |= AMDGPU_PDE_PTE;
1070 	}
1071 
1072 	if (!(flags & AMDGPU_PDE_PTE)) {
1073 		if (entry->huge) {
1074 			/* Add the entry to the relocated list to update it. */
1075 			entry->huge = false;
1076 			spin_lock(&p->vm->status_lock);
1077 			list_move(&entry->base.vm_status, &p->vm->relocated);
1078 			spin_unlock(&p->vm->status_lock);
1079 		}
1080 		return;
1081 	}
1082 
1083 	entry->huge = true;
1084 	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
1085 
1086 	pde = (entry - parent->entries) * 8;
1087 	if (parent->base.bo->shadow)
1088 		p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
1089 	p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
1090 }
1091 
1092 /**
1093  * amdgpu_vm_update_ptes - make sure that page tables are valid
1094  *
1095  * @params: see amdgpu_pte_update_params definition
1096  * @vm: requested vm
1097  * @start: start of GPU address range
1098  * @end: end of GPU address range
1099  * @dst: destination address to map to, the next dst inside the function
1100  * @flags: mapping flags
1101  *
1102  * Update the page tables in the range @start - @end.
 * Returns 0 for success, -ENOENT when a page table entry is missing.
1104  */
1105 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
1106 				  uint64_t start, uint64_t end,
1107 				  uint64_t dst, uint64_t flags)
1108 {
1109 	struct amdgpu_device *adev = params->adev;
1110 	const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
1111 
1112 	uint64_t addr, pe_start;
1113 	struct amdgpu_bo *pt;
1114 	unsigned nptes;
1115 
1116 	/* walk over the address space and update the page tables */
1117 	for (addr = start; addr < end; addr += nptes,
1118 	     dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
1119 		struct amdgpu_vm_pt *entry, *parent;
1120 
1121 		amdgpu_vm_get_entry(params, addr, &entry, &parent);
1122 		if (!entry)
1123 			return -ENOENT;
1124 
1125 		if ((addr & ~mask) == (end & ~mask))
1126 			nptes = end - addr;
1127 		else
1128 			nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
1129 
1130 		amdgpu_vm_handle_huge_pages(params, entry, parent,
1131 					    nptes, dst, flags);
1132 		/* We don't need to update PTEs for huge pages */
1133 		if (entry->huge)
1134 			continue;
1135 
1136 		pt = entry->base.bo;
1137 		pe_start = (addr & mask) * 8;
1138 		if (pt->shadow)
1139 			params->func(params, pt->shadow, pe_start, dst, nptes,
1140 				     AMDGPU_GPU_PAGE_SIZE, flags);
1141 		params->func(params, pt, pe_start, dst, nptes,
1142 			     AMDGPU_GPU_PAGE_SIZE, flags);
1143 	}
1144 
1145 	return 0;
1146 }
1147 
1148 /*
1149  * amdgpu_vm_frag_ptes - add fragment information to PTEs
1150  *
1151  * @params: see amdgpu_pte_update_params definition
1152  * @vm: requested vm
1153  * @start: first PTE to handle
1154  * @end: last PTE to handle
1155  * @dst: addr those PTEs should point to
1156  * @flags: hw mapping flags
1157  * Returns 0 for success, -EINVAL for failure.
1158  */
1159 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
1160 				uint64_t start, uint64_t end,
1161 				uint64_t dst, uint64_t flags)
1162 {
1163 	/**
1164 	 * The MC L1 TLB supports variable sized pages, based on a fragment
1165 	 * field in the PTE. When this field is set to a non-zero value, page
1166 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
1167 	 * flags are considered valid for all PTEs within the fragment range
1168 	 * and corresponding mappings are assumed to be physically contiguous.
1169 	 *
1170 	 * The L1 TLB can store a single PTE for the whole fragment,
1171 	 * significantly increasing the space available for translation
1172 	 * caching. This leads to large improvements in throughput when the
1173 	 * TLB is under pressure.
1174 	 *
1175 	 * The L2 TLB distributes small and large fragments into two
1176 	 * asymmetric partitions. The large fragment cache is significantly
1177 	 * larger. Thus, we try to use large fragments wherever possible.
1178 	 * Userspace can support this by aligning virtual base address and
1179 	 * allocation size to the fragment size.
1180 	 */
1181 	unsigned max_frag = params->adev->vm_manager.fragment_size;
1182 	int r;
1183 
	/* system pages are not contiguous */
1185 	if (params->src || !(flags & AMDGPU_PTE_VALID))
1186 		return amdgpu_vm_update_ptes(params, start, end, dst, flags);
1187 
1188 	while (start != end) {
1189 		uint64_t frag_flags, frag_end;
1190 		unsigned frag;
1191 
1192 		/* This intentionally wraps around if no bit is set */
1193 		frag = min((unsigned)ffs(start) - 1,
1194 			   (unsigned)fls64(end - start) - 1);
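		/* e.g. a start aligned to 512 GPU pages with at least 512
		 * pages remaining yields frag = 9, i.e. a 2MB fragment with
		 * 4KB GPU pages (assuming max_frag allows it).
		 */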
1195 		if (frag >= max_frag) {
1196 			frag_flags = AMDGPU_PTE_FRAG(max_frag);
1197 			frag_end = end & ~((1ULL << max_frag) - 1);
1198 		} else {
1199 			frag_flags = AMDGPU_PTE_FRAG(frag);
1200 			frag_end = start + (1 << frag);
1201 		}
1202 
1203 		r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
1204 					  flags | frag_flags);
1205 		if (r)
1206 			return r;
1207 
1208 		dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
1209 		start = frag_end;
1210 	}
1211 
1212 	return 0;
1213 }
1214 
1215 /**
1216  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
1217  *
1218  * @adev: amdgpu_device pointer
1219  * @exclusive: fence we need to sync to
1220  * @pages_addr: DMA addresses to use for mapping
1221  * @vm: requested vm
1222  * @start: start of mapped range
1223  * @last: last mapped entry
1224  * @flags: flags for the entries
1225  * @addr: addr to set the area to
1226  * @fence: optional resulting fence
1227  *
1228  * Fill in the page table entries between @start and @last.
1229  * Returns 0 for success, -EINVAL for failure.
1230  */
1231 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
1232 				       struct dma_fence *exclusive,
1233 				       dma_addr_t *pages_addr,
1234 				       struct amdgpu_vm *vm,
1235 				       uint64_t start, uint64_t last,
1236 				       uint64_t flags, uint64_t addr,
1237 				       struct dma_fence **fence)
1238 {
1239 	struct amdgpu_ring *ring;
1240 	void *owner = AMDGPU_FENCE_OWNER_VM;
1241 	unsigned nptes, ncmds, ndw;
1242 	struct amdgpu_job *job;
1243 	struct amdgpu_pte_update_params params;
1244 	struct dma_fence *f = NULL;
1245 	int r;
1246 
1247 	memset(&params, 0, sizeof(params));
1248 	params.adev = adev;
1249 	params.vm = vm;
1250 
1251 	/* sync to everything on unmapping */
1252 	if (!(flags & AMDGPU_PTE_VALID))
1253 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
1254 
1255 	if (vm->use_cpu_for_update) {
		/* params.src is used as a flag to indicate system memory */
1257 		if (pages_addr)
1258 			params.src = ~0;
1259 
1260 		/* Wait for PT BOs to be free. PTs share the same resv. object
1261 		 * as the root PD BO
1262 		 */
1263 		r = amdgpu_vm_wait_pd(adev, vm, owner);
1264 		if (unlikely(r))
1265 			return r;
1266 
1267 		params.func = amdgpu_vm_cpu_set_ptes;
1268 		params.pages_addr = pages_addr;
1269 		return amdgpu_vm_frag_ptes(&params, start, last + 1,
1270 					   addr, flags);
1271 	}
1272 
1273 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
1274 
1275 	nptes = last - start + 1;
1276 
	/*
	 * reserve space for two commands every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 *
	 * The second command is for the shadow pagetables.
	 */
1283 	if (vm->root.base.bo->shadow)
1284 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
1285 	else
1286 		ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
1287 
1288 	/* padding, etc. */
1289 	ndw = 64;
1290 
1291 	if (pages_addr) {
1292 		/* copy commands needed */
1293 		ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
1294 
1295 		/* and also PTEs */
1296 		ndw += nptes * 2;
1297 
1298 		params.func = amdgpu_vm_do_copy_ptes;
1299 
1300 	} else {
1301 		/* set page commands needed */
1302 		ndw += ncmds * 10;
1303 
1304 		/* extra commands for begin/end fragments */
1305 		ndw += 2 * 10 * adev->vm_manager.fragment_size;
1306 
1307 		params.func = amdgpu_vm_do_set_ptes;
1308 	}
1309 
1310 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1311 	if (r)
1312 		return r;
1313 
1314 	params.ib = &job->ibs[0];
1315 
1316 	if (pages_addr) {
1317 		uint64_t *pte;
1318 		unsigned i;
1319 
1320 		/* Put the PTEs at the end of the IB. */
1321 		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
1323 		params.src = job->ibs->gpu_addr + i * 4;
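		/* The PTE values are staged in the tail of the IB itself and
		 * the copy commands read them from there via params.src.
		 */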
1324 
1325 		for (i = 0; i < nptes; ++i) {
1326 			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1327 						    AMDGPU_GPU_PAGE_SIZE);
1328 			pte[i] |= flags;
1329 		}
1330 		addr = 0;
1331 	}
1332 
1333 	r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
1334 	if (r)
1335 		goto error_free;
1336 
1337 	r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
1338 			     owner, false);
1339 	if (r)
1340 		goto error_free;
1341 
1342 	r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
1343 	if (r)
1344 		goto error_free;
1345 
1346 	r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
1347 	if (r)
1348 		goto error_free;
1349 
1350 	amdgpu_ring_pad_ib(ring, params.ib);
1351 	WARN_ON(params.ib->length_dw > ndw);
1352 	r = amdgpu_job_submit(job, ring, &vm->entity,
1353 			      AMDGPU_FENCE_OWNER_VM, &f);
1354 	if (r)
1355 		goto error_free;
1356 
1357 	amdgpu_bo_fence(vm->root.base.bo, f, true);
1358 	dma_fence_put(*fence);
1359 	*fence = f;
1360 	return 0;
1361 
1362 error_free:
1363 	amdgpu_job_free(job);
1364 	return r;
1365 }
1366 
1367 /**
1368  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1369  *
1370  * @adev: amdgpu_device pointer
1371  * @exclusive: fence we need to sync to
1372  * @pages_addr: DMA addresses to use for mapping
1373  * @vm: requested vm
1374  * @mapping: mapped range and flags to use for the update
1375  * @flags: HW flags for the mapping
1376  * @nodes: array of drm_mm_nodes with the MC addresses
1377  * @fence: optional resulting fence
1378  *
1379  * Split the mapping into smaller chunks so that each update fits
 * into an SDMA IB.
1381  * Returns 0 for success, -EINVAL for failure.
1382  */
1383 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1384 				      struct dma_fence *exclusive,
1385 				      dma_addr_t *pages_addr,
1386 				      struct amdgpu_vm *vm,
1387 				      struct amdgpu_bo_va_mapping *mapping,
1388 				      uint64_t flags,
1389 				      struct drm_mm_node *nodes,
1390 				      struct dma_fence **fence)
1391 {
1392 	unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
1393 	uint64_t pfn, start = mapping->start;
1394 	int r;
1395 
	/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits,
	 * but just in case we filter the flags here again.
	 */
1399 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1400 		flags &= ~AMDGPU_PTE_READABLE;
1401 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1402 		flags &= ~AMDGPU_PTE_WRITEABLE;
1403 
1404 	flags &= ~AMDGPU_PTE_EXECUTABLE;
1405 	flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1406 
1407 	flags &= ~AMDGPU_PTE_MTYPE_MASK;
1408 	flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
1409 
1410 	if ((mapping->flags & AMDGPU_PTE_PRT) &&
1411 	    (adev->asic_type >= CHIP_VEGA10)) {
1412 		flags |= AMDGPU_PTE_PRT;
1413 		flags &= ~AMDGPU_PTE_VALID;
1414 	}
1415 
1416 	trace_amdgpu_vm_bo_update(mapping);
1417 
1418 	pfn = mapping->offset >> PAGE_SHIFT;
1419 	if (nodes) {
1420 		while (pfn >= nodes->size) {
1421 			pfn -= nodes->size;
1422 			++nodes;
1423 		}
1424 	}
1425 
1426 	do {
1427 		dma_addr_t *dma_addr = NULL;
1428 		uint64_t max_entries;
1429 		uint64_t addr, last;
1430 
1431 		if (nodes) {
1432 			addr = nodes->start << PAGE_SHIFT;
1433 			max_entries = (nodes->size - pfn) *
1434 				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1435 		} else {
1436 			addr = 0;
1437 			max_entries = S64_MAX;
1438 		}
1439 
1440 		if (pages_addr) {
1441 			uint64_t count;
1442 
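			/* Scan how many of the DMA addresses are physically
			 * contiguous; long runs can be mapped directly while
			 * short ones go through the per page GART lookup
			 * below.
			 */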
1443 			max_entries = min(max_entries, 16ull * 1024ull);
1444 			for (count = 1; count < max_entries; ++count) {
1445 				uint64_t idx = pfn + count;
1446 
1447 				if (pages_addr[idx] !=
1448 				    (pages_addr[idx - 1] + PAGE_SIZE))
1449 					break;
1450 			}
1451 
1452 			if (count < min_linear_pages) {
1453 				addr = pfn << PAGE_SHIFT;
1454 				dma_addr = pages_addr;
1455 			} else {
1456 				addr = pages_addr[pfn];
1457 				max_entries = count;
1458 			}
1459 
1460 		} else if (flags & AMDGPU_PTE_VALID) {
1461 			addr += adev->vm_manager.vram_base_offset;
1462 			addr += pfn << PAGE_SHIFT;
1463 		}
1464 
1465 		last = min((uint64_t)mapping->last, start + max_entries - 1);
1466 		r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
1467 						start, last, flags, addr,
1468 						fence);
1469 		if (r)
1470 			return r;
1471 
1472 		pfn += last - start + 1;
1473 		if (nodes && nodes->size == pfn) {
1474 			pfn = 0;
1475 			++nodes;
1476 		}
1477 		start = last + 1;
1478 
1479 	} while (unlikely(start != mapping->last + 1));
1480 
1481 	return 0;
1482 }
1483 
1484 /**
1485  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1486  *
1487  * @adev: amdgpu_device pointer
1488  * @bo_va: requested BO and VM object
1489  * @clear: if true clear the entries
1490  *
1491  * Fill in the page table entries for @bo_va.
1492  * Returns 0 for success, -EINVAL for failure.
1493  */
1494 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1495 			struct amdgpu_bo_va *bo_va,
1496 			bool clear)
1497 {
1498 	struct amdgpu_bo *bo = bo_va->base.bo;
1499 	struct amdgpu_vm *vm = bo_va->base.vm;
1500 	struct amdgpu_bo_va_mapping *mapping;
1501 	dma_addr_t *pages_addr = NULL;
1502 	struct ttm_mem_reg *mem;
1503 	struct drm_mm_node *nodes;
1504 	struct dma_fence *exclusive, **last_update;
1505 	uint64_t flags;
1506 	int r;
1507 
1508 	if (clear || !bo_va->base.bo) {
1509 		mem = NULL;
1510 		nodes = NULL;
1511 		exclusive = NULL;
1512 	} else {
1513 		struct ttm_dma_tt *ttm;
1514 
1515 		mem = &bo_va->base.bo->tbo.mem;
1516 		nodes = mem->mm_node;
1517 		if (mem->mem_type == TTM_PL_TT) {
1518 			ttm = container_of(bo_va->base.bo->tbo.ttm,
1519 					   struct ttm_dma_tt, ttm);
1520 			pages_addr = ttm->dma_address;
1521 		}
1522 		exclusive = reservation_object_get_excl(bo->tbo.resv);
1523 	}
1524 
1525 	if (bo)
1526 		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1527 	else
1528 		flags = 0x0;
1529 
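	/* Page table updates for per VM BOs (which share the root PD
	 * reservation) and clears are tracked with the VM wide last_update
	 * fence, everything else with the bo_va's own last_pt_update.
	 */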
1530 	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
1531 		last_update = &vm->last_update;
1532 	else
1533 		last_update = &bo_va->last_pt_update;
1534 
1535 	if (!clear && bo_va->base.moved) {
1536 		bo_va->base.moved = false;
1537 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1538 
1539 	} else if (bo_va->cleared != clear) {
1540 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1541 	}
1542 
1543 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1544 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
1545 					       mapping, flags, nodes,
1546 					       last_update);
1547 		if (r)
1548 			return r;
1549 	}
1550 
1551 	if (vm->use_cpu_for_update) {
1552 		/* Flush HDP */
1553 		mb();
1554 		amdgpu_asic_flush_hdp(adev, NULL);
1555 	}
1556 
1557 	spin_lock(&vm->status_lock);
1558 	list_del_init(&bo_va->base.vm_status);
1559 	spin_unlock(&vm->status_lock);
1560 
1561 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1562 	bo_va->cleared = clear;
1563 
1564 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1565 		list_for_each_entry(mapping, &bo_va->valids, list)
1566 			trace_amdgpu_vm_bo_mapping(mapping);
1567 	}
1568 
1569 	return 0;
1570 }
1571 
1572 /**
1573  * amdgpu_vm_update_prt_state - update the global PRT state
1574  */
1575 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
1576 {
1577 	unsigned long flags;
1578 	bool enable;
1579 
1580 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1581 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1582 	adev->gmc.gmc_funcs->set_prt(adev, enable);
1583 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1584 }
1585 
1586 /**
1587  * amdgpu_vm_prt_get - add a PRT user
1588  */
1589 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
1590 {
1591 	if (!adev->gmc.gmc_funcs->set_prt)
1592 		return;
1593 
1594 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1595 		amdgpu_vm_update_prt_state(adev);
1596 }
1597 
1598 /**
1599  * amdgpu_vm_prt_put - drop a PRT user
1600  */
1601 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
1602 {
1603 	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1604 		amdgpu_vm_update_prt_state(adev);
1605 }
1606 
1607 /**
1608  * amdgpu_vm_prt_cb - callback for updating the PRT status
1609  */
1610 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
1611 {
1612 	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1613 
1614 	amdgpu_vm_prt_put(cb->adev);
1615 	kfree(cb);
1616 }
1617 
1618 /**
1619  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1620  */
1621 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
1622 				 struct dma_fence *fence)
1623 {
1624 	struct amdgpu_prt_cb *cb;
1625 
1626 	if (!adev->gmc.gmc_funcs->set_prt)
1627 		return;
1628 
1629 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1630 	if (!cb) {
1631 		/* Last resort when we are OOM */
1632 		if (fence)
1633 			dma_fence_wait(fence, false);
1634 
1635 		amdgpu_vm_prt_put(adev);
1636 	} else {
1637 		cb->adev = adev;
1638 		if (!fence || dma_fence_add_callback(fence, &cb->cb,
1639 						     amdgpu_vm_prt_cb))
1640 			amdgpu_vm_prt_cb(fence, &cb->cb);
1641 	}
1642 }
1643 
1644 /**
1645  * amdgpu_vm_free_mapping - free a mapping
1646  *
1647  * @adev: amdgpu_device pointer
1648  * @vm: requested vm
1649  * @mapping: mapping to be freed
1650  * @fence: fence of the unmap operation
1651  *
1652  * Free a mapping and make sure we decrease the PRT usage count if applicable.
1653  */
1654 static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
1655 				   struct amdgpu_vm *vm,
1656 				   struct amdgpu_bo_va_mapping *mapping,
1657 				   struct dma_fence *fence)
1658 {
1659 	if (mapping->flags & AMDGPU_PTE_PRT)
1660 		amdgpu_vm_add_prt_cb(adev, fence);
1661 	kfree(mapping);
1662 }
1663 
1664 /**
1665  * amdgpu_vm_prt_fini - finish all prt mappings
1666  *
1667  * @adev: amdgpu_device pointer
1668  * @vm: requested vm
1669  *
1670  * Register a cleanup callback to disable PRT support after VM dies.
1671  */
1672 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1673 {
1674 	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
1675 	struct dma_fence *excl, **shared;
1676 	unsigned i, shared_count;
1677 	int r;
1678 
1679 	r = reservation_object_get_fences_rcu(resv, &excl,
1680 					      &shared_count, &shared);
1681 	if (r) {
1682 		/* Not enough memory to grab the fence list, as last resort
1683 		 * block for all the fences to complete.
1684 		 */
1685 		reservation_object_wait_timeout_rcu(resv, true, false,
1686 						    MAX_SCHEDULE_TIMEOUT);
1687 		return;
1688 	}
1689 
1690 	/* Add a callback for each fence in the reservation object */
1691 	amdgpu_vm_prt_get(adev);
1692 	amdgpu_vm_add_prt_cb(adev, excl);
1693 
1694 	for (i = 0; i < shared_count; ++i) {
1695 		amdgpu_vm_prt_get(adev);
1696 		amdgpu_vm_add_prt_cb(adev, shared[i]);
1697 	}
1698 
1699 	kfree(shared);
1700 }
1701 
1702 /**
1703  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1704  *
1705  * @adev: amdgpu_device pointer
1706  * @vm: requested vm
1707  * @fence: optional resulting fence (unchanged if no work needed to be done
1708  * or if an error occurred)
1709  *
1710  * Make sure all freed BOs are cleared in the PT.
1711  * Returns 0 for success.
1712  *
1713  * PTs have to be reserved and mutex must be locked!
1714  */
1715 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1716 			  struct amdgpu_vm *vm,
1717 			  struct dma_fence **fence)
1718 {
1719 	struct amdgpu_bo_va_mapping *mapping;
1720 	uint64_t init_pte_value = 0;
1721 	struct dma_fence *f = NULL;
1722 	int r;
1723 
1724 	while (!list_empty(&vm->freed)) {
1725 		mapping = list_first_entry(&vm->freed,
1726 			struct amdgpu_bo_va_mapping, list);
1727 		list_del(&mapping->list);
1728 
1729 		if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
1730 			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
1731 
1732 		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
1733 						mapping->start, mapping->last,
1734 						init_pte_value, 0, &f);
1735 		amdgpu_vm_free_mapping(adev, vm, mapping, f);
1736 		if (r) {
1737 			dma_fence_put(f);
1738 			return r;
1739 		}
1740 	}
1741 
1742 	if (fence && f) {
1743 		dma_fence_put(*fence);
1744 		*fence = f;
1745 	} else {
1746 		dma_fence_put(f);
1747 	}
1748 
1749 	return 0;
}
1752 
1753 /**
1754  * amdgpu_vm_handle_moved - handle moved BOs in the PT
1755  *
1756  * @adev: amdgpu_device pointer
1757  * @vm: requested vm
1759  *
1760  * Make sure all BOs which are moved are updated in the PTs.
1761  * Returns 0 for success.
1762  *
1763  * PTs have to be reserved!
1764  */
1765 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
1766 			   struct amdgpu_vm *vm)
1767 {
1768 	bool clear;
1769 	int r = 0;
1770 
1771 	spin_lock(&vm->status_lock);
1772 	while (!list_empty(&vm->moved)) {
1773 		struct amdgpu_bo_va *bo_va;
1774 		struct reservation_object *resv;
1775 
1776 		bo_va = list_first_entry(&vm->moved,
1777 			struct amdgpu_bo_va, base.vm_status);
1778 		spin_unlock(&vm->status_lock);
1779 
1780 		resv = bo_va->base.bo->tbo.resv;
1781 
		/* Per VM BOs never need to be cleared in the page tables */
1783 		if (resv == vm->root.base.bo->tbo.resv)
1784 			clear = false;
1785 		/* Try to reserve the BO to avoid clearing its ptes */
1786 		else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
1787 			clear = false;
1788 		/* Somebody else is using the BO right now */
1789 		else
1790 			clear = true;
1791 
1792 		r = amdgpu_vm_bo_update(adev, bo_va, clear);
1793 		if (r)
1794 			return r;
1795 
1796 		if (!clear && resv != vm->root.base.bo->tbo.resv)
1797 			reservation_object_unlock(resv);
1798 
1799 		spin_lock(&vm->status_lock);
1800 	}
1801 	spin_unlock(&vm->status_lock);
1802 
1803 	return r;
1804 }
1805 
1806 /**
1807  * amdgpu_vm_bo_add - add a bo to a specific vm
1808  *
1809  * @adev: amdgpu_device pointer
1810  * @vm: requested vm
1811  * @bo: amdgpu buffer object
1812  *
1813  * Add @bo into the requested vm.
1814  * Add @bo to the list of bos associated with the vm
1815  * Returns newly added bo_va or NULL for failure
1816  *
1817  * Object has to be reserved!
1818  */
1819 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1820 				      struct amdgpu_vm *vm,
1821 				      struct amdgpu_bo *bo)
1822 {
1823 	struct amdgpu_bo_va *bo_va;
1824 
1825 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1826 	if (bo_va == NULL) {
1827 		return NULL;
1828 	}
1829 	bo_va->base.vm = vm;
1830 	bo_va->base.bo = bo;
1831 	INIT_LIST_HEAD(&bo_va->base.bo_list);
1832 	INIT_LIST_HEAD(&bo_va->base.vm_status);
1833 
1834 	bo_va->ref_count = 1;
1835 	INIT_LIST_HEAD(&bo_va->valids);
1836 	INIT_LIST_HEAD(&bo_va->invalids);
1837 
1838 	if (!bo)
1839 		return bo_va;
1840 
1841 	list_add_tail(&bo_va->base.bo_list, &bo->va);
1842 
1843 	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
1844 		return bo_va;
1845 
1846 	if (bo->preferred_domains &
1847 	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
1848 		return bo_va;
1849 
	/*
	 * We checked all the prerequisites, but it looks like this per VM BO
	 * is currently evicted. Add the BO to the evicted list to make sure it
	 * is validated on next VM use to avoid faults.
	 */
1855 	spin_lock(&vm->status_lock);
1856 	list_move_tail(&bo_va->base.vm_status, &vm->evicted);
1857 	spin_unlock(&vm->status_lock);
1858 
1859 	return bo_va;
1860 }
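
/*
 * Minimal usage sketch (hypothetical call site, error handling trimmed);
 * as documented above, the BO has to be reserved around the call:
 *
 *	r = amdgpu_bo_reserve(bo, true);
 *	if (r)
 *		return r;
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	amdgpu_bo_unreserve(bo);
 *	if (!bo_va)
 *		return -ENOMEM;
 */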
1861 
1862 
1863 /**
1864  * amdgpu_vm_bo_insert_map - insert a new mapping
1865  *
1866  * @adev: amdgpu_device pointer
1867  * @bo_va: bo_va to store the address
1868  * @mapping: the mapping to insert
1869  *
1870  * Insert a new mapping into all structures.
1871  */
1872 static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
1873 				    struct amdgpu_bo_va *bo_va,
1874 				    struct amdgpu_bo_va_mapping *mapping)
1875 {
1876 	struct amdgpu_vm *vm = bo_va->base.vm;
1877 	struct amdgpu_bo *bo = bo_va->base.bo;
1878 
1879 	mapping->bo_va = bo_va;
1880 	list_add(&mapping->list, &bo_va->invalids);
1881 	amdgpu_vm_it_insert(mapping, &vm->va);
1882 
1883 	if (mapping->flags & AMDGPU_PTE_PRT)
1884 		amdgpu_vm_prt_get(adev);
1885 
1886 	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
1887 		spin_lock(&vm->status_lock);
1888 		if (list_empty(&bo_va->base.vm_status))
1889 			list_add(&bo_va->base.vm_status, &vm->moved);
1890 		spin_unlock(&vm->status_lock);
1891 	}
1892 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1893 }
1894 
1895 /**
1896  * amdgpu_vm_bo_map - map bo inside a vm
1897  *
1898  * @adev: amdgpu_device pointer
1899  * @bo_va: bo_va to store the address
1900  * @saddr: where to map the BO
1901  * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
1902  * @flags: attributes of pages (read/write/valid/etc.)
1903  *
1904  * Add a mapping of the BO at the specified addr into the VM.
1905  * Returns 0 for success, error for failure.
1906  *
1907  * Object has to be reserved and unreserved outside!
1908  */
1909 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1910 		     struct amdgpu_bo_va *bo_va,
1911 		     uint64_t saddr, uint64_t offset,
1912 		     uint64_t size, uint64_t flags)
1913 {
1914 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1915 	struct amdgpu_bo *bo = bo_va->base.bo;
1916 	struct amdgpu_vm *vm = bo_va->base.vm;
1917 	uint64_t eaddr;
1918 
1919 	/* validate the parameters */
1920 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1921 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1922 		return -EINVAL;
1923 
1924 	/* make sure object fit at this offset */
1925 	eaddr = saddr + size - 1;
1926 	if (saddr >= eaddr ||
1927 	    (bo && offset + size > amdgpu_bo_size(bo)))
1928 		return -EINVAL;
1929 
1930 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1931 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1932 
1933 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1934 	if (tmp) {
1935 		/* bo and tmp overlap, invalid addr */
1936 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1937 			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1938 			tmp->start, tmp->last + 1);
1939 		return -EINVAL;
1940 	}
1941 
1942 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1943 	if (!mapping)
1944 		return -ENOMEM;
1945 
1946 	mapping->start = saddr;
1947 	mapping->last = eaddr;
1948 	mapping->offset = offset;
1949 	mapping->flags = flags;
1950 
1951 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1952 
1953 	return 0;
1954 }
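
/*
 * Example (hypothetical values): map the first 1 MB of a BO that is at
 * least that large at GPU VA 0x400000. VA, offset and size must all be
 * multiples of AMDGPU_GPU_PAGE_SIZE or the validation above rejects the
 * request; the PTE flag names are assumed from the driver's page table
 * flag definitions and are just one valid combination:
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x400000, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 */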
1955 
1956 /**
1957  * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1958  *
1959  * @adev: amdgpu_device pointer
1960  * @bo_va: bo_va to store the address
1961  * @saddr: where to map the BO
1962  * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
1963  * @flags: attributes of pages (read/write/valid/etc.)
1964  *
1965  * Add a mapping of the BO at the specified addr into the VM. Replace existing
1966  * mappings as we do so.
1967  * Returns 0 for success, error for failure.
1968  *
1969  * Object has to be reserved and unreserved outside!
1970  */
1971 int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
1972 			     struct amdgpu_bo_va *bo_va,
1973 			     uint64_t saddr, uint64_t offset,
1974 			     uint64_t size, uint64_t flags)
1975 {
1976 	struct amdgpu_bo_va_mapping *mapping;
1977 	struct amdgpu_bo *bo = bo_va->base.bo;
1978 	uint64_t eaddr;
1979 	int r;
1980 
1981 	/* validate the parameters */
1982 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1983 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1984 		return -EINVAL;
1985 
1986 	/* make sure object fit at this offset */
1987 	eaddr = saddr + size - 1;
1988 	if (saddr >= eaddr ||
1989 	    (bo && offset + size > amdgpu_bo_size(bo)))
1990 		return -EINVAL;
1991 
1992 	/* Allocate all the needed memory */
1993 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1994 	if (!mapping)
1995 		return -ENOMEM;
1996 
1997 	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1998 	if (r) {
1999 		kfree(mapping);
2000 		return r;
2001 	}
2002 
2003 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2004 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2005 
2006 	mapping->start = saddr;
2007 	mapping->last = eaddr;
2008 	mapping->offset = offset;
2009 	mapping->flags = flags;
2010 
2011 	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
2012 
2013 	return 0;
2014 }
2015 
2016 /**
2017  * amdgpu_vm_bo_unmap - remove bo mapping from vm
2018  *
2019  * @adev: amdgpu_device pointer
2020  * @bo_va: bo_va to remove the address from
2021  * @saddr: where the BO is mapped
2022  *
2023  * Remove a mapping of the BO at the specified addr from the VM.
2024  * Returns 0 for success, error for failure.
2025  *
2026  * Object has to be reserved and unreserved outside!
2027  */
2028 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2029 		       struct amdgpu_bo_va *bo_va,
2030 		       uint64_t saddr)
2031 {
2032 	struct amdgpu_bo_va_mapping *mapping;
2033 	struct amdgpu_vm *vm = bo_va->base.vm;
2034 	bool valid = true;
2035 
2036 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2037 
2038 	list_for_each_entry(mapping, &bo_va->valids, list) {
2039 		if (mapping->start == saddr)
2040 			break;
2041 	}
2042 
2043 	if (&mapping->list == &bo_va->valids) {
2044 		valid = false;
2045 
2046 		list_for_each_entry(mapping, &bo_va->invalids, list) {
2047 			if (mapping->start == saddr)
2048 				break;
2049 		}
2050 
2051 		if (&mapping->list == &bo_va->invalids)
2052 			return -ENOENT;
2053 	}
2054 
2055 	list_del(&mapping->list);
2056 	amdgpu_vm_it_remove(mapping, &vm->va);
2057 	mapping->bo_va = NULL;
2058 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2059 
2060 	if (valid)
2061 		list_add(&mapping->list, &vm->freed);
2062 	else
2063 		amdgpu_vm_free_mapping(adev, vm, mapping,
2064 				       bo_va->last_pt_update);
2065 
2066 	return 0;
2067 }
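
/*
 * Example: tear down the mapping from the amdgpu_vm_bo_map() sketch above;
 * only the start address is needed to identify it:
 *
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, 0x400000);
 */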
2068 
2069 /**
2070  * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
2071  *
2072  * @adev: amdgpu_device pointer
2073  * @vm: VM structure to use
2074  * @saddr: start of the range
2075  * @size: size of the range
2076  *
2077  * Remove all mappings in a range, splitting them as appropriate.
2078  * Returns 0 for success, error for failure.
2079  */
2080 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
2081 				struct amdgpu_vm *vm,
2082 				uint64_t saddr, uint64_t size)
2083 {
2084 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
2085 	LIST_HEAD(removed);
2086 	uint64_t eaddr;
2087 
2088 	eaddr = saddr + size - 1;
2089 	saddr /= AMDGPU_GPU_PAGE_SIZE;
2090 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
2091 
2092 	/* Allocate all the needed memory */
2093 	before = kzalloc(sizeof(*before), GFP_KERNEL);
2094 	if (!before)
2095 		return -ENOMEM;
2096 	INIT_LIST_HEAD(&before->list);
2097 
2098 	after = kzalloc(sizeof(*after), GFP_KERNEL);
2099 	if (!after) {
2100 		kfree(before);
2101 		return -ENOMEM;
2102 	}
2103 	INIT_LIST_HEAD(&after->list);
2104 
2105 	/* Now gather all removed mappings */
2106 	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2107 	while (tmp) {
2108 		/* Remember mapping split at the start */
2109 		if (tmp->start < saddr) {
2110 			before->start = tmp->start;
2111 			before->last = saddr - 1;
2112 			before->offset = tmp->offset;
2113 			before->flags = tmp->flags;
2114 			list_add(&before->list, &tmp->list);
2115 		}
2116 
2117 		/* Remember mapping split at the end */
2118 		if (tmp->last > eaddr) {
2119 			after->start = eaddr + 1;
2120 			after->last = tmp->last;
2121 			after->offset = tmp->offset;
2122 			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2123 			after->flags = tmp->flags;
2124 			list_add(&after->list, &tmp->list);
2125 		}
2126 
2127 		list_del(&tmp->list);
2128 		list_add(&tmp->list, &removed);
2129 
2130 		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
2131 	}
2132 
2133 	/* And free them up */
2134 	list_for_each_entry_safe(tmp, next, &removed, list) {
2135 		amdgpu_vm_it_remove(tmp, &vm->va);
2136 		list_del(&tmp->list);
2137 
2138 		if (tmp->start < saddr)
2139 			tmp->start = saddr;
2140 		if (tmp->last > eaddr)
2141 			tmp->last = eaddr;
2142 
2143 		tmp->bo_va = NULL;
2144 		list_add(&tmp->list, &vm->freed);
2145 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
2146 	}
2147 
2148 	/* Insert partial mapping before the range */
2149 	if (!list_empty(&before->list)) {
2150 		amdgpu_vm_it_insert(before, &vm->va);
2151 		if (before->flags & AMDGPU_PTE_PRT)
2152 			amdgpu_vm_prt_get(adev);
2153 	} else {
2154 		kfree(before);
2155 	}
2156 
2157 	/* Insert partial mapping after the range */
2158 	if (!list_empty(&after->list)) {
2159 		amdgpu_vm_it_insert(after, &vm->va);
2160 		if (after->flags & AMDGPU_PTE_PRT)
2161 			amdgpu_vm_prt_get(adev);
2162 	} else {
2163 		kfree(after);
2164 	}
2165 
2166 	return 0;
2167 }
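
/*
 * Worked example (hypothetical addresses): assume a single mapping covers
 * GPU VA 0x100000..0x2fffff and the middle of it is cleared:
 *
 *	r = amdgpu_vm_bo_clear_mappings(adev, vm, 0x180000, 0x100000);
 *
 * The original mapping is clipped to the cleared range 0x180000..0x27ffff
 * and moved to vm->freed, while two remainders are re-inserted into the
 * interval tree: "before" covering 0x100000..0x17ffff with the original BO
 * offset, and "after" covering 0x280000..0x2fffff with its BO offset
 * advanced by the distance from the original start (0x180000 bytes here).
 */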
2168 
2169 /**
2170  * amdgpu_vm_bo_lookup_mapping - find mapping by address
2171  *
2172  * @vm: the requested VM
 * @addr: the address to look up (in GPU page units)
2173  *
2174  * Find a mapping by its address.
2175  */
2176 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
2177 							 uint64_t addr)
2178 {
2179 	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2180 }
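
/*
 * Example: the interval tree is indexed in GPU page units, so byte
 * addresses have to be converted before the lookup, e.g.:
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr >> AMDGPU_GPU_PAGE_SHIFT);
 */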
2181 
2182 /**
2183  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
2184  *
2185  * @adev: amdgpu_device pointer
2186  * @bo_va: requested bo_va
2187  *
2188  * Remove @bo_va->bo from the requested vm.
2189  *
2190  * Object has to be reserved!
2191  */
2192 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2193 		      struct amdgpu_bo_va *bo_va)
2194 {
2195 	struct amdgpu_bo_va_mapping *mapping, *next;
2196 	struct amdgpu_vm *vm = bo_va->base.vm;
2197 
2198 	list_del(&bo_va->base.bo_list);
2199 
2200 	spin_lock(&vm->status_lock);
2201 	list_del(&bo_va->base.vm_status);
2202 	spin_unlock(&vm->status_lock);
2203 
2204 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2205 		list_del(&mapping->list);
2206 		amdgpu_vm_it_remove(mapping, &vm->va);
2207 		mapping->bo_va = NULL;
2208 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
2209 		list_add(&mapping->list, &vm->freed);
2210 	}
2211 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2212 		list_del(&mapping->list);
2213 		amdgpu_vm_it_remove(mapping, &vm->va);
2214 		amdgpu_vm_free_mapping(adev, vm, mapping,
2215 				       bo_va->last_pt_update);
2216 	}
2217 
2218 	dma_fence_put(bo_va->last_pt_update);
2219 	kfree(bo_va);
2220 }
2221 
2222 /**
2223  * amdgpu_vm_bo_invalidate - mark the bo as invalid
2224  *
2225  * @adev: amdgpu_device pointer
2226  * @bo: amdgpu buffer object
 * @evicted: whether the BO is currently evicted
2228  *
2229  * Mark @bo as invalid.
2230  */
2231 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2232 			     struct amdgpu_bo *bo, bool evicted)
2233 {
2234 	struct amdgpu_vm_bo_base *bo_base;
2235 
2236 	list_for_each_entry(bo_base, &bo->va, bo_list) {
2237 		struct amdgpu_vm *vm = bo_base->vm;
2238 
2239 		bo_base->moved = true;
2240 		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
2241 			spin_lock(&bo_base->vm->status_lock);
2242 			if (bo->tbo.type == ttm_bo_type_kernel)
2243 				list_move(&bo_base->vm_status, &vm->evicted);
2244 			else
2245 				list_move_tail(&bo_base->vm_status,
2246 					       &vm->evicted);
2247 			spin_unlock(&bo_base->vm->status_lock);
2248 			continue;
2249 		}
2250 
2251 		if (bo->tbo.type == ttm_bo_type_kernel) {
2252 			spin_lock(&bo_base->vm->status_lock);
2253 			if (list_empty(&bo_base->vm_status))
2254 				list_add(&bo_base->vm_status, &vm->relocated);
2255 			spin_unlock(&bo_base->vm->status_lock);
2256 			continue;
2257 		}
2258 
2259 		spin_lock(&bo_base->vm->status_lock);
2260 		if (list_empty(&bo_base->vm_status))
2261 			list_add(&bo_base->vm_status, &vm->moved);
2262 		spin_unlock(&bo_base->vm->status_lock);
2263 	}
2264 }
2265 
2266 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2267 {
2268 	/* Total bits covered by PD + PTs */
2269 	unsigned bits = ilog2(vm_size) + 18;
2270 
2271 	/* Make sure the PD is 4K in size up to an 8GB address space.
2272 	   Above that, split the bits equally between PD and PTs. */
2273 	if (vm_size <= 8)
2274 		return (bits - 9);
2275 	else
2276 		return ((bits + 3) / 2);
2277 }
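
/*
 * Worked examples: for vm_size = 8 (GB), bits = ilog2(8) + 18 = 21 and the
 * function returns 21 - 9 = 12 bits per PT, leaving 9 bits for the PD
 * (512 entries, 4KB). For vm_size = 64 (GB), bits = 24 and the result is
 * (24 + 3) / 2 = 13, splitting the remaining bits roughly equally between
 * PD (11 bits) and PTs (13 bits).
 */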
2278 
2279 /**
2280  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2281  *
2282  * @adev: amdgpu_device pointer
2283  * @vm_size: the default VM size if it's set to auto
 * @fragment_size_default: default fragment size if it's set to auto
 * @max_level: maximum number of page table levels allowed
 * @max_bits: maximum size of the address space in bits
2284  */
2285 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
2286 			   uint32_t fragment_size_default, unsigned max_level,
2287 			   unsigned max_bits)
2288 {
2289 	uint64_t tmp;
2290 
2291 	/* adjust vm size first */
2292 	if (amdgpu_vm_size != -1) {
2293 		unsigned max_size = 1 << (max_bits - 30);
2294 
2295 		vm_size = amdgpu_vm_size;
2296 		if (vm_size > max_size) {
2297 			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2298 				 amdgpu_vm_size, max_size);
2299 			vm_size = max_size;
2300 		}
2301 	}
2302 
2303 	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2304 
2305 	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2306 	if (amdgpu_vm_block_size != -1)
2307 		tmp >>= amdgpu_vm_block_size - 9;
2308 	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2309 	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
2310 	switch (adev->vm_manager.num_level) {
2311 	case 3:
2312 		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2313 		break;
2314 	case 2:
2315 		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2316 		break;
2317 	case 1:
2318 		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2319 		break;
2320 	default:
2321 		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2322 	}
2323 	/* block size depends on vm size and hw setup */
2324 	if (amdgpu_vm_block_size != -1)
2325 		adev->vm_manager.block_size =
2326 			min((unsigned)amdgpu_vm_block_size, max_bits
2327 			    - AMDGPU_GPU_PAGE_SHIFT
2328 			    - 9 * adev->vm_manager.num_level);
2329 	else if (adev->vm_manager.num_level > 1)
2330 		adev->vm_manager.block_size = 9;
2331 	else
2332 		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2333 
2334 	if (amdgpu_vm_fragment_size == -1)
2335 		adev->vm_manager.fragment_size = fragment_size_default;
2336 	else
2337 		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2338 
2339 	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2340 		 vm_size, adev->vm_manager.num_level + 1,
2341 		 adev->vm_manager.block_size,
2342 		 adev->vm_manager.fragment_size);
2343 }
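
/*
 * Worked example (hypothetical parameters): vm_size = 256 (GB), block and
 * fragment size left at auto, max_level = 3. Then max_pfn = 256 << 18 =
 * 2^26 pages, fls64(2^26) - 1 = 26 and DIV_ROUND_UP(26, 9) - 1 = 2, so two
 * levels are used (root_level = AMDGPU_VM_PDB1) with a block size of 9 bits
 * and fragment_size_default.
 */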
2344 
2345 /**
2346  * amdgpu_vm_init - initialize a vm instance
2347  *
2348  * @adev: amdgpu_device pointer
2349  * @vm: requested vm
2350  * @vm_context: Indicates whether it is a GFX or Compute context
 * @pasid: Process address space identifier
2351  *
2352  * Init @vm fields.
2353  */
2354 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2355 		   int vm_context, unsigned int pasid)
2356 {
2357 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2358 		AMDGPU_VM_PTE_COUNT(adev) * 8);
2359 	unsigned ring_instance;
2360 	struct amdgpu_ring *ring;
2361 	struct drm_sched_rq *rq;
2362 	unsigned long size;
2363 	uint64_t flags;
2364 	int r, i;
2365 
2366 	vm->va = RB_ROOT_CACHED;
2367 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2368 		vm->reserved_vmid[i] = NULL;
2369 	spin_lock_init(&vm->status_lock);
2370 	INIT_LIST_HEAD(&vm->evicted);
2371 	INIT_LIST_HEAD(&vm->relocated);
2372 	INIT_LIST_HEAD(&vm->moved);
2373 	INIT_LIST_HEAD(&vm->freed);
2374 
2375 	/* create scheduler entity for page table updates */
2376 
2377 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
2378 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
2379 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
2380 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
2381 	r = drm_sched_entity_init(&ring->sched, &vm->entity,
2382 				  rq, amdgpu_sched_jobs, NULL);
2383 	if (r)
2384 		return r;
2385 
2386 	vm->pte_support_ats = false;
2387 
2388 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
2389 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2390 						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2391 
2392 		if (adev->asic_type == CHIP_RAVEN)
2393 			vm->pte_support_ats = true;
2394 	} else {
2395 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2396 						AMDGPU_VM_USE_CPU_FOR_GFX);
2397 	}
2398 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
2399 			 vm->use_cpu_for_update ? "CPU" : "SDMA");
2400 	WARN_ONCE((vm->use_cpu_for_update && !amdgpu_vm_is_large_bar(adev)),
2401 		  "CPU update of VM recommended only for large BAR system\n");
2402 	vm->last_update = NULL;
2403 
2404 	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
2405 	if (vm->use_cpu_for_update)
2406 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
2407 	else
2408 		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
2409 				AMDGPU_GEM_CREATE_SHADOW);
2410 
2411 	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2412 	r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
2413 			     ttm_bo_type_kernel, NULL, &vm->root.base.bo);
2414 	if (r)
2415 		goto error_free_sched_entity;
2416 
2417 	r = amdgpu_bo_reserve(vm->root.base.bo, true);
2418 	if (r)
2419 		goto error_free_root;
2420 
2421 	r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
2422 			       adev->vm_manager.root_level,
2423 			       vm->pte_support_ats);
2424 	if (r)
2425 		goto error_unreserve;
2426 
2427 	vm->root.base.vm = vm;
2428 	list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
2429 	list_add_tail(&vm->root.base.vm_status, &vm->evicted);
2430 	amdgpu_bo_unreserve(vm->root.base.bo);
2431 
2432 	if (pasid) {
2433 		unsigned long flags;
2434 
2435 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2436 		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
2437 			      GFP_ATOMIC);
2438 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2439 		if (r < 0)
2440 			goto error_free_root;
2441 
2442 		vm->pasid = pasid;
2443 	}
2444 
2445 	INIT_KFIFO(vm->faults);
2446 	vm->fault_credit = 16;
2447 
2448 	return 0;
2449 
2450 error_unreserve:
2451 	amdgpu_bo_unreserve(vm->root.base.bo);
2452 
2453 error_free_root:
2454 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
2455 	amdgpu_bo_unref(&vm->root.base.bo);
2456 	vm->root.base.bo = NULL;
2457 
2458 error_free_sched_entity:
2459 	drm_sched_entity_fini(&ring->sched, &vm->entity);
2460 
2461 	return r;
2462 }
2463 
2464 /**
2465  * amdgpu_vm_free_levels - free PD/PT levels
2466  *
2467  * @adev: amdgpu device structure
2468  * @parent: PD/PT starting level to free
2469  * @level: level of parent structure
2470  *
2471  * Free the page directory or page table level and all sub levels.
2472  */
2473 static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
2474 				  struct amdgpu_vm_pt *parent,
2475 				  unsigned level)
2476 {
2477 	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
2478 
2479 	if (parent->base.bo) {
2480 		list_del(&parent->base.bo_list);
2481 		list_del(&parent->base.vm_status);
2482 		amdgpu_bo_unref(&parent->base.bo->shadow);
2483 		amdgpu_bo_unref(&parent->base.bo);
2484 	}
2485 
2486 	if (parent->entries)
2487 		for (i = 0; i < num_entries; i++)
2488 			amdgpu_vm_free_levels(adev, &parent->entries[i],
2489 					      level + 1);
2490 
2491 	kvfree(parent->entries);
2492 }
2493 
2494 /**
2495  * amdgpu_vm_fini - tear down a vm instance
2496  *
2497  * @adev: amdgpu_device pointer
2498  * @vm: requested vm
2499  *
2500  * Tear down @vm.
2501  * Unbind the VM and remove all bos from the vm bo list
2502  */
2503 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2504 {
2505 	struct amdgpu_bo_va_mapping *mapping, *tmp;
2506 	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2507 	struct amdgpu_bo *root;
2508 	u64 fault;
2509 	int i, r;
2510 
2511 	/* Clear pending page faults from IH when the VM is destroyed */
2512 	while (kfifo_get(&vm->faults, &fault))
2513 		amdgpu_ih_clear_fault(adev, fault);
2514 
2515 	if (vm->pasid) {
2516 		unsigned long flags;
2517 
2518 		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
2519 		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
2520 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
2521 	}
2522 
2523 	drm_sched_entity_fini(vm->entity.sched, &vm->entity);
2524 
2525 	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
2526 		dev_err(adev->dev, "still active bo inside vm\n");
2528 	rbtree_postorder_for_each_entry_safe(mapping, tmp,
2529 					     &vm->va.rb_root, rb) {
2530 		list_del(&mapping->list);
2531 		amdgpu_vm_it_remove(mapping, &vm->va);
2532 		kfree(mapping);
2533 	}
2534 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2535 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
2536 			amdgpu_vm_prt_fini(adev, vm);
2537 			prt_fini_needed = false;
2538 		}
2539 
2540 		list_del(&mapping->list);
2541 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
2542 	}
2543 
2544 	root = amdgpu_bo_ref(vm->root.base.bo);
2545 	r = amdgpu_bo_reserve(root, true);
2546 	if (r) {
2547 		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
2548 	} else {
2549 		amdgpu_vm_free_levels(adev, &vm->root,
2550 				      adev->vm_manager.root_level);
2551 		amdgpu_bo_unreserve(root);
2552 	}
2553 	amdgpu_bo_unref(&root);
2554 	dma_fence_put(vm->last_update);
2555 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
2556 		amdgpu_vmid_free_reserved(adev, vm, i);
2557 }
2558 
2559 /**
2560  * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
2561  *
2562  * @adev: amdgpu_device pointer
2563  * @pasid: PASID to identify the VM
2564  *
2565  * This function is expected to be called in interrupt context. Returns
2566  * true if there was fault credit, false otherwise.
2567  */
2568 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
2569 				  unsigned int pasid)
2570 {
2571 	struct amdgpu_vm *vm;
2572 
2573 	spin_lock(&adev->vm_manager.pasid_lock);
2574 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
2575 	if (!vm) {
2576 		/* VM not found, can't track fault credit */
2577 		spin_unlock(&adev->vm_manager.pasid_lock);
2578 		return true;
2579 	}
2580 
2581 	/* No lock needed. Only accessed by the IRQ handler */
2582 	if (!vm->fault_credit) {
2583 		/* Too many faults in this VM */
2584 		spin_unlock(&adev->vm_manager.pasid_lock);
2585 		return false;
2586 	}
2587 
2588 	vm->fault_credit--;
2589 	spin_unlock(&adev->vm_manager.pasid_lock);
2590 	return true;
2591 }
2592 
2593 /**
2594  * amdgpu_vm_manager_init - init the VM manager
2595  *
2596  * @adev: amdgpu_device pointer
2597  *
2598  * Initialize the VM manager structures
2599  */
2600 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
2601 {
2602 	unsigned i;
2603 
2604 	amdgpu_vmid_mgr_init(adev);
2605 
2606 	adev->vm_manager.fence_context =
2607 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
2608 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
2609 		adev->vm_manager.seqno[i] = 0;
2610 
2611 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
2612 	spin_lock_init(&adev->vm_manager.prt_lock);
2613 	atomic_set(&adev->vm_manager.num_prt_users, 0);
2614 
2615 	/* If not overridden by the user, compute VM page tables are updated
2616 	 * by the CPU only on large BAR systems.
2617 	 */
2618 #ifdef CONFIG_X86_64
2619 	if (amdgpu_vm_update_mode == -1) {
2620 		if (amdgpu_vm_is_large_bar(adev))
2621 			adev->vm_manager.vm_update_mode =
2622 				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
2623 		else
2624 			adev->vm_manager.vm_update_mode = 0;
2625 	} else
2626 		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2627 #else
2628 	adev->vm_manager.vm_update_mode = 0;
2629 #endif
2630 
2631 	idr_init(&adev->vm_manager.pasid_idr);
2632 	spin_lock_init(&adev->vm_manager.pasid_lock);
2633 }
2634 
2635 /**
2636  * amdgpu_vm_manager_fini - cleanup VM manager
2637  *
2638  * @adev: amdgpu_device pointer
2639  *
2640  * Cleanup the VM manager and free resources.
2641  */
2642 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
2643 {
2644 	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
2645 	idr_destroy(&adev->vm_manager.pasid_idr);
2646 
2647 	amdgpu_vmid_mgr_fini(adev);
2648 }
2649 
2650 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
2651 {
2652 	union drm_amdgpu_vm *args = data;
2653 	struct amdgpu_device *adev = dev->dev_private;
2654 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
2655 	int r;
2656 
2657 	switch (args->in.op) {
2658 	case AMDGPU_VM_OP_RESERVE_VMID:
2659 		/* currently we only have the requirement to reserve a VMID from gfxhub */
2660 		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2661 		if (r)
2662 			return r;
2663 		break;
2664 	case AMDGPU_VM_OP_UNRESERVE_VMID:
2665 		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
2666 		break;
2667 	default:
2668 		return -EINVAL;
2669 	}
2670 
2671 	return 0;
2672 }
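
/*
 * Minimal userspace sketch (illustrative; assumes libdrm's
 * drmCommandWriteRead() helper and the DRM_AMDGPU_VM command number, and
 * skips error handling):
 *
 *	union drm_amdgpu_vm args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 *
 *	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
 */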
2673