xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (revision 029f7f3b8701cc7aca8bdb31f0c7edd6a479e357)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <drm/drmP.h>
29 #include <drm/amdgpu_drm.h>
30 #include "amdgpu.h"
31 #include "amdgpu_trace.h"
32 
33 /*
34  * GPUVM
35  * GPUVM is similar to the legacy gart on older asics, however
36  * rather than there being a single global gart table
37  * for the entire GPU, there are multiple VM page tables active
38  * at any given time.  The VM page tables can contain a mix
39  * of vram pages and system memory pages, and system memory pages
40  * can be mapped as snooped (cached system pages) or unsnooped
41  * (uncached system pages).
42  * Each VM has an ID associated with it and there is a page table
43  * associated with each VMID.  When executing a command buffer,
44  * the kernel tells the ring what VMID to use for that command
45  * buffer.  VMIDs are allocated dynamically as commands are submitted.
46  * The userspace drivers maintain their own address space and the kernel
47  * sets up their page tables accordingly when they submit their
48  * command buffers and a VMID is assigned.
49  * Cayman/Trinity support up to 8 active VMs at any given time;
50  * SI supports 16.
51  */
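/*
 * Address translation is two-level: the GPU page number, shifted right
 * by amdgpu_vm_block_size, indexes the page directory and yields the
 * page table covering that range; the remaining low bits of the page
 * number then index that page table to find the final PTE.
 */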
52 
53 /**
54  * amdgpu_vm_num_pdes - return the number of page directory entries
55  *
56  * @adev: amdgpu_device pointer
57  *
58  * Calculate the number of page directory entries (cayman+).
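 *
 * As an illustrative example (values assumed, not taken from any
 * particular asic): with max_pfn = 0x100000 (4 GB of address space in
 * 4 KB GPU pages) and amdgpu_vm_block_size = 9, this returns
 * 0x100000 >> 9 = 0x800 page directory entries.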
59  */
60 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
61 {
62 	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
63 }
64 
65 /**
66  * amdgpu_vm_directory_size - returns the size of the page directory in bytes
67  *
68  * @adev: amdgpu_device pointer
69  *
70  * Calculate the size of the page directory in bytes (cayman+).
71  */
72 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
73 {
74 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
75 }
76 
77 /**
78  * amdgpu_vm_get_bos - add the vm BOs to a validation list
79  *
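 * @adev: amdgpu_device pointer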
80  * @vm: vm providing the BOs
81  * @head: head of validation list
82  *
83  * Add the page directory and page tables to the list of BOs to
84  * validate for command submission (cayman+).
85  */
86 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
87 					  struct amdgpu_vm *vm,
88 					  struct list_head *head)
89 {
90 	struct amdgpu_bo_list_entry *list;
91 	unsigned i, idx;
92 
93 	list = drm_malloc_ab(vm->max_pde_used + 2,
94 			     sizeof(struct amdgpu_bo_list_entry));
95 	if (!list) {
96 		return NULL;
97 	}
98 
99 	/* add the vm page directory to the list */
100 	list[0].robj = vm->page_directory;
101 	list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
102 	list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
103 	list[0].priority = 0;
104 	list[0].tv.bo = &vm->page_directory->tbo;
105 	list[0].tv.shared = true;
106 	list_add(&list[0].tv.head, head);
107 
108 	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
109 		if (!vm->page_tables[i].bo)
110 			continue;
111 
112 		list[idx].robj = vm->page_tables[i].bo;
113 		list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
114 		list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
115 		list[idx].priority = 0;
116 		list[idx].tv.bo = &list[idx].robj->tbo;
117 		list[idx].tv.shared = true;
118 		list_add(&list[idx++].tv.head, head);
119 	}
120 
121 	return list;
122 }
123 
124 /**
125  * amdgpu_vm_grab_id - allocate the next free VMID
126  *
127  * @vm: vm to allocate id for
128  * @ring: ring we want to submit job to
129  * @sync: sync object where we add dependencies
130  *
131  * Allocate an id for the vm, adding fences to the sync obj as necessary.
132  *
133  * Global mutex must be locked!
134  */
135 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
136 		      struct amdgpu_sync *sync)
137 {
138 	struct fence *best[AMDGPU_MAX_RINGS] = {};
139 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
140 	struct amdgpu_device *adev = ring->adev;
141 
142 	unsigned choices[2] = {};
143 	unsigned i;
144 
145 	/* check if the id is still valid */
146 	if (vm_id->id && vm_id->last_id_use &&
147 	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
148 		trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
149 		return 0;
150 	}
151 
152 	/* we definitely need to flush */
153 	vm_id->pd_gpu_addr = ~0ll;
154 
155 	/* skip over VMID 0, since it is the system VM */
156 	for (i = 1; i < adev->vm_manager.nvm; ++i) {
157 		struct fence *fence = adev->vm_manager.active[i];
158 		struct amdgpu_ring *fring;
159 
160 		if (fence == NULL) {
161 			/* found a free one */
162 			vm_id->id = i;
163 			trace_amdgpu_vm_grab_id(i, ring->idx);
164 			return 0;
165 		}
166 
167 		fring = amdgpu_ring_from_fence(fence);
168 		if (best[fring->idx] == NULL ||
169 		    fence_is_later(best[fring->idx], fence)) {
170 			best[fring->idx] = fence;
171 			choices[fring == ring ? 0 : 1] = i;
172 		}
173 	}
174 
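	/*
	 * No free VMID was found.  choices[0] holds an id whose last use was
	 * on this ring (tracking the oldest such fence), choices[1] an id
	 * last used on another ring; the former is preferred since a
	 * dependency on our own ring avoids a cross-ring wait.
	 */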
175 	for (i = 0; i < 2; ++i) {
176 		if (choices[i]) {
177 			struct fence *fence;
178 
179 			fence  = adev->vm_manager.active[choices[i]];
180 			vm_id->id = choices[i];
181 
182 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
183 			return amdgpu_sync_fence(ring->adev, sync, fence);
184 		}
185 	}
186 
187 	/* should never happen */
188 	BUG();
189 	return -EINVAL;
190 }
191 
192 /**
193  * amdgpu_vm_flush - hardware flush the vm
194  *
195  * @ring: ring to use for flush
196  * @vm: vm we want to flush
197  * @updates: last vm update that we waited for
198  *
199  * Flush the vm (cayman+).
200  *
201  * Global and local mutex must be locked!
202  */
203 void amdgpu_vm_flush(struct amdgpu_ring *ring,
204 		     struct amdgpu_vm *vm,
205 		     struct fence *updates)
206 {
207 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
208 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
209 	struct fence *flushed_updates = vm_id->flushed_updates;
210 	bool is_earlier = false;
211 
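	/*
	 * "is_earlier" means the previously flushed update fence is earlier
	 * than @updates, i.e. @updates is newer and a flush is required.
	 * The unsigned seqno subtraction below is safe against wrap-around
	 * as long as both fences come from the same context, which the
	 * BUG_ON checks.
	 */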
212 	if (flushed_updates && updates) {
213 		BUG_ON(flushed_updates->context != updates->context);
214 		is_earlier = (updates->seqno - flushed_updates->seqno <=
215 			      INT_MAX) ? true : false;
216 	}
217 
218 	if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
219 	    is_earlier) {
220 
221 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
222 		if (is_earlier) {
223 			vm_id->flushed_updates = fence_get(updates);
224 			fence_put(flushed_updates);
225 		}
226 		if (!flushed_updates)
227 			vm_id->flushed_updates = fence_get(updates);
228 		vm_id->pd_gpu_addr = pd_addr;
229 		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
230 	}
231 }
232 
233 /**
234  * amdgpu_vm_fence - remember fence for vm
235  *
236  * @adev: amdgpu_device pointer
237  * @vm: vm we want to fence
238  * @fence: fence to remember
239  *
240  * Fence the vm (cayman+).
241  * Set the fence used to protect page table and id.
242  *
243  * Global and local mutex must be locked!
244  */
245 void amdgpu_vm_fence(struct amdgpu_device *adev,
246 		     struct amdgpu_vm *vm,
247 		     struct amdgpu_fence *fence)
248 {
249 	unsigned ridx = fence->ring->idx;
250 	unsigned vm_id = vm->ids[ridx].id;
251 
252 	fence_put(adev->vm_manager.active[vm_id]);
253 	adev->vm_manager.active[vm_id] = fence_get(&fence->base);
254 
255 	fence_put(vm->ids[ridx].last_id_use);
256 	vm->ids[ridx].last_id_use = fence_get(&fence->base);
257 }
258 
259 /**
260  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
261  *
262  * @vm: requested vm
263  * @bo: requested buffer object
264  *
265  * Find @bo inside the requested vm (cayman+).
266  * Search inside the @bo's vm list for the requested vm.
267  * Returns the found bo_va or NULL if none is found
268  *
269  * Object has to be reserved!
270  */
271 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
272 				       struct amdgpu_bo *bo)
273 {
274 	struct amdgpu_bo_va *bo_va;
275 
276 	list_for_each_entry(bo_va, &bo->va, bo_list) {
277 		if (bo_va->vm == vm) {
278 			return bo_va;
279 		}
280 	}
281 	return NULL;
282 }
283 
284 /**
285  * amdgpu_vm_update_pages - helper to call the right asic function
286  *
287  * @adev: amdgpu_device pointer
288  * @ib: indirect buffer to fill with commands
289  * @pe: addr of the page entry
290  * @addr: dst addr to write into pe
291  * @count: number of page entries to update
292  * @incr: increase next addr by incr bytes
293  * @flags: hw access flags
294  * @gtt_flags: GTT hw access flags
295  *
296  * Traces the parameters and calls the right asic functions
297  * to set up the page table using the DMA.
298  */
299 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
300 				   struct amdgpu_ib *ib,
301 				   uint64_t pe, uint64_t addr,
302 				   unsigned count, uint32_t incr,
303 				   uint32_t flags, uint32_t gtt_flags)
304 {
305 	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
306 
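	/*
	 * Three update paths: system pages whose flags match the GTT flags
	 * can be copied straight out of the GART table; other system pages
	 * and very small updates are written inline; everything else uses
	 * the more compact set_pte_pde commands.
	 */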
307 	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
308 		uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
309 		amdgpu_vm_copy_pte(adev, ib, pe, src, count);
310 
311 	} else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
312 		amdgpu_vm_write_pte(adev, ib, pe, addr,
313 				      count, incr, flags);
314 
315 	} else {
316 		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
317 				      count, incr, flags);
318 	}
319 }
320 
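/**
 * amdgpu_vm_free_job - free the IBs of a finished VM update
 *
 * @job: job to clean up
 *
 * Frees all IBs attached to the job; used as the free callback
 * for the VM update submissions in this file.
 */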
321 int amdgpu_vm_free_job(struct amdgpu_job *job)
322 {
323 	int i;
324 	for (i = 0; i < job->num_ibs; i++)
325 		amdgpu_ib_free(job->adev, &job->ibs[i]);
326 	kfree(job->ibs);
327 	return 0;
328 }
329 
330 /**
331  * amdgpu_vm_clear_bo - initially clear the page dir/table
332  *
333  * @adev: amdgpu_device pointer
334  * @bo: bo to clear
335  */
336 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
337 			      struct amdgpu_bo *bo)
338 {
339 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
340 	struct fence *fence = NULL;
341 	struct amdgpu_ib *ib;
342 	unsigned entries;
343 	uint64_t addr;
344 	int r;
345 
346 	r = amdgpu_bo_reserve(bo, false);
347 	if (r)
348 		return r;
349 
350 	r = reservation_object_reserve_shared(bo->tbo.resv);
351 	if (r)
352 		goto error_unreserve;
353 
354 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
355 	if (r)
356 		goto error_unreserve;
357 
358 	addr = amdgpu_bo_gpu_offset(bo);
359 	entries = amdgpu_bo_size(bo) / 8;
360 
361 	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
362 	if (!ib) {
		r = -ENOMEM;
		goto error_unreserve;
	}
364 
365 	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
366 	if (r)
367 		goto error_free;
368 
369 	ib->length_dw = 0;
370 
371 	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
372 	amdgpu_vm_pad_ib(adev, ib);
373 	WARN_ON(ib->length_dw > 64);
374 	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
375 						 &amdgpu_vm_free_job,
376 						 AMDGPU_FENCE_OWNER_VM,
377 						 &fence);
378 	if (!r)
379 		amdgpu_bo_fence(bo, fence, true);
380 	fence_put(fence);
381 	if (amdgpu_enable_scheduler) {
382 		amdgpu_bo_unreserve(bo);
383 		return 0;
384 	}
385 error_free:
386 	amdgpu_ib_free(adev, ib);
387 	kfree(ib);
388 
389 error_unreserve:
390 	amdgpu_bo_unreserve(bo);
391 	return r;
392 }
393 
394 /**
395  * amdgpu_vm_map_gart - get the physical address of a gart page
396  *
397  * @adev: amdgpu_device pointer
398  * @addr: the unmapped addr
399  *
400  * Look up the physical address of the page that the pte resolves
401  * to (cayman+).
402  * Returns the physical address of the page.
403  */
404 uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
405 {
406 	uint64_t result;
407 
408 	/* page table offset */
409 	result = adev->gart.pages_addr[addr >> PAGE_SHIFT];
410 
411 	/* in case cpu page size != gpu page size */
412 	result |= addr & (~PAGE_MASK);
413 
414 	return result;
415 }
416 
417 /**
418  * amdgpu_vm_update_page_directory - make sure that page directory is valid
419  *
420  * @adev: amdgpu_device pointer
421  * @vm: requested vm
424  *
425  * Updates the page directory to point at the
426  * currently allocated page tables (cayman+).
427  * Returns 0 for success, error for failure.
428  *
429  * Global and local mutex must be locked!
430  */
431 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
432 				    struct amdgpu_vm *vm)
433 {
434 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
435 	struct amdgpu_bo *pd = vm->page_directory;
436 	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
437 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
438 	uint64_t last_pde = ~0, last_pt = ~0;
439 	unsigned count = 0, pt_idx, ndw;
440 	struct amdgpu_ib *ib;
441 	struct fence *fence = NULL;
442 
443 	int r;
444 
445 	/* padding, etc. */
446 	ndw = 64;
447 
448 	/* assume the worst case */
449 	ndw += vm->max_pde_used * 6;
450 
451 	/* update too big for an IB */
452 	if (ndw > 0xfffff)
453 		return -ENOMEM;
454 
455 	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
456 	if (!ib)
457 		return -ENOMEM;
458 
459 	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
460 	if (r) {
461 		kfree(ib);
462 		return r;
463 	}
464 	ib->length_dw = 0;
465 
466 	/* walk over the address space and update the page directory */
467 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
468 		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
469 		uint64_t pde, pt;
470 
471 		if (bo == NULL)
472 			continue;
473 
474 		pt = amdgpu_bo_gpu_offset(bo);
475 		if (vm->page_tables[pt_idx].addr == pt)
476 			continue;
477 		vm->page_tables[pt_idx].addr = pt;
478 
479 		pde = pd_addr + pt_idx * 8;
480 		if (((last_pde + 8 * count) != pde) ||
481 		    ((last_pt + incr * count) != pt)) {
482 
483 			if (count) {
484 				amdgpu_vm_update_pages(adev, ib, last_pde,
485 						       last_pt, count, incr,
486 						       AMDGPU_PTE_VALID, 0);
487 			}
488 
489 			count = 1;
490 			last_pde = pde;
491 			last_pt = pt;
492 		} else {
493 			++count;
494 		}
495 	}
496 
497 	if (count)
498 		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
499 				       incr, AMDGPU_PTE_VALID, 0);
500 
501 	if (ib->length_dw != 0) {
502 		amdgpu_vm_pad_ib(adev, ib);
503 		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
504 		WARN_ON(ib->length_dw > ndw);
505 		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
506 							 &amdgpu_vm_free_job,
507 							 AMDGPU_FENCE_OWNER_VM,
508 							 &fence);
509 		if (r)
510 			goto error_free;
511 
512 		amdgpu_bo_fence(pd, fence, true);
513 		fence_put(vm->page_directory_fence);
514 		vm->page_directory_fence = fence_get(fence);
515 		fence_put(fence);
516 	}
517 
518 	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
519 		amdgpu_ib_free(adev, ib);
520 		kfree(ib);
521 	}
522 
523 	return 0;
524 
525 error_free:
526 	amdgpu_ib_free(adev, ib);
527 	kfree(ib);
528 	return r;
529 }
530 
531 /**
532  * amdgpu_vm_frag_ptes - add fragment information to PTEs
533  *
534  * @adev: amdgpu_device pointer
535  * @ib: IB for the update
536  * @pe_start: first PTE to handle
537  * @pe_end: last PTE to handle
538  * @addr: addr those PTEs should point to
539  * @flags: hw mapping flags
540  * @gtt_flags: GTT hw mapping flags
541  *
542  * Global and local mutex must be locked!
543  */
544 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
545 				struct amdgpu_ib *ib,
546 				uint64_t pe_start, uint64_t pe_end,
547 				uint64_t addr, uint32_t flags,
548 				uint32_t gtt_flags)
549 {
550 	/**
551 	 * The MC L1 TLB supports variable sized pages, based on a fragment
552 	 * field in the PTE. When this field is set to a non-zero value, page
553 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
554 	 * flags are considered valid for all PTEs within the fragment range
555 	 * and corresponding mappings are assumed to be physically contiguous.
556 	 *
557 	 * The L1 TLB can store a single PTE for the whole fragment,
558 	 * significantly increasing the space available for translation
559 	 * caching. This leads to large improvements in throughput when the
560 	 * TLB is under pressure.
561 	 *
562 	 * The L2 TLB distributes small and large fragments into two
563 	 * asymmetric partitions. The large fragment cache is significantly
564 	 * larger. Thus, we try to use large fragments wherever possible.
565 	 * Userspace can support this by aligning virtual base address and
566 	 * allocation size to the fragment size.
567 	 */
568 
569 	/* SI and newer are optimized for 64KB */
570 	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
571 	uint64_t frag_align = 0x80;
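	/*
	 * A 64KB fragment covers 16 GPU pages, i.e. 16 PTEs of 8 bytes each,
	 * so fragment boundaries are 0x80 bytes apart in PE address space.
	 */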
572 
573 	uint64_t frag_start = ALIGN(pe_start, frag_align);
574 	uint64_t frag_end = pe_end & ~(frag_align - 1);
575 
576 	unsigned count;
577 
578 	/* system pages are non-contiguous */
579 	if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
580 	    (frag_start >= frag_end)) {
581 
582 		count = (pe_end - pe_start) / 8;
583 		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
584 				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
585 		return;
586 	}
587 
588 	/* handle the 4K area at the beginning */
589 	if (pe_start != frag_start) {
590 		count = (frag_start - pe_start) / 8;
591 		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
592 				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
593 		addr += AMDGPU_GPU_PAGE_SIZE * count;
594 	}
595 
596 	/* handle the area in the middle */
597 	count = (frag_end - frag_start) / 8;
598 	amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
599 			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
600 			       gtt_flags);
601 
602 	/* handle the 4K area at the end */
603 	if (frag_end != pe_end) {
604 		addr += AMDGPU_GPU_PAGE_SIZE * count;
605 		count = (pe_end - frag_end) / 8;
606 		amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
607 				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
608 	}
609 }
610 
611 /**
612  * amdgpu_vm_update_ptes - make sure that page tables are valid
613  *
614  * @adev: amdgpu_device pointer
615  * @vm: requested vm
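 * @ib: indirect buffer to fill with commands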
616  * @start: start of GPU address range
617  * @end: end of GPU address range
618  * @dst: destination address to map to
619  * @flags: mapping flags
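 * @gtt_flags: GTT hw access flags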
620  *
621  * Update the page tables in the range @start - @end (cayman+).
622  *
623  * Global and local mutex must be locked!
624  */
625 static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
626 				 struct amdgpu_vm *vm,
627 				 struct amdgpu_ib *ib,
628 				 uint64_t start, uint64_t end,
629 				 uint64_t dst, uint32_t flags,
630 				 uint32_t gtt_flags)
631 {
632 	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
633 	uint64_t last_pte = ~0, last_dst = ~0;
634 	void *owner = AMDGPU_FENCE_OWNER_VM;
635 	unsigned count = 0;
636 	uint64_t addr;
637 
638 	/* sync to everything on unmapping */
639 	if (!(flags & AMDGPU_PTE_VALID))
640 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
641 
642 	/* walk over the address space and update the page tables */
643 	for (addr = start; addr < end; ) {
644 		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
645 		struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
646 		unsigned nptes;
647 		uint64_t pte;
648 		int r;
649 
650 		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
651 		r = reservation_object_reserve_shared(pt->tbo.resv);
652 		if (r)
653 			return r;
654 
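		/*
		 * Number of PTEs to write into the current page table:
		 * either up to @end if it lies in the same table, or up to
		 * the end of this page table otherwise.
		 */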
655 		if ((addr & ~mask) == (end & ~mask))
656 			nptes = end - addr;
657 		else
658 			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
659 
660 		pte = amdgpu_bo_gpu_offset(pt);
661 		pte += (addr & mask) * 8;
662 
663 		if ((last_pte + 8 * count) != pte) {
664 
665 			if (count) {
666 				amdgpu_vm_frag_ptes(adev, ib, last_pte,
667 						    last_pte + 8 * count,
668 						    last_dst, flags,
669 						    gtt_flags);
670 			}
671 
672 			count = nptes;
673 			last_pte = pte;
674 			last_dst = dst;
675 		} else {
676 			count += nptes;
677 		}
678 
679 		addr += nptes;
680 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
681 	}
682 
683 	if (count) {
684 		amdgpu_vm_frag_ptes(adev, ib, last_pte,
685 				    last_pte + 8 * count,
686 				    last_dst, flags, gtt_flags);
687 	}
688 
689 	return 0;
690 }
691 
692 /**
693  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
694  *
695  * @adev: amdgpu_device pointer
696  * @vm: requested vm
697  * @mapping: mapped range and flags to use for the update
698  * @addr: addr to set the area to
699  * @gtt_flags: flags as they are used for GTT
700  * @fence: optional resulting fence
701  *
702  * Fill in the page table entries for @mapping.
703  * Returns 0 for success, -EINVAL for failure.
704  *
705  * Objects have to be reserved and the mutex must be locked!
706  */
707 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
708 				       struct amdgpu_vm *vm,
709 				       struct amdgpu_bo_va_mapping *mapping,
710 				       uint64_t addr, uint32_t gtt_flags,
711 				       struct fence **fence)
712 {
713 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
714 	unsigned nptes, ncmds, ndw;
715 	uint32_t flags = gtt_flags;
716 	struct amdgpu_ib *ib;
717 	struct fence *f = NULL;
718 	int r;
719 
720 	/* normally, bo_va->flags only contains the READABLE and WRITEABLE bits,
721 	 * but just to be safe we filter the flags here first
722 	 */
723 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
724 		flags &= ~AMDGPU_PTE_READABLE;
725 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
726 		flags &= ~AMDGPU_PTE_WRITEABLE;
727 
728 	trace_amdgpu_vm_bo_update(mapping);
729 
730 	nptes = mapping->it.last - mapping->it.start + 1;
731 
732 	/*
733 	 * reserve space for one command every (1 << BLOCK_SIZE)
734 	 *  entries or 2k entries (whichever is smaller)
735 	 */
736 	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
737 
738 	/* padding, etc. */
739 	ndw = 64;
740 
741 	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
742 		/* only copy commands needed */
743 		ndw += ncmds * 7;
744 
745 	} else if (flags & AMDGPU_PTE_SYSTEM) {
746 		/* header for write data commands */
747 		ndw += ncmds * 4;
748 
749 		/* body of write data command */
750 		ndw += nptes * 2;
751 
752 	} else {
753 		/* set page commands needed */
754 		ndw += ncmds * 10;
755 
756 		/* two extra commands for begin/end of fragment */
757 		ndw += 2 * 10;
758 	}
759 
760 	/* update too big for an IB */
761 	if (ndw > 0xfffff)
762 		return -ENOMEM;
763 
764 	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
765 	if (!ib)
766 		return -ENOMEM;
767 
768 	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
769 	if (r) {
770 		kfree(ib);
771 		return r;
772 	}
773 
774 	ib->length_dw = 0;
775 
776 	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
777 				  mapping->it.last + 1, addr + mapping->offset,
778 				  flags, gtt_flags);
779 
780 	if (r) {
781 		amdgpu_ib_free(adev, ib);
782 		kfree(ib);
783 		return r;
784 	}
785 
786 	amdgpu_vm_pad_ib(adev, ib);
787 	WARN_ON(ib->length_dw > ndw);
788 	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
789 						 &amdgpu_vm_free_job,
790 						 AMDGPU_FENCE_OWNER_VM,
791 						 &f);
792 	if (r)
793 		goto error_free;
794 
795 	amdgpu_bo_fence(vm->page_directory, f, true);
796 	if (fence) {
797 		fence_put(*fence);
798 		*fence = fence_get(f);
799 	}
800 	fence_put(f);
801 	if (!amdgpu_enable_scheduler) {
802 		amdgpu_ib_free(adev, ib);
803 		kfree(ib);
804 	}
805 	return 0;
806 
807 error_free:
808 	amdgpu_ib_free(adev, ib);
809 	kfree(ib);
810 	return r;
811 }
812 
813 /**
814  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
815  *
816  * @adev: amdgpu_device pointer
817  * @bo_va: requested BO and VM object
818  * @mem: ttm mem (may be NULL)
819  *
820  * Fill in the page table entries for @bo_va.
821  * Returns 0 for success, -EINVAL for failure.
822  *
823  * Objects have to be reserved and the mutex must be locked!
824  */
825 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
826 			struct amdgpu_bo_va *bo_va,
827 			struct ttm_mem_reg *mem)
828 {
829 	struct amdgpu_vm *vm = bo_va->vm;
830 	struct amdgpu_bo_va_mapping *mapping;
831 	uint32_t flags;
832 	uint64_t addr;
833 	int r;
834 
835 	if (mem) {
836 		addr = (u64)mem->start << PAGE_SHIFT;
837 		if (mem->mem_type != TTM_PL_TT)
838 			addr += adev->vm_manager.vram_base_offset;
839 	} else {
840 		addr = 0;
841 	}
842 
843 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
844 
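	/*
	 * If the bo_va is still on a status list (invalidated or cleared),
	 * its previously valid mappings need to be rewritten as well, so
	 * move them over to the invalids list before updating below.
	 */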
845 	spin_lock(&vm->status_lock);
846 	if (!list_empty(&bo_va->vm_status))
847 		list_splice_init(&bo_va->valids, &bo_va->invalids);
848 	spin_unlock(&vm->status_lock);
849 
850 	list_for_each_entry(mapping, &bo_va->invalids, list) {
851 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
852 						flags, &bo_va->last_pt_update);
853 		if (r)
854 			return r;
855 	}
856 
857 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
858 		list_for_each_entry(mapping, &bo_va->valids, list)
859 			trace_amdgpu_vm_bo_mapping(mapping);
860 
861 		list_for_each_entry(mapping, &bo_va->invalids, list)
862 			trace_amdgpu_vm_bo_mapping(mapping);
863 	}
864 
865 	spin_lock(&vm->status_lock);
866 	list_splice_init(&bo_va->invalids, &bo_va->valids);
867 	list_del_init(&bo_va->vm_status);
868 	if (!mem)
869 		list_add(&bo_va->vm_status, &vm->cleared);
870 	spin_unlock(&vm->status_lock);
871 
872 	return 0;
873 }
874 
875 /**
876  * amdgpu_vm_clear_freed - clear freed BOs in the PT
877  *
878  * @adev: amdgpu_device pointer
879  * @vm: requested vm
880  *
881  * Make sure all freed BOs are cleared in the PT.
882  * Returns 0 for success.
883  *
884  * PTs have to be reserved and mutex must be locked!
885  */
886 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
887 			  struct amdgpu_vm *vm)
888 {
889 	struct amdgpu_bo_va_mapping *mapping;
890 	int r;
891 
892 	while (!list_empty(&vm->freed)) {
893 		mapping = list_first_entry(&vm->freed,
894 			struct amdgpu_bo_va_mapping, list);
895 		list_del(&mapping->list);
896 
897 		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
898 		kfree(mapping);
899 		if (r)
900 			return r;
902 	}
903 	return 0;
905 }
906 
907 /**
908  * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
909  *
910  * @adev: amdgpu_device pointer
911  * @vm: requested vm
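 * @sync: sync object to add the last PT update fence to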
912  *
913  * Make sure all invalidated BOs are cleared in the PT.
914  * Returns 0 for success.
915  *
916  * PTs have to be reserved and mutex must be locked!
917  */
918 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
919 			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
920 {
921 	struct amdgpu_bo_va *bo_va = NULL;
922 	int r = 0;
923 
924 	spin_lock(&vm->status_lock);
925 	while (!list_empty(&vm->invalidated)) {
926 		bo_va = list_first_entry(&vm->invalidated,
927 			struct amdgpu_bo_va, vm_status);
928 		spin_unlock(&vm->status_lock);
929 
930 		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
931 		if (r)
932 			return r;
933 
934 		spin_lock(&vm->status_lock);
935 	}
936 	spin_unlock(&vm->status_lock);
937 
938 	if (bo_va)
939 		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
940 
941 	return r;
942 }
943 
944 /**
945  * amdgpu_vm_bo_add - add a bo to a specific vm
946  *
947  * @adev: amdgpu_device pointer
948  * @vm: requested vm
949  * @bo: amdgpu buffer object
950  *
951  * Add @bo into the requested vm (cayman+).
952  * Add @bo to the list of bos associated with the vm.
953  * Returns newly added bo_va or NULL for failure
954  *
955  * Object has to be reserved!
956  */
957 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
958 				      struct amdgpu_vm *vm,
959 				      struct amdgpu_bo *bo)
960 {
961 	struct amdgpu_bo_va *bo_va;
962 
963 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
964 	if (bo_va == NULL) {
965 		return NULL;
966 	}
967 	bo_va->vm = vm;
968 	bo_va->bo = bo;
969 	bo_va->ref_count = 1;
970 	INIT_LIST_HEAD(&bo_va->bo_list);
971 	INIT_LIST_HEAD(&bo_va->valids);
972 	INIT_LIST_HEAD(&bo_va->invalids);
973 	INIT_LIST_HEAD(&bo_va->vm_status);
974 
975 	list_add_tail(&bo_va->bo_list, &bo->va);
976 
977 	return bo_va;
978 }
979 
980 /**
981  * amdgpu_vm_bo_map - map bo inside a vm
982  *
983  * @adev: amdgpu_device pointer
984  * @bo_va: bo_va to store the address
985  * @saddr: where to map the BO
986  * @offset: requested offset in the BO
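 * @size: size of the mapping, in bytes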
987  * @flags: attributes of pages (read/write/valid/etc.)
988  *
989  * Add a mapping of the BO at the specified addr into the VM.
990  * Returns 0 for success, error for failure.
991  *
992  * Object has to be reserved and gets unreserved by this function!
993  */
994 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
995 		     struct amdgpu_bo_va *bo_va,
996 		     uint64_t saddr, uint64_t offset,
997 		     uint64_t size, uint32_t flags)
998 {
999 	struct amdgpu_bo_va_mapping *mapping;
1000 	struct amdgpu_vm *vm = bo_va->vm;
1001 	struct interval_tree_node *it;
1002 	unsigned last_pfn, pt_idx;
1003 	uint64_t eaddr;
1004 	int r;
1005 
1006 	/* validate the parameters */
1007 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1008 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
1009 		amdgpu_bo_unreserve(bo_va->bo);
1010 		return -EINVAL;
1011 	}
1012 
1013 	/* make sure object fit at this offset */
1014 	eaddr = saddr + size;
1015 	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
1016 		amdgpu_bo_unreserve(bo_va->bo);
1017 		return -EINVAL;
1018 	}
1019 
1020 	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1021 	if (last_pfn > adev->vm_manager.max_pfn) {
1022 		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
1023 			last_pfn, adev->vm_manager.max_pfn);
1024 		amdgpu_bo_unreserve(bo_va->bo);
1025 		return -EINVAL;
1026 	}
1027 
1028 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1029 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1030 
1031 	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
1032 	if (it) {
1033 		struct amdgpu_bo_va_mapping *tmp;
1034 		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1035 		/* bo and tmp overlap, invalid addr */
1036 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1037 			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1038 			tmp->it.start, tmp->it.last + 1);
1039 		amdgpu_bo_unreserve(bo_va->bo);
1040 		r = -EINVAL;
1041 		goto error;
1042 	}
1043 
1044 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1045 	if (!mapping) {
1046 		amdgpu_bo_unreserve(bo_va->bo);
1047 		r = -ENOMEM;
1048 		goto error;
1049 	}
1050 
1051 	INIT_LIST_HEAD(&mapping->list);
1052 	mapping->it.start = saddr;
1053 	mapping->it.last = eaddr - 1;
1054 	mapping->offset = offset;
1055 	mapping->flags = flags;
1056 
1057 	list_add(&mapping->list, &bo_va->invalids);
1058 	interval_tree_insert(&mapping->it, &vm->va);
1059 	trace_amdgpu_vm_bo_map(bo_va, mapping);
1060 
1061 	/* Make sure the page tables are allocated */
1062 	saddr >>= amdgpu_vm_block_size;
1063 	eaddr >>= amdgpu_vm_block_size;
1064 
1065 	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
1066 
1067 	if (eaddr > vm->max_pde_used)
1068 		vm->max_pde_used = eaddr;
1069 
1070 	amdgpu_bo_unreserve(bo_va->bo);
1071 
1072 	/* walk over the address space and allocate the page tables */
1073 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1074 		struct reservation_object *resv = vm->page_directory->tbo.resv;
1075 		struct amdgpu_bo *pt;
1076 
1077 		if (vm->page_tables[pt_idx].bo)
1078 			continue;
1079 
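		/*
		 * Create the page table BO with the page directory's
		 * reservation object so PD and PTs can be reserved and
		 * fenced together.
		 */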
1080 		ww_mutex_lock(&resv->lock, NULL);
1081 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1082 				     AMDGPU_GPU_PAGE_SIZE, true,
1083 				     AMDGPU_GEM_DOMAIN_VRAM,
1084 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1085 				     NULL, resv, &pt);
1086 		ww_mutex_unlock(&resv->lock);
1087 		if (r)
1088 			goto error_free;
1089 
1090 		r = amdgpu_vm_clear_bo(adev, pt);
1091 		if (r) {
1092 			amdgpu_bo_unref(&pt);
1093 			goto error_free;
1094 		}
1095 
1096 		vm->page_tables[pt_idx].addr = 0;
1097 		vm->page_tables[pt_idx].bo = pt;
1098 	}
1099 
1100 	return 0;
1101 
1102 error_free:
1103 	list_del(&mapping->list);
1104 	interval_tree_remove(&mapping->it, &vm->va);
1105 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1106 	kfree(mapping);
1107 
1108 error:
1109 	return r;
1110 }
1111 
1112 /**
1113  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1114  *
1115  * @adev: amdgpu_device pointer
1116  * @bo_va: bo_va to remove the address from
1117  * @saddr: where the BO is mapped
1118  *
1119  * Remove a mapping of the BO at the specified addr from the VM.
1120  * Returns 0 for success, error for failure.
1121  *
1122  * Object has to be reserved and gets unreserved by this function!
1123  */
1124 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1125 		       struct amdgpu_bo_va *bo_va,
1126 		       uint64_t saddr)
1127 {
1128 	struct amdgpu_bo_va_mapping *mapping;
1129 	struct amdgpu_vm *vm = bo_va->vm;
1130 	bool valid = true;
1131 
1132 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1133 
1134 	list_for_each_entry(mapping, &bo_va->valids, list) {
1135 		if (mapping->it.start == saddr)
1136 			break;
1137 	}
1138 
1139 	if (&mapping->list == &bo_va->valids) {
1140 		valid = false;
1141 
1142 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1143 			if (mapping->it.start == saddr)
1144 				break;
1145 		}
1146 
1147 		if (&mapping->list == &bo_va->invalids) {
1148 			amdgpu_bo_unreserve(bo_va->bo);
1149 			return -ENOENT;
1150 		}
1151 	}
1152 
1153 	list_del(&mapping->list);
1154 	interval_tree_remove(&mapping->it, &vm->va);
1155 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1156 
1157 	if (valid)
1158 		list_add(&mapping->list, &vm->freed);
1159 	else
1160 		kfree(mapping);
1161 	amdgpu_bo_unreserve(bo_va->bo);
1162 
1163 	return 0;
1164 }
1165 
1166 /**
1167  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1168  *
1169  * @adev: amdgpu_device pointer
1170  * @bo_va: requested bo_va
1171  *
1172  * Remove @bo_va->bo from the requested vm (cayman+).
1173  *
1174  * Object has to be reserved!
1175  */
1176 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1177 		      struct amdgpu_bo_va *bo_va)
1178 {
1179 	struct amdgpu_bo_va_mapping *mapping, *next;
1180 	struct amdgpu_vm *vm = bo_va->vm;
1181 
1182 	list_del(&bo_va->bo_list);
1183 
1184 	spin_lock(&vm->status_lock);
1185 	list_del(&bo_va->vm_status);
1186 	spin_unlock(&vm->status_lock);
1187 
1188 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1189 		list_del(&mapping->list);
1190 		interval_tree_remove(&mapping->it, &vm->va);
1191 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1192 		list_add(&mapping->list, &vm->freed);
1193 	}
1194 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1195 		list_del(&mapping->list);
1196 		interval_tree_remove(&mapping->it, &vm->va);
1197 		kfree(mapping);
1198 	}
1199 
1200 	fence_put(bo_va->last_pt_update);
1201 	kfree(bo_va);
1202 }
1203 
1204 /**
1205  * amdgpu_vm_bo_invalidate - mark the bo as invalid
1206  *
1207  * @adev: amdgpu_device pointer
1209  * @bo: amdgpu buffer object
1210  *
1211  * Mark @bo as invalid (cayman+).
1212  */
1213 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1214 			     struct amdgpu_bo *bo)
1215 {
1216 	struct amdgpu_bo_va *bo_va;
1217 
1218 	list_for_each_entry(bo_va, &bo->va, bo_list) {
1219 		spin_lock(&bo_va->vm->status_lock);
1220 		if (list_empty(&bo_va->vm_status))
1221 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1222 		spin_unlock(&bo_va->vm->status_lock);
1223 	}
1224 }
1225 
1226 /**
1227  * amdgpu_vm_init - initialize a vm instance
1228  *
1229  * @adev: amdgpu_device pointer
1230  * @vm: requested vm
1231  *
1232  * Init @vm fields (cayman+).
1233  */
1234 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1235 {
1236 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1237 		AMDGPU_VM_PTE_COUNT * 8);
1238 	unsigned pd_size, pd_entries, pts_size;
1239 	int i, r;
1240 
1241 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1242 		vm->ids[i].id = 0;
1243 		vm->ids[i].flushed_updates = NULL;
1244 		vm->ids[i].last_id_use = NULL;
1245 	}
1246 	mutex_init(&vm->mutex);
1247 	vm->va = RB_ROOT;
1248 	spin_lock_init(&vm->status_lock);
1249 	INIT_LIST_HEAD(&vm->invalidated);
1250 	INIT_LIST_HEAD(&vm->cleared);
1251 	INIT_LIST_HEAD(&vm->freed);
1252 
1253 	pd_size = amdgpu_vm_directory_size(adev);
1254 	pd_entries = amdgpu_vm_num_pdes(adev);
1255 
1256 	/* allocate page table array */
1257 	pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
1258 	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
1259 	if (vm->page_tables == NULL) {
1260 		DRM_ERROR("Cannot allocate memory for page table array\n");
1261 		return -ENOMEM;
1262 	}
1263 
1264 	vm->page_directory_fence = NULL;
1265 
1266 	r = amdgpu_bo_create(adev, pd_size, align, true,
1267 			     AMDGPU_GEM_DOMAIN_VRAM,
1268 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1269 			     NULL, NULL, &vm->page_directory);
1270 	if (r)
1271 		return r;
1272 
1273 	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
1274 	if (r) {
1275 		amdgpu_bo_unref(&vm->page_directory);
1276 		vm->page_directory = NULL;
1277 		return r;
1278 	}
1279 
1280 	return 0;
1281 }
1282 
1283 /**
1284  * amdgpu_vm_fini - tear down a vm instance
1285  *
1286  * @adev: amdgpu_device pointer
1287  * @vm: requested vm
1288  *
1289  * Tear down @vm (cayman+).
1290  * Unbind the VM and remove all bos from the vm bo list
1291  */
1292 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1293 {
1294 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1295 	int i;
1296 
1297 	if (!RB_EMPTY_ROOT(&vm->va)) {
1298 		dev_err(adev->dev, "still active bo inside vm\n");
1299 	}
1300 	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1301 		list_del(&mapping->list);
1302 		interval_tree_remove(&mapping->it, &vm->va);
1303 		kfree(mapping);
1304 	}
1305 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1306 		list_del(&mapping->list);
1307 		kfree(mapping);
1308 	}
1309 
1310 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
1311 		amdgpu_bo_unref(&vm->page_tables[i].bo);
1312 	kfree(vm->page_tables);
1313 
1314 	amdgpu_bo_unref(&vm->page_directory);
1315 	fence_put(vm->page_directory_fence);
1316 
1317 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1318 		fence_put(vm->ids[i].flushed_updates);
1319 		fence_put(vm->ids[i].last_id_use);
1320 	}
1321 
1322 	mutex_destroy(&vm->mutex);
1323 }
1324