/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older asics, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
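
/*
 * Illustrative example (not used by the code below): with a 4 KiB GPU
 * page size and an example amdgpu_vm_block_size of 9 bits, each page
 * table covers 512 pages, so a GPU virtual address is decoded roughly as
 *
 *   pde_idx = (va >> 12) >> 9;      selects the page table
 *   pte_idx = (va >> 12) & 0x1ff;   selects the entry within it
 *
 * e.g. va = 0x403000 -> pfn = 0x403 -> pde_idx = 2, pte_idx = 3.
 * The exact split depends on the amdgpu_vm_block_size module parameter.
 */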

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}
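
/*
 * Sizing example (illustrative, assumed values): with max_pfn = 1 << 20
 * (a 4 GiB address space of 4 KiB pages) and amdgpu_vm_block_size = 9,
 * amdgpu_vm_num_pdes() returns 1 << 11 = 2048 page directory entries and
 * amdgpu_vm_directory_size() returns 2048 * 8 = 16 KiB (already GPU page
 * aligned in this case).
 */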

/**
 * amdgpu_vm_get_bos - add the vm BOs to a validation list
 *
 * @adev: amdgpu_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory and page tables to the list of BOs to
 * validate for command submission (cayman+).
 */
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm,
					  struct list_head *head)
{
	struct amdgpu_bo_list_entry *list;
	unsigned i, idx;

	mutex_lock(&vm->mutex);
	list = drm_malloc_ab(vm->max_pde_used + 2,
			     sizeof(struct amdgpu_bo_list_entry));
	if (!list) {
		mutex_unlock(&vm->mutex);
		return NULL;
	}

	/* add the vm page directory to the list */
	list[0].robj = vm->page_directory;
	list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
	list[0].priority = 0;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tv.shared = true;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].priority = 0;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tv.shared = true;
		list_add(&list[idx++].tv.head, head);
	}
	mutex_unlock(&vm->mutex);

	return list;
}

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 *
 * Global mutex must be locked!
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync)
{
	struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct amdgpu_device *adev = ring->adev;

	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm_id->id && vm_id->last_id_use &&
	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
		return 0;

	/* we definitely need to flush */
	vm_id->pd_gpu_addr = ~0ll;

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.nvm; ++i) {
		struct amdgpu_fence *fence = adev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm_id->id = i;
			trace_amdgpu_vm_grab_id(i, ring->idx);
			return 0;
		}

		if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
			best[fence->ring->idx] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			struct amdgpu_fence *fence;

			fence = adev->vm_manager.active[choices[i]];
			vm_id->id = choices[i];

			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
			return amdgpu_sync_fence(ring->adev, sync, &fence->base);
		}
	}

	/* should never happen */
	BUG();
	return -EINVAL;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @vm: vm we want to flush
 * @updates: last vm update that we waited for
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct fence *updates)
{
	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct fence *flushed_updates = vm_id->flushed_updates;
	bool is_earlier = false;

	if (flushed_updates && updates) {
		BUG_ON(flushed_updates->context != updates->context);
		is_earlier = updates->seqno - flushed_updates->seqno <= INT_MAX;
	}
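
	/*
	 * Note on the comparison above (illustrative, assuming 32-bit
	 * seqnos): "updates is newer than flushed_updates" is detected by
	 * checking that the wrapped unsigned difference is at most INT_MAX.
	 * E.g. flushed = 0xfffffffe and updates = 0x00000001 gives a
	 * difference of 3, so @updates is correctly treated as the more
	 * recent fence even though its raw value is smaller.
	 */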

	if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
	    is_earlier) {

		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
		if (is_earlier) {
			vm_id->flushed_updates = fence_get(updates);
			fence_put(flushed_updates);
		}
		if (!flushed_updates)
			vm_id->flushed_updates = fence_get(updates);
		vm_id->pd_gpu_addr = pd_addr;
		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
	}
}

/**
 * amdgpu_vm_fence - remember fence for vm
 *
 * @adev: amdgpu_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void amdgpu_vm_fence(struct amdgpu_device *adev,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *fence)
{
	unsigned ridx = fence->ring->idx;
	unsigned vm_id = vm->ids[ridx].id;

	amdgpu_fence_unref(&adev->vm_manager.active[vm_id]);
	adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence);

	amdgpu_fence_unref(&vm->ids[ridx].last_id_use);
	vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence);
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 * @gtt_flags: GTT hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
				   struct amdgpu_ib *ib,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags, uint32_t gtt_flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
		uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
		amdgpu_vm_copy_pte(adev, ib, pe, src, count);

	} else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
		amdgpu_vm_write_pte(adev, ib, pe, addr,
				      count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
				      count, incr, flags);
	}
}

int amdgpu_vm_free_job(struct amdgpu_job *job)
{
	int i;

	for (i = 0; i < job->num_ibs; i++)
		amdgpu_ib_free(job->adev, &job->ibs[i]);
	kfree(job->ibs);
	return 0;
}

/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @bo: bo to clear
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct fence *fence = NULL;
	struct amdgpu_ib *ib;
	unsigned entries;
	uint64_t addr;
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto error_unreserve;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error_unreserve;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib) {
		r = -ENOMEM;
		goto error_unreserve;
	}

	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
	if (r)
		goto error_free;

	ib->length_dw = 0;

	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
	amdgpu_vm_pad_ib(adev, ib);
	WARN_ON(ib->length_dw > 64);
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &fence);
	if (!r)
		amdgpu_bo_fence(bo, fence, true);
	fence_put(fence);
	if (amdgpu_enable_scheduler) {
		amdgpu_bo_unreserve(bo);
		return 0;
	}
error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);

error_unreserve:
	amdgpu_bo_unreserve(bo);
	return r;
}

/**
 * amdgpu_vm_map_gart - get the physical address of a gart page
 *
 * @adev: amdgpu_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = adev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}
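
/*
 * Example (illustrative, assuming 4 KiB CPU pages): for addr = 0x12345
 * the lookup uses gart.pages_addr[0x12] and ORs back the in-page offset
 * 0x345, so the returned physical address is pages_addr[0x12] | 0x345.
 */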

/**
 * amdgpu_vm_update_page_directory - make sure that the page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_ib *ib;
	struct fence *fence = NULL;

	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	if (r) {
		kfree(ib);
		return r;
	}
	ib->length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(adev, ib, last_pde,
						       last_pt, count, incr,
						       AMDGPU_PTE_VALID, 0);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
				       incr, AMDGPU_PTE_VALID, 0);

	if (ib->length_dw != 0) {
		amdgpu_vm_pad_ib(adev, ib);
		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
		WARN_ON(ib->length_dw > ndw);
		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
							 &amdgpu_vm_free_job,
							 AMDGPU_FENCE_OWNER_VM,
							 &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(pd, fence, true);
		fence_put(vm->page_directory_fence);
		vm->page_directory_fence = fence_get(fence);
		fence_put(fence);
	}

	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
	}

	return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 * @gtt_flags: GTT hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
				struct amdgpu_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags,
				uint32_t gtt_flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
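
	/*
	 * Worked example (illustrative): AMDGPU_PTE_FRAG_64KB with
	 * frag_align = 0x80 means fragments start on 128-byte aligned PTE
	 * addresses, i.e. groups of 16 PTEs covering 16 * 4 KiB = 64 KiB of
	 * GPU address space.  A run of PTEs at 0x1010..0x1230 would thus be
	 * split into a 4 KiB-page head (0x1010..0x1080), a 64 KiB-fragment
	 * middle (0x1080..0x1200) and a 4 KiB-page tail (0x1200..0x1230).
	 */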

	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
	uint64_t frag_align = 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are not physically contiguous */
	if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
			       gtt_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
	}
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 * @gtt_flags: GTT hw mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_ib *ib,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint32_t flags,
				 uint32_t gtt_flags)
{
	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned count = 0;
	uint64_t addr;

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

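	/*
	 * Walk example (illustrative, assuming AMDGPU_VM_PTE_COUNT is 512,
	 * i.e. amdgpu_vm_block_size = 9): for start = 500 and end = 1200 the
	 * loop below processes 12 PTEs from the first page table (500..511),
	 * all 512 PTEs of the second (512..1023) and 176 PTEs of the third
	 * (1024..1199), merging runs whose PTE addresses are contiguous
	 * before handing them to amdgpu_vm_frag_ptes().
	 */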
	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
		struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;
		int r;

		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
		r = reservation_object_reserve_shared(pt->tbo.resv);
		if (r)
			return r;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		pte = amdgpu_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				amdgpu_vm_frag_ptes(adev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags,
						    gtt_flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	if (count) {
		amdgpu_vm_frag_ptes(adev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags, gtt_flags);
	}

	return 0;
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @addr: addr to set the area to
 * @gtt_flags: flags as they are used for GTT
 * @fence: optional resulting fence
 *
 * Fill in the page table entries for @mapping.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Objects have to be reserved and the mutex must be locked!
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_bo_va_mapping *mapping,
				       uint64_t addr, uint32_t gtt_flags,
				       struct fence **fence)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	unsigned nptes, ncmds, ndw;
	uint32_t flags = gtt_flags;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	int r;

	/* normally, bo_va->flags only contains the READABLE and WRITEABLE
	 * bits here, but just in case we filter the flags first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	nptes = mapping->it.last - mapping->it.start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (flags & AMDGPU_PTE_SYSTEM) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}
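
	/*
	 * Sizing example (illustrative, assumed values): mapping 1024 GPU
	 * pages of VRAM with amdgpu_vm_block_size = 9 gives
	 * ncmds = (1024 >> 9) + 1 = 3, so ndw = 64 + 3 * 10 + 2 * 10 = 114
	 * dwords are reserved for the IB on the "set page" path above.
	 */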

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	if (r) {
		kfree(ib);
		return r;
	}

	ib->length_dw = 0;

	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
				  mapping->it.last + 1, addr + mapping->offset,
				  flags, gtt_flags);

	if (r) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
		return r;
	}

	amdgpu_vm_pad_ib(adev, ib);
	WARN_ON(ib->length_dw > ndw);
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(f);
	}
	fence_put(f);
	if (!amdgpu_enable_scheduler) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
	}
	return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Objects have to be reserved and the mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	uint32_t flags;
	uint64_t addr;
	int r;

	if (mem) {
		addr = (u64)mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_TT)
			addr += adev->vm_manager.vram_base_offset;
	} else {
		addr = 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
						flags, &bo_va->last_pt_update);
		if (r)
			return r;
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (!mem)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add fences to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);

	mutex_lock(&vm->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and gets unreserved by this function!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
		amdgpu_bo_unreserve(bo_va->bo);
		return -EINVAL;
	}

	/* make sure object fit at this offset */
	eaddr = saddr + size;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
		amdgpu_bo_unreserve(bo_va->bo);
		return -EINVAL;
	}

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn > adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		amdgpu_bo_unreserve(bo_va->bo);
		return -EINVAL;
	}

	mutex_lock(&vm->mutex);

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;
		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		amdgpu_bo_unreserve(bo_va->bo);
		r = -EINVAL;
		goto error_unlock;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		amdgpu_bo_unreserve(bo_va->bo);
		r = -ENOMEM;
		goto error_unlock;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr - 1;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->invalids);
	interval_tree_insert(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_map(bo_va, mapping);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	amdgpu_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		ww_mutex_lock(&resv->lock, NULL);
		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
				     NULL, resv, &pt);
		ww_mutex_unlock(&resv->lock);
		if (r)
			goto error_free;

		r = amdgpu_vm_clear_bo(adev, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			amdgpu_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return 0;

error_free:
	mutex_lock(&vm->mutex);
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error_unlock:
	mutex_unlock(&vm->mutex);
	return r;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and gets unreserved by this function!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids) {
			amdgpu_bo_unreserve(bo_va->bo);
			return -ENOENT;
		}
	}

	mutex_lock(&vm->mutex);
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		kfree(mapping);
	mutex_unlock(&vm->mutex);
	amdgpu_bo_unreserve(bo_va->bo);

	return 0;
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	mutex_lock(&vm->mutex);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}

	fence_put(bo_va->last_pt_update);
	kfree(bo_va);

	mutex_unlock(&vm->mutex);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		vm->ids[i].id = 0;
		vm->ids[i].flushed_updates = NULL;
		vm->ids[i].last_id_use = NULL;
	}
	mutex_init(&vm->mutex);
	vm->va = RB_ROOT;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	vm->page_directory_fence = NULL;

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			     NULL, NULL, &vm->page_directory);
	if (r)
		return r;

	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
	if (r) {
		amdgpu_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);
	fence_put(vm->page_directory_fence);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		fence_put(vm->ids[i].flushed_updates);
		amdgpu_fence_unref(&vm->ids[i].last_id_use);
	}

	mutex_destroy(&vm->mutex);
}