xref: /openbmc/linux/drivers/gpu/drm/radeon/radeon_vm.c (revision c819e2cf)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <drm/drmP.h>
29 #include <drm/radeon_drm.h>
30 #include "radeon.h"
31 #include "radeon_trace.h"
32 
33 /*
34  * GPUVM
35  * GPUVM is similar to the legacy gart on older asics; however,
36  * rather than there being a single global gart table
37  * for the entire GPU, there are multiple VM page tables active
38  * at any given time.  The VM page tables can contain a mix of
39  * vram pages and system memory pages, and system memory pages
40  * can be mapped as snooped (cached system pages) or unsnooped
41  * (uncached system pages).
42  * Each VM has an ID associated with it and there is a page table
43  * associated with each VMID.  When executing a command buffer,
44  * the kernel tells the ring what VMID to use for that command
45  * buffer.  VMIDs are allocated dynamically as commands are submitted.
46  * The userspace drivers maintain their own address space and the kernel
47  * sets up their page tables accordingly when they submit their
48  * command buffers and a VMID is assigned.
49  * Cayman/Trinity support up to 8 active VMs at any given time;
50  * SI supports 16.
51  */
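
/*
 * Illustrative address breakdown used throughout this file (derived from
 * the helpers below; the split point comes from the radeon_vm_block_size
 * module parameter):
 *
 *   pfn    = gpu_va >> RADEON_GPU_PAGE_SHIFT      4KB GPU pages
 *   pd_idx = pfn >> radeon_vm_block_size          selects the page table
 *   pte    = pfn & (RADEON_VM_PTE_COUNT - 1)      selects the entry inside it
 *
 * where RADEON_VM_PTE_COUNT is 1 << radeon_vm_block_size.
 */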
52 
53 /**
54  * radeon_vm_num_pdes - return the number of page directory entries
55  *
56  * @rdev: radeon_device pointer
57  *
58  * Calculate the number of page directory entries (cayman+).
59  */
60 static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
61 {
62 	return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
63 }
64 
65 /**
66  * radeon_vm_directory_size - returns the size of the page directory in bytes
67  *
68  * @rdev: radeon_device pointer
69  *
70  * Calculate the size of the page directory in bytes (cayman+).
71  */
72 static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
73 {
74 	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
75 }
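
/*
 * Worked example (values are illustrative; the real ones depend on the asic
 * and the radeon_vm_size/radeon_vm_block_size module parameters): assuming
 * max_pfn = 1 << 20 (4GB of VM space in 4KB pages) and a block size of 9,
 *
 *   radeon_vm_num_pdes()       = (1 << 20) >> 9                 = 2048 PDEs
 *   radeon_vm_directory_size() = RADEON_GPU_PAGE_ALIGN(2048 * 8) = 16KB
 *
 * i.e. each 8 byte PDE covers one page table of 512 PTEs, or 2MB of
 * address space.
 */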
76 
77 /**
78  * radeon_vm_manager_init - init the vm manager
79  *
80  * @rdev: radeon_device pointer
81  *
82  * Init the vm manager (cayman+).
83  * Returns 0 for success, error for failure.
84  */
85 int radeon_vm_manager_init(struct radeon_device *rdev)
86 {
87 	int r;
88 
89 	if (!rdev->vm_manager.enabled) {
90 		r = radeon_asic_vm_init(rdev);
91 		if (r)
92 			return r;
93 
94 		rdev->vm_manager.enabled = true;
95 	}
96 	return 0;
97 }
98 
99 /**
100  * radeon_vm_manager_fini - tear down the vm manager
101  *
102  * @rdev: radeon_device pointer
103  *
104  * Tear down the VM manager (cayman+).
105  */
106 void radeon_vm_manager_fini(struct radeon_device *rdev)
107 {
108 	int i;
109 
110 	if (!rdev->vm_manager.enabled)
111 		return;
112 
113 	for (i = 0; i < RADEON_NUM_VM; ++i)
114 		radeon_fence_unref(&rdev->vm_manager.active[i]);
115 	radeon_asic_vm_fini(rdev);
116 	rdev->vm_manager.enabled = false;
117 }
118 
119 /**
120  * radeon_vm_get_bos - add the vm BOs to a validation list
121  *
122  * @vm: vm providing the BOs
123  * @head: head of validation list
124  *
125  * Add the page directory and page tables to the list of
126  * BOs to validate for command submission (cayman+).
127  */
128 struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
129 					  struct radeon_vm *vm,
130 					  struct list_head *head)
131 {
132 	struct radeon_bo_list *list;
133 	unsigned i, idx;
134 
135 	list = drm_malloc_ab(vm->max_pde_used + 2,
136 			     sizeof(struct radeon_bo_list));
137 	if (!list)
138 		return NULL;
139 
140 	/* add the vm page table to the list */
141 	list[0].robj = vm->page_directory;
142 	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
143 	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
144 	list[0].tv.bo = &vm->page_directory->tbo;
145 	list[0].tv.shared = true;
146 	list[0].tiling_flags = 0;
147 	list_add(&list[0].tv.head, head);
148 
149 	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
150 		if (!vm->page_tables[i].bo)
151 			continue;
152 
153 		list[idx].robj = vm->page_tables[i].bo;
154 		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
155 		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
156 		list[idx].tv.bo = &list[idx].robj->tbo;
157 		list[idx].tv.shared = true;
158 		list[idx].tiling_flags = 0;
159 		list_add(&list[idx++].tv.head, head);
160 	}
161 
162 	return list;
163 }
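
/*
 * Usage sketch (not verbatim from the CS code; the validation step in the
 * middle is only hinted at): the caller owns both the returned array and
 * the reservation list built on @head.
 *
 *   struct list_head head;
 *   struct radeon_bo_list *vm_bos;
 *
 *   INIT_LIST_HEAD(&head);
 *   vm_bos = radeon_vm_get_bos(rdev, vm, &head);
 *   if (!vm_bos)
 *           return -ENOMEM;
 *   ... reserve and validate everything queued on &head ...
 *   drm_free_large(vm_bos);
 */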
164 
165 /**
166  * radeon_vm_grab_id - allocate the next free VMID
167  *
168  * @rdev: radeon_device pointer
169  * @vm: vm to allocate id for
170  * @ring: ring we want to submit job to
171  *
172  * Allocate an id for the vm (cayman+).
173  * Returns the fence we need to sync to (if any).
174  *
175  * Global and local mutex must be locked!
176  */
177 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
178 				       struct radeon_vm *vm, int ring)
179 {
180 	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
181 	struct radeon_vm_id *vm_id = &vm->ids[ring];
182 
183 	unsigned choices[2] = {};
184 	unsigned i;
185 
186 	/* check if the id is still valid */
187 	if (vm_id->id && vm_id->last_id_use &&
188 	    vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
189 		return NULL;
190 
191 	/* we definitely need to flush */
192 	vm_id->pd_gpu_addr = ~0ll;
193 
194 	/* skip over VMID 0, since it is the system VM */
195 	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
196 		struct radeon_fence *fence = rdev->vm_manager.active[i];
197 
198 		if (fence == NULL) {
199 			/* found a free one */
200 			vm_id->id = i;
201 			trace_radeon_vm_grab_id(i, ring);
202 			return NULL;
203 		}
204 
205 		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
206 			best[fence->ring] = fence;
207 			choices[fence->ring == ring ? 0 : 1] = i;
208 		}
209 	}
210 
211 	for (i = 0; i < 2; ++i) {
212 		if (choices[i]) {
213 			vm_id->id = choices[i];
214 			trace_radeon_vm_grab_id(choices[i], ring);
215 			return rdev->vm_manager.active[choices[i]];
216 		}
217 	}
218 
219 	/* should never happen */
220 	BUG();
221 	return NULL;
222 }
223 
224 /**
225  * radeon_vm_flush - hardware flush the vm
226  *
227  * @rdev: radeon_device pointer
228  * @vm: vm we want to flush
229  * @ring: ring to use for flush
230  * @updates: last vm update that is waited for
231  *
232  * Flush the vm (cayman+).
233  *
234  * Global and local mutex must be locked!
235  */
236 void radeon_vm_flush(struct radeon_device *rdev,
237 		     struct radeon_vm *vm,
238 		     int ring, struct radeon_fence *updates)
239 {
240 	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
241 	struct radeon_vm_id *vm_id = &vm->ids[ring];
242 
243 	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
244 	    radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {
245 
246 		trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
247 		radeon_fence_unref(&vm_id->flushed_updates);
248 		vm_id->flushed_updates = radeon_fence_ref(updates);
249 		vm_id->pd_gpu_addr = pd_addr;
250 		radeon_ring_vm_flush(rdev, &rdev->ring[ring],
251 				     vm_id->id, vm_id->pd_gpu_addr);
252 
253 	}
254 }
255 
256 /**
257  * radeon_vm_fence - remember fence for vm
258  *
259  * @rdev: radeon_device pointer
260  * @vm: vm we want to fence
261  * @fence: fence to remember
262  *
263  * Fence the vm (cayman+).
264  * Set the fence used to protect page table and id.
265  *
266  * Global and local mutex must be locked!
267  */
268 void radeon_vm_fence(struct radeon_device *rdev,
269 		     struct radeon_vm *vm,
270 		     struct radeon_fence *fence)
271 {
272 	unsigned vm_id = vm->ids[fence->ring].id;
273 
274 	radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
275 	rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
276 
277 	radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
278 	vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
279 }
280 
281 /**
282  * radeon_vm_bo_find - find the bo_va for a specific vm & bo
283  *
284  * @vm: requested vm
285  * @bo: requested buffer object
286  *
287  * Find @bo inside the requested vm (cayman+).
288  * Search inside the @bo's vm list for the requested vm.
289  * Returns the found bo_va or NULL if none is found.
290  *
291  * Object has to be reserved!
292  */
293 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
294 				       struct radeon_bo *bo)
295 {
296 	struct radeon_bo_va *bo_va;
297 
298 	list_for_each_entry(bo_va, &bo->va, bo_list) {
299 		if (bo_va->vm == vm) {
300 			return bo_va;
301 		}
302 	}
303 	return NULL;
304 }
305 
306 /**
307  * radeon_vm_bo_add - add a bo to a specific vm
308  *
309  * @rdev: radeon_device pointer
310  * @vm: requested vm
311  * @bo: radeon buffer object
312  *
313  * Add @bo into the requested vm (cayman+).
314  * Add @bo to the list of bos associated with the vm.
315  * Returns the newly added bo_va or NULL on failure.
316  *
317  * Object has to be reserved!
318  */
319 struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
320 				      struct radeon_vm *vm,
321 				      struct radeon_bo *bo)
322 {
323 	struct radeon_bo_va *bo_va;
324 
325 	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
326 	if (bo_va == NULL) {
327 		return NULL;
328 	}
329 	bo_va->vm = vm;
330 	bo_va->bo = bo;
331 	bo_va->it.start = 0;
332 	bo_va->it.last = 0;
333 	bo_va->flags = 0;
334 	bo_va->addr = 0;
335 	bo_va->ref_count = 1;
336 	INIT_LIST_HEAD(&bo_va->bo_list);
337 	INIT_LIST_HEAD(&bo_va->vm_status);
338 
339 	mutex_lock(&vm->mutex);
340 	list_add_tail(&bo_va->bo_list, &bo->va);
341 	mutex_unlock(&vm->mutex);
342 
343 	return bo_va;
344 }
345 
346 /**
347  * radeon_vm_set_pages - helper to call the right asic function
348  *
349  * @rdev: radeon_device pointer
350  * @ib: indirect buffer to fill with commands
351  * @pe: addr of the page entry
352  * @addr: dst addr to write into pe
353  * @count: number of page entries to update
354  * @incr: increase next addr by incr bytes
355  * @flags: hw access flags
356  *
357  * Traces the parameters and calls the right asic functions
358  * to setup the page table using the DMA.
359  */
360 static void radeon_vm_set_pages(struct radeon_device *rdev,
361 				struct radeon_ib *ib,
362 				uint64_t pe,
363 				uint64_t addr, unsigned count,
364 				uint32_t incr, uint32_t flags)
365 {
366 	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
367 
368 	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
369 		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
370 		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
371 
372 	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
373 		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
374 					   count, incr, flags);
375 
376 	} else {
377 		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
378 					 count, incr, flags);
379 	}
380 }
381 
382 /**
383  * radeon_vm_clear_bo - initially clear the page dir/table
384  *
385  * @rdev: radeon_device pointer
386  * @bo: bo to clear
387  */
388 static int radeon_vm_clear_bo(struct radeon_device *rdev,
389 			      struct radeon_bo *bo)
390 {
391 	struct radeon_ib ib;
392 	unsigned entries;
393 	uint64_t addr;
394 	int r;
395 
396 	r = radeon_bo_reserve(bo, false);
397 	if (r)
398 		return r;
399 
400 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
401 	if (r)
402 		goto error_unreserve;
403 
404 	addr = radeon_bo_gpu_offset(bo);
405 	entries = radeon_bo_size(bo) / 8;
406 
407 	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
408 	if (r)
409 		goto error_unreserve;
410 
411 	ib.length_dw = 0;
412 
413 	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
414 	radeon_asic_vm_pad_ib(rdev, &ib);
415 	WARN_ON(ib.length_dw > 64);
416 
417 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
418 	if (r)
419 		goto error_free;
420 
421 	ib.fence->is_vm_update = true;
422 	radeon_bo_fence(bo, ib.fence, false);
423 
424 error_free:
425 	radeon_ib_free(rdev, &ib);
426 
427 error_unreserve:
428 	radeon_bo_unreserve(bo);
429 	return r;
430 }
431 
432 /**
433  * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
434  *
435  * @rdev: radeon_device pointer
436  * @bo_va: bo_va to store the address
437  * @soffset: requested offset of the buffer in the VM address space
438  * @flags: attributes of pages (read/write/valid/etc.)
439  *
440  * Set offset of @bo_va (cayman+).
441  * Validate and set the offset requested within the vm address space.
442  * Returns 0 for success, error for failure.
443  *
444  * Object has to be reserved and gets unreserved by this function!
445  */
446 int radeon_vm_bo_set_addr(struct radeon_device *rdev,
447 			  struct radeon_bo_va *bo_va,
448 			  uint64_t soffset,
449 			  uint32_t flags)
450 {
451 	uint64_t size = radeon_bo_size(bo_va->bo);
452 	struct radeon_vm *vm = bo_va->vm;
453 	unsigned last_pfn, pt_idx;
454 	uint64_t eoffset;
455 	int r;
456 
457 	if (soffset) {
458 		/* make sure the object fits at this offset */
459 		eoffset = soffset + size;
460 		if (soffset >= eoffset) {
461 			return -EINVAL;
462 		}
463 
464 		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
465 		if (last_pfn > rdev->vm_manager.max_pfn) {
466 			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
467 				last_pfn, rdev->vm_manager.max_pfn);
468 			return -EINVAL;
469 		}
470 
471 	} else {
472 		eoffset = last_pfn = 0;
473 	}
474 
475 	mutex_lock(&vm->mutex);
476 	if (bo_va->it.start || bo_va->it.last) {
477 		if (bo_va->addr) {
478 			/* add a clone of the bo_va to clear the old address */
479 			struct radeon_bo_va *tmp;
480 			tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
481 			if (!tmp) {
482 				mutex_unlock(&vm->mutex);
483 				return -ENOMEM;
484 			}
485 			tmp->it.start = bo_va->it.start;
486 			tmp->it.last = bo_va->it.last;
487 			tmp->vm = vm;
488 			tmp->addr = bo_va->addr;
489 			tmp->bo = radeon_bo_ref(bo_va->bo);
490 			spin_lock(&vm->status_lock);
491 			list_add(&tmp->vm_status, &vm->freed);
492 			spin_unlock(&vm->status_lock);
493 		}
494 
495 		interval_tree_remove(&bo_va->it, &vm->va);
496 		bo_va->it.start = 0;
497 		bo_va->it.last = 0;
498 	}
499 
500 	soffset /= RADEON_GPU_PAGE_SIZE;
501 	eoffset /= RADEON_GPU_PAGE_SIZE;
502 	if (soffset || eoffset) {
503 		struct interval_tree_node *it;
504 		it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
505 		if (it) {
506 			struct radeon_bo_va *tmp;
507 			tmp = container_of(it, struct radeon_bo_va, it);
508 			/* bo and tmp overlap, invalid offset */
509 			dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
510 				"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
511 				soffset, tmp->bo, tmp->it.start, tmp->it.last);
512 			mutex_unlock(&vm->mutex);
513 			return -EINVAL;
514 		}
515 		bo_va->it.start = soffset;
516 		bo_va->it.last = eoffset - 1;
517 		interval_tree_insert(&bo_va->it, &vm->va);
518 	}
519 
520 	bo_va->flags = flags;
521 	bo_va->addr = 0;
522 
523 	soffset >>= radeon_vm_block_size;
524 	eoffset >>= radeon_vm_block_size;
525 
526 	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
527 
528 	if (eoffset > vm->max_pde_used)
529 		vm->max_pde_used = eoffset;
530 
531 	radeon_bo_unreserve(bo_va->bo);
532 
533 	/* walk over the address space and allocate the page tables */
534 	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
535 		struct radeon_bo *pt;
536 
537 		if (vm->page_tables[pt_idx].bo)
538 			continue;
539 
540 		/* drop mutex to allocate and clear page table */
541 		mutex_unlock(&vm->mutex);
542 
543 		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
544 				     RADEON_GPU_PAGE_SIZE, true,
545 				     RADEON_GEM_DOMAIN_VRAM, 0,
546 				     NULL, NULL, &pt);
547 		if (r)
548 			return r;
549 
550 		r = radeon_vm_clear_bo(rdev, pt);
551 		if (r) {
552 			radeon_bo_unref(&pt);
553 			radeon_bo_reserve(bo_va->bo, false);
554 			return r;
555 		}
556 
557 		/* acquire mutex again */
558 		mutex_lock(&vm->mutex);
559 		if (vm->page_tables[pt_idx].bo) {
560 			/* someone else allocated the pt in the meantime */
561 			mutex_unlock(&vm->mutex);
562 			radeon_bo_unref(&pt);
563 			mutex_lock(&vm->mutex);
564 			continue;
565 		}
566 
567 		vm->page_tables[pt_idx].addr = 0;
568 		vm->page_tables[pt_idx].bo = pt;
569 	}
570 
571 	mutex_unlock(&vm->mutex);
572 	return 0;
573 }
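
/*
 * Usage sketch (illustrative, loosely following the GEM VA ioctl path;
 * va_offset/va_flags are made-up names): the bo must be reserved by the
 * caller and is unreserved by radeon_vm_bo_set_addr() itself.
 *
 *   r = radeon_bo_reserve(bo, false);
 *   if (r)
 *           return r;
 *   bo_va = radeon_vm_bo_find(vm, bo);
 *   if (!bo_va)
 *           bo_va = radeon_vm_bo_add(rdev, vm, bo);
 *   if (!bo_va) {
 *           radeon_bo_unreserve(bo);
 *           return -ENOMEM;
 *   }
 *   r = radeon_vm_bo_set_addr(rdev, bo_va, va_offset, va_flags);
 *   ... bo is already unreserved at this point ...
 */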
574 
575 /**
576  * radeon_vm_map_gart - get the physical address of a gart page
577  *
578  * @rdev: radeon_device pointer
579  * @addr: the unmapped addr
580  *
581  * Look up the physical address of the page that the pte resolves
582  * to (cayman+).
583  * Returns the physical address of the page.
584  */
585 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
586 {
587 	uint64_t result;
588 
589 	/* page table offset */
590 	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
591 	result &= ~RADEON_GPU_PAGE_MASK;
592 
593 	return result;
594 }
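
/*
 * Illustrative example (the table content below is made up): for
 * addr = 0x00201234 the GART index is 0x00201234 >> 12 = 0x201.  If
 * gart.pages_entry[0x201] holds 0xabcde00c (page address plus low GART
 * flag bits), the function returns 0xabcde000, i.e. the bus address of
 * the backing page with the low RADEON_GPU_PAGE_MASK bits cleared.
 */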
595 
596 /**
597  * radeon_vm_page_flags - translate page flags to what the hw uses
598  *
599  * @flags: flags coming from userspace
600  *
601  * Translate the flags the userspace ABI uses to hw flags.
602  */
603 static uint32_t radeon_vm_page_flags(uint32_t flags)
604 {
605 	uint32_t hw_flags = 0;
606 	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
607 	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
608 	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
609 	if (flags & RADEON_VM_PAGE_SYSTEM) {
610 		hw_flags |= R600_PTE_SYSTEM;
611 		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
612 	}
613 	return hw_flags;
614 }
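
/*
 * Example translation (follows directly from the code above): a snooped
 * system memory mapping requested by userspace as
 *
 *   RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE |
 *   RADEON_VM_PAGE_WRITEABLE | RADEON_VM_PAGE_SYSTEM | RADEON_VM_PAGE_SNOOPED
 *
 * becomes
 *
 *   R600_PTE_VALID | R600_PTE_READABLE |
 *   R600_PTE_WRITEABLE | R600_PTE_SYSTEM | R600_PTE_SNOOPED
 *
 * Note that RADEON_VM_PAGE_SNOOPED is only honored together with
 * RADEON_VM_PAGE_SYSTEM.
 */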
615 
616 /**
617  * radeon_vm_update_page_directory - make sure that page directory is valid
618  *
619  * @rdev: radeon_device pointer
620  * @vm: requested vm
621  *
622  * Walks the page tables currently allocated for @vm and updates
623  * the corresponding page directory entries so that they point at
624  * the right page tables (cayman+).
626  * Returns 0 for success, error for failure.
627  *
628  * Global and local mutex must be locked!
629  */
630 int radeon_vm_update_page_directory(struct radeon_device *rdev,
631 				    struct radeon_vm *vm)
632 {
633 	struct radeon_bo *pd = vm->page_directory;
634 	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
635 	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
636 	uint64_t last_pde = ~0, last_pt = ~0;
637 	unsigned count = 0, pt_idx, ndw;
638 	struct radeon_ib ib;
639 	int r;
640 
641 	/* padding, etc. */
642 	ndw = 64;
643 
644 	/* assume the worst case */
645 	ndw += vm->max_pde_used * 6;
646 
647 	/* update too big for an IB */
648 	if (ndw > 0xfffff)
649 		return -ENOMEM;
650 
651 	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
652 	if (r)
653 		return r;
654 	ib.length_dw = 0;
655 
656 	/* walk over the address space and update the page directory */
657 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
658 		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
659 		uint64_t pde, pt;
660 
661 		if (bo == NULL)
662 			continue;
663 
664 		pt = radeon_bo_gpu_offset(bo);
665 		if (vm->page_tables[pt_idx].addr == pt)
666 			continue;
667 		vm->page_tables[pt_idx].addr = pt;
668 
669 		pde = pd_addr + pt_idx * 8;
670 		if (((last_pde + 8 * count) != pde) ||
671 		    ((last_pt + incr * count) != pt)) {
672 
673 			if (count) {
674 				radeon_vm_set_pages(rdev, &ib, last_pde,
675 						    last_pt, count, incr,
676 						    R600_PTE_VALID);
677 			}
678 
679 			count = 1;
680 			last_pde = pde;
681 			last_pt = pt;
682 		} else {
683 			++count;
684 		}
685 	}
686 
687 	if (count)
688 		radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
689 				    incr, R600_PTE_VALID);
690 
691 	if (ib.length_dw != 0) {
692 		radeon_asic_vm_pad_ib(rdev, &ib);
693 
694 		radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
695 		WARN_ON(ib.length_dw > ndw);
696 		r = radeon_ib_schedule(rdev, &ib, NULL, false);
697 		if (r) {
698 			radeon_ib_free(rdev, &ib);
699 			return r;
700 		}
701 		ib.fence->is_vm_update = true;
702 		radeon_bo_fence(pd, ib.fence, false);
703 	}
704 	radeon_ib_free(rdev, &ib);
705 
706 	return 0;
707 }
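
/*
 * Worked example of the PDE coalescing above (addresses are illustrative):
 * with incr = RADEON_VM_PTE_COUNT * 8, three page tables that happen to sit
 * back to back in VRAM at pt, pt + incr and pt + 2 * incr have their PDEs
 * at pd_addr + 8 * i, pd_addr + 8 * (i + 1) and pd_addr + 8 * (i + 2)
 * written with a single radeon_vm_set_pages(..., count = 3, ...) call
 * instead of three separate ones.
 */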
708 
709 /**
710  * radeon_vm_frag_ptes - add fragment information to PTEs
711  *
712  * @rdev: radeon_device pointer
713  * @ib: IB for the update
714  * @pe_start: first PTE to handle
715  * @pe_end: last PTE to handle
716  * @addr: addr those PTEs should point to
717  * @flags: hw mapping flags
718  *
719  * Global and local mutex must be locked!
720  */
721 static void radeon_vm_frag_ptes(struct radeon_device *rdev,
722 				struct radeon_ib *ib,
723 				uint64_t pe_start, uint64_t pe_end,
724 				uint64_t addr, uint32_t flags)
725 {
726 	/**
727 	 * The MC L1 TLB supports variable sized pages, based on a fragment
728 	 * field in the PTE. When this field is set to a non-zero value, page
729 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
730 	 * flags are considered valid for all PTEs within the fragment range
731 	 * and corresponding mappings are assumed to be physically contiguous.
732 	 *
733 	 * The L1 TLB can store a single PTE for the whole fragment,
734 	 * significantly increasing the space available for translation
735 	 * caching. This leads to large improvements in throughput when the
736 	 * TLB is under pressure.
737 	 *
738 	 * The L2 TLB distributes small and large fragments into two
739 	 * asymmetric partitions. The large fragment cache is significantly
740 	 * larger. Thus, we try to use large fragments wherever possible.
741 	 * Userspace can support this by aligning virtual base address and
742 	 * allocation size to the fragment size.
743 	 */
744 
745 	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
746 	uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
747 			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
748 	uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
749 
750 	uint64_t frag_start = ALIGN(pe_start, frag_align);
751 	uint64_t frag_end = pe_end & ~(frag_align - 1);
752 
753 	unsigned count;
754 
755 	/* system pages are not contiguous, so fragments can't be used */
756 	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
757 	    (frag_start >= frag_end)) {
758 
759 		count = (pe_end - pe_start) / 8;
760 		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
761 				    RADEON_GPU_PAGE_SIZE, flags);
762 		return;
763 	}
764 
765 	/* handle the 4K area at the beginning */
766 	if (pe_start != frag_start) {
767 		count = (frag_start - pe_start) / 8;
768 		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
769 				    RADEON_GPU_PAGE_SIZE, flags);
770 		addr += RADEON_GPU_PAGE_SIZE * count;
771 	}
772 
773 	/* handle the area in the middle */
774 	count = (frag_end - frag_start) / 8;
775 	radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
776 			    RADEON_GPU_PAGE_SIZE, flags | frag_flags);
777 
778 	/* handle the 4K area at the end */
779 	if (frag_end != pe_end) {
780 		addr += RADEON_GPU_PAGE_SIZE * count;
781 		count = (pe_end - frag_end) / 8;
782 		radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
783 				    RADEON_GPU_PAGE_SIZE, flags);
784 	}
785 }
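
/*
 * Worked example (SI numbers, addresses are illustrative): with
 * frag_align = 0x80 (16 PTEs, i.e. a 64KB fragment), a request with
 * pe_start = 0x9028 and pe_end = 0x9220 is split as
 *
 *   frag_start = ALIGN(0x9028, 0x80) = 0x9080
 *   frag_end   = 0x9220 & ~0x7f      = 0x9200
 *
 *   head:   11 PTEs [0x9028, 0x9080)  plain 4KB flags
 *   middle: 48 PTEs [0x9080, 0x9200)  flags | R600_PTE_FRAG_64KB
 *   tail:    4 PTEs [0x9200, 0x9220)  plain 4KB flags
 *
 * On Cayman the same logic runs with frag_align = 0x200 (256KB fragments).
 */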
786 
787 /**
788  * radeon_vm_update_ptes - make sure that page tables are valid
789  *
790  * @rdev: radeon_device pointer
791  * @vm: requested vm
792  * @start: start of GPU address range
793  * @end: end of GPU address range
794  * @dst: destination address to map to
795  * @flags: mapping flags
796  *
797  * Update the page tables in the range @start - @end (cayman+).
798  *
799  * Global and local mutex must be locked!
800  */
801 static int radeon_vm_update_ptes(struct radeon_device *rdev,
802 				 struct radeon_vm *vm,
803 				 struct radeon_ib *ib,
804 				 uint64_t start, uint64_t end,
805 				 uint64_t dst, uint32_t flags)
806 {
807 	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
808 	uint64_t last_pte = ~0, last_dst = ~0;
809 	unsigned count = 0;
810 	uint64_t addr;
811 
812 	/* walk over the address space and update the page tables */
813 	for (addr = start; addr < end; ) {
814 		uint64_t pt_idx = addr >> radeon_vm_block_size;
815 		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
816 		unsigned nptes;
817 		uint64_t pte;
818 		int r;
819 
820 		radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
821 		r = reservation_object_reserve_shared(pt->tbo.resv);
822 		if (r)
823 			return r;
824 
825 		if ((addr & ~mask) == (end & ~mask))
826 			nptes = end - addr;
827 		else
828 			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
829 
830 		pte = radeon_bo_gpu_offset(pt);
831 		pte += (addr & mask) * 8;
832 
833 		if ((last_pte + 8 * count) != pte) {
834 
835 			if (count) {
836 				radeon_vm_frag_ptes(rdev, ib, last_pte,
837 						    last_pte + 8 * count,
838 						    last_dst, flags);
839 			}
840 
841 			count = nptes;
842 			last_pte = pte;
843 			last_dst = dst;
844 		} else {
845 			count += nptes;
846 		}
847 
848 		addr += nptes;
849 		dst += nptes * RADEON_GPU_PAGE_SIZE;
850 	}
851 
852 	if (count) {
853 		radeon_vm_frag_ptes(rdev, ib, last_pte,
854 				    last_pte + 8 * count,
855 				    last_dst, flags);
856 	}
857 
858 	return 0;
859 }
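
/*
 * Worked example of the nptes split above (assuming, for illustration,
 * radeon_vm_block_size = 9 and thus RADEON_VM_PTE_COUNT = 512): updating
 * the PTE range [500, 1030) touches three page tables and is walked as
 *
 *   addr =  500: nptes = 512 - (500 & 511) = 12   rest of page table 0
 *   addr =  512: nptes = 512                      all of page table 1
 *   addr = 1024: nptes = 1030 - 1024 = 6          start of page table 2
 *
 * Writes whose destination PTEs turn out to be contiguous in memory are
 * still merged into a single radeon_vm_frag_ptes() call by the
 * count/last_pte bookkeeping.
 */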
860 
861 /**
862  * radeon_vm_fence_pts - fence page tables after an update
863  *
864  * @vm: requested vm
865  * @start: start of GPU address range
866  * @end: end of GPU address range
867  * @fence: fence to use
868  *
869  * Fence the page tables in the range @start - @end (cayman+).
870  *
871  * Global and local mutex must be locked!
872  */
873 static void radeon_vm_fence_pts(struct radeon_vm *vm,
874 				uint64_t start, uint64_t end,
875 				struct radeon_fence *fence)
876 {
877 	unsigned i;
878 
879 	start >>= radeon_vm_block_size;
880 	end >>= radeon_vm_block_size;
881 
882 	for (i = start; i <= end; ++i)
883 		radeon_bo_fence(vm->page_tables[i].bo, fence, true);
884 }
885 
886 /**
887  * radeon_vm_bo_update - map a bo into the vm page table
888  *
889  * @rdev: radeon_device pointer
890  * @bo_va: requested bo_va
891  * @mem: ttm mem, or NULL to clear the mapping
892  *
893  * Fill in the page table entries for @bo_va (cayman+).
895  * Returns 0 for success, -EINVAL for failure.
896  *
897  * Object has to be reserved and mutex must be locked!
898  */
899 int radeon_vm_bo_update(struct radeon_device *rdev,
900 			struct radeon_bo_va *bo_va,
901 			struct ttm_mem_reg *mem)
902 {
903 	struct radeon_vm *vm = bo_va->vm;
904 	struct radeon_ib ib;
905 	unsigned nptes, ncmds, ndw;
906 	uint64_t addr;
907 	uint32_t flags;
908 	int r;
909 
910 	if (!bo_va->it.start) {
911 		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
912 			bo_va->bo, vm);
913 		return -EINVAL;
914 	}
915 
916 	spin_lock(&vm->status_lock);
917 	list_del_init(&bo_va->vm_status);
918 	spin_unlock(&vm->status_lock);
919 
920 	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
921 	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
922 	bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
923 	if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
924 		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
925 
926 	if (mem) {
927 		addr = mem->start << PAGE_SHIFT;
928 		if (mem->mem_type != TTM_PL_SYSTEM) {
929 			bo_va->flags |= RADEON_VM_PAGE_VALID;
930 		}
931 		if (mem->mem_type == TTM_PL_TT) {
932 			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
933 			if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
934 				bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
935 
936 		} else {
937 			addr += rdev->vm_manager.vram_base_offset;
938 		}
939 	} else {
940 		addr = 0;
941 	}
942 
943 	if (addr == bo_va->addr)
944 		return 0;
945 	bo_va->addr = addr;
946 
947 	trace_radeon_vm_bo_update(bo_va);
948 
949 	nptes = bo_va->it.last - bo_va->it.start + 1;
950 
951 	/* reserve space for one command every (1 << BLOCK_SIZE) entries
952 	   or 2k dwords (whatever is smaller) */
953 	ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
954 
955 	/* padding, etc. */
956 	ndw = 64;
957 
958 	flags = radeon_vm_page_flags(bo_va->flags);
959 	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
960 		/* only copy commands needed */
961 		ndw += ncmds * 7;
962 
963 	} else if (flags & R600_PTE_SYSTEM) {
964 		/* header for write data commands */
965 		ndw += ncmds * 4;
966 
967 		/* body of write data command */
968 		ndw += nptes * 2;
969 
970 	} else {
971 		/* set page commands needed */
972 		ndw += ncmds * 10;
973 
974 		/* two extra commands for begin/end of fragment */
975 		ndw += 2 * 10;
976 	}
977 
978 	/* update too big for an IB */
979 	if (ndw > 0xfffff)
980 		return -ENOMEM;
981 
982 	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
983 	if (r)
984 		return r;
985 	ib.length_dw = 0;
986 
987 	if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
988 		unsigned i;
989 
990 		for (i = 0; i < RADEON_NUM_RINGS; ++i)
991 			radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
992 	}
993 
994 	r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
995 				  bo_va->it.last + 1, addr,
996 				  radeon_vm_page_flags(bo_va->flags));
997 	if (r) {
998 		radeon_ib_free(rdev, &ib);
999 		return r;
1000 	}
1001 
1002 	radeon_asic_vm_pad_ib(rdev, &ib);
1003 	WARN_ON(ib.length_dw > ndw);
1004 
1005 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
1006 	if (r) {
1007 		radeon_ib_free(rdev, &ib);
1008 		return r;
1009 	}
1010 	ib.fence->is_vm_update = true;
1011 	radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
1012 	radeon_fence_unref(&bo_va->last_pt_update);
1013 	bo_va->last_pt_update = radeon_fence_ref(ib.fence);
1014 	radeon_ib_free(rdev, &ib);
1015 
1016 	return 0;
1017 }
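
/*
 * Worked example of radeon_vm_bo_update()'s command size estimate
 * (illustrative values, assuming radeon_vm_block_size = 9): for a VRAM
 * mapping of nptes = 1000 pages, ncmds = (1000 >> 9) + 1 = 2, so
 * ndw = 64 + 2 * 10 + 2 * 10 = 104 dwords and the IB is requested with
 * room for 104 * 4 = 416 bytes.
 */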
1018 
1019 /**
1020  * radeon_vm_clear_freed - clear freed BOs in the PT
1021  *
1022  * @rdev: radeon_device pointer
1023  * @vm: requested vm
1024  *
1025  * Make sure all freed BOs are cleared in the PT.
1026  * Returns 0 for success.
1027  *
1028  * PTs have to be reserved and mutex must be locked!
1029  */
1030 int radeon_vm_clear_freed(struct radeon_device *rdev,
1031 			  struct radeon_vm *vm)
1032 {
1033 	struct radeon_bo_va *bo_va;
1034 	int r;
1035 
1036 	spin_lock(&vm->status_lock);
1037 	while (!list_empty(&vm->freed)) {
1038 		bo_va = list_first_entry(&vm->freed,
1039 			struct radeon_bo_va, vm_status);
1040 		spin_unlock(&vm->status_lock);
1041 
1042 		r = radeon_vm_bo_update(rdev, bo_va, NULL);
1043 		radeon_bo_unref(&bo_va->bo);
1044 		radeon_fence_unref(&bo_va->last_pt_update);
1045 		kfree(bo_va);
1046 		if (r)
1047 			return r;
1048 
1049 		spin_lock(&vm->status_lock);
1050 	}
1051 	spin_unlock(&vm->status_lock);
1052 	return 0;
1054 }
1055 
1056 /**
1057  * radeon_vm_clear_invalids - clear invalidated BOs in the PT
1058  *
1059  * @rdev: radeon_device pointer
1060  * @vm: requested vm
1061  *
1062  * Make sure all invalidated BOs are cleared in the PT.
1063  * Returns 0 for success.
1064  *
1065  * PTs have to be reserved and mutex must be locked!
1066  */
1067 int radeon_vm_clear_invalids(struct radeon_device *rdev,
1068 			     struct radeon_vm *vm)
1069 {
1070 	struct radeon_bo_va *bo_va;
1071 	int r;
1072 
1073 	spin_lock(&vm->status_lock);
1074 	while (!list_empty(&vm->invalidated)) {
1075 		bo_va = list_first_entry(&vm->invalidated,
1076 			struct radeon_bo_va, vm_status);
1077 		spin_unlock(&vm->status_lock);
1078 
1079 		r = radeon_vm_bo_update(rdev, bo_va, NULL);
1080 		if (r)
1081 			return r;
1082 
1083 		spin_lock(&vm->status_lock);
1084 	}
1085 	spin_unlock(&vm->status_lock);
1086 
1087 	return 0;
1088 }
1089 
1090 /**
1091  * radeon_vm_bo_rmv - remove a bo from a specific vm
1092  *
1093  * @rdev: radeon_device pointer
1094  * @bo_va: requested bo_va
1095  *
1096  * Remove @bo_va->bo from the requested vm (cayman+).
1097  *
1098  * Object has to be reserved!
1099  */
1100 void radeon_vm_bo_rmv(struct radeon_device *rdev,
1101 		      struct radeon_bo_va *bo_va)
1102 {
1103 	struct radeon_vm *vm = bo_va->vm;
1104 
1105 	list_del(&bo_va->bo_list);
1106 
1107 	mutex_lock(&vm->mutex);
1108 	interval_tree_remove(&bo_va->it, &vm->va);
1109 	spin_lock(&vm->status_lock);
1110 	list_del(&bo_va->vm_status);
1111 
1112 	if (bo_va->addr) {
1113 		bo_va->bo = radeon_bo_ref(bo_va->bo);
1114 		list_add(&bo_va->vm_status, &vm->freed);
1115 	} else {
1116 		radeon_fence_unref(&bo_va->last_pt_update);
1117 		kfree(bo_va);
1118 	}
1119 	spin_unlock(&vm->status_lock);
1120 
1121 	mutex_unlock(&vm->mutex);
1122 }
1123 
1124 /**
1125  * radeon_vm_bo_invalidate - mark the bo as invalid
1126  *
1127  * @rdev: radeon_device pointer
1128  * @vm: requested vm
1129  * @bo: radeon buffer object
1130  *
1131  * Mark @bo as invalid (cayman+).
1132  */
1133 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1134 			     struct radeon_bo *bo)
1135 {
1136 	struct radeon_bo_va *bo_va;
1137 
1138 	list_for_each_entry(bo_va, &bo->va, bo_list) {
1139 		if (bo_va->addr) {
1140 			spin_lock(&bo_va->vm->status_lock);
1141 			list_del(&bo_va->vm_status);
1142 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1143 			spin_unlock(&bo_va->vm->status_lock);
1144 		}
1145 	}
1146 }
1147 
1148 /**
1149  * radeon_vm_init - initialize a vm instance
1150  *
1151  * @rdev: radeon_device pointer
1152  * @vm: requested vm
1153  *
1154  * Init @vm fields (cayman+).
1155  */
1156 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1157 {
1158 	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
1159 		RADEON_VM_PTE_COUNT * 8);
1160 	unsigned pd_size, pd_entries, pts_size;
1161 	int i, r;
1162 
1163 	vm->ib_bo_va = NULL;
1164 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1165 		vm->ids[i].id = 0;
1166 		vm->ids[i].flushed_updates = NULL;
1167 		vm->ids[i].last_id_use = NULL;
1168 	}
1169 	mutex_init(&vm->mutex);
1170 	vm->va = RB_ROOT;
1171 	spin_lock_init(&vm->status_lock);
1172 	INIT_LIST_HEAD(&vm->invalidated);
1173 	INIT_LIST_HEAD(&vm->freed);
1174 
1175 	pd_size = radeon_vm_directory_size(rdev);
1176 	pd_entries = radeon_vm_num_pdes(rdev);
1177 
1178 	/* allocate page table array */
1179 	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
1180 	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
1181 	if (vm->page_tables == NULL) {
1182 		DRM_ERROR("Cannot allocate memory for page table array\n");
1183 		return -ENOMEM;
1184 	}
1185 
1186 	r = radeon_bo_create(rdev, pd_size, align, true,
1187 			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
1188 			     NULL, &vm->page_directory);
1189 	if (r)
1190 		return r;
1191 
1192 	r = radeon_vm_clear_bo(rdev, vm->page_directory);
1193 	if (r) {
1194 		radeon_bo_unref(&vm->page_directory);
1195 		vm->page_directory = NULL;
1196 		return r;
1197 	}
1198 
1199 	return 0;
1200 }
1201 
1202 /**
1203  * radeon_vm_fini - tear down a vm instance
1204  *
1205  * @rdev: radeon_device pointer
1206  * @vm: requested vm
1207  *
1208  * Tear down @vm (cayman+).
1209  * Unbind the VM and remove all bos from the vm bo list
1210  */
1211 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1212 {
1213 	struct radeon_bo_va *bo_va, *tmp;
1214 	int i, r;
1215 
1216 	if (!RB_EMPTY_ROOT(&vm->va)) {
1217 		dev_err(rdev->dev, "still active bo inside vm\n");
1218 	}
1219 	rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
1220 		interval_tree_remove(&bo_va->it, &vm->va);
1221 		r = radeon_bo_reserve(bo_va->bo, false);
1222 		if (!r) {
1223 			list_del_init(&bo_va->bo_list);
1224 			radeon_bo_unreserve(bo_va->bo);
1225 			radeon_fence_unref(&bo_va->last_pt_update);
1226 			kfree(bo_va);
1227 		}
1228 	}
1229 	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
1230 		radeon_bo_unref(&bo_va->bo);
1231 		radeon_fence_unref(&bo_va->last_pt_update);
1232 		kfree(bo_va);
1233 	}
1234 
1235 	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
1236 		radeon_bo_unref(&vm->page_tables[i].bo);
1237 	kfree(vm->page_tables);
1238 
1239 	radeon_bo_unref(&vm->page_directory);
1240 
1241 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1242 		radeon_fence_unref(&vm->ids[i].flushed_updates);
1243 		radeon_fence_unref(&vm->ids[i].last_id_use);
1244 	}
1245 
1246 	mutex_destroy(&vm->mutex);
1247 }
1248