xref: /openbmc/linux/drivers/gpu/drm/radeon/radeon_vm.c (revision d2999e1b)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
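
/*
 * Illustrative sketch (not part of the driver): with the definitions in
 * this file, a GPU virtual address is resolved in two steps.  The page
 * frame number selects a page directory entry, and the remaining low
 * bits select a PTE inside the corresponding page table:
 *
 *	pfn     = va / RADEON_GPU_PAGE_SIZE;
 *	pde_idx = pfn >> radeon_vm_block_size;
 *	pte_idx = pfn & (RADEON_VM_PTE_COUNT - 1);
 *
 * radeon_vm_update_ptes() below performs exactly this split when it
 * walks an address range.
 */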

/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
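
/*
 * Worked example (assumed numbers, for illustration only): with
 * max_pfn = 1 << 20 (a 4GB address space of 4KB pages) and
 * radeon_vm_block_size = 9, radeon_vm_num_pdes() yields 1 << 11 = 2048
 * directory entries, so the directory occupies
 * RADEON_GPU_PAGE_ALIGN(2048 * 8) = 16KB of VRAM.
 */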

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->vm_manager.enabled) {
		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;
	}
	return 0;
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	for (i = 0; i < RADEON_NUM_VM; ++i)
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	radeon_asic_vm_fini(rdev);
	rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory and page tables to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
					  struct radeon_vm *vm,
					  struct list_head *head)
{
	struct radeon_cs_reloc *list;
	unsigned i, idx;

	list = kmalloc_array(vm->max_pde_used + 2,
			     sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	list[0].gobj = NULL;
	list[0].robj = vm->page_directory;
	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tiling_flags = 0;
	list[0].handle = 0;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].gobj = NULL;
		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tiling_flags = 0;
		list[idx].handle = 0;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}
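
/*
 * Sizing note: the validation list holds one entry for the page directory
 * plus up to max_pde_used + 1 page tables (the loop above is inclusive),
 * which matches the max_pde_used + 2 entries allocated.
 */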

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
		return NULL;

	/* we definitely need to flush */
	radeon_fence_unref(&vm->last_flush);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm->id = i;
			trace_radeon_vm_grab_id(vm->id, ring);
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm->id = choices[i];
			trace_radeon_vm_grab_id(vm->id, ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}
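
/*
 * Selection policy (informal summary, not authoritative): choices[0]
 * tracks a reusable VMID whose active fence is on the requested ring,
 * choices[1] one whose fence is on any other ring.  Preferring
 * choices[0] means the fence the caller must sync to is on the same
 * ring, avoiding an inter-ring synchronization before the new job runs.
 */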

/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     int ring)
{
	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);

	/* if we can't remember our last VM flush then flush now! */
	/* XXX figure out why we have to flush all the time */
	if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
		vm->pd_gpu_addr = pd_addr;
		radeon_ring_vm_flush(rdev, ring, vm);
	}
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(fence);

	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->last_id_use);
	vm->last_id_use = radeon_fence_ref(fence);

	/* we just flushed the VM, remember that */
	if (!vm->last_flush)
		vm->last_flush = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->soffset = 0;
	bo_va->eoffset = 0;
	bo_va->flags = 0;
	bo_va->valid = false;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);

	mutex_lock(&vm->mutex);
	list_add(&bo_va->vm_list, &vm->va);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 *
 * Returns 0 for success, error for failure.
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
			      struct radeon_bo *bo)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct radeon_ib ib;
	unsigned entries;
	uint64_t addr;
	int r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = radeon_bo_gpu_offset(bo);
	entries = radeon_bo_size(bo) / 8;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
			  NULL, entries * 2 + 64);
	if (r)
		goto error;

	ib.length_dw = 0;

	radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		goto error;

	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
	radeon_ib_free(rdev, &ib);

	return 0;

error:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}
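
/*
 * Sizing note (illustrative assumption): the IB is requested with
 * entries * 2 + 64 dwords; two dwords per cleared 8-byte entry bound the
 * set_page payload, and the extra 64 dwords mirror the "padding, etc."
 * budget used for the IBs built elsewhere in this file.
 */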

/**
 * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set the offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	uint64_t eoffset, last_offset = 0;
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_bo_va *tmp;
	struct list_head *head;
	unsigned last_pfn, pt_idx;
	int r;

	if (soffset) {
		/* make sure the object fits at this offset */
		eoffset = soffset + size;
		if (soffset >= eoffset) {
			return -EINVAL;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn > rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			return -EINVAL;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	head = &vm->va;
	last_offset = 0;
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va == tmp) {
			/* skip over currently modified bo */
			continue;
		}

		if (soffset >= last_offset && eoffset <= tmp->soffset) {
			/* bo can be added before this one */
			break;
		}
		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	}

	bo_va->soffset = soffset;
	bo_va->eoffset = eoffset;
	bo_va->flags = flags;
	bo_va->valid = false;
	list_move(&bo_va->vm_list, head);

	soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
	eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;

	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));

	if (eoffset > vm->max_pde_used)
		vm->max_pde_used = eoffset;

	radeon_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
		struct radeon_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, false,
				     RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
		if (r)
			return r;

		r = radeon_vm_clear_bo(rdev, pt);
		if (r) {
			radeon_bo_unref(&pt);
			radeon_bo_reserve(bo_va->bo, false);
			return r;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			radeon_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return radeon_bo_reserve(bo_va->bo, false);
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}
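
/*
 * Worked example (hypothetical address): for addr = 0x12345678 and a 4KB
 * CPU page size, the lookup returns
 *
 *	pages_addr[0x12345] | 0x678
 *
 * i.e. the physical base of the backing system page OR'ed with the
 * offset of the GPU page inside that CPU page.
 */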

/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

/**
 * radeon_vm_update_page_directory - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	struct radeon_bo *pd = vm->page_directory;
	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct radeon_ib ib;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 16;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = radeon_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_asic_vm_set_page(rdev, &ib, last_pde,
							last_pt, count, incr,
							R600_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
					incr, R600_PTE_VALID);

	if (ib.length_dw != 0) {
		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
		r = radeon_ib_schedule(rdev, &ib, NULL);
		if (r) {
			radeon_ib_free(rdev, &ib);
			return r;
		}
		radeon_fence_unref(&vm->fence);
		vm->fence = radeon_fence_ref(ib.fence);
		radeon_fence_unref(&vm->last_flush);
	}
	radeon_ib_free(rdev, &ib);

	return 0;
}
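
/*
 * Illustration (assumed layout): the loop above batches contiguous runs
 * into a single radeon_asic_vm_set_page() call.  If page tables 0..3 sit
 * back to back in VRAM, their four PDEs are written with one command
 * (count = 4, stride incr); a gap in either the PDE addresses or the
 * page table addresses flushes the run and starts a new one.
 */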

/**
 * radeon_vm_frag_ptes - add fragment information to PTEs
 *
 * @rdev: radeon_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_frag_ptes(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/*
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
	uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
	uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are not physically contiguous */
	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
					RADEON_GPU_PAGE_SIZE, flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
					RADEON_GPU_PAGE_SIZE, flags);
		addr += RADEON_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
				RADEON_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += RADEON_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
					RADEON_GPU_PAGE_SIZE, flags);
	}
}
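
/*
 * Worked example (assumed SI numbers): with frag_align = 0x80 (16 PTEs,
 * i.e. one 64KB fragment), an update of PTEs at byte offsets
 * pe_start = 0x30 .. pe_end = 0x210 is split into three writes:
 *
 *	0x030 - 0x080	10 PTEs as plain 4KB pages	(head)
 *	0x080 - 0x200	48 PTEs with R600_PTE_FRAG_64KB	(middle)
 *	0x200 - 0x210	 2 PTEs as plain 4KB pages	(tail)
 */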

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: IB for the update
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
				  struct radeon_vm *vm,
				  struct radeon_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	start = start / RADEON_GPU_PAGE_SIZE;
	end = end / RADEON_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> radeon_vm_block_size;
		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;

		radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_vm_frag_ptes(rdev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_vm_frag_ptes(rdev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags);
	}
}
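
/*
 * Illustration (assumed numbers): with RADEON_VM_PTE_COUNT = 512, a
 * mapping from pfn 500 to pfn 1030 is processed in three windows, each
 * clipped to its page table:
 *
 *	pfn  500 - 512	nptes = 12	(rest of page table 0)
 *	pfn  512 - 1024	nptes = 512	(all of page table 1)
 *	pfn 1024 - 1030	nptes = 6	(start of page table 2)
 */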

/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_vm *vm,
			struct radeon_bo *bo,
			struct ttm_mem_reg *mem)
{
	struct radeon_ib ib;
	struct radeon_bo_va *bo_va;
	unsigned nptes, ndw;
	uint64_t addr;
	int r;

	bo_va = radeon_vm_bo_find(vm, bo);
	if (bo_va == NULL) {
		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
		return -EINVAL;
	}

	if (!bo_va->soffset) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo, vm);
		return -EINVAL;
	}

	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
		return 0;

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
			bo_va->valid = true;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
		bo_va->valid = false;
	}

	trace_radeon_vm_bo_update(bo_va);

	nptes = radeon_bo_ngpu_pages(bo);

	/* padding, etc. */
	ndw = 64;

	if (radeon_vm_block_size > 11)
		/* reserve space for one header for every 2k dwords */
		ndw += (nptes >> 11) * 4;
	else
		/* reserve space for one header for
		   every (1 << BLOCK_SIZE) entries */
		ndw += (nptes >> radeon_vm_block_size) * 4;

	/* reserve space for pte addresses */
	ndw += nptes * 2;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
			      addr, radeon_vm_page_flags(bo_va->flags));

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	return 0;
}
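
/*
 * Worked example (assumed numbers): mapping a 1MB BO (nptes = 256) with
 * radeon_vm_block_size = 9 budgets ndw = 64 + (256 >> 9) * 4 + 256 * 2
 * = 576 dwords, comfortably below the 0xfffff IB limit checked above.
 */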

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
 * remove the ptes for @bo_va in the page table.
 * Returns 0 for success.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_bo_va *bo_va)
{
	int r = 0;

	mutex_lock(&bo_va->vm->mutex);
	if (bo_va->soffset)
		r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);

	list_del(&bo_va->vm_list);
	mutex_unlock(&bo_va->vm->mutex);
	list_del(&bo_va->bo_list);

	kfree(bo_va);
	return r;
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
		RADEON_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int r;

	vm->id = 0;
	vm->fence = NULL;
	vm->last_flush = NULL;
	vm->last_id_use = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->va);

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = radeon_bo_create(rdev, pd_size, align, false,
			     RADEON_GEM_DOMAIN_VRAM, NULL,
			     &vm->page_directory);
	if (r)
		return r;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r) {
		radeon_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int i, r;

	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	radeon_bo_unref(&vm->page_directory);

	radeon_fence_unref(&vm->fence);
	radeon_fence_unref(&vm->last_flush);
	radeon_fence_unref(&vm->last_id_use);

	mutex_destroy(&vm->mutex);
}