1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/dma-fence-array.h>
29 #include <drm/drmP.h>
30 #include <drm/amdgpu_drm.h>
31 #include "amdgpu.h"
32 #include "amdgpu_trace.h"
33 
34 /*
35  * GPUVM
36  * GPUVM is similar to the legacy gart on older asics, however
37  * rather than there being a single global gart table
38  * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
43  * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
46  * buffer.  VMIDs are allocated dynamically as commands are submitted.
47  * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
49  * command buffers and a VMID is assigned.
50  * Cayman/Trinity support up to 8 active VMs at any given time;
51  * SI supports 16.
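 *
 * Address handling note: virtual addresses are first converted to GPU
 * page numbers (va / AMDGPU_GPU_PAGE_SIZE).  The page directory index is
 * then (page_num >> amdgpu_vm_block_size) and the PTE index within that
 * page table is (page_num & (AMDGPU_VM_PTE_COUNT - 1)), with
 * AMDGPU_VM_PTE_COUNT == 1 << amdgpu_vm_block_size.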
52  */
53 
54 /* Local structure. Encapsulate some VM table update parameters to reduce
55  * the number of function parameters
56  */
57 struct amdgpu_pte_update_params {
58 	/* amdgpu device we do this update for */
59 	struct amdgpu_device *adev;
60 	/* address where to copy page table entries from */
61 	uint64_t src;
62 	/* indirect buffer to fill with commands */
63 	struct amdgpu_ib *ib;
64 	/* Function which actually does the update */
65 	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
66 		     uint64_t addr, unsigned count, uint32_t incr,
67 		     uint32_t flags);
68 	/* indicate update pt or its shadow */
69 	bool shadow;
70 };
71 
72 /**
 * amdgpu_vm_num_pdes - return the number of page directory entries
74  *
75  * @adev: amdgpu_device pointer
76  *
77  * Calculate the number of page directory entries.
78  */
79 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
80 {
81 	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
82 }
83 
84 /**
85  * amdgpu_vm_directory_size - returns the size of the page directory in bytes
86  *
87  * @adev: amdgpu_device pointer
88  *
89  * Calculate the size of the page directory in bytes.
90  */
91 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
92 {
93 	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
94 }
95 
96 /**
97  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
98  *
99  * @vm: vm providing the BOs
100  * @validated: head of validation list
101  * @entry: entry to add
102  *
103  * Add the page directory to the list of BOs to
104  * validate for command submission.
105  */
106 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
107 			 struct list_head *validated,
108 			 struct amdgpu_bo_list_entry *entry)
109 {
110 	entry->robj = vm->page_directory;
111 	entry->priority = 0;
112 	entry->tv.bo = &vm->page_directory->tbo;
113 	entry->tv.shared = true;
114 	entry->user_pages = NULL;
115 	list_add(&entry->tv.head, validated);
116 }
117 
118 /**
119  * amdgpu_vm_validate_pt_bos - validate the page table BOs
120  *
121  * @adev: amdgpu device pointer
122  * @vm: vm providing the BOs
123  * @validate: callback to do the validation
124  * @param: parameter for the validation callback
125  *
 * Validate the page table BOs on command submission if necessary.
127  */
128 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
129 			      int (*validate)(void *p, struct amdgpu_bo *bo),
130 			      void *param)
131 {
132 	uint64_t num_evictions;
133 	unsigned i;
134 	int r;
135 
136 	/* We only need to validate the page tables
137 	 * if they aren't already valid.
138 	 */
139 	num_evictions = atomic64_read(&adev->num_evictions);
140 	if (num_evictions == vm->last_eviction_counter)
141 		return 0;
142 
143 	/* add the vm page table to the list */
144 	for (i = 0; i <= vm->max_pde_used; ++i) {
145 		struct amdgpu_bo *bo = vm->page_tables[i].bo;
146 
147 		if (!bo)
148 			continue;
149 
150 		r = validate(param, bo);
151 		if (r)
152 			return r;
153 	}
154 
155 	return 0;
156 }
157 
158 /**
159  * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
160  *
161  * @adev: amdgpu device instance
162  * @vm: vm providing the BOs
163  *
164  * Move the PT BOs to the tail of the LRU.
165  */
166 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
167 				  struct amdgpu_vm *vm)
168 {
169 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
170 	unsigned i;
171 
172 	spin_lock(&glob->lru_lock);
173 	for (i = 0; i <= vm->max_pde_used; ++i) {
174 		struct amdgpu_bo *bo = vm->page_tables[i].bo;
175 
176 		if (!bo)
177 			continue;
178 
179 		ttm_bo_move_to_lru_tail(&bo->tbo);
180 	}
181 	spin_unlock(&glob->lru_lock);
182 }
183 
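/**
 * amdgpu_vm_is_gpu_reset - check if the VMID state predates a GPU reset
 *
 * @adev: amdgpu_device pointer
 * @id: VMID to check
 *
 * Returns true if the GPU reset counter has advanced since @id last
 * recorded it, i.e. the state associated with the ID is stale.
 */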
184 static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
185 			      struct amdgpu_vm_id *id)
186 {
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
189 }
190 
191 /**
192  * amdgpu_vm_grab_id - allocate the next free VMID
193  *
194  * @vm: vm to allocate id for
195  * @ring: ring we want to submit job to
196  * @sync: sync object where we add dependencies
 * @fence: fence protecting ID from reuse
 * @job: job who wants to use the VMID
198  *
199  * Allocate an id for the vm, adding fences to the sync obj as necessary.
200  */
201 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
202 		      struct amdgpu_sync *sync, struct dma_fence *fence,
203 		      struct amdgpu_job *job)
204 {
205 	struct amdgpu_device *adev = ring->adev;
206 	uint64_t fence_context = adev->fence_context + ring->idx;
207 	struct dma_fence *updates = sync->last_vm_update;
208 	struct amdgpu_vm_id *id, *idle;
209 	struct dma_fence **fences;
210 	unsigned i;
211 	int r = 0;
212 
	fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
			       GFP_KERNEL);
215 	if (!fences)
216 		return -ENOMEM;
217 
218 	mutex_lock(&adev->vm_manager.lock);
219 
220 	/* Check if we have an idle VMID */
221 	i = 0;
222 	list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
223 		fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
224 		if (!fences[i])
225 			break;
226 		++i;
227 	}
228 
	/* If we can't find an idle VMID to use, wait till one becomes available */
230 	if (&idle->list == &adev->vm_manager.ids_lru) {
231 		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
232 		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
233 		struct dma_fence_array *array;
234 		unsigned j;
235 
236 		for (j = 0; j < i; ++j)
237 			dma_fence_get(fences[j]);
238 
239 		array = dma_fence_array_create(i, fences, fence_context,
240 					   seqno, true);
241 		if (!array) {
242 			for (j = 0; j < i; ++j)
243 				dma_fence_put(fences[j]);
244 			kfree(fences);
245 			r = -ENOMEM;
246 			goto error;
247 		}
248 
250 		r = amdgpu_sync_fence(ring->adev, sync, &array->base);
251 		dma_fence_put(&array->base);
252 		if (r)
253 			goto error;
254 
255 		mutex_unlock(&adev->vm_manager.lock);
256 		return 0;
257 
258 	}
259 	kfree(fences);
260 
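	/* Assume a flush is needed; cleared below if we can reuse a VMID
	 * whose page directory and flush state are already up to date.
	 */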
261 	job->vm_needs_flush = true;
262 	/* Check if we can use a VMID already assigned to this VM */
263 	i = ring->idx;
264 	do {
265 		struct dma_fence *flushed;
266 
267 		id = vm->ids[i++];
268 		if (i == AMDGPU_MAX_RINGS)
269 			i = 0;
270 
271 		/* Check all the prerequisites to using this VMID */
272 		if (!id)
273 			continue;
274 		if (amdgpu_vm_is_gpu_reset(adev, id))
275 			continue;
276 
277 		if (atomic64_read(&id->owner) != vm->client_id)
278 			continue;
279 
280 		if (job->vm_pd_addr != id->pd_gpu_addr)
281 			continue;
282 
283 		if (!id->last_flush)
284 			continue;
285 
286 		if (id->last_flush->context != fence_context &&
287 		    !dma_fence_is_signaled(id->last_flush))
288 			continue;
289 
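		/* don't reuse the VMID if our page table updates are newer
		 * than what was flushed with it last
		 */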
290 		flushed  = id->flushed_updates;
291 		if (updates &&
292 		    (!flushed || dma_fence_is_later(updates, flushed)))
293 			continue;
294 
295 		/* Good we can use this VMID. Remember this submission as
296 		 * user of the VMID.
297 		 */
298 		r = amdgpu_sync_fence(ring->adev, &id->active, fence);
299 		if (r)
300 			goto error;
301 
302 		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
303 		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
304 		vm->ids[ring->idx] = id;
305 
306 		job->vm_id = id - adev->vm_manager.ids;
307 		job->vm_needs_flush = false;
308 		trace_amdgpu_vm_grab_id(vm, ring->idx, job);
309 
310 		mutex_unlock(&adev->vm_manager.lock);
311 		return 0;
312 
313 	} while (i != ring->idx);
314 
315 	/* Still no ID to use? Then use the idle one found earlier */
316 	id = idle;
317 
318 	/* Remember this submission as user of the VMID */
319 	r = amdgpu_sync_fence(ring->adev, &id->active, fence);
320 	if (r)
321 		goto error;
322 
323 	dma_fence_put(id->first);
324 	id->first = dma_fence_get(fence);
325 
326 	dma_fence_put(id->last_flush);
327 	id->last_flush = NULL;
328 
329 	dma_fence_put(id->flushed_updates);
330 	id->flushed_updates = dma_fence_get(updates);
331 
332 	id->pd_gpu_addr = job->vm_pd_addr;
333 	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
334 	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
335 	atomic64_set(&id->owner, vm->client_id);
336 	vm->ids[ring->idx] = id;
337 
338 	job->vm_id = id - adev->vm_manager.ids;
339 	trace_amdgpu_vm_grab_id(vm, ring->idx, job);
340 
341 error:
342 	mutex_unlock(&adev->vm_manager.lock);
343 	return r;
344 }
345 
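/**
 * amdgpu_vm_ring_has_compute_vm_bug - check if a pipeline sync workaround is needed
 *
 * @ring: ring to check
 *
 * Returns true if @ring is a compute ring on a GFX version that still
 * needs a pipeline sync to work around the compute VM bug.
 */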
346 static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
347 {
348 	struct amdgpu_device *adev = ring->adev;
349 	const struct amdgpu_ip_block *ip_block;
350 
351 	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
352 		/* only compute rings */
353 		return false;
354 
355 	ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
356 	if (!ip_block)
357 		return false;
358 
359 	if (ip_block->version->major <= 7) {
360 		/* gfx7 has no workaround */
361 		return true;
362 	} else if (ip_block->version->major == 8) {
363 		if (adev->gfx.mec_fw_version >= 673)
364 			/* gfx8 is fixed in MEC firmware 673 */
365 			return false;
366 		else
367 			return true;
368 	}
369 	return false;
370 }
371 
372 /**
373  * amdgpu_vm_flush - hardware flush the vm
374  *
375  * @ring: ring to use for flush
 * @job: job carrying the VMID, page directory address and GDS state
378  *
379  * Emit a VM flush when it is necessary.
380  */
381 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
382 {
383 	struct amdgpu_device *adev = ring->adev;
384 	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
385 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
386 		id->gds_base != job->gds_base ||
387 		id->gds_size != job->gds_size ||
388 		id->gws_base != job->gws_base ||
389 		id->gws_size != job->gws_size ||
390 		id->oa_base != job->oa_base ||
391 		id->oa_size != job->oa_size);
392 	int r;
393 
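	/* Wait for outstanding work on the ring before changing VM state if
	 * we are going to flush, switch GDS or need the compute VM bug
	 * workaround.
	 */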
394 	if (ring->funcs->emit_pipeline_sync && (
395 	    job->vm_needs_flush || gds_switch_needed ||
396 	    amdgpu_vm_ring_has_compute_vm_bug(ring)))
397 		amdgpu_ring_emit_pipeline_sync(ring);
398 
399 	if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
400 	    amdgpu_vm_is_gpu_reset(adev, id))) {
401 		struct dma_fence *fence;
402 
403 		trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
404 		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
405 
406 		r = amdgpu_fence_emit(ring, &fence);
407 		if (r)
408 			return r;
409 
410 		mutex_lock(&adev->vm_manager.lock);
411 		dma_fence_put(id->last_flush);
412 		id->last_flush = fence;
413 		mutex_unlock(&adev->vm_manager.lock);
414 	}
415 
416 	if (gds_switch_needed) {
417 		id->gds_base = job->gds_base;
418 		id->gds_size = job->gds_size;
419 		id->gws_base = job->gws_base;
420 		id->gws_size = job->gws_size;
421 		id->oa_base = job->oa_base;
422 		id->oa_size = job->oa_size;
423 		amdgpu_ring_emit_gds_switch(ring, job->vm_id,
424 					    job->gds_base, job->gds_size,
425 					    job->gws_base, job->gws_size,
426 					    job->oa_base, job->oa_size);
427 	}
428 
429 	return 0;
430 }
431 
432 /**
433  * amdgpu_vm_reset_id - reset VMID to zero
434  *
435  * @adev: amdgpu device structure
436  * @vm_id: vmid number to use
437  *
 * Reset saved GDS, GWS and OA to force switch on next flush.
439  */
440 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
441 {
442 	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
443 
444 	id->gds_base = 0;
445 	id->gds_size = 0;
446 	id->gws_base = 0;
447 	id->gws_size = 0;
448 	id->oa_base = 0;
449 	id->oa_size = 0;
450 }
451 
452 /**
453  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
454  *
455  * @vm: requested vm
456  * @bo: requested buffer object
457  *
458  * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
461  *
462  * Object has to be reserved!
463  */
464 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
465 				       struct amdgpu_bo *bo)
466 {
467 	struct amdgpu_bo_va *bo_va;
468 
469 	list_for_each_entry(bo_va, &bo->va, bo_list) {
470 		if (bo_va->vm == vm) {
471 			return bo_va;
472 		}
473 	}
474 	return NULL;
475 }
476 
477 /**
478  * amdgpu_vm_do_set_ptes - helper to call the right asic function
479  *
480  * @params: see amdgpu_pte_update_params definition
481  * @pe: addr of the page entry
482  * @addr: dst addr to write into pe
483  * @count: number of page entries to update
484  * @incr: increase next addr by incr bytes
485  * @flags: hw access flags
486  *
487  * Traces the parameters and calls the right asic functions
488  * to setup the page table using the DMA.
489  */
490 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
491 				  uint64_t pe, uint64_t addr,
492 				  unsigned count, uint32_t incr,
493 				  uint32_t flags)
494 {
495 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
496 
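	/* For just a few entries write the PTEs directly into the IB,
	 * otherwise use the ASIC's set_pte_pde path which generates the
	 * entries on the fly.
	 */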
497 	if (count < 3) {
498 		amdgpu_vm_write_pte(params->adev, params->ib, pe,
499 				    addr | flags, count, incr);
500 
501 	} else {
502 		amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
503 				      count, incr, flags);
504 	}
505 }
506 
507 /**
508  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
509  *
510  * @params: see amdgpu_pte_update_params definition
511  * @pe: addr of the page entry
512  * @addr: dst addr to write into pe
513  * @count: number of page entries to update
514  * @incr: increase next addr by incr bytes
515  * @flags: hw access flags
516  *
517  * Traces the parameters and calls the DMA function to copy the PTEs.
518  */
519 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
520 				   uint64_t pe, uint64_t addr,
521 				   unsigned count, uint32_t incr,
522 				   uint32_t flags)
523 {
524 	uint64_t src = (params->src + (addr >> 12) * 8);
525 
527 	trace_amdgpu_vm_copy_ptes(pe, src, count);
528 
529 	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
530 }
531 
532 /**
533  * amdgpu_vm_clear_bo - initially clear the page dir/table
534  *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: bo to clear
 *
 * The BO has to be reserved before calling this.
539  */
540 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
541 			      struct amdgpu_vm *vm,
542 			      struct amdgpu_bo *bo)
543 {
544 	struct amdgpu_ring *ring;
545 	struct dma_fence *fence = NULL;
546 	struct amdgpu_job *job;
547 	struct amdgpu_pte_update_params params;
548 	unsigned entries;
549 	uint64_t addr;
550 	int r;
551 
552 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
553 
554 	r = reservation_object_reserve_shared(bo->tbo.resv);
555 	if (r)
556 		return r;
557 
558 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
559 	if (r)
560 		goto error;
561 
562 	r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
563 	if (r)
564 		goto error;
565 
566 	addr = amdgpu_bo_gpu_offset(bo);
567 	entries = amdgpu_bo_size(bo) / 8;
568 
569 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
570 	if (r)
571 		goto error;
572 
573 	memset(&params, 0, sizeof(params));
574 	params.adev = adev;
575 	params.ib = &job->ibs[0];
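	/* fill the whole BO with zeroed, i.e. invalid, entries */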
576 	amdgpu_vm_do_set_ptes(&params, addr, 0, entries, 0, 0);
577 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
578 
579 	WARN_ON(job->ibs[0].length_dw > 64);
580 	r = amdgpu_job_submit(job, ring, &vm->entity,
581 			      AMDGPU_FENCE_OWNER_VM, &fence);
582 	if (r)
583 		goto error_free;
584 
585 	amdgpu_bo_fence(bo, fence, true);
586 	dma_fence_put(fence);
587 	return 0;
588 
589 error_free:
590 	amdgpu_job_free(job);
591 
592 error:
593 	return r;
594 }
595 
596 /**
597  * amdgpu_vm_map_gart - Resolve gart mapping of addr
598  *
599  * @pages_addr: optional DMA address to use for lookup
600  * @addr: the unmapped addr
601  *
602  * Look up the physical address of the page that the pte resolves
603  * to and return the pointer for the page table entry.
604  */
605 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
606 {
607 	uint64_t result;
608 
609 	/* page table offset */
610 	result = pages_addr[addr >> PAGE_SHIFT];
611 
	/* in case cpu page size != gpu page size */
613 	result |= addr & (~PAGE_MASK);
614 
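	/* clear the low 12 bits, PTE addresses must be GPU page aligned */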
615 	result &= 0xFFFFFFFFFFFFF000ULL;
616 
617 	return result;
618 }
619 
/**
 * amdgpu_vm_update_page_directory - make sure that the page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Check the page directory entries and update them to point
 * to the current page tables.
630  * Returns 0 for success, error for failure.
631  */
632 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
633 				    struct amdgpu_vm *vm)
634 {
635 	struct amdgpu_bo *shadow;
636 	struct amdgpu_ring *ring;
637 	uint64_t pd_addr, shadow_addr;
638 	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
639 	uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
640 	unsigned count = 0, pt_idx, ndw;
641 	struct amdgpu_job *job;
642 	struct amdgpu_pte_update_params params;
643 	struct dma_fence *fence = NULL;
644 
645 	int r;
646 
647 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
648 	shadow = vm->page_directory->shadow;
649 
650 	/* padding, etc. */
651 	ndw = 64;
652 
653 	/* assume the worst case */
654 	ndw += vm->max_pde_used * 6;
655 
656 	pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
657 	if (shadow) {
658 		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
659 		if (r)
660 			return r;
661 		shadow_addr = amdgpu_bo_gpu_offset(shadow);
662 		ndw *= 2;
663 	} else {
664 		shadow_addr = 0;
665 	}
666 
667 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
668 	if (r)
669 		return r;
670 
671 	memset(&params, 0, sizeof(params));
672 	params.adev = adev;
673 	params.ib = &job->ibs[0];
674 
675 	/* walk over the address space and update the page directory */
676 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
677 		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
678 		uint64_t pde, pt;
679 
680 		if (bo == NULL)
681 			continue;
682 
683 		if (bo->shadow) {
684 			struct amdgpu_bo *pt_shadow = bo->shadow;
685 
686 			r = amdgpu_ttm_bind(&pt_shadow->tbo,
687 					    &pt_shadow->tbo.mem);
688 			if (r)
689 				return r;
690 		}
691 
692 		pt = amdgpu_bo_gpu_offset(bo);
693 		if (vm->page_tables[pt_idx].addr == pt)
694 			continue;
695 
696 		vm->page_tables[pt_idx].addr = pt;
697 
698 		pde = pd_addr + pt_idx * 8;
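		/* coalesce runs of consecutive PDEs pointing to contiguous
		 * page tables and emit the pending update when the run breaks
		 * or gets too large
		 */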
699 		if (((last_pde + 8 * count) != pde) ||
700 		    ((last_pt + incr * count) != pt) ||
701 		    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
702 
703 			if (count) {
704 				if (shadow)
705 					amdgpu_vm_do_set_ptes(&params,
706 							      last_shadow,
707 							      last_pt, count,
708 							      incr,
709 							      AMDGPU_PTE_VALID);
710 
711 				amdgpu_vm_do_set_ptes(&params, last_pde,
712 						      last_pt, count, incr,
713 						      AMDGPU_PTE_VALID);
714 			}
715 
716 			count = 1;
717 			last_pde = pde;
718 			last_shadow = shadow_addr + pt_idx * 8;
719 			last_pt = pt;
720 		} else {
721 			++count;
722 		}
723 	}
724 
725 	if (count) {
726 		if (vm->page_directory->shadow)
727 			amdgpu_vm_do_set_ptes(&params, last_shadow, last_pt,
728 					      count, incr, AMDGPU_PTE_VALID);
729 
730 		amdgpu_vm_do_set_ptes(&params, last_pde, last_pt,
731 				      count, incr, AMDGPU_PTE_VALID);
732 	}
733 
734 	if (params.ib->length_dw == 0) {
735 		amdgpu_job_free(job);
736 		return 0;
737 	}
738 
739 	amdgpu_ring_pad_ib(ring, params.ib);
740 	amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
741 			 AMDGPU_FENCE_OWNER_VM);
742 	if (shadow)
743 		amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
744 				 AMDGPU_FENCE_OWNER_VM);
745 
746 	WARN_ON(params.ib->length_dw > ndw);
747 	r = amdgpu_job_submit(job, ring, &vm->entity,
748 			      AMDGPU_FENCE_OWNER_VM, &fence);
749 	if (r)
750 		goto error_free;
751 
752 	amdgpu_bo_fence(vm->page_directory, fence, true);
753 	dma_fence_put(vm->page_directory_fence);
754 	vm->page_directory_fence = dma_fence_get(fence);
755 	dma_fence_put(fence);
756 
757 	return 0;
758 
759 error_free:
760 	amdgpu_job_free(job);
761 	return r;
762 }
763 
764 /**
765  * amdgpu_vm_update_ptes - make sure that page tables are valid
766  *
767  * @params: see amdgpu_pte_update_params definition
768  * @vm: requested vm
769  * @start: start of GPU address range
770  * @end: end of GPU address range
771  * @dst: destination address to map to, the next dst inside the function
772  * @flags: mapping flags
773  *
774  * Update the page tables in the range @start - @end.
775  */
776 static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
777 				  struct amdgpu_vm *vm,
778 				  uint64_t start, uint64_t end,
779 				  uint64_t dst, uint32_t flags)
780 {
781 	const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
782 
783 	uint64_t cur_pe_start, cur_nptes, cur_dst;
784 	uint64_t addr; /* next GPU address to be updated */
785 	uint64_t pt_idx;
786 	struct amdgpu_bo *pt;
787 	unsigned nptes; /* next number of ptes to be updated */
788 	uint64_t next_pe_start;
789 
790 	/* initialize the variables */
791 	addr = start;
792 	pt_idx = addr >> amdgpu_vm_block_size;
793 	pt = vm->page_tables[pt_idx].bo;
794 	if (params->shadow) {
795 		if (!pt->shadow)
796 			return;
797 		pt = pt->shadow;
798 	}
799 	if ((addr & ~mask) == (end & ~mask))
800 		nptes = end - addr;
801 	else
802 		nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
803 
804 	cur_pe_start = amdgpu_bo_gpu_offset(pt);
805 	cur_pe_start += (addr & mask) * 8;
806 	cur_nptes = nptes;
807 	cur_dst = dst;
808 
	/* for next ptb */
810 	addr += nptes;
811 	dst += nptes * AMDGPU_GPU_PAGE_SIZE;
812 
813 	/* walk over the address space and update the page tables */
814 	while (addr < end) {
815 		pt_idx = addr >> amdgpu_vm_block_size;
816 		pt = vm->page_tables[pt_idx].bo;
817 		if (params->shadow) {
818 			if (!pt->shadow)
819 				return;
820 			pt = pt->shadow;
821 		}
822 
823 		if ((addr & ~mask) == (end & ~mask))
824 			nptes = end - addr;
825 		else
826 			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
827 
828 		next_pe_start = amdgpu_bo_gpu_offset(pt);
829 		next_pe_start += (addr & mask) * 8;
830 
831 		if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
832 		    ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
			/* The next ptb is consecutive to the current ptb.
			 * Don't call the update function now, both ptbs
			 * will be updated together later.
			 */
837 			cur_nptes += nptes;
838 		} else {
839 			params->func(params, cur_pe_start, cur_dst, cur_nptes,
840 				     AMDGPU_GPU_PAGE_SIZE, flags);
841 
842 			cur_pe_start = next_pe_start;
843 			cur_nptes = nptes;
844 			cur_dst = dst;
845 		}
846 
		/* for next ptb */
848 		addr += nptes;
849 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
850 	}
851 
852 	params->func(params, cur_pe_start, cur_dst, cur_nptes,
853 		     AMDGPU_GPU_PAGE_SIZE, flags);
854 }
855 
/**
857  * amdgpu_vm_frag_ptes - add fragment information to PTEs
858  *
859  * @params: see amdgpu_pte_update_params definition
860  * @vm: requested vm
861  * @start: first PTE to handle
862  * @end: last PTE to handle
863  * @dst: addr those PTEs should point to
864  * @flags: hw mapping flags
865  */
866 static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params	*params,
867 				struct amdgpu_vm *vm,
868 				uint64_t start, uint64_t end,
869 				uint64_t dst, uint32_t flags)
870 {
871 	/**
872 	 * The MC L1 TLB supports variable sized pages, based on a fragment
873 	 * field in the PTE. When this field is set to a non-zero value, page
874 	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
875 	 * flags are considered valid for all PTEs within the fragment range
876 	 * and corresponding mappings are assumed to be physically contiguous.
877 	 *
878 	 * The L1 TLB can store a single PTE for the whole fragment,
879 	 * significantly increasing the space available for translation
880 	 * caching. This leads to large improvements in throughput when the
881 	 * TLB is under pressure.
882 	 *
883 	 * The L2 TLB distributes small and large fragments into two
884 	 * asymmetric partitions. The large fragment cache is significantly
885 	 * larger. Thus, we try to use large fragments wherever possible.
886 	 * Userspace can support this by aligning virtual base address and
887 	 * allocation size to the fragment size.
888 	 */
889 
890 	/* SI and newer are optimized for 64KB */
891 	uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
892 	uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
893 
894 	uint64_t frag_start = ALIGN(start, frag_align);
895 	uint64_t frag_end = end & ~(frag_align - 1);
896 
	/* system pages are not physically contiguous */
898 	if (params->src || !(flags & AMDGPU_PTE_VALID) ||
899 	    (frag_start >= frag_end)) {
900 
901 		amdgpu_vm_update_ptes(params, vm, start, end, dst, flags);
902 		return;
903 	}
904 
905 	/* handle the 4K area at the beginning */
906 	if (start != frag_start) {
907 		amdgpu_vm_update_ptes(params, vm, start, frag_start,
908 				      dst, flags);
909 		dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
910 	}
911 
912 	/* handle the area in the middle */
913 	amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst,
914 			      flags | frag_flags);
915 
916 	/* handle the 4K area at the end */
917 	if (frag_end != end) {
918 		dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
919 		amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags);
920 	}
921 }
922 
923 /**
924  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
925  *
926  * @adev: amdgpu_device pointer
927  * @exclusive: fence we need to sync to
928  * @src: address where to copy page table entries from
929  * @pages_addr: DMA addresses to use for mapping
930  * @vm: requested vm
931  * @start: start of mapped range
932  * @last: last mapped entry
933  * @flags: flags for the entries
934  * @addr: addr to set the area to
935  * @fence: optional resulting fence
936  *
937  * Fill in the page table entries between @start and @last.
938  * Returns 0 for success, -EINVAL for failure.
939  */
940 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
941 				       struct dma_fence *exclusive,
942 				       uint64_t src,
943 				       dma_addr_t *pages_addr,
944 				       struct amdgpu_vm *vm,
945 				       uint64_t start, uint64_t last,
946 				       uint32_t flags, uint64_t addr,
947 				       struct dma_fence **fence)
948 {
949 	struct amdgpu_ring *ring;
950 	void *owner = AMDGPU_FENCE_OWNER_VM;
951 	unsigned nptes, ncmds, ndw;
952 	struct amdgpu_job *job;
953 	struct amdgpu_pte_update_params params;
954 	struct dma_fence *f = NULL;
955 	int r;
956 
957 	memset(&params, 0, sizeof(params));
958 	params.adev = adev;
959 	params.src = src;
960 
961 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
966 
967 	/* sync to everything on unmapping */
968 	if (!(flags & AMDGPU_PTE_VALID))
969 		owner = AMDGPU_FENCE_OWNER_UNDEFINED;
970 
971 	nptes = last - start + 1;
972 
973 	/*
974 	 * reserve space for one command every (1 << BLOCK_SIZE)
975 	 *  entries or 2k dwords (whatever is smaller)
976 	 */
977 	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
978 
979 	/* padding, etc. */
980 	ndw = 64;
981 
982 	if (src) {
983 		/* only copy commands needed */
984 		ndw += ncmds * 7;
985 
986 		params.func = amdgpu_vm_do_copy_ptes;
987 
988 	} else if (pages_addr) {
989 		/* copy commands needed */
990 		ndw += ncmds * 7;
991 
992 		/* and also PTEs */
993 		ndw += nptes * 2;
994 
995 		params.func = amdgpu_vm_do_copy_ptes;
996 
997 	} else {
998 		/* set page commands needed */
999 		ndw += ncmds * 10;
1000 
1001 		/* two extra commands for begin/end of fragment */
1002 		ndw += 2 * 10;
1003 
1004 		params.func = amdgpu_vm_do_set_ptes;
1005 	}
1006 
1007 	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
1008 	if (r)
1009 		return r;
1010 
1011 	params.ib = &job->ibs[0];
1012 
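	/* With DMA addresses but no GART source, generate the PTEs on the
	 * CPU at the end of this IB and let the GPU copy them from there.
	 */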
1013 	if (!src && pages_addr) {
1014 		uint64_t *pte;
1015 		unsigned i;
1016 
1017 		/* Put the PTEs at the end of the IB. */
1018 		i = ndw - nptes * 2;
		pte = (uint64_t *)&(job->ibs->ptr[i]);
1020 		params.src = job->ibs->gpu_addr + i * 4;
1021 
1022 		for (i = 0; i < nptes; ++i) {
1023 			pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
1024 						    AMDGPU_GPU_PAGE_SIZE);
1025 			pte[i] |= flags;
1026 		}
1027 		addr = 0;
1028 	}
1029 
1030 	r = amdgpu_sync_fence(adev, &job->sync, exclusive);
1031 	if (r)
1032 		goto error_free;
1033 
1034 	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
1035 			     owner);
1036 	if (r)
1037 		goto error_free;
1038 
1039 	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
1040 	if (r)
1041 		goto error_free;
1042 
1043 	params.shadow = true;
1044 	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
1045 	params.shadow = false;
1046 	amdgpu_vm_frag_ptes(&params, vm, start, last + 1, addr, flags);
1047 
1048 	amdgpu_ring_pad_ib(ring, params.ib);
1049 	WARN_ON(params.ib->length_dw > ndw);
1050 	r = amdgpu_job_submit(job, ring, &vm->entity,
1051 			      AMDGPU_FENCE_OWNER_VM, &f);
1052 	if (r)
1053 		goto error_free;
1054 
1055 	amdgpu_bo_fence(vm->page_directory, f, true);
1056 	if (fence) {
1057 		dma_fence_put(*fence);
1058 		*fence = dma_fence_get(f);
1059 	}
1060 	dma_fence_put(f);
1061 	return 0;
1062 
1063 error_free:
1064 	amdgpu_job_free(job);
1065 	return r;
1066 }
1067 
1068 /**
1069  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
1070  *
1071  * @adev: amdgpu_device pointer
1072  * @exclusive: fence we need to sync to
1073  * @gtt_flags: flags as they are used for GTT
1074  * @pages_addr: DMA addresses to use for mapping
1075  * @vm: requested vm
1076  * @mapping: mapped range and flags to use for the update
1077  * @flags: HW flags for the mapping
1078  * @nodes: array of drm_mm_nodes with the MC addresses
1079  * @fence: optional resulting fence
1080  *
1081  * Split the mapping into smaller chunks so that each update fits
1082  * into a SDMA IB.
1083  * Returns 0 for success, -EINVAL for failure.
1084  */
1085 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1086 				      struct dma_fence *exclusive,
1087 				      uint32_t gtt_flags,
1088 				      dma_addr_t *pages_addr,
1089 				      struct amdgpu_vm *vm,
1090 				      struct amdgpu_bo_va_mapping *mapping,
1091 				      uint32_t flags,
1092 				      struct drm_mm_node *nodes,
1093 				      struct dma_fence **fence)
1094 {
1095 	uint64_t pfn, src = 0, start = mapping->it.start;
1096 	int r;
1097 
	/* Normally bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just to be safe we filter the flags here first.
	 */
1101 	if (!(mapping->flags & AMDGPU_PTE_READABLE))
1102 		flags &= ~AMDGPU_PTE_READABLE;
1103 	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1104 		flags &= ~AMDGPU_PTE_WRITEABLE;
1105 
1106 	trace_amdgpu_vm_bo_update(mapping);
1107 
1108 	pfn = mapping->offset >> PAGE_SHIFT;
1109 	if (nodes) {
1110 		while (pfn >= nodes->size) {
1111 			pfn -= nodes->size;
1112 			++nodes;
1113 		}
1114 	}
1115 
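	/* walk the drm_mm nodes, each iteration updates at most the rest of
	 * the current node (or a bounded chunk for scattered system pages)
	 */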
1116 	do {
1117 		uint64_t max_entries;
1118 		uint64_t addr, last;
1119 
1120 		if (nodes) {
1121 			addr = nodes->start << PAGE_SHIFT;
1122 			max_entries = (nodes->size - pfn) *
1123 				(PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1124 		} else {
1125 			addr = 0;
1126 			max_entries = S64_MAX;
1127 		}
1128 
1129 		if (pages_addr) {
1130 			if (flags == gtt_flags)
1131 				src = adev->gart.table_addr +
1132 					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
1133 			else
1134 				max_entries = min(max_entries, 16ull * 1024ull);
1135 			addr = 0;
1136 		} else if (flags & AMDGPU_PTE_VALID) {
1137 			addr += adev->vm_manager.vram_base_offset;
1138 		}
1139 		addr += pfn << PAGE_SHIFT;
1140 
1141 		last = min((uint64_t)mapping->it.last, start + max_entries - 1);
1142 		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1143 						src, pages_addr, vm,
1144 						start, last, flags, addr,
1145 						fence);
1146 		if (r)
1147 			return r;
1148 
1149 		pfn += last - start + 1;
1150 		if (nodes && nodes->size == pfn) {
1151 			pfn = 0;
1152 			++nodes;
1153 		}
1154 		start = last + 1;
1155 
1156 	} while (unlikely(start != mapping->it.last + 1));
1157 
1158 	return 0;
1159 }
1160 
1161 /**
1162  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1163  *
1164  * @adev: amdgpu_device pointer
1165  * @bo_va: requested BO and VM object
1166  * @clear: if true clear the entries
1167  *
1168  * Fill in the page table entries for @bo_va.
1169  * Returns 0 for success, -EINVAL for failure.
1170  */
1171 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1172 			struct amdgpu_bo_va *bo_va,
1173 			bool clear)
1174 {
1175 	struct amdgpu_vm *vm = bo_va->vm;
1176 	struct amdgpu_bo_va_mapping *mapping;
1177 	dma_addr_t *pages_addr = NULL;
1178 	uint32_t gtt_flags, flags;
1179 	struct ttm_mem_reg *mem;
1180 	struct drm_mm_node *nodes;
1181 	struct dma_fence *exclusive;
1182 	int r;
1183 
1184 	if (clear) {
1185 		mem = NULL;
1186 		nodes = NULL;
1187 		exclusive = NULL;
1188 	} else {
1189 		struct ttm_dma_tt *ttm;
1190 
1191 		mem = &bo_va->bo->tbo.mem;
1192 		nodes = mem->mm_node;
1193 		if (mem->mem_type == TTM_PL_TT) {
1194 			ttm = container_of(bo_va->bo->tbo.ttm, struct
1195 					   ttm_dma_tt, ttm);
1196 			pages_addr = ttm->dma_address;
1197 		}
1198 		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
1199 	}
1200 
1201 	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
1202 	gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
1203 		adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0;
1204 
1205 	spin_lock(&vm->status_lock);
1206 	if (!list_empty(&bo_va->vm_status))
1207 		list_splice_init(&bo_va->valids, &bo_va->invalids);
1208 	spin_unlock(&vm->status_lock);
1209 
1210 	list_for_each_entry(mapping, &bo_va->invalids, list) {
1211 		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1212 					       gtt_flags, pages_addr, vm,
1213 					       mapping, flags, nodes,
1214 					       &bo_va->last_pt_update);
1215 		if (r)
1216 			return r;
1217 	}
1218 
1219 	if (trace_amdgpu_vm_bo_mapping_enabled()) {
1220 		list_for_each_entry(mapping, &bo_va->valids, list)
1221 			trace_amdgpu_vm_bo_mapping(mapping);
1222 
1223 		list_for_each_entry(mapping, &bo_va->invalids, list)
1224 			trace_amdgpu_vm_bo_mapping(mapping);
1225 	}
1226 
1227 	spin_lock(&vm->status_lock);
1228 	list_splice_init(&bo_va->invalids, &bo_va->valids);
1229 	list_del_init(&bo_va->vm_status);
1230 	if (clear)
1231 		list_add(&bo_va->vm_status, &vm->cleared);
1232 	spin_unlock(&vm->status_lock);
1233 
1234 	return 0;
1235 }
1236 
1237 /**
1238  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1239  *
1240  * @adev: amdgpu_device pointer
1241  * @vm: requested vm
1242  *
1243  * Make sure all freed BOs are cleared in the PT.
1244  * Returns 0 for success.
1245  *
1246  * PTs have to be reserved and mutex must be locked!
1247  */
1248 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1249 			  struct amdgpu_vm *vm)
1250 {
1251 	struct amdgpu_bo_va_mapping *mapping;
1252 	int r;
1253 
1254 	while (!list_empty(&vm->freed)) {
1255 		mapping = list_first_entry(&vm->freed,
1256 			struct amdgpu_bo_va_mapping, list);
1257 		list_del(&mapping->list);
1258 
1259 		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
1260 					       0, 0, NULL);
1261 		kfree(mapping);
1262 		if (r)
1263 			return r;
1264 
1265 	}
1266 	return 0;
1267 
1268 }
1269 
1270 /**
1271  * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1272  *
1273  * @adev: amdgpu_device pointer
1274  * @vm: requested vm
1275  *
1276  * Make sure all invalidated BOs are cleared in the PT.
1277  * Returns 0 for success.
1278  *
1279  * PTs have to be reserved and mutex must be locked!
1280  */
1281 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1282 			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1283 {
1284 	struct amdgpu_bo_va *bo_va = NULL;
1285 	int r = 0;
1286 
1287 	spin_lock(&vm->status_lock);
1288 	while (!list_empty(&vm->invalidated)) {
1289 		bo_va = list_first_entry(&vm->invalidated,
1290 			struct amdgpu_bo_va, vm_status);
1291 		spin_unlock(&vm->status_lock);
1292 
1293 		r = amdgpu_vm_bo_update(adev, bo_va, true);
1294 		if (r)
1295 			return r;
1296 
1297 		spin_lock(&vm->status_lock);
1298 	}
1299 	spin_unlock(&vm->status_lock);
1300 
1301 	if (bo_va)
1302 		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1303 
1304 	return r;
1305 }
1306 
1307 /**
1308  * amdgpu_vm_bo_add - add a bo to a specific vm
1309  *
1310  * @adev: amdgpu_device pointer
1311  * @vm: requested vm
1312  * @bo: amdgpu buffer object
1313  *
1314  * Add @bo into the requested vm.
1315  * Add @bo to the list of bos associated with the vm
1316  * Returns newly added bo_va or NULL for failure
1317  *
1318  * Object has to be reserved!
1319  */
1320 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1321 				      struct amdgpu_vm *vm,
1322 				      struct amdgpu_bo *bo)
1323 {
1324 	struct amdgpu_bo_va *bo_va;
1325 
1326 	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1327 	if (bo_va == NULL) {
1328 		return NULL;
1329 	}
1330 	bo_va->vm = vm;
1331 	bo_va->bo = bo;
1332 	bo_va->ref_count = 1;
1333 	INIT_LIST_HEAD(&bo_va->bo_list);
1334 	INIT_LIST_HEAD(&bo_va->valids);
1335 	INIT_LIST_HEAD(&bo_va->invalids);
1336 	INIT_LIST_HEAD(&bo_va->vm_status);
1337 
1338 	list_add_tail(&bo_va->bo_list, &bo->va);
1339 
1340 	return bo_va;
1341 }
1342 
1343 /**
1344  * amdgpu_vm_bo_map - map bo inside a vm
1345  *
1346  * @adev: amdgpu_device pointer
1347  * @bo_va: bo_va to store the address
1348  * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapped range in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
1353  * Returns 0 for success, error for failure.
1354  *
1355  * Object has to be reserved and unreserved outside!
1356  */
1357 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1358 		     struct amdgpu_bo_va *bo_va,
1359 		     uint64_t saddr, uint64_t offset,
1360 		     uint64_t size, uint32_t flags)
1361 {
1362 	struct amdgpu_bo_va_mapping *mapping;
1363 	struct amdgpu_vm *vm = bo_va->vm;
1364 	struct interval_tree_node *it;
1365 	unsigned last_pfn, pt_idx;
1366 	uint64_t eaddr;
1367 	int r;
1368 
1369 	/* validate the parameters */
1370 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1371 	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1372 		return -EINVAL;
1373 
1374 	/* make sure object fit at this offset */
1375 	eaddr = saddr + size - 1;
1376 	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1377 		return -EINVAL;
1378 
1379 	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1380 	if (last_pfn >= adev->vm_manager.max_pfn) {
1381 		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1382 			last_pfn, adev->vm_manager.max_pfn);
1383 		return -EINVAL;
1384 	}
1385 
1386 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1387 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
1388 
1389 	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1390 	if (it) {
1391 		struct amdgpu_bo_va_mapping *tmp;
1392 		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1393 		/* bo and tmp overlap, invalid addr */
1394 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1395 			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1396 			tmp->it.start, tmp->it.last + 1);
1397 		r = -EINVAL;
1398 		goto error;
1399 	}
1400 
1401 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1402 	if (!mapping) {
1403 		r = -ENOMEM;
1404 		goto error;
1405 	}
1406 
1407 	INIT_LIST_HEAD(&mapping->list);
1408 	mapping->it.start = saddr;
1409 	mapping->it.last = eaddr;
1410 	mapping->offset = offset;
1411 	mapping->flags = flags;
1412 
1413 	list_add(&mapping->list, &bo_va->invalids);
1414 	interval_tree_insert(&mapping->it, &vm->va);
1415 
1416 	/* Make sure the page tables are allocated */
1417 	saddr >>= amdgpu_vm_block_size;
1418 	eaddr >>= amdgpu_vm_block_size;
1419 
1420 	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
1421 
1422 	if (eaddr > vm->max_pde_used)
1423 		vm->max_pde_used = eaddr;
1424 
1425 	/* walk over the address space and allocate the page tables */
1426 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1427 		struct reservation_object *resv = vm->page_directory->tbo.resv;
1428 		struct amdgpu_bo *pt;
1429 
1430 		if (vm->page_tables[pt_idx].bo)
1431 			continue;
1432 
1433 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1434 				     AMDGPU_GPU_PAGE_SIZE, true,
1435 				     AMDGPU_GEM_DOMAIN_VRAM,
1436 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1437 				     AMDGPU_GEM_CREATE_SHADOW |
1438 				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1439 				     NULL, resv, &pt);
1440 		if (r)
1441 			goto error_free;
1442 
1443 		/* Keep a reference to the page table to avoid freeing
1444 		 * them up in the wrong order.
1445 		 */
1446 		pt->parent = amdgpu_bo_ref(vm->page_directory);
1447 
1448 		r = amdgpu_vm_clear_bo(adev, vm, pt);
1449 		if (r) {
1450 			amdgpu_bo_unref(&pt->shadow);
1451 			amdgpu_bo_unref(&pt);
1452 			goto error_free;
1453 		}
1454 
1455 		if (pt->shadow) {
1456 			r = amdgpu_vm_clear_bo(adev, vm, pt->shadow);
1457 			if (r) {
1458 				amdgpu_bo_unref(&pt->shadow);
1459 				amdgpu_bo_unref(&pt);
1460 				goto error_free;
1461 			}
1462 		}
1463 
1464 		vm->page_tables[pt_idx].bo = pt;
1465 		vm->page_tables[pt_idx].addr = 0;
1466 	}
1467 
1468 	return 0;
1469 
1470 error_free:
1471 	list_del(&mapping->list);
1472 	interval_tree_remove(&mapping->it, &vm->va);
1473 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1474 	kfree(mapping);
1475 
1476 error:
1477 	return r;
1478 }
1479 
1480 /**
1481  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1482  *
1483  * @adev: amdgpu_device pointer
1484  * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
1488  * Returns 0 for success, error for failure.
1489  *
1490  * Object has to be reserved and unreserved outside!
1491  */
1492 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1493 		       struct amdgpu_bo_va *bo_va,
1494 		       uint64_t saddr)
1495 {
1496 	struct amdgpu_bo_va_mapping *mapping;
1497 	struct amdgpu_vm *vm = bo_va->vm;
1498 	bool valid = true;
1499 
1500 	saddr /= AMDGPU_GPU_PAGE_SIZE;
1501 
1502 	list_for_each_entry(mapping, &bo_va->valids, list) {
1503 		if (mapping->it.start == saddr)
1504 			break;
1505 	}
1506 
1507 	if (&mapping->list == &bo_va->valids) {
1508 		valid = false;
1509 
1510 		list_for_each_entry(mapping, &bo_va->invalids, list) {
1511 			if (mapping->it.start == saddr)
1512 				break;
1513 		}
1514 
1515 		if (&mapping->list == &bo_va->invalids)
1516 			return -ENOENT;
1517 	}
1518 
1519 	list_del(&mapping->list);
1520 	interval_tree_remove(&mapping->it, &vm->va);
1521 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1522 
1523 	if (valid)
1524 		list_add(&mapping->list, &vm->freed);
1525 	else
1526 		kfree(mapping);
1527 
1528 	return 0;
1529 }
1530 
1531 /**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1533  *
1534  * @adev: amdgpu_device pointer
1535  * @bo_va: requested bo_va
1536  *
1537  * Remove @bo_va->bo from the requested vm.
1538  *
 * Object has to be reserved!
1540  */
1541 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1542 		      struct amdgpu_bo_va *bo_va)
1543 {
1544 	struct amdgpu_bo_va_mapping *mapping, *next;
1545 	struct amdgpu_vm *vm = bo_va->vm;
1546 
1547 	list_del(&bo_va->bo_list);
1548 
1549 	spin_lock(&vm->status_lock);
1550 	list_del(&bo_va->vm_status);
1551 	spin_unlock(&vm->status_lock);
1552 
1553 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1554 		list_del(&mapping->list);
1555 		interval_tree_remove(&mapping->it, &vm->va);
1556 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1557 		list_add(&mapping->list, &vm->freed);
1558 	}
1559 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1560 		list_del(&mapping->list);
1561 		interval_tree_remove(&mapping->it, &vm->va);
1562 		kfree(mapping);
1563 	}
1564 
1565 	dma_fence_put(bo_va->last_pt_update);
1566 	kfree(bo_va);
1567 }
1568 
1569 /**
1570  * amdgpu_vm_bo_invalidate - mark the bo as invalid
1571  *
1572  * @adev: amdgpu_device pointer
1574  * @bo: amdgpu buffer object
1575  *
1576  * Mark @bo as invalid.
1577  */
1578 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1579 			     struct amdgpu_bo *bo)
1580 {
1581 	struct amdgpu_bo_va *bo_va;
1582 
1583 	list_for_each_entry(bo_va, &bo->va, bo_list) {
1584 		spin_lock(&bo_va->vm->status_lock);
1585 		if (list_empty(&bo_va->vm_status))
1586 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1587 		spin_unlock(&bo_va->vm->status_lock);
1588 	}
1589 }
1590 
1591 /**
1592  * amdgpu_vm_init - initialize a vm instance
1593  *
1594  * @adev: amdgpu_device pointer
1595  * @vm: requested vm
1596  *
1597  * Init @vm fields.
1598  */
1599 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1600 {
1601 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1602 		AMDGPU_VM_PTE_COUNT * 8);
1603 	unsigned pd_size, pd_entries;
1604 	unsigned ring_instance;
1605 	struct amdgpu_ring *ring;
1606 	struct amd_sched_rq *rq;
1607 	int i, r;
1608 
1609 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1610 		vm->ids[i] = NULL;
1611 	vm->va = RB_ROOT;
1612 	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
1613 	spin_lock_init(&vm->status_lock);
1614 	INIT_LIST_HEAD(&vm->invalidated);
1615 	INIT_LIST_HEAD(&vm->cleared);
1616 	INIT_LIST_HEAD(&vm->freed);
1617 
1618 	pd_size = amdgpu_vm_directory_size(adev);
1619 	pd_entries = amdgpu_vm_num_pdes(adev);
1620 
1621 	/* allocate page table array */
1622 	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1623 	if (vm->page_tables == NULL) {
1624 		DRM_ERROR("Cannot allocate memory for page table array\n");
1625 		return -ENOMEM;
1626 	}
1627 
1628 	/* create scheduler entity for page table updates */
1629 
1630 	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
1631 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
1632 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
1633 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
1634 	r = amd_sched_entity_init(&ring->sched, &vm->entity,
1635 				  rq, amdgpu_sched_jobs);
1636 	if (r)
1637 		goto err;
1638 
1639 	vm->page_directory_fence = NULL;
1640 
1641 	r = amdgpu_bo_create(adev, pd_size, align, true,
1642 			     AMDGPU_GEM_DOMAIN_VRAM,
1643 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
1644 			     AMDGPU_GEM_CREATE_SHADOW |
1645 			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
1646 			     NULL, NULL, &vm->page_directory);
1647 	if (r)
1648 		goto error_free_sched_entity;
1649 
1650 	r = amdgpu_bo_reserve(vm->page_directory, false);
1651 	if (r)
1652 		goto error_free_page_directory;
1653 
1654 	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
1655 	if (r)
1656 		goto error_unreserve;
1657 
1658 	if (vm->page_directory->shadow) {
1659 		r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory->shadow);
1660 		if (r)
1661 			goto error_unreserve;
1662 	}
1663 
1664 	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
1665 	amdgpu_bo_unreserve(vm->page_directory);
1666 
1667 	return 0;
1668 
1669 error_unreserve:
1670 	amdgpu_bo_unreserve(vm->page_directory);
1671 
1672 error_free_page_directory:
1673 	amdgpu_bo_unref(&vm->page_directory->shadow);
1674 	amdgpu_bo_unref(&vm->page_directory);
1675 	vm->page_directory = NULL;
1676 
1677 error_free_sched_entity:
1678 	amd_sched_entity_fini(&ring->sched, &vm->entity);
1679 
1680 err:
1681 	drm_free_large(vm->page_tables);
1682 
1683 	return r;
1684 }
1685 
1686 /**
1687  * amdgpu_vm_fini - tear down a vm instance
1688  *
1689  * @adev: amdgpu_device pointer
1690  * @vm: requested vm
1691  *
1692  * Tear down @vm.
1693  * Unbind the VM and remove all bos from the vm bo list
1694  */
1695 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1696 {
1697 	struct amdgpu_bo_va_mapping *mapping, *tmp;
1698 	int i;
1699 
1700 	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1701 
1702 	if (!RB_EMPTY_ROOT(&vm->va)) {
1703 		dev_err(adev->dev, "still active bo inside vm\n");
1704 	}
1705 	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1706 		list_del(&mapping->list);
1707 		interval_tree_remove(&mapping->it, &vm->va);
1708 		kfree(mapping);
1709 	}
1710 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1711 		list_del(&mapping->list);
1712 		kfree(mapping);
1713 	}
1714 
1715 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
1716 		struct amdgpu_bo *pt = vm->page_tables[i].bo;
1717 
1718 		if (!pt)
1719 			continue;
1720 
1721 		amdgpu_bo_unref(&pt->shadow);
1722 		amdgpu_bo_unref(&pt);
1723 	}
1724 	drm_free_large(vm->page_tables);
1725 
1726 	amdgpu_bo_unref(&vm->page_directory->shadow);
1727 	amdgpu_bo_unref(&vm->page_directory);
1728 	dma_fence_put(vm->page_directory_fence);
1729 }
1730 
1731 /**
1732  * amdgpu_vm_manager_init - init the VM manager
1733  *
1734  * @adev: amdgpu_device pointer
1735  *
1736  * Initialize the VM manager structures
1737  */
1738 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1739 {
1740 	unsigned i;
1741 
1742 	INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
1743 
1744 	/* skip over VMID 0, since it is the system VM */
1745 	for (i = 1; i < adev->vm_manager.num_ids; ++i) {
1746 		amdgpu_vm_reset_id(adev, i);
1747 		amdgpu_sync_create(&adev->vm_manager.ids[i].active);
1748 		list_add_tail(&adev->vm_manager.ids[i].list,
1749 			      &adev->vm_manager.ids_lru);
1750 	}
1751 
1752 	adev->vm_manager.fence_context =
1753 		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
1754 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1755 		adev->vm_manager.seqno[i] = 0;
1756 
1757 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
1758 	atomic64_set(&adev->vm_manager.client_counter, 0);
1759 }
1760 
1761 /**
1762  * amdgpu_vm_manager_fini - cleanup VM manager
1763  *
1764  * @adev: amdgpu_device pointer
1765  *
1766  * Cleanup the VM manager and free resources.
1767  */
1768 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1769 {
1770 	unsigned i;
1771 
1772 	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
1773 		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
1774 
1775 		dma_fence_put(adev->vm_manager.ids[i].first);
1776 		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1777 		dma_fence_put(id->flushed_updates);
1778 		dma_fence_put(id->last_flush);
1779 	}
1780 }
1781