xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c (revision 15a1fbdcfb519c2bd291ed01c6c94e0b89537a77)
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27 
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_amdkfd.h"
31 #include "amdgpu_dma_buf.h"
32 
33 /* BO flag to indicate a KFD userptr BO */
34 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
35 
36 /* Userptr restore delay, just long enough to allow consecutive VM
37  * changes to accumulate
38  */
39 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40 
41 /* Impose limit on how much memory KFD can use */
42 static struct {
43 	uint64_t max_system_mem_limit;
44 	uint64_t max_ttm_mem_limit;
45 	int64_t system_mem_used;
46 	int64_t ttm_mem_used;
47 	spinlock_t mem_limit_lock;
48 } kfd_mem_limit;
49 
50 /* Struct used for amdgpu_amdkfd_bo_validate */
51 struct amdgpu_vm_parser {
52 	uint32_t        domain;
53 	bool            wait;
54 };
55 
56 static const char * const domain_bit_to_string[] = {
57 		"CPU",
58 		"GTT",
59 		"VRAM",
60 		"GDS",
61 		"GWS",
62 		"OA"
63 };
64 
65 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
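/* Editorial example (not in the original source): the domain argument is a
 * single-bit mask, so ffs() maps it to a table index. Assuming the usual
 * domain bit layout (CPU=0x1, GTT=0x2, VRAM=0x4, ...),
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) expands to
 * domain_bit_to_string[ffs(0x4) - 1], i.e. index 2, which is "VRAM".
 */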
66 
67 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
68 
69 
70 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
71 {
72 	return (struct amdgpu_device *)kgd;
73 }
74 
75 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
76 		struct kgd_mem *mem)
77 {
78 	struct kfd_bo_va_list *entry;
79 
80 	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
81 		if (entry->bo_va->base.vm == avm)
82 			return false;
83 
84 	return true;
85 }
86 
87 /* Set memory usage limits. Currently, the limits are
88  *  System (TTM + userptr) memory - 15/16th System RAM
89  *  TTM memory - 3/8th System RAM
90  */
91 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
92 {
93 	struct sysinfo si;
94 	uint64_t mem;
95 
96 	si_meminfo(&si);
97 	mem = si.totalram - si.totalhigh;
98 	mem *= si.mem_unit;
99 
100 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
101 	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
102 	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
103 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
104 		(kfd_mem_limit.max_system_mem_limit >> 20),
105 		(kfd_mem_limit.max_ttm_mem_limit >> 20));
106 }
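/* Worked example (editorial): with 64 GiB of usable low RAM,
 * mem - (mem >> 4) gives a 60 GiB system memory limit (15/16th) and
 * (mem >> 1) - (mem >> 3) gives a 24 GiB TTM limit (1/2 - 1/8 = 3/8th),
 * matching the fractions documented above.
 */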
107 
108 /* Estimate page table size needed to represent a given memory size
109  *
110  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
111  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
112  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
113  * for 2MB pages for TLB efficiency. However, small allocations and
114  * fragmented system memory still need some 4KB pages. We choose a
115  * compromise that should work in most cases without reserving too
116  * much memory for page tables unnecessarily (factor 16K, >> 14).
117  */
118 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
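/* Worked example (editorial): for 64 GiB of managed memory,
 * ESTIMATE_PT_SIZE(64ULL << 30) == 2^36 >> 14 == 4 MiB reserved for page
 * tables. All-4KiB paging would need 128 MiB (>> 9) and all-2MiB paging
 * only 256 KiB (>> 18); >> 14 is the compromise described above.
 */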
119 
120 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
121 		uint64_t size, u32 domain, bool sg)
122 {
123 	uint64_t reserved_for_pt =
124 		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
125 	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
126 	int ret = 0;
127 
128 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
129 				       sizeof(struct amdgpu_bo));
130 
131 	vram_needed = 0;
132 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
133 		/* TTM GTT memory */
134 		system_mem_needed = acc_size + size;
135 		ttm_mem_needed = acc_size + size;
136 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
137 		/* Userptr */
138 		system_mem_needed = acc_size + size;
139 		ttm_mem_needed = acc_size;
140 	} else {
141 		/* VRAM and SG */
142 		system_mem_needed = acc_size;
143 		ttm_mem_needed = acc_size;
144 		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
145 			vram_needed = size;
146 	}
147 
148 	spin_lock(&kfd_mem_limit.mem_limit_lock);
149 
150 	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
151 	     kfd_mem_limit.max_system_mem_limit) ||
152 	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
153 	     kfd_mem_limit.max_ttm_mem_limit) ||
154 	    (adev->kfd.vram_used + vram_needed >
155 	     adev->gmc.real_vram_size - reserved_for_pt)) {
156 		ret = -ENOMEM;
157 	} else {
158 		kfd_mem_limit.system_mem_used += system_mem_needed;
159 		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
160 		adev->kfd.vram_used += vram_needed;
161 	}
162 
163 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
164 	return ret;
165 }
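/* Editorial summary of the accounting performed above:
 *
 *   allocation type   system_mem_used    ttm_mem_used       kfd.vram_used
 *   GTT               acc_size + size    acc_size + size    -
 *   userptr (CPU)     acc_size + size    acc_size           -
 *   VRAM              acc_size           acc_size           size
 *   SG (doorbell)     acc_size           acc_size           -
 */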
166 
167 static void unreserve_mem_limit(struct amdgpu_device *adev,
168 		uint64_t size, u32 domain, bool sg)
169 {
170 	size_t acc_size;
171 
172 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
173 				       sizeof(struct amdgpu_bo));
174 
175 	spin_lock(&kfd_mem_limit.mem_limit_lock);
176 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
177 		kfd_mem_limit.system_mem_used -= (acc_size + size);
178 		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
179 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
180 		kfd_mem_limit.system_mem_used -= (acc_size + size);
181 		kfd_mem_limit.ttm_mem_used -= acc_size;
182 	} else {
183 		kfd_mem_limit.system_mem_used -= acc_size;
184 		kfd_mem_limit.ttm_mem_used -= acc_size;
185 		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
186 			adev->kfd.vram_used -= size;
187 			WARN_ONCE(adev->kfd.vram_used < 0,
188 				  "kfd VRAM memory accounting unbalanced");
189 		}
190 	}
191 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
192 		  "kfd system memory accounting unbalanced");
193 	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
194 		  "kfd TTM memory accounting unbalanced");
195 
196 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
197 }
198 
199 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
200 {
201 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
202 	u32 domain = bo->preferred_domains;
203 	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
204 
205 	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
206 		domain = AMDGPU_GEM_DOMAIN_CPU;
207 		sg = false;
208 	}
209 
210 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
211 }
212 
213 
214 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
215  *  reservation object.
216  *
217  * @bo: [IN] Remove eviction fence(s) from this BO
218  * @ef: [IN] This eviction fence is removed if it
219  *  is present in the shared list.
220  *
221  * NOTE: Must be called with the BO reserved, i.e. bo->tbo.base.resv locked.
222  */
223 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
224 					struct amdgpu_amdkfd_fence *ef)
225 {
226 	struct dma_resv *resv = bo->tbo.base.resv;
227 	struct dma_resv_list *old, *new;
228 	unsigned int i, j, k;
229 
230 	if (!ef)
231 		return -EINVAL;
232 
233 	old = dma_resv_get_list(resv);
234 	if (!old)
235 		return 0;
236 
237 	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
238 		      GFP_KERNEL);
239 	if (!new)
240 		return -ENOMEM;
241 
242 	/* Go through all the shared fences in the reservation object and sort
243 	 * the interesting ones to the end of the list.
244 	 */
245 	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
246 		struct dma_fence *f;
247 
248 		f = rcu_dereference_protected(old->shared[i],
249 					      dma_resv_held(resv));
250 
251 		if (f->context == ef->base.context)
252 			RCU_INIT_POINTER(new->shared[--j], f);
253 		else
254 			RCU_INIT_POINTER(new->shared[k++], f);
255 	}
256 	new->shared_max = old->shared_max;
257 	new->shared_count = k;
258 
259 	/* Install the new fence list, seqcount provides the barriers */
260 	preempt_disable();
261 	write_seqcount_begin(&resv->seq);
262 	RCU_INIT_POINTER(resv->fence, new);
263 	write_seqcount_end(&resv->seq);
264 	preempt_enable();
265 
266 	/* Drop the references to the removed fences */
267 	for (i = j, k = 0; i < old->shared_count; ++i) {
268 		struct dma_fence *f;
269 
270 		f = rcu_dereference_protected(new->shared[i],
271 					      dma_resv_held(resv));
272 		dma_fence_put(f);
273 	}
274 	kfree_rcu(old, rcu);
275 
276 	return 0;
277 }
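/* Editorial walk-through of the partitioning above: with a shared list
 * [A, E1, B, E2] where E1/E2 belong to the eviction fence context, the
 * first loop produces new->shared == [A, B, E2, E1] with j == k == 2.
 * shared_count is truncated to k, so only A and B stay visible, and the
 * second loop then drops the references on E2 and E1.
 */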
278 
279 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
280 {
281 	struct amdgpu_bo *root = bo;
282 	struct amdgpu_vm_bo_base *vm_bo;
283 	struct amdgpu_vm *vm;
284 	struct amdkfd_process_info *info;
285 	struct amdgpu_amdkfd_fence *ef;
286 	int ret;
287 
288 	/* We can always get vm_bo from the root PD BO. */
289 	while (root->parent)
290 		root = root->parent;
291 
292 	vm_bo = root->vm_bo;
293 	if (!vm_bo)
294 		return 0;
295 
296 	vm = vm_bo->vm;
297 	if (!vm)
298 		return 0;
299 
300 	info = vm->process_info;
301 	if (!info || !info->eviction_fence)
302 		return 0;
303 
304 	ef = container_of(dma_fence_get(&info->eviction_fence->base),
305 			struct amdgpu_amdkfd_fence, base);
306 
307 	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
308 	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
309 	dma_resv_unlock(bo->tbo.base.resv);
310 
311 	dma_fence_put(&ef->base);
312 	return ret;
313 }
314 
315 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
316 				     bool wait)
317 {
318 	struct ttm_operation_ctx ctx = { false, false };
319 	int ret;
320 
321 	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
322 		 "Called with userptr BO"))
323 		return -EINVAL;
324 
325 	amdgpu_bo_placement_from_domain(bo, domain);
326 
327 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
328 	if (ret)
329 		goto validate_fail;
330 	if (wait)
331 		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
332 
333 validate_fail:
334 	return ret;
335 }
336 
337 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
338 {
339 	struct amdgpu_vm_parser *p = param;
340 
341 	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
342 }
343 
344 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
345  *
346  * Page directories are not updated here because huge page handling
347  * during page table updates can invalidate page directory entries
348  * again. Page directories are only updated after updating page
349  * tables.
350  */
351 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
352 {
353 	struct amdgpu_bo *pd = vm->root.base.bo;
354 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
355 	struct amdgpu_vm_parser param;
356 	int ret;
357 
358 	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
359 	param.wait = false;
360 
361 	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
362 					&param);
363 	if (ret) {
364 		pr_err("amdgpu: failed to validate PT BOs\n");
365 		return ret;
366 	}
367 
368 	ret = amdgpu_amdkfd_validate(&param, pd);
369 	if (ret) {
370 		pr_err("amdgpu: failed to validate PD\n");
371 		return ret;
372 	}
373 
374 	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
375 
376 	if (vm->use_cpu_for_update) {
377 		ret = amdgpu_bo_kmap(pd, NULL);
378 		if (ret) {
379 			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
380 			return ret;
381 		}
382 	}
383 
384 	return 0;
385 }
386 
387 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
388 {
389 	struct amdgpu_bo *pd = vm->root.base.bo;
390 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
391 	int ret;
392 
393 	ret = amdgpu_vm_update_pdes(adev, vm, false);
394 	if (ret)
395 		return ret;
396 
397 	return amdgpu_sync_fence(sync, vm->last_update, false);
398 }
399 
400 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
401 {
402 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
403 	bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
404 	uint32_t mapping_flags;
405 
406 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
407 	if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
408 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
409 	if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
410 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
411 
412 	switch (adev->asic_type) {
413 	case CHIP_ARCTURUS:
414 		if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
415 			if (bo_adev == adev)
416 				mapping_flags |= coherent ?
417 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
418 			else
419 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
420 		} else {
421 			mapping_flags |= coherent ?
422 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
423 		}
424 		break;
425 	default:
426 		mapping_flags |= coherent ?
427 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
428 	}
429 
430 	return amdgpu_gem_va_map_flags(adev, mapping_flags);
431 }
432 
433 /* add_bo_to_vm - Add a BO to a VM
434  *
435  * Everything that needs to be done only once when a BO is first added
436  * to a VM. It can later be mapped and unmapped many times without
437  * repeating these steps.
438  *
439  * 1. Allocate and initialize BO VA entry data structure
440  * 2. Add BO to the VM
441  * 3. Determine ASIC-specific PTE flags
442  * 4. Alloc page tables and directories if needed
443  * 4a.  Validate new page tables and directories
444  */
445 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
446 		struct amdgpu_vm *vm, bool is_aql,
447 		struct kfd_bo_va_list **p_bo_va_entry)
448 {
449 	int ret;
450 	struct kfd_bo_va_list *bo_va_entry;
451 	struct amdgpu_bo *bo = mem->bo;
452 	uint64_t va = mem->va;
453 	struct list_head *list_bo_va = &mem->bo_va_list;
454 	unsigned long bo_size = bo->tbo.mem.size;
455 
456 	if (!va) {
457 		pr_err("Invalid VA when adding BO to VM\n");
458 		return -EINVAL;
459 	}
460 
461 	if (is_aql)
462 		va += bo_size;
463 
464 	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
465 	if (!bo_va_entry)
466 		return -ENOMEM;
467 
468 	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
469 			va + bo_size, vm);
470 
471 	/* Add BO to VM internal data structures */
472 	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
473 	if (!bo_va_entry->bo_va) {
474 		ret = -EINVAL;
475 		pr_err("Failed to add BO object to VM. ret == %d\n",
476 				ret);
477 		goto err_vmadd;
478 	}
479 
480 	bo_va_entry->va = va;
481 	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
482 	bo_va_entry->kgd_dev = (void *)adev;
483 	list_add(&bo_va_entry->bo_list, list_bo_va);
484 
485 	if (p_bo_va_entry)
486 		*p_bo_va_entry = bo_va_entry;
487 
488 	/* Allocate and validate page tables if needed */
489 	ret = vm_validate_pt_pd_bos(vm);
490 	if (ret) {
491 		pr_err("validate_pt_pd_bos() failed\n");
492 		goto err_alloc_pts;
493 	}
494 
495 	return 0;
496 
497 err_alloc_pts:
498 	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
499 	list_del(&bo_va_entry->bo_list);
500 err_vmadd:
501 	kfree(bo_va_entry);
502 	return ret;
503 }
504 
505 static void remove_bo_from_vm(struct amdgpu_device *adev,
506 		struct kfd_bo_va_list *entry, unsigned long size)
507 {
508 	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
509 			entry->va,
510 			entry->va + size, entry);
511 	amdgpu_vm_bo_rmv(adev, entry->bo_va);
512 	list_del(&entry->bo_list);
513 	kfree(entry);
514 }
515 
516 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
517 				struct amdkfd_process_info *process_info,
518 				bool userptr)
519 {
520 	struct ttm_validate_buffer *entry = &mem->validate_list;
521 	struct amdgpu_bo *bo = mem->bo;
522 
523 	INIT_LIST_HEAD(&entry->head);
524 	entry->num_shared = 1;
525 	entry->bo = &bo->tbo;
526 	mutex_lock(&process_info->lock);
527 	if (userptr)
528 		list_add_tail(&entry->head, &process_info->userptr_valid_list);
529 	else
530 		list_add_tail(&entry->head, &process_info->kfd_bo_list);
531 	mutex_unlock(&process_info->lock);
532 }
533 
534 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
535 		struct amdkfd_process_info *process_info)
536 {
537 	struct ttm_validate_buffer *bo_list_entry;
538 
539 	bo_list_entry = &mem->validate_list;
540 	mutex_lock(&process_info->lock);
541 	list_del(&bo_list_entry->head);
542 	mutex_unlock(&process_info->lock);
543 }
544 
545 /* Initializes user pages. It registers the MMU notifier and validates
546  * the userptr BO in the GTT domain.
547  *
548  * The BO must already be on the userptr_valid_list. Otherwise an
549  * eviction and restore may happen that leaves the new BO unmapped
550  * with the user mode queues running.
551  *
552  * Takes the process_info->lock to protect against concurrent restore
553  * workers.
554  *
555  * Returns 0 for success, negative errno for errors.
556  */
557 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
558 {
559 	struct amdkfd_process_info *process_info = mem->process_info;
560 	struct amdgpu_bo *bo = mem->bo;
561 	struct ttm_operation_ctx ctx = { true, false };
562 	int ret = 0;
563 
564 	mutex_lock(&process_info->lock);
565 
566 	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
567 	if (ret) {
568 		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
569 		goto out;
570 	}
571 
572 	ret = amdgpu_mn_register(bo, user_addr);
573 	if (ret) {
574 		pr_err("%s: Failed to register MMU notifier: %d\n",
575 		       __func__, ret);
576 		goto out;
577 	}
578 
579 	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
580 	if (ret) {
581 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
582 		goto unregister_out;
583 	}
584 
585 	ret = amdgpu_bo_reserve(bo, true);
586 	if (ret) {
587 		pr_err("%s: Failed to reserve BO\n", __func__);
588 		goto release_out;
589 	}
590 	amdgpu_bo_placement_from_domain(bo, mem->domain);
591 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
592 	if (ret)
593 		pr_err("%s: failed to validate BO\n", __func__);
594 	amdgpu_bo_unreserve(bo);
595 
596 release_out:
597 	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
598 unregister_out:
599 	if (ret)
600 		amdgpu_mn_unregister(bo);
601 out:
602 	mutex_unlock(&process_info->lock);
603 	return ret;
604 }
605 
606 /* Reserving a BO and its page table BOs must happen atomically to
607  * avoid deadlocks. Some operations update multiple VMs at once. Track
608  * all the reservation info in a context structure. Optionally a sync
609  * object can track VM updates.
610  */
611 struct bo_vm_reservation_context {
612 	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
613 	unsigned int n_vms;		    /* Number of VMs reserved	    */
614 	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
615 	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
616 	struct list_head list, duplicates;  /* BO lists			    */
617 	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
618 	bool reserved;			    /* Whether BOs are reserved	    */
619 };
620 
621 enum bo_vm_match {
622 	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
623 	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
624 	BO_VM_ALL,		/* Match all VMs a BO was added to    */
625 };
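/* Typical usage sketch (editorial, mirroring the map/unmap paths below):
 *
 *	struct bo_vm_reservation_context ctx;
 *	int r = reserve_bo_and_vm(mem, vm, &ctx);
 *	if (r)
 *		return r;
 *	... update page tables, collecting fences in ctx.sync ...
 *	r = unreserve_bo_and_vms(&ctx, true, false);
 */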
626 
627 /**
628  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
629  * @mem: KFD BO structure.
630  * @vm: the VM to reserve.
631  * @ctx: the struct that will be used in unreserve_bo_and_vms().
632  */
633 static int reserve_bo_and_vm(struct kgd_mem *mem,
634 			      struct amdgpu_vm *vm,
635 			      struct bo_vm_reservation_context *ctx)
636 {
637 	struct amdgpu_bo *bo = mem->bo;
638 	int ret;
639 
640 	WARN_ON(!vm);
641 
642 	ctx->reserved = false;
643 	ctx->n_vms = 1;
644 	ctx->sync = &mem->sync;
645 
646 	INIT_LIST_HEAD(&ctx->list);
647 	INIT_LIST_HEAD(&ctx->duplicates);
648 
649 	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
650 	if (!ctx->vm_pd)
651 		return -ENOMEM;
652 
653 	ctx->kfd_bo.priority = 0;
654 	ctx->kfd_bo.tv.bo = &bo->tbo;
655 	ctx->kfd_bo.tv.num_shared = 1;
656 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
657 
658 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
659 
660 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
661 				     false, &ctx->duplicates);
662 	if (!ret)
663 		ctx->reserved = true;
664 	else {
665 		pr_err("Failed to reserve buffers in ttm\n");
666 		kfree(ctx->vm_pd);
667 		ctx->vm_pd = NULL;
668 	}
669 
670 	return ret;
671 }
672 
673 /**
674  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
675  * @mem: KFD BO structure.
676  * @vm: the VM to reserve. If NULL, all VMs associated with the BO
677  * are used. Otherwise, only the given VM is used.
678  * @map_type: the mapping status that will be used to filter the VMs.
679  * @ctx: the struct that will be used in unreserve_bo_and_vms().
680  *
681  * Returns 0 for success, negative for failure.
682  */
683 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
684 				struct amdgpu_vm *vm, enum bo_vm_match map_type,
685 				struct bo_vm_reservation_context *ctx)
686 {
687 	struct amdgpu_bo *bo = mem->bo;
688 	struct kfd_bo_va_list *entry;
689 	unsigned int i;
690 	int ret;
691 
692 	ctx->reserved = false;
693 	ctx->n_vms = 0;
694 	ctx->vm_pd = NULL;
695 	ctx->sync = &mem->sync;
696 
697 	INIT_LIST_HEAD(&ctx->list);
698 	INIT_LIST_HEAD(&ctx->duplicates);
699 
700 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
701 		if ((vm && vm != entry->bo_va->base.vm) ||
702 			(entry->is_mapped != map_type
703 			&& map_type != BO_VM_ALL))
704 			continue;
705 
706 		ctx->n_vms++;
707 	}
708 
709 	if (ctx->n_vms != 0) {
710 		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
711 				     GFP_KERNEL);
712 		if (!ctx->vm_pd)
713 			return -ENOMEM;
714 	}
715 
716 	ctx->kfd_bo.priority = 0;
717 	ctx->kfd_bo.tv.bo = &bo->tbo;
718 	ctx->kfd_bo.tv.num_shared = 1;
719 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
720 
721 	i = 0;
722 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
723 		if ((vm && vm != entry->bo_va->base.vm) ||
724 			(entry->is_mapped != map_type
725 			&& map_type != BO_VM_ALL))
726 			continue;
727 
728 		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
729 				&ctx->vm_pd[i]);
730 		i++;
731 	}
732 
733 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
734 				     false, &ctx->duplicates);
735 	if (!ret)
736 		ctx->reserved = true;
737 	else
738 		pr_err("Failed to reserve buffers in ttm.\n");
739 
740 	if (ret) {
741 		kfree(ctx->vm_pd);
742 		ctx->vm_pd = NULL;
743 	}
744 
745 	return ret;
746 }
747 
748 /**
749  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
750  * @ctx: Reservation context to unreserve
751  * @wait: Optionally wait for a sync object representing pending VM updates
752  * @intr: Whether the wait is interruptible
753  *
754  * Also frees any resources allocated in
755  * reserve_bo_and_(cond_)vm(s). Returns the status from
756  * amdgpu_sync_wait.
757  */
758 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
759 				 bool wait, bool intr)
760 {
761 	int ret = 0;
762 
763 	if (wait)
764 		ret = amdgpu_sync_wait(ctx->sync, intr);
765 
766 	if (ctx->reserved)
767 		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
768 	kfree(ctx->vm_pd);
769 
770 	ctx->sync = NULL;
771 
772 	ctx->reserved = false;
773 	ctx->vm_pd = NULL;
774 
775 	return ret;
776 }
777 
778 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
779 				struct kfd_bo_va_list *entry,
780 				struct amdgpu_sync *sync)
781 {
782 	struct amdgpu_bo_va *bo_va = entry->bo_va;
783 	struct amdgpu_vm *vm = bo_va->base.vm;
784 
785 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
786 
787 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
788 
789 	amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
790 
791 	return 0;
792 }
793 
794 static int update_gpuvm_pte(struct amdgpu_device *adev,
795 		struct kfd_bo_va_list *entry,
796 		struct amdgpu_sync *sync)
797 {
798 	int ret;
799 	struct amdgpu_bo_va *bo_va = entry->bo_va;
800 
801 	/* Update the page tables  */
802 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
803 	if (ret) {
804 		pr_err("amdgpu_vm_bo_update failed\n");
805 		return ret;
806 	}
807 
808 	return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
809 }
810 
811 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
812 		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
813 		bool no_update_pte)
814 {
815 	int ret;
816 
817 	/* Set virtual address for the allocation */
818 	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
819 			       amdgpu_bo_size(entry->bo_va->base.bo),
820 			       entry->pte_flags);
821 	if (ret) {
822 		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
823 				entry->va, ret);
824 		return ret;
825 	}
826 
827 	if (no_update_pte)
828 		return 0;
829 
830 	ret = update_gpuvm_pte(adev, entry, sync);
831 	if (ret) {
832 		pr_err("update_gpuvm_pte() failed\n");
833 		goto update_gpuvm_pte_failed;
834 	}
835 
836 	return 0;
837 
838 update_gpuvm_pte_failed:
839 	unmap_bo_from_gpuvm(adev, entry, sync);
840 	return ret;
841 }
842 
843 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
844 {
845 	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
846 
847 	if (!sg)
848 		return NULL;
849 	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
850 		kfree(sg);
851 		return NULL;
852 	}
853 	sg->sgl->dma_address = addr;
854 	sg->sgl->length = size;
855 #ifdef CONFIG_NEED_SG_DMA_LENGTH
856 	sg->sgl->dma_length = size;
857 #endif
858 	return sg;
859 }
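/* Usage sketch (editorial, db_addr hypothetical): callers pass a doorbell
 * or MMIO bus address, e.g. sg = create_doorbell_sg(db_addr, PAGE_SIZE),
 * and attach the table to a ttm_bo_type_sg BO, as
 * amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() does below for
 * ALLOC_MEM_FLAGS_DOORBELL/MMIO_REMAP allocations.
 */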
860 
861 static int process_validate_vms(struct amdkfd_process_info *process_info)
862 {
863 	struct amdgpu_vm *peer_vm;
864 	int ret;
865 
866 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
867 			    vm_list_node) {
868 		ret = vm_validate_pt_pd_bos(peer_vm);
869 		if (ret)
870 			return ret;
871 	}
872 
873 	return 0;
874 }
875 
876 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
877 				 struct amdgpu_sync *sync)
878 {
879 	struct amdgpu_vm *peer_vm;
880 	int ret;
881 
882 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
883 			    vm_list_node) {
884 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
885 
886 		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
887 				       AMDGPU_SYNC_NE_OWNER,
888 				       AMDGPU_FENCE_OWNER_KFD);
889 		if (ret)
890 			return ret;
891 	}
892 
893 	return 0;
894 }
895 
896 static int process_update_pds(struct amdkfd_process_info *process_info,
897 			      struct amdgpu_sync *sync)
898 {
899 	struct amdgpu_vm *peer_vm;
900 	int ret;
901 
902 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
903 			    vm_list_node) {
904 		ret = vm_update_pds(peer_vm, sync);
905 		if (ret)
906 			return ret;
907 	}
908 
909 	return 0;
910 }
911 
912 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
913 		       struct dma_fence **ef)
914 {
915 	struct amdkfd_process_info *info = NULL;
916 	int ret;
917 
918 	if (!*process_info) {
919 		info = kzalloc(sizeof(*info), GFP_KERNEL);
920 		if (!info)
921 			return -ENOMEM;
922 
923 		mutex_init(&info->lock);
924 		INIT_LIST_HEAD(&info->vm_list_head);
925 		INIT_LIST_HEAD(&info->kfd_bo_list);
926 		INIT_LIST_HEAD(&info->userptr_valid_list);
927 		INIT_LIST_HEAD(&info->userptr_inval_list);
928 
929 		info->eviction_fence =
930 			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
931 						   current->mm);
932 		if (!info->eviction_fence) {
933 			pr_err("Failed to create eviction fence\n");
934 			ret = -ENOMEM;
935 			goto create_evict_fence_fail;
936 		}
937 
938 		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
939 		atomic_set(&info->evicted_bos, 0);
940 		INIT_DELAYED_WORK(&info->restore_userptr_work,
941 				  amdgpu_amdkfd_restore_userptr_worker);
942 
943 		*process_info = info;
944 		*ef = dma_fence_get(&info->eviction_fence->base);
945 	}
946 
947 	vm->process_info = *process_info;
948 
949 	/* Validate page directory and attach eviction fence */
950 	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
951 	if (ret)
952 		goto reserve_pd_fail;
953 	ret = vm_validate_pt_pd_bos(vm);
954 	if (ret) {
955 		pr_err("validate_pt_pd_bos() failed\n");
956 		goto validate_pd_fail;
957 	}
958 	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
959 				  AMDGPU_FENCE_OWNER_KFD, false);
960 	if (ret)
961 		goto wait_pd_fail;
962 	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
963 	if (ret)
964 		goto reserve_shared_fail;
965 	amdgpu_bo_fence(vm->root.base.bo,
966 			&vm->process_info->eviction_fence->base, true);
967 	amdgpu_bo_unreserve(vm->root.base.bo);
968 
969 	/* Update process info */
970 	mutex_lock(&vm->process_info->lock);
971 	list_add_tail(&vm->vm_list_node,
972 			&(vm->process_info->vm_list_head));
973 	vm->process_info->n_vms++;
974 	mutex_unlock(&vm->process_info->lock);
975 
976 	return 0;
977 
978 reserve_shared_fail:
979 wait_pd_fail:
980 validate_pd_fail:
981 	amdgpu_bo_unreserve(vm->root.base.bo);
982 reserve_pd_fail:
983 	vm->process_info = NULL;
984 	if (info) {
985 		/* Two fence references: one in info and one in *ef */
986 		dma_fence_put(&info->eviction_fence->base);
987 		dma_fence_put(*ef);
988 		*ef = NULL;
989 		*process_info = NULL;
990 		put_pid(info->pid);
991 create_evict_fence_fail:
992 		mutex_destroy(&info->lock);
993 		kfree(info);
994 	}
995 	return ret;
996 }
997 
998 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
999 					  void **vm, void **process_info,
1000 					  struct dma_fence **ef)
1001 {
1002 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1003 	struct amdgpu_vm *new_vm;
1004 	int ret;
1005 
1006 	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1007 	if (!new_vm)
1008 		return -ENOMEM;
1009 
1010 	/* Initialize AMDGPU part of the VM */
1011 	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1012 	if (ret) {
1013 		pr_err("Failed init vm ret %d\n", ret);
1014 		goto amdgpu_vm_init_fail;
1015 	}
1016 
1017 	/* Initialize KFD part of the VM and process info */
1018 	ret = init_kfd_vm(new_vm, process_info, ef);
1019 	if (ret)
1020 		goto init_kfd_vm_fail;
1021 
1022 	*vm = (void *) new_vm;
1023 
1024 	return 0;
1025 
1026 init_kfd_vm_fail:
1027 	amdgpu_vm_fini(adev, new_vm);
1028 amdgpu_vm_init_fail:
1029 	kfree(new_vm);
1030 	return ret;
1031 }
1032 
1033 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1034 					   struct file *filp, unsigned int pasid,
1035 					   void **vm, void **process_info,
1036 					   struct dma_fence **ef)
1037 {
1038 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1039 	struct drm_file *drm_priv = filp->private_data;
1040 	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1041 	struct amdgpu_vm *avm = &drv_priv->vm;
1042 	int ret;
1043 
1044 	/* Already a compute VM? */
1045 	if (avm->process_info)
1046 		return -EINVAL;
1047 
1048 	/* Convert VM into a compute VM */
1049 	ret = amdgpu_vm_make_compute(adev, avm, pasid);
1050 	if (ret)
1051 		return ret;
1052 
1053 	/* Initialize KFD part of the VM and process info */
1054 	ret = init_kfd_vm(avm, process_info, ef);
1055 	if (ret)
1056 		return ret;
1057 
1058 	*vm = (void *)avm;
1059 
1060 	return 0;
1061 }
1062 
1063 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1064 				    struct amdgpu_vm *vm)
1065 {
1066 	struct amdkfd_process_info *process_info = vm->process_info;
1067 	struct amdgpu_bo *pd = vm->root.base.bo;
1068 
1069 	if (!process_info)
1070 		return;
1071 
1072 	/* Release eviction fence from PD */
1073 	amdgpu_bo_reserve(pd, false);
1074 	amdgpu_bo_fence(pd, NULL, false);
1075 	amdgpu_bo_unreserve(pd);
1076 
1077 	/* Update process info */
1078 	mutex_lock(&process_info->lock);
1079 	process_info->n_vms--;
1080 	list_del(&vm->vm_list_node);
1081 	mutex_unlock(&process_info->lock);
1082 
1083 	vm->process_info = NULL;
1084 
1085 	/* Release per-process resources when last compute VM is destroyed */
1086 	if (!process_info->n_vms) {
1087 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
1088 		WARN_ON(!list_empty(&process_info->userptr_valid_list));
1089 		WARN_ON(!list_empty(&process_info->userptr_inval_list));
1090 
1091 		dma_fence_put(&process_info->eviction_fence->base);
1092 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
1093 		put_pid(process_info->pid);
1094 		mutex_destroy(&process_info->lock);
1095 		kfree(process_info);
1096 	}
1097 }
1098 
1099 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1100 {
1101 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1102 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1103 
1104 	if (WARN_ON(!kgd || !vm))
1105 		return;
1106 
1107 	pr_debug("Destroying process vm %p\n", vm);
1108 
1109 	/* Release the VM context */
1110 	amdgpu_vm_fini(adev, avm);
1111 	kfree(vm);
1112 }
1113 
1114 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1115 {
1116 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1117 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1118 
1119 	if (WARN_ON(!kgd || !vm))
1120 		return;
1121 
1122 	pr_debug("Releasing process vm %p\n", vm);
1123 
1124 	/* The original pasid of the amdgpu vm was already released
1125 	 * when the amdgpu vm was converted to a compute vm. The
1126 	 * current pasid is managed by KFD and will be released on
1127 	 * KFD process destruction. Set the amdgpu pasid to 0 to
1128 	 * avoid a duplicate release.
1129 	 */
1130 	amdgpu_vm_release_compute(adev, avm);
1131 }
1132 
1133 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1134 {
1135 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1136 	struct amdgpu_bo *pd = avm->root.base.bo;
1137 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1138 
1139 	if (adev->asic_type < CHIP_VEGA10)
1140 		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1141 	return avm->pd_phys_addr;
1142 }
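/* Editorial note: ASICs before Vega10 program the page directory base as
 * a page frame number, hence the >> AMDGPU_GPU_PAGE_SHIFT (4 KiB GPU
 * pages); newer ASICs consume the full address from amdgpu_gmc_pd_addr().
 */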
1143 
1144 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1145 		struct kgd_dev *kgd, uint64_t va, uint64_t size,
1146 		void *vm, struct kgd_mem **mem,
1147 		uint64_t *offset, uint32_t flags)
1148 {
1149 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1150 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1151 	enum ttm_bo_type bo_type = ttm_bo_type_device;
1152 	struct sg_table *sg = NULL;
1153 	uint64_t user_addr = 0;
1154 	struct amdgpu_bo *bo;
1155 	struct amdgpu_bo_param bp;
1156 	u32 domain, alloc_domain;
1157 	u64 alloc_flags;
1158 	int ret;
1159 
1160 	/*
1161 	 * Check which domain to allocate the BO in
1162 	 */
1163 	if (flags & ALLOC_MEM_FLAGS_VRAM) {
1164 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1165 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1166 		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1167 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1168 			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1169 	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
1170 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1171 		alloc_flags = 0;
1172 	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1173 		domain = AMDGPU_GEM_DOMAIN_GTT;
1174 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1175 		alloc_flags = 0;
1176 		if (!offset || !*offset)
1177 			return -EINVAL;
1178 		user_addr = untagged_addr(*offset);
1179 	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
1180 			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1181 		domain = AMDGPU_GEM_DOMAIN_GTT;
1182 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1183 		bo_type = ttm_bo_type_sg;
1184 		alloc_flags = 0;
1185 		if (size > UINT_MAX)
1186 			return -EINVAL;
1187 		sg = create_doorbell_sg(*offset, size);
1188 		if (!sg)
1189 			return -ENOMEM;
1190 	} else {
1191 		return -EINVAL;
1192 	}
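	/* Editorial summary of the cases above:
	 *
	 *   flags                   alloc_domain    domain (mapping)
	 *   VRAM                    VRAM            VRAM
	 *   GTT                     GTT             GTT
	 *   USERPTR                 CPU             GTT
	 *   DOORBELL/MMIO_REMAP     CPU (SG BO)     GTT
	 */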
1193 
1194 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1195 	if (!*mem) {
1196 		ret = -ENOMEM;
1197 		goto err;
1198 	}
1199 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1200 	mutex_init(&(*mem)->lock);
1201 	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1202 
1203 	/* Workaround for AQL queue wraparound bug. Map the same
1204 	 * memory twice. That means we only actually allocate half
1205 	 * the memory.
1206 	 */
1207 	if ((*mem)->aql_queue)
1208 		size = size >> 1;
1209 
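	/* Editorial example: a 2 MiB AQL allocation request becomes a single
	 * 1 MiB BO that add_bo_to_vm()/map_bo_to_gpuvm() map twice, at
	 * mem->va and at mem->va + 1 MiB, so queue wraparound always lands
	 * in mapped memory.
	 */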
1210 	(*mem)->alloc_flags = flags;
1211 
1212 	amdgpu_sync_create(&(*mem)->sync);
1213 
1214 	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1215 	if (ret) {
1216 		pr_debug("Insufficient system memory\n");
1217 		goto err_reserve_limit;
1218 	}
1219 
1220 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1221 			va, size, domain_string(alloc_domain));
1222 
1223 	memset(&bp, 0, sizeof(bp));
1224 	bp.size = size;
1225 	bp.byte_align = 1;
1226 	bp.domain = alloc_domain;
1227 	bp.flags = alloc_flags;
1228 	bp.type = bo_type;
1229 	bp.resv = NULL;
1230 	ret = amdgpu_bo_create(adev, &bp, &bo);
1231 	if (ret) {
1232 		pr_debug("Failed to create BO on domain %s. ret %d\n",
1233 				domain_string(alloc_domain), ret);
1234 		goto err_bo_create;
1235 	}
1236 	if (bo_type == ttm_bo_type_sg) {
1237 		bo->tbo.sg = sg;
1238 		bo->tbo.ttm->sg = sg;
1239 	}
1240 	bo->kfd_bo = *mem;
1241 	(*mem)->bo = bo;
1242 	if (user_addr)
1243 		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1244 
1245 	(*mem)->va = va;
1246 	(*mem)->domain = domain;
1247 	(*mem)->mapped_to_gpu_memory = 0;
1248 	(*mem)->process_info = avm->process_info;
1249 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1250 
1251 	if (user_addr) {
1252 		ret = init_user_pages(*mem, user_addr);
1253 		if (ret)
1254 			goto allocate_init_user_pages_failed;
1255 	}
1256 
1257 	if (offset)
1258 		*offset = amdgpu_bo_mmap_offset(bo);
1259 
1260 	return 0;
1261 
1262 allocate_init_user_pages_failed:
1263 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1264 	amdgpu_bo_unref(&bo);
1265 	/* Don't unreserve system mem limit twice */
1266 	goto err_reserve_limit;
1267 err_bo_create:
1268 	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1269 err_reserve_limit:
1270 	mutex_destroy(&(*mem)->lock);
1271 	kfree(*mem);
1272 err:
1273 	if (sg) {
1274 		sg_free_table(sg);
1275 		kfree(sg);
1276 	}
1277 	return ret;
1278 }
1279 
1280 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1281 		struct kgd_dev *kgd, struct kgd_mem *mem)
1282 {
1283 	struct amdkfd_process_info *process_info = mem->process_info;
1284 	unsigned long bo_size = mem->bo->tbo.mem.size;
1285 	struct kfd_bo_va_list *entry, *tmp;
1286 	struct bo_vm_reservation_context ctx;
1287 	struct ttm_validate_buffer *bo_list_entry;
1288 	int ret;
1289 
1290 	mutex_lock(&mem->lock);
1291 
1292 	if (mem->mapped_to_gpu_memory > 0) {
1293 		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1294 				mem->va, bo_size);
1295 		mutex_unlock(&mem->lock);
1296 		return -EBUSY;
1297 	}
1298 
1299 	mutex_unlock(&mem->lock);
1300 	/* lock is not needed after this, since mem is unused and will
1301 	 * be freed anyway
1302 	 */
1303 
1304 	/* No more MMU notifiers */
1305 	amdgpu_mn_unregister(mem->bo);
1306 
1307 	/* Make sure restore workers don't access the BO any more */
1308 	bo_list_entry = &mem->validate_list;
1309 	mutex_lock(&process_info->lock);
1310 	list_del(&bo_list_entry->head);
1311 	mutex_unlock(&process_info->lock);
1312 
1313 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1314 	if (unlikely(ret))
1315 		return ret;
1316 
1317 	/* The eviction fence should be removed by the last unmap.
1318 	 * TODO: Log an error condition if the bo still has the eviction fence
1319 	 * attached
1320 	 */
1321 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1322 					process_info->eviction_fence);
1323 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1324 		mem->va + bo_size * (1 + mem->aql_queue));
1325 
1326 	/* Remove from VM internal data structures */
1327 	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1328 		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1329 				entry, bo_size);
1330 
1331 	ret = unreserve_bo_and_vms(&ctx, false, false);
1332 
1333 	/* Free the sync object */
1334 	amdgpu_sync_free(&mem->sync);
1335 
1336 	/* If the SG is not NULL, it's one we created for a doorbell or mmio
1337 	 * remap BO. We need to free it.
1338 	 */
1339 	if (mem->bo->tbo.sg) {
1340 		sg_free_table(mem->bo->tbo.sg);
1341 		kfree(mem->bo->tbo.sg);
1342 	}
1343 
1344 	/* Free the BO */
1345 	amdgpu_bo_unref(&mem->bo);
1346 	mutex_destroy(&mem->lock);
1347 	kfree(mem);
1348 
1349 	return ret;
1350 }
1351 
1352 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1353 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1354 {
1355 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1356 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1357 	int ret;
1358 	struct amdgpu_bo *bo;
1359 	uint32_t domain;
1360 	struct kfd_bo_va_list *entry;
1361 	struct bo_vm_reservation_context ctx;
1362 	struct kfd_bo_va_list *bo_va_entry = NULL;
1363 	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1364 	unsigned long bo_size;
1365 	bool is_invalid_userptr = false;
1366 
1367 	bo = mem->bo;
1368 	if (!bo) {
1369 		pr_err("Invalid BO when mapping memory to GPU\n");
1370 		return -EINVAL;
1371 	}
1372 
1373 	/* Make sure restore is not running concurrently. Since we
1374 	 * don't map invalid userptr BOs, we rely on the next restore
1375 	 * worker to do the mapping
1376 	 */
1377 	mutex_lock(&mem->process_info->lock);
1378 
1379 	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
1380 	 * sure that the MMU notifier is no longer running
1381 	 * concurrently and the queues are actually stopped
1382 	 */
1383 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1384 		down_write(&current->mm->mmap_sem);
1385 		is_invalid_userptr = atomic_read(&mem->invalid);
1386 		up_write(&current->mm->mmap_sem);
1387 	}
1388 
1389 	mutex_lock(&mem->lock);
1390 
1391 	domain = mem->domain;
1392 	bo_size = bo->tbo.mem.size;
1393 
1394 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1395 			mem->va,
1396 			mem->va + bo_size * (1 + mem->aql_queue),
1397 			vm, domain_string(domain));
1398 
1399 	ret = reserve_bo_and_vm(mem, vm, &ctx);
1400 	if (unlikely(ret))
1401 		goto out;
1402 
1403 	/* Userptr can be marked as "not invalid", but not actually be
1404 	 * validated yet (still in the system domain). In that case
1405 	 * the queues are still stopped and we can leave mapping for
1406 	 * the next restore worker
1407 	 */
1408 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1409 	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1410 		is_invalid_userptr = true;
1411 
1412 	if (check_if_add_bo_to_vm(avm, mem)) {
1413 		ret = add_bo_to_vm(adev, mem, avm, false,
1414 				&bo_va_entry);
1415 		if (ret)
1416 			goto add_bo_to_vm_failed;
1417 		if (mem->aql_queue) {
1418 			ret = add_bo_to_vm(adev, mem, avm,
1419 					true, &bo_va_entry_aql);
1420 			if (ret)
1421 				goto add_bo_to_vm_failed_aql;
1422 		}
1423 	} else {
1424 		ret = vm_validate_pt_pd_bos(avm);
1425 		if (unlikely(ret))
1426 			goto add_bo_to_vm_failed;
1427 	}
1428 
1429 	if (mem->mapped_to_gpu_memory == 0 &&
1430 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1431 		/* Validate BO only once. The eviction fence gets added to BO
1432 		 * the first time it is mapped. Validate will wait for all
1433 		 * background evictions to complete.
1434 		 */
1435 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1436 		if (ret) {
1437 			pr_debug("Validate failed\n");
1438 			goto map_bo_to_gpuvm_failed;
1439 		}
1440 	}
1441 
1442 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1443 		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1444 			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1445 					entry->va, entry->va + bo_size,
1446 					entry);
1447 
1448 			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1449 					      is_invalid_userptr);
1450 			if (ret) {
1451 				pr_err("Failed to map bo to gpuvm\n");
1452 				goto map_bo_to_gpuvm_failed;
1453 			}
1454 
1455 			ret = vm_update_pds(vm, ctx.sync);
1456 			if (ret) {
1457 				pr_err("Failed to update page directories\n");
1458 				goto map_bo_to_gpuvm_failed;
1459 			}
1460 
1461 			entry->is_mapped = true;
1462 			mem->mapped_to_gpu_memory++;
1463 			pr_debug("\t INC mapping count %d\n",
1464 					mem->mapped_to_gpu_memory);
1465 		}
1466 	}
1467 
1468 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1469 		amdgpu_bo_fence(bo,
1470 				&avm->process_info->eviction_fence->base,
1471 				true);
1472 	ret = unreserve_bo_and_vms(&ctx, false, false);
1473 
1474 	goto out;
1475 
1476 map_bo_to_gpuvm_failed:
1477 	if (bo_va_entry_aql)
1478 		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1479 add_bo_to_vm_failed_aql:
1480 	if (bo_va_entry)
1481 		remove_bo_from_vm(adev, bo_va_entry, bo_size);
1482 add_bo_to_vm_failed:
1483 	unreserve_bo_and_vms(&ctx, false, false);
1484 out:
1485 	mutex_unlock(&mem->process_info->lock);
1486 	mutex_unlock(&mem->lock);
1487 	return ret;
1488 }
1489 
1490 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1491 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1492 {
1493 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1494 	struct amdkfd_process_info *process_info =
1495 		((struct amdgpu_vm *)vm)->process_info;
1496 	unsigned long bo_size = mem->bo->tbo.mem.size;
1497 	struct kfd_bo_va_list *entry;
1498 	struct bo_vm_reservation_context ctx;
1499 	int ret;
1500 
1501 	mutex_lock(&mem->lock);
1502 
1503 	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1504 	if (unlikely(ret))
1505 		goto out;
1506 	/* If no VMs were reserved, it means the BO wasn't actually mapped */
1507 	if (ctx.n_vms == 0) {
1508 		ret = -EINVAL;
1509 		goto unreserve_out;
1510 	}
1511 
1512 	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1513 	if (unlikely(ret))
1514 		goto unreserve_out;
1515 
1516 	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1517 		mem->va,
1518 		mem->va + bo_size * (1 + mem->aql_queue),
1519 		vm);
1520 
1521 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1522 		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1523 			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1524 					entry->va,
1525 					entry->va + bo_size,
1526 					entry);
1527 
1528 			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1529 			if (ret == 0) {
1530 				entry->is_mapped = false;
1531 			} else {
1532 				pr_err("failed to unmap VA 0x%llx\n",
1533 						mem->va);
1534 				goto unreserve_out;
1535 			}
1536 
1537 			mem->mapped_to_gpu_memory--;
1538 			pr_debug("\t DEC mapping count %d\n",
1539 					mem->mapped_to_gpu_memory);
1540 		}
1541 	}
1542 
1543 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
1544 	 * required.
1545 	 */
1546 	if (mem->mapped_to_gpu_memory == 0 &&
1547 	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1548 		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1549 						process_info->eviction_fence);
1550 
1551 unreserve_out:
1552 	unreserve_bo_and_vms(&ctx, false, false);
1553 out:
1554 	mutex_unlock(&mem->lock);
1555 	return ret;
1556 }
1557 
1558 int amdgpu_amdkfd_gpuvm_sync_memory(
1559 		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1560 {
1561 	struct amdgpu_sync sync;
1562 	int ret;
1563 
1564 	amdgpu_sync_create(&sync);
1565 
1566 	mutex_lock(&mem->lock);
1567 	amdgpu_sync_clone(&mem->sync, &sync);
1568 	mutex_unlock(&mem->lock);
1569 
1570 	ret = amdgpu_sync_wait(&sync, intr);
1571 	amdgpu_sync_free(&sync);
1572 	return ret;
1573 }
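/* Editorial note: the sync object is cloned under mem->lock so the
 * potentially long amdgpu_sync_wait() runs without holding the lock,
 * allowing concurrent map/unmap operations on the same BO to proceed.
 */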
1574 
1575 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1576 		struct kgd_mem *mem, void **kptr, uint64_t *size)
1577 {
1578 	int ret;
1579 	struct amdgpu_bo *bo = mem->bo;
1580 
1581 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1582 		pr_err("userptr can't be mapped to kernel\n");
1583 		return -EINVAL;
1584 	}
1585 
1586 	/* delete kgd_mem from kfd_bo_list to avoid re-validating
1587 	 * this BO when it is restored after an eviction.
1588 	 */
1589 	mutex_lock(&mem->process_info->lock);
1590 
1591 	ret = amdgpu_bo_reserve(bo, true);
1592 	if (ret) {
1593 		pr_err("Failed to reserve bo. ret %d\n", ret);
1594 		goto bo_reserve_failed;
1595 	}
1596 
1597 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1598 	if (ret) {
1599 		pr_err("Failed to pin bo. ret %d\n", ret);
1600 		goto pin_failed;
1601 	}
1602 
1603 	ret = amdgpu_bo_kmap(bo, kptr);
1604 	if (ret) {
1605 		pr_err("Failed to map bo to kernel. ret %d\n", ret);
1606 		goto kmap_failed;
1607 	}
1608 
1609 	amdgpu_amdkfd_remove_eviction_fence(
1610 		bo, mem->process_info->eviction_fence);
1611 	list_del_init(&mem->validate_list.head);
1612 
1613 	if (size)
1614 		*size = amdgpu_bo_size(bo);
1615 
1616 	amdgpu_bo_unreserve(bo);
1617 
1618 	mutex_unlock(&mem->process_info->lock);
1619 	return 0;
1620 
1621 kmap_failed:
1622 	amdgpu_bo_unpin(bo);
1623 pin_failed:
1624 	amdgpu_bo_unreserve(bo);
1625 bo_reserve_failed:
1626 	mutex_unlock(&mem->process_info->lock);
1627 
1628 	return ret;
1629 }
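/* Usage sketch (editorial, variable names hypothetical):
 *
 *	void *kptr;
 *	uint64_t size;
 *	r = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kgd, mem, &kptr, &size);
 *
 * On success the BO is pinned in GTT and removed from the restore list,
 * so the kernel mapping stays valid across user-mode evictions.
 */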
1630 
1631 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1632 					      struct kfd_vm_fault_info *mem)
1633 {
1634 	struct amdgpu_device *adev;
1635 
1636 	adev = (struct amdgpu_device *)kgd;
1637 	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1638 		*mem = *adev->gmc.vm_fault_info;
1639 		mb();
1640 		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1641 	}
1642 	return 0;
1643 }
1644 
1645 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1646 				      struct dma_buf *dma_buf,
1647 				      uint64_t va, void *vm,
1648 				      struct kgd_mem **mem, uint64_t *size,
1649 				      uint64_t *mmap_offset)
1650 {
1651 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1652 	struct drm_gem_object *obj;
1653 	struct amdgpu_bo *bo;
1654 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1655 
1656 	if (dma_buf->ops != &amdgpu_dmabuf_ops)
1657 		/* Can't handle non-graphics buffers */
1658 		return -EINVAL;
1659 
1660 	obj = dma_buf->priv;
1661 	if (obj->dev->dev_private != adev)
1662 		/* Can't handle buffers from other devices */
1663 		return -EINVAL;
1664 
1665 	bo = gem_to_amdgpu_bo(obj);
1666 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1667 				    AMDGPU_GEM_DOMAIN_GTT)))
1668 		/* Only VRAM and GTT BOs are supported */
1669 		return -EINVAL;
1670 
1671 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1672 	if (!*mem)
1673 		return -ENOMEM;
1674 
1675 	if (size)
1676 		*size = amdgpu_bo_size(bo);
1677 
1678 	if (mmap_offset)
1679 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
1680 
1681 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1682 	mutex_init(&(*mem)->lock);
1683 	(*mem)->alloc_flags =
1684 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1685 		 ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
1686 		ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
1687 
1688 	(*mem)->bo = amdgpu_bo_ref(bo);
1689 	(*mem)->va = va;
1690 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1691 		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1692 	(*mem)->mapped_to_gpu_memory = 0;
1693 	(*mem)->process_info = avm->process_info;
1694 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1695 	amdgpu_sync_create(&(*mem)->sync);
1696 
1697 	return 0;
1698 }
1699 
1700 /* Evict a userptr BO by stopping the queues if necessary
1701  *
1702  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1703  * cannot do any memory allocations, and cannot take any locks that
1704  * are held elsewhere while allocating memory. Therefore this is as
1705  * simple as possible, using atomic counters.
1706  *
1707  * It doesn't do anything to the BO itself. The real work happens in
1708  * restore, where we get updated page addresses. This function only
1709  * ensures that GPU access to the BO is stopped.
1710  */
1711 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1712 				struct mm_struct *mm)
1713 {
1714 	struct amdkfd_process_info *process_info = mem->process_info;
1715 	int evicted_bos;
1716 	int r = 0;
1717 
1718 	atomic_inc(&mem->invalid);
1719 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1720 	if (evicted_bos == 1) {
1721 		/* First eviction, stop the queues */
1722 		r = kgd2kfd_quiesce_mm(mm);
1723 		if (r)
1724 			pr_err("Failed to quiesce KFD\n");
1725 		schedule_delayed_work(&process_info->restore_userptr_work,
1726 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1727 	}
1728 
1729 	return r;
1730 }
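/* Editorial note on the counter protocol above: mem->invalid marks the
 * individual BO stale, while process_info->evicted_bos counts evictions
 * process-wide. Only the 0 -> 1 transition quiesces the queues; per the
 * worker's description below, the queues are restarted only if no new
 * eviction raced in before the restore completed.
 */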
1731 
1732 /* Update invalid userptr BOs
1733  *
1734  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1735  * userptr_inval_list and updates user pages for all BOs that have
1736  * been invalidated since their last update.
1737  */
1738 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1739 				     struct mm_struct *mm)
1740 {
1741 	struct kgd_mem *mem, *tmp_mem;
1742 	struct amdgpu_bo *bo;
1743 	struct ttm_operation_ctx ctx = { false, false };
1744 	int invalid, ret;
1745 
1746 	/* Move all invalidated BOs to the userptr_inval_list and
1747 	 * release their user pages by migration to the CPU domain
1748 	 */
1749 	list_for_each_entry_safe(mem, tmp_mem,
1750 				 &process_info->userptr_valid_list,
1751 				 validate_list.head) {
1752 		if (!atomic_read(&mem->invalid))
1753 			continue; /* BO is still valid */
1754 
1755 		bo = mem->bo;
1756 
1757 		if (amdgpu_bo_reserve(bo, true))
1758 			return -EAGAIN;
1759 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1760 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1761 		amdgpu_bo_unreserve(bo);
1762 		if (ret) {
1763 			pr_err("%s: Failed to invalidate userptr BO\n",
1764 			       __func__);
1765 			return -EAGAIN;
1766 		}
1767 
1768 		list_move_tail(&mem->validate_list.head,
1769 			       &process_info->userptr_inval_list);
1770 	}
1771 
1772 	if (list_empty(&process_info->userptr_inval_list))
1773 		return 0; /* All evicted userptr BOs were freed */
1774 
1775 	/* Go through userptr_inval_list and update any invalid user_pages */
1776 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1777 			    validate_list.head) {
1778 		invalid = atomic_read(&mem->invalid);
1779 		if (!invalid)
1780 			/* BO hasn't been invalidated since the last
1781 			 * revalidation attempt. Keep its BO list.
1782 			 */
1783 			continue;
1784 
1785 		bo = mem->bo;
1786 
1787 		/* Get updated user pages */
1788 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1789 		if (ret) {
1790 			pr_debug("%s: Failed to get user pages: %d\n",
1791 				__func__, ret);
1792 
1793 			/* Return error -EBUSY or -ENOMEM, retry restore */
1794 			return ret;
1795 		}
1796 
1797 		/*
1798 		 * FIXME: Cannot ignore the return code, must hold
1799 		 * notifier_lock
1800 		 */
1801 		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1802 
1803 		/* Mark the BO as valid unless it was invalidated
1804 		 * again concurrently.
1805 		 */
1806 		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1807 			return -EAGAIN;
1808 	}
1809 
1810 	return 0;
1811 }
1812 
1813 /* Validate invalid userptr BOs
1814  *
1815  * Validates BOs on the userptr_inval_list, and moves them back to the
1816  * userptr_valid_list. Also updates GPUVM page tables with new page
1817  * addresses and waits for the page table updates to complete.
1818  */
1819 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1820 {
1821 	struct amdgpu_bo_list_entry *pd_bo_list_entries;
1822 	struct list_head resv_list, duplicates;
1823 	struct ww_acquire_ctx ticket;
1824 	struct amdgpu_sync sync;
1825 
1826 	struct amdgpu_vm *peer_vm;
1827 	struct kgd_mem *mem, *tmp_mem;
1828 	struct amdgpu_bo *bo;
1829 	struct ttm_operation_ctx ctx = { false, false };
1830 	int i, ret;
1831 
1832 	pd_bo_list_entries = kcalloc(process_info->n_vms,
1833 				     sizeof(struct amdgpu_bo_list_entry),
1834 				     GFP_KERNEL);
1835 	if (!pd_bo_list_entries) {
1836 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1837 		ret = -ENOMEM;
1838 		goto out_no_mem;
1839 	}
1840 
1841 	INIT_LIST_HEAD(&resv_list);
1842 	INIT_LIST_HEAD(&duplicates);
1843 
1844 	/* Get all the page directory BOs that need to be reserved */
1845 	i = 0;
1846 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1847 			    vm_list_node)
1848 		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1849 				    &pd_bo_list_entries[i++]);
1850 	/* Add the userptr_inval_list entries to resv_list */
1851 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1852 			    validate_list.head) {
1853 		list_add_tail(&mem->resv_list.head, &resv_list);
1854 		mem->resv_list.bo = mem->validate_list.bo;
1855 		mem->resv_list.num_shared = mem->validate_list.num_shared;
1856 	}
1857 
1858 	/* Reserve all BOs and page tables for validation */
1859 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1860 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
1861 	if (ret)
1862 		goto out_free;
1863 
1864 	amdgpu_sync_create(&sync);
1865 
1866 	ret = process_validate_vms(process_info);
1867 	if (ret)
1868 		goto unreserve_out;
1869 
1870 	/* Validate BOs and update GPUVM page tables */
1871 	list_for_each_entry_safe(mem, tmp_mem,
1872 				 &process_info->userptr_inval_list,
1873 				 validate_list.head) {
1874 		struct kfd_bo_va_list *bo_va_entry;
1875 
1876 		bo = mem->bo;
1877 
1878 		/* Validate the BO if we got user pages */
1879 		if (bo->tbo.ttm->pages[0]) {
1880 			amdgpu_bo_placement_from_domain(bo, mem->domain);
1881 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1882 			if (ret) {
1883 				pr_err("%s: failed to validate BO\n", __func__);
1884 				goto unreserve_out;
1885 			}
1886 		}
1887 
1888 		list_move_tail(&mem->validate_list.head,
1889 			       &process_info->userptr_valid_list);
1890 
1891 		/* Update mapping. If the BO was not validated
1892 		 * (because we couldn't get user pages), this will
1893 		 * clear the page table entries, which will result in
1894 		 * VM faults if the GPU tries to access the invalid
1895 		 * memory.
1896 		 */
1897 		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1898 			if (!bo_va_entry->is_mapped)
1899 				continue;
1900 
1901 			ret = update_gpuvm_pte((struct amdgpu_device *)
1902 					       bo_va_entry->kgd_dev,
1903 					       bo_va_entry, &sync);
1904 			if (ret) {
1905 				pr_err("%s: update PTE failed\n", __func__);
1906 				/* make sure this gets validated again */
1907 				atomic_inc(&mem->invalid);
1908 				goto unreserve_out;
1909 			}
1910 		}
1911 	}
1912 
1913 	/* Update page directories */
1914 	ret = process_update_pds(process_info, &sync);
1915 
1916 unreserve_out:
1917 	ttm_eu_backoff_reservation(&ticket, &resv_list);
1918 	amdgpu_sync_wait(&sync, false);
1919 	amdgpu_sync_free(&sync);
1920 out_free:
1921 	kfree(pd_bo_list_entries);
1922 out_no_mem:
1923 
1924 	return ret;
1925 }
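
/* The reservation dance above is the standard ttm_eu pattern. A minimal
 * sketch of that pattern in isolation (illustrative only; "bo" and
 * do_work() are placeholders):
 *
 *	struct ww_acquire_ctx ticket;
 *	struct ttm_validate_buffer entry;
 *	struct list_head list, dups;
 *	int ret;
 *
 *	INIT_LIST_HEAD(&list);
 *	INIT_LIST_HEAD(&dups);
 *	entry.bo = &bo->tbo;
 *	entry.num_shared = 1;
 *	list_add_tail(&entry.head, &list);
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &list, false, &dups);
 *	if (!ret) {
 *		do_work();
 *		ttm_eu_backoff_reservation(&ticket, &list);
 *	}
 *
 * ttm_eu_reserve_buffers() takes all reservations under one ww_acquire
 * ticket, so reserving the page directories and the userptr BOs together
 * cannot deadlock against another task grabbing them in a different order.
 */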
1926 
1927 /* Worker callback to restore evicted userptr BOs
1928  *
1929  * Tries to update and validate all userptr BOs. If successful and no
1930  * concurrent evictions happened, the queues are restarted. Otherwise,
1931  * reschedule for another attempt later.
1932  */
1933 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1934 {
1935 	struct delayed_work *dwork = to_delayed_work(work);
1936 	struct amdkfd_process_info *process_info =
1937 		container_of(dwork, struct amdkfd_process_info,
1938 			     restore_userptr_work);
1939 	struct task_struct *usertask;
1940 	struct mm_struct *mm;
1941 	int evicted_bos;
1942 
1943 	evicted_bos = atomic_read(&process_info->evicted_bos);
1944 	if (!evicted_bos)
1945 		return;
1946 
1947 	/* Reference task and mm in case of concurrent process termination */
1948 	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1949 	if (!usertask)
1950 		return;
1951 	mm = get_task_mm(usertask);
1952 	if (!mm) {
1953 		put_task_struct(usertask);
1954 		return;
1955 	}
1956 
1957 	mutex_lock(&process_info->lock);
1958 
1959 	if (update_invalid_user_pages(process_info, mm))
1960 		goto unlock_out;
1961 	/* userptr_inval_list can be empty if all evicted userptr BOs
1962 	 * have been freed. In that case there is nothing to validate
1963 	 * and we can just restart the queues.
1964 	 */
1965 	if (!list_empty(&process_info->userptr_inval_list)) {
1966 		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1967 			goto unlock_out; /* Concurrent eviction, try again */
1968 
1969 		if (validate_invalid_user_pages(process_info))
1970 			goto unlock_out;
1971 	}
1972 	/* Final check for concurrent eviction and atomic update. If
1973 	 * another eviction happens after the successful update, it will
1974 	 * be the first eviction that calls quiesce_mm. The eviction
1975 	 * reference counting inside KFD will handle this case.
1976 	 */
1977 	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1978 	    evicted_bos)
1979 		goto unlock_out;
1980 	evicted_bos = 0;
1981 	if (kgd2kfd_resume_mm(mm)) {
1982 		pr_err("%s: Failed to resume KFD\n", __func__);
1983 		/* No recovery from this failure. Probably the CP is
1984 		 * hanging. No point trying again.
1985 		 */
1986 	}
1987 
1988 unlock_out:
1989 	mutex_unlock(&process_info->lock);
1990 	mmput(mm);
1991 	put_task_struct(usertask);
1992 
1993 	/* If validation failed, reschedule another attempt */
1994 	if (evicted_bos)
1995 		schedule_delayed_work(&process_info->restore_userptr_work,
1996 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1997 }
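
/* The work item itself is set up once per process when its first VM is
 * created (see init_kfd_vm() earlier in this file), roughly:
 *
 *	INIT_DELAYED_WORK(&info->restore_userptr_work,
 *			  amdgpu_amdkfd_restore_userptr_worker);
 *
 * It is armed from the eviction path and re-armed from the tail of the
 * worker above, always with the short AMDGPU_USERPTR_RESTORE_DELAY_MS
 * delay so that a burst of MMU notifier invalidations is coalesced into a
 * single restore attempt.
 */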
1998 
1999 /**
 * amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2000  *   KFD process identified by process_info
2001  *
2002  * @process_info: amdkfd_process_info of the KFD process
2003  *
2004  * After memory eviction, the restore thread calls this function. The
2005  * function must be called while the process is still valid. BO restore involves:
2006  *
2007  * 1.  Release the old eviction fence and create a new one
2008  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2009  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2010  *     BOs that need to be reserved.
2011  * 4.  Reserve all the BOs
2012  * 5.  Validate the PD and PT BOs
2013  * 6.  Validate all KFD BOs using kfd_bo_list, map them, and add the new fence
2014  * 7.  Add the fence to all PD and PT BOs
2015  * 8.  Unreserve all BOs
2016  */
2017 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2018 {
2019 	struct amdgpu_bo_list_entry *pd_bo_list;
2020 	struct amdkfd_process_info *process_info = info;
2021 	struct amdgpu_vm *peer_vm;
2022 	struct kgd_mem *mem;
2023 	struct bo_vm_reservation_context ctx;
2024 	struct amdgpu_amdkfd_fence *new_fence;
2025 	int ret = 0, i;
2026 	struct list_head duplicate_save;
2027 	struct amdgpu_sync sync_obj;
2028 
2029 	INIT_LIST_HEAD(&duplicate_save);
2030 	INIT_LIST_HEAD(&ctx.list);
2031 	INIT_LIST_HEAD(&ctx.duplicates);
2032 
2033 	pd_bo_list = kcalloc(process_info->n_vms,
2034 			     sizeof(struct amdgpu_bo_list_entry),
2035 			     GFP_KERNEL);
2036 	if (!pd_bo_list)
2037 		return -ENOMEM;
2038 
2039 	i = 0;
2040 	mutex_lock(&process_info->lock);
2041 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2042 			vm_list_node)
2043 		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2044 
2045 	/* Reserve all BOs and page tables/directories. Add all BOs from
2046 	 * kfd_bo_list to ctx.list.
2047 	 */
2048 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2049 			    validate_list.head) {
2051 		list_add_tail(&mem->resv_list.head, &ctx.list);
2052 		mem->resv_list.bo = mem->validate_list.bo;
2053 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2054 	}
2055 
2056 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2057 				     false, &duplicate_save);
2058 	if (ret) {
2059 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2060 		goto ttm_reserve_fail;
2061 	}
2062 
2063 	amdgpu_sync_create(&sync_obj);
2064 
2065 	/* Validate PDs and PTs */
2066 	ret = process_validate_vms(process_info);
2067 	if (ret)
2068 		goto validate_map_fail;
2069 
2070 	ret = process_sync_pds_resv(process_info, &sync_obj);
2071 	if (ret) {
2072 		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2073 		goto validate_map_fail;
2074 	}
2075 
2076 	/* Validate BOs and map them to GPUVM (update VM page tables). */
2077 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2078 			    validate_list.head) {
2079 
2080 		struct amdgpu_bo *bo = mem->bo;
2081 		uint32_t domain = mem->domain;
2082 		struct kfd_bo_va_list *bo_va_entry;
2083 
2084 		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2085 		if (ret) {
2086 			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2087 			goto validate_map_fail;
2088 		}
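		/* Add the BO's move fence to the sync object so the restore
		 * waits for any pending migration to finish before signaling
		 * completion.
		 */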
2089 		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);
2090 		if (ret) {
2091 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2092 			goto validate_map_fail;
2093 		}
2094 		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2095 				    bo_list) {
2096 			ret = update_gpuvm_pte((struct amdgpu_device *)
2097 					      bo_va_entry->kgd_dev,
2098 					      bo_va_entry,
2099 					      &sync_obj);
2100 			if (ret) {
2101 				pr_debug("Memory eviction: update PTE failed. Try again\n");
2102 				goto validate_map_fail;
2103 			}
2104 		}
2105 	}
2106 
2107 	/* Update page directories */
2108 	ret = process_update_pds(process_info, &sync_obj);
2109 	if (ret) {
2110 		pr_debug("Memory eviction: update PDs failed. Try again\n");
2111 		goto validate_map_fail;
2112 	}
2113 
2114 	/* Wait for validate and PT updates to finish */
2115 	amdgpu_sync_wait(&sync_obj, false);
2116 
2117 	/* Release the old eviction fence and create a new one. Because a
2118 	 * fence only goes from unsignaled to signaled, it cannot be reused.
2119 	 * Use the context and mm from the old fence.
2120 	 */
2121 	new_fence = amdgpu_amdkfd_fence_create(
2122 				process_info->eviction_fence->base.context,
2123 				process_info->eviction_fence->mm);
2124 	if (!new_fence) {
2125 		pr_err("Failed to create eviction fence\n");
2126 		ret = -ENOMEM;
2127 		goto validate_map_fail;
2128 	}
2129 	dma_fence_put(&process_info->eviction_fence->base);
2130 	process_info->eviction_fence = new_fence;
2131 	*ef = dma_fence_get(&new_fence->base);
2132 
2133 	/* Attach new eviction fence to all BOs */
2134 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2135 		validate_list.head)
2136 		amdgpu_bo_fence(mem->bo,
2137 			&process_info->eviction_fence->base, true);
2138 
2139 	/* Attach eviction fence to PD / PT BOs */
2140 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2141 			    vm_list_node) {
2142 		struct amdgpu_bo *bo = peer_vm->root.base.bo;
2143 
2144 		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2145 	}
2146 
2147 validate_map_fail:
2148 	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2149 	amdgpu_sync_free(&sync_obj);
2150 ttm_reserve_fail:
2151 	mutex_unlock(&process_info->lock);
2152 	kfree(pd_bo_list);
2153 	return ret;
2154 }
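
/* A sketch of the intended calling convention (the real caller is the KFD
 * restore worker; the names p and p->ef here are illustrative):
 *
 *	struct dma_fence *ef;
 *	int r;
 *
 *	r = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, &ef);
 *	if (!r) {
 *		dma_fence_put(p->ef);
 *		p->ef = ef;
 *	}
 *
 * On success the caller receives one reference to the new eviction fence
 * through *ef; on failure *ef is left untouched and the caller is expected
 * to retry the restore later.
 */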
2155 
2156 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2157 {
2158 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2159 	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2160 	int ret;
2161 
2162 	if (!info || !gws)
2163 		return -EINVAL;
2164 
2165 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2166 	if (!*mem)
2167 		return -ENOMEM;
2168 
2169 	mutex_init(&(*mem)->lock);
2170 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
2171 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
2172 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2173 	(*mem)->process_info = process_info;
2174 	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2175 	amdgpu_sync_create(&(*mem)->sync);
2176 
2178 	/* Validate the GWS BO the first time it is added to the process */
2179 	mutex_lock(&(*mem)->process_info->lock);
2180 	ret = amdgpu_bo_reserve(gws_bo, false);
2181 	if (unlikely(ret)) {
2182 		pr_err("Reserve gws bo failed %d\n", ret);
2183 		goto bo_reservation_failure;
2184 	}
2185 
2186 	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2187 	if (ret) {
2188 		pr_err("GWS BO validate failed %d\n", ret);
2189 		goto bo_validation_failure;
2190 	}
2191 	/* The GWS resource is shared between amdgpu and amdkfd.
2192 	 * Add the process eviction fence to the BO so that they
2193 	 * can evict each other.
2194 	 */
2195 	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2196 	if (ret)
2197 		goto reserve_shared_fail;
2198 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2199 	amdgpu_bo_unreserve(gws_bo);
2200 	mutex_unlock(&(*mem)->process_info->lock);
2201 
2202 	return ret;
2203 
2204 reserve_shared_fail:
2205 bo_validation_failure:
2206 	amdgpu_bo_unreserve(gws_bo);
2207 bo_reservation_failure:
2208 	mutex_unlock(&(*mem)->process_info->lock);
2209 	amdgpu_sync_free(&(*mem)->sync);
2210 	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2211 	amdgpu_bo_unref(&gws_bo);
2212 	mutex_destroy(&(*mem)->lock);
2213 	kfree(*mem);
2214 	*mem = NULL;
2215 	return ret;
2216 }
2217 
2218 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2219 {
2220 	int ret;
2221 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2222 	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2223 	struct amdgpu_bo *gws_bo = kgd_mem->bo;
2224 
2225 	/* Remove the BO from the process's validate list so the restore
2226 	 * worker won't touch it anymore.
2227 	 */
2228 	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2229 
2230 	ret = amdgpu_bo_reserve(gws_bo, false);
2231 	if (unlikely(ret)) {
2232 		pr_err("Reserve gws bo failed %d\n", ret);
2233 		/* TODO: add the BO back to the validate list? */
2234 		return ret;
2235 	}
2236 	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2237 			process_info->eviction_fence);
2238 	amdgpu_bo_unreserve(gws_bo);
2239 	amdgpu_sync_free(&kgd_mem->sync);
2240 	amdgpu_bo_unref(&gws_bo);
2241 	mutex_destroy(&kgd_mem->lock);
2242 	kfree(mem);
2243 	return 0;
2244 }
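
/* Typical usage pairs the two GWS helpers around the lifetime of the
 * process's GWS allocation (hypothetical sketch; process_info and gws_bo
 * stand in for the real caller's handles):
 *
 *	struct kgd_mem *gws_mem;
 *	int r;
 *
 *	r = amdgpu_amdkfd_add_gws_to_process(process_info, gws_bo, &gws_mem);
 *	...
 *	r = amdgpu_amdkfd_remove_gws_from_process(process_info, gws_mem);
 *
 * amdgpu_amdkfd_add_gws_to_process() takes its own reference on the GWS BO
 * (amdgpu_bo_ref) and the remove path drops it, so the underlying BO can
 * outlive the process mapping.
 */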
2245 
2246 /* Returns GPU-specific tiling mode information */
2247 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2248 				struct tile_config *config)
2249 {
2250 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2251 
2252 	config->gb_addr_config = adev->gfx.config.gb_addr_config;
2253 	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2254 	config->num_tile_configs =
2255 			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2256 	config->macro_tile_config_ptr =
2257 			adev->gfx.config.macrotile_mode_array;
2258 	config->num_macro_tile_configs =
2259 			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2260 
2261 	/* These values are not set from GFX9 onwards */
2262 	config->num_banks = adev->gfx.config.num_banks;
2263 	config->num_ranks = adev->gfx.config.num_ranks;
2264 
2265 	return 0;
2266 }
2267
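/* Example use (illustrative; kgd is any KFD device handle):
 *
 *	struct tile_config cfg;
 *
 *	if (!amdgpu_amdkfd_get_tile_config(kgd, &cfg))
 *		pr_debug("gb_addr_config 0x%x, %u tile modes\n",
 *			 cfg.gb_addr_config, cfg.num_tile_configs);
 *
 * The returned pointers reference the adev's own config arrays; callers
 * must treat them as read-only and must not free them.
 */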