1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27 
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_amdkfd.h"
31 #include "amdgpu_dma_buf.h"
32 
33 /* BO flag to indicate a KFD userptr BO */
34 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
35 
36 /* Userptr restore delay, just long enough to allow consecutive VM
37  * changes to accumulate
38  */
39 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40 
41 /* Impose limit on how much memory KFD can use */
42 static struct {
43 	uint64_t max_system_mem_limit;
44 	uint64_t max_ttm_mem_limit;
45 	int64_t system_mem_used;
46 	int64_t ttm_mem_used;
47 	spinlock_t mem_limit_lock;
48 } kfd_mem_limit;
49 
50 /* Struct used for amdgpu_amdkfd_bo_validate */
51 struct amdgpu_vm_parser {
52 	uint32_t        domain;
53 	bool            wait;
54 };
55 
56 static const char * const domain_bit_to_string[] = {
57 		"CPU",
58 		"GTT",
59 		"VRAM",
60 		"GDS",
61 		"GWS",
62 		"OA"
63 };
64 
65 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
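
/* For example (illustrative, assuming the usual amdgpu domain bit values
 * AMDGPU_GEM_DOMAIN_CPU = 0x1, _GTT = 0x2, _VRAM = 0x4, ...):
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) expands to
 * domain_bit_to_string[ffs(0x4) - 1] == domain_bit_to_string[2] == "VRAM".
 */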
66 
67 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
68 
69 
70 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
71 {
72 	return (struct amdgpu_device *)kgd;
73 }
74 
75 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
76 		struct kgd_mem *mem)
77 {
78 	struct kfd_bo_va_list *entry;
79 
80 	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
81 		if (entry->bo_va->base.vm == avm)
82 			return false;
83 
84 	return true;
85 }
86 
/* Set memory usage limits. Currently, the limits are:
88  *  System (TTM + userptr) memory - 15/16th System RAM
89  *  TTM memory - 3/8th System RAM
90  */
91 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
92 {
93 	struct sysinfo si;
94 	uint64_t mem;
95 
96 	si_meminfo(&si);
97 	mem = si.totalram - si.totalhigh;
98 	mem *= si.mem_unit;
99 
100 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
101 	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
102 	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
103 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
104 		(kfd_mem_limit.max_system_mem_limit >> 20),
105 		(kfd_mem_limit.max_ttm_mem_limit >> 20));
106 }
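
/* Worked example of the limits above (illustrative numbers only): on a
 * node with 64 GB of usable system memory, max_system_mem_limit is
 * 64 GB - 4 GB = 60 GB (15/16) and max_ttm_mem_limit is
 * 32 GB - 8 GB = 24 GB (3/8).
 */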
107 
108 /* Estimate page table size needed to represent a given memory size
109  *
110  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
111  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
112  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
113  * for 2MB pages for TLB efficiency. However, small allocations and
114  * fragmented system memory still need some 4KB pages. We choose a
115  * compromise that should work in most cases without reserving too
116  * much memory for page tables unnecessarily (factor 16K, >> 14).
117  */
118 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
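
/* Worked example (illustrative): with a total memory size of 64 GB,
 * ESTIMATE_PT_SIZE() reserves 64 GB >> 14 = 4 MB for page tables. The
 * all-4KB-pages worst case would need 64 GB >> 9 = 128 MB, the
 * all-2MB-pages best case only 64 GB >> 18 = 256 KB, so >> 14 sits
 * between the two.
 */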
119 
120 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
121 		uint64_t size, u32 domain, bool sg)
122 {
123 	uint64_t reserved_for_pt =
124 		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
125 	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
126 	int ret = 0;
127 
128 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
129 				       sizeof(struct amdgpu_bo));
130 
131 	vram_needed = 0;
132 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
133 		/* TTM GTT memory */
134 		system_mem_needed = acc_size + size;
135 		ttm_mem_needed = acc_size + size;
136 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
137 		/* Userptr */
138 		system_mem_needed = acc_size + size;
139 		ttm_mem_needed = acc_size;
140 	} else {
141 		/* VRAM and SG */
142 		system_mem_needed = acc_size;
143 		ttm_mem_needed = acc_size;
144 		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
145 			vram_needed = size;
146 	}
147 
148 	spin_lock(&kfd_mem_limit.mem_limit_lock);
149 
150 	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
151 	     kfd_mem_limit.max_system_mem_limit) ||
152 	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
153 	     kfd_mem_limit.max_ttm_mem_limit) ||
154 	    (adev->kfd.vram_used + vram_needed >
155 	     adev->gmc.real_vram_size - reserved_for_pt)) {
156 		ret = -ENOMEM;
157 	} else {
158 		kfd_mem_limit.system_mem_used += system_mem_needed;
159 		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
160 		adev->kfd.vram_used += vram_needed;
161 	}
162 
163 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
164 	return ret;
165 }
166 
167 static void unreserve_mem_limit(struct amdgpu_device *adev,
168 		uint64_t size, u32 domain, bool sg)
169 {
170 	size_t acc_size;
171 
172 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
173 				       sizeof(struct amdgpu_bo));
174 
175 	spin_lock(&kfd_mem_limit.mem_limit_lock);
176 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
177 		kfd_mem_limit.system_mem_used -= (acc_size + size);
178 		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
179 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
180 		kfd_mem_limit.system_mem_used -= (acc_size + size);
181 		kfd_mem_limit.ttm_mem_used -= acc_size;
182 	} else {
183 		kfd_mem_limit.system_mem_used -= acc_size;
184 		kfd_mem_limit.ttm_mem_used -= acc_size;
185 		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
186 			adev->kfd.vram_used -= size;
187 			WARN_ONCE(adev->kfd.vram_used < 0,
188 				  "kfd VRAM memory accounting unbalanced");
189 		}
190 	}
191 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
192 		  "kfd system memory accounting unbalanced");
193 	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
194 		  "kfd TTM memory accounting unbalanced");
195 
196 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
197 }
198 
199 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
200 {
201 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
202 	u32 domain = bo->preferred_domains;
203 	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
204 
205 	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
206 		domain = AMDGPU_GEM_DOMAIN_CPU;
207 		sg = false;
208 	}
209 
210 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
211 }
212 
213 
214 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
215  *  reservation object.
216  *
217  * @bo: [IN] Remove eviction fence(s) from this BO
218  * @ef: [IN] This eviction fence is removed if it
219  *  is present in the shared list.
220  *
 * NOTE: Must be called with the BO reserved, i.e. with bo->tbo.base.resv locked.
222  */
223 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
224 					struct amdgpu_amdkfd_fence *ef)
225 {
226 	struct dma_resv *resv = bo->tbo.base.resv;
227 	struct dma_resv_list *old, *new;
228 	unsigned int i, j, k;
229 
230 	if (!ef)
231 		return -EINVAL;
232 
233 	old = dma_resv_get_list(resv);
234 	if (!old)
235 		return 0;
236 
237 	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
238 		      GFP_KERNEL);
239 	if (!new)
240 		return -ENOMEM;
241 
	/* Go through all the shared fences in the reservation object and sort
243 	 * the interesting ones to the end of the list.
244 	 */
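	/* Illustrative example: with old->shared = {A, E1, B, E2}, where E1 and
	 * E2 belong to the eviction fence context ef, the loop below produces
	 * new->shared = {A, B, E2, E1} with shared_count = 2, so only A and B
	 * stay visible and the trailing eviction fences are dropped afterwards.
	 */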
245 	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
246 		struct dma_fence *f;
247 
248 		f = rcu_dereference_protected(old->shared[i],
249 					      dma_resv_held(resv));
250 
251 		if (f->context == ef->base.context)
252 			RCU_INIT_POINTER(new->shared[--j], f);
253 		else
254 			RCU_INIT_POINTER(new->shared[k++], f);
255 	}
256 	new->shared_max = old->shared_max;
257 	new->shared_count = k;
258 
259 	/* Install the new fence list, seqcount provides the barriers */
260 	preempt_disable();
261 	write_seqcount_begin(&resv->seq);
262 	RCU_INIT_POINTER(resv->fence, new);
263 	write_seqcount_end(&resv->seq);
264 	preempt_enable();
265 
	/* Drop the references to the eviction fences that were moved to the
	 * end of the new list above.
	 */
	for (i = j; i < old->shared_count; ++i) {
268 		struct dma_fence *f;
269 
270 		f = rcu_dereference_protected(new->shared[i],
271 					      dma_resv_held(resv));
272 		dma_fence_put(f);
273 	}
274 	kfree_rcu(old, rcu);
275 
276 	return 0;
277 }
278 
279 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
280 				     bool wait)
281 {
282 	struct ttm_operation_ctx ctx = { false, false };
283 	int ret;
284 
285 	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
286 		 "Called with userptr BO"))
287 		return -EINVAL;
288 
289 	amdgpu_bo_placement_from_domain(bo, domain);
290 
291 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
292 	if (ret)
293 		goto validate_fail;
294 	if (wait)
295 		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
296 
297 validate_fail:
298 	return ret;
299 }
300 
301 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
302 {
303 	struct amdgpu_vm_parser *p = param;
304 
305 	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
306 }
307 
308 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
309  *
310  * Page directories are not updated here because huge page handling
311  * during page table updates can invalidate page directory entries
312  * again. Page directories are only updated after updating page
313  * tables.
314  */
315 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
316 {
317 	struct amdgpu_bo *pd = vm->root.base.bo;
318 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
319 	struct amdgpu_vm_parser param;
320 	int ret;
321 
322 	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
323 	param.wait = false;
324 
325 	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
326 					&param);
327 	if (ret) {
328 		pr_err("amdgpu: failed to validate PT BOs\n");
329 		return ret;
330 	}
331 
332 	ret = amdgpu_amdkfd_validate(&param, pd);
333 	if (ret) {
334 		pr_err("amdgpu: failed to validate PD\n");
335 		return ret;
336 	}
337 
338 	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
339 
340 	if (vm->use_cpu_for_update) {
341 		ret = amdgpu_bo_kmap(pd, NULL);
342 		if (ret) {
343 			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
344 			return ret;
345 		}
346 	}
347 
348 	return 0;
349 }
350 
351 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
352 {
353 	struct amdgpu_bo *pd = vm->root.base.bo;
354 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
355 	int ret;
356 
357 	ret = amdgpu_vm_update_pdes(adev, vm, false);
358 	if (ret)
359 		return ret;
360 
361 	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
362 }
363 
364 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
365 {
366 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
367 	bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
368 	uint32_t mapping_flags;
369 
370 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
371 	if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
372 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
373 	if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
374 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
375 
376 	switch (adev->asic_type) {
377 	case CHIP_ARCTURUS:
378 		if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
379 			if (bo_adev == adev)
380 				mapping_flags |= coherent ?
381 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
382 			else
383 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
384 		} else {
385 			mapping_flags |= coherent ?
386 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
387 		}
388 		break;
389 	default:
390 		mapping_flags |= coherent ?
391 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
392 	}
393 
394 	return amdgpu_gem_va_map_flags(adev, mapping_flags);
395 }
396 
397 /* add_bo_to_vm - Add a BO to a VM
398  *
 * Everything that needs to be done only once when a BO is first added
400  * to a VM. It can later be mapped and unmapped many times without
401  * repeating these steps.
402  *
403  * 1. Allocate and initialize BO VA entry data structure
404  * 2. Add BO to the VM
405  * 3. Determine ASIC-specific PTE flags
406  * 4. Alloc page tables and directories if needed
407  * 4a.  Validate new page tables and directories
408  */
409 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
410 		struct amdgpu_vm *vm, bool is_aql,
411 		struct kfd_bo_va_list **p_bo_va_entry)
412 {
413 	int ret;
414 	struct kfd_bo_va_list *bo_va_entry;
415 	struct amdgpu_bo *bo = mem->bo;
416 	uint64_t va = mem->va;
417 	struct list_head *list_bo_va = &mem->bo_va_list;
418 	unsigned long bo_size = bo->tbo.mem.size;
419 
420 	if (!va) {
421 		pr_err("Invalid VA when adding BO to VM\n");
422 		return -EINVAL;
423 	}
424 
425 	if (is_aql)
426 		va += bo_size;
427 
428 	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
429 	if (!bo_va_entry)
430 		return -ENOMEM;
431 
432 	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
433 			va + bo_size, vm);
434 
	/* Add BO to VM internal data structures */
436 	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
437 	if (!bo_va_entry->bo_va) {
438 		ret = -EINVAL;
439 		pr_err("Failed to add BO object to VM. ret == %d\n",
440 				ret);
441 		goto err_vmadd;
442 	}
443 
444 	bo_va_entry->va = va;
445 	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
446 	bo_va_entry->kgd_dev = (void *)adev;
447 	list_add(&bo_va_entry->bo_list, list_bo_va);
448 
449 	if (p_bo_va_entry)
450 		*p_bo_va_entry = bo_va_entry;
451 
	/* Validate page table and directory BOs */
453 	ret = vm_validate_pt_pd_bos(vm);
454 	if (ret) {
455 		pr_err("validate_pt_pd_bos() failed\n");
456 		goto err_alloc_pts;
457 	}
458 
459 	return 0;
460 
461 err_alloc_pts:
462 	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
463 	list_del(&bo_va_entry->bo_list);
464 err_vmadd:
465 	kfree(bo_va_entry);
466 	return ret;
467 }
468 
469 static void remove_bo_from_vm(struct amdgpu_device *adev,
470 		struct kfd_bo_va_list *entry, unsigned long size)
471 {
472 	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
473 			entry->va,
474 			entry->va + size, entry);
475 	amdgpu_vm_bo_rmv(adev, entry->bo_va);
476 	list_del(&entry->bo_list);
477 	kfree(entry);
478 }
479 
480 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
481 				struct amdkfd_process_info *process_info,
482 				bool userptr)
483 {
484 	struct ttm_validate_buffer *entry = &mem->validate_list;
485 	struct amdgpu_bo *bo = mem->bo;
486 
487 	INIT_LIST_HEAD(&entry->head);
488 	entry->num_shared = 1;
489 	entry->bo = &bo->tbo;
490 	mutex_lock(&process_info->lock);
491 	if (userptr)
492 		list_add_tail(&entry->head, &process_info->userptr_valid_list);
493 	else
494 		list_add_tail(&entry->head, &process_info->kfd_bo_list);
495 	mutex_unlock(&process_info->lock);
496 }
497 
498 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
499 		struct amdkfd_process_info *process_info)
500 {
501 	struct ttm_validate_buffer *bo_list_entry;
502 
503 	bo_list_entry = &mem->validate_list;
504 	mutex_lock(&process_info->lock);
505 	list_del(&bo_list_entry->head);
506 	mutex_unlock(&process_info->lock);
507 }
508 
509 /* Initializes user pages. It registers the MMU notifier and validates
510  * the userptr BO in the GTT domain.
511  *
512  * The BO must already be on the userptr_valid_list. Otherwise an
513  * eviction and restore may happen that leaves the new BO unmapped
514  * with the user mode queues running.
515  *
516  * Takes the process_info->lock to protect against concurrent restore
517  * workers.
518  *
519  * Returns 0 for success, negative errno for errors.
520  */
521 static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
522 			   uint64_t user_addr)
523 {
524 	struct amdkfd_process_info *process_info = mem->process_info;
525 	struct amdgpu_bo *bo = mem->bo;
526 	struct ttm_operation_ctx ctx = { true, false };
527 	int ret = 0;
528 
529 	mutex_lock(&process_info->lock);
530 
531 	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
532 	if (ret) {
533 		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
534 		goto out;
535 	}
536 
537 	ret = amdgpu_mn_register(bo, user_addr);
538 	if (ret) {
539 		pr_err("%s: Failed to register MMU notifier: %d\n",
540 		       __func__, ret);
541 		goto out;
542 	}
543 
544 	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
545 	if (ret) {
546 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
547 		goto unregister_out;
548 	}
549 
550 	ret = amdgpu_bo_reserve(bo, true);
551 	if (ret) {
552 		pr_err("%s: Failed to reserve BO\n", __func__);
553 		goto release_out;
554 	}
555 	amdgpu_bo_placement_from_domain(bo, mem->domain);
556 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
557 	if (ret)
558 		pr_err("%s: failed to validate BO\n", __func__);
559 	amdgpu_bo_unreserve(bo);
560 
561 release_out:
562 	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
563 unregister_out:
564 	if (ret)
565 		amdgpu_mn_unregister(bo);
566 out:
567 	mutex_unlock(&process_info->lock);
568 	return ret;
569 }
570 
571 /* Reserving a BO and its page table BOs must happen atomically to
572  * avoid deadlocks. Some operations update multiple VMs at once. Track
573  * all the reservation info in a context structure. Optionally a sync
574  * object can track VM updates.
575  */
576 struct bo_vm_reservation_context {
577 	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
578 	unsigned int n_vms;		    /* Number of VMs reserved	    */
579 	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
580 	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
581 	struct list_head list, duplicates;  /* BO lists			    */
582 	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
583 	bool reserved;			    /* Whether BOs are reserved	    */
584 };
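
/* Typical usage, sketched from the callers later in this file (e.g.
 * amdgpu_amdkfd_gpuvm_map_memory_to_gpu):
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, vm, &ctx);
 *	if (unlikely(ret))
 *		return ret;
 *	... map or unmap the BO, adding fences to ctx.sync ...
 *	ret = unreserve_bo_and_vms(&ctx, false, false);
 */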
585 
586 enum bo_vm_match {
587 	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
588 	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
589 	BO_VM_ALL,		/* Match all VMs a BO was added to    */
590 };
591 
592 /**
593  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
594  * @mem: KFD BO structure.
595  * @vm: the VM to reserve.
596  * @ctx: the struct that will be used in unreserve_bo_and_vms().
597  */
598 static int reserve_bo_and_vm(struct kgd_mem *mem,
599 			      struct amdgpu_vm *vm,
600 			      struct bo_vm_reservation_context *ctx)
601 {
602 	struct amdgpu_bo *bo = mem->bo;
603 	int ret;
604 
605 	WARN_ON(!vm);
606 
607 	ctx->reserved = false;
608 	ctx->n_vms = 1;
609 	ctx->sync = &mem->sync;
610 
611 	INIT_LIST_HEAD(&ctx->list);
612 	INIT_LIST_HEAD(&ctx->duplicates);
613 
614 	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
615 	if (!ctx->vm_pd)
616 		return -ENOMEM;
617 
618 	ctx->kfd_bo.priority = 0;
619 	ctx->kfd_bo.tv.bo = &bo->tbo;
620 	ctx->kfd_bo.tv.num_shared = 1;
621 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
622 
623 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
624 
625 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
626 				     false, &ctx->duplicates);
	if (!ret) {
		ctx->reserved = true;
	} else {
		pr_err("Failed to reserve buffers in ttm\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}
634 
635 	return ret;
636 }
637 
638 /**
639  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
640  * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
 * are reserved. Otherwise, only the given VM is reserved.
643  * @map_type: the mapping status that will be used to filter the VMs.
644  * @ctx: the struct that will be used in unreserve_bo_and_vms().
645  *
646  * Returns 0 for success, negative for failure.
647  */
648 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
649 				struct amdgpu_vm *vm, enum bo_vm_match map_type,
650 				struct bo_vm_reservation_context *ctx)
651 {
652 	struct amdgpu_bo *bo = mem->bo;
653 	struct kfd_bo_va_list *entry;
654 	unsigned int i;
655 	int ret;
656 
657 	ctx->reserved = false;
658 	ctx->n_vms = 0;
659 	ctx->vm_pd = NULL;
660 	ctx->sync = &mem->sync;
661 
662 	INIT_LIST_HEAD(&ctx->list);
663 	INIT_LIST_HEAD(&ctx->duplicates);
664 
665 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
666 		if ((vm && vm != entry->bo_va->base.vm) ||
667 			(entry->is_mapped != map_type
668 			&& map_type != BO_VM_ALL))
669 			continue;
670 
671 		ctx->n_vms++;
672 	}
673 
674 	if (ctx->n_vms != 0) {
675 		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
676 				     GFP_KERNEL);
677 		if (!ctx->vm_pd)
678 			return -ENOMEM;
679 	}
680 
681 	ctx->kfd_bo.priority = 0;
682 	ctx->kfd_bo.tv.bo = &bo->tbo;
683 	ctx->kfd_bo.tv.num_shared = 1;
684 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
685 
686 	i = 0;
687 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
688 		if ((vm && vm != entry->bo_va->base.vm) ||
689 			(entry->is_mapped != map_type
690 			&& map_type != BO_VM_ALL))
691 			continue;
692 
693 		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
694 				&ctx->vm_pd[i]);
695 		i++;
696 	}
697 
698 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
699 				     false, &ctx->duplicates);
	if (!ret) {
		ctx->reserved = true;
	} else {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}
709 
710 	return ret;
711 }
712 
713 /**
714  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
715  * @ctx: Reservation context to unreserve
716  * @wait: Optionally wait for a sync object representing pending VM updates
717  * @intr: Whether the wait is interruptible
718  *
719  * Also frees any resources allocated in
720  * reserve_bo_and_(cond_)vm(s). Returns the status from
721  * amdgpu_sync_wait.
722  */
723 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
724 				 bool wait, bool intr)
725 {
726 	int ret = 0;
727 
728 	if (wait)
729 		ret = amdgpu_sync_wait(ctx->sync, intr);
730 
731 	if (ctx->reserved)
732 		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
733 	kfree(ctx->vm_pd);
734 
735 	ctx->sync = NULL;
736 
737 	ctx->reserved = false;
738 	ctx->vm_pd = NULL;
739 
740 	return ret;
741 }
742 
743 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
744 				struct kfd_bo_va_list *entry,
745 				struct amdgpu_sync *sync)
746 {
747 	struct amdgpu_bo_va *bo_va = entry->bo_va;
748 	struct amdgpu_vm *vm = bo_va->base.vm;
749 
750 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
751 
752 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
753 
754 	amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
755 
756 	return 0;
757 }
758 
759 static int update_gpuvm_pte(struct amdgpu_device *adev,
760 		struct kfd_bo_va_list *entry,
761 		struct amdgpu_sync *sync)
762 {
763 	int ret;
764 	struct amdgpu_bo_va *bo_va = entry->bo_va;
765 
766 	/* Update the page tables  */
767 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
768 	if (ret) {
769 		pr_err("amdgpu_vm_bo_update failed\n");
770 		return ret;
771 	}
772 
773 	return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
774 }
775 
776 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
777 		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
778 		bool no_update_pte)
779 {
780 	int ret;
781 
782 	/* Set virtual address for the allocation */
783 	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
784 			       amdgpu_bo_size(entry->bo_va->base.bo),
785 			       entry->pte_flags);
786 	if (ret) {
787 		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
788 				entry->va, ret);
789 		return ret;
790 	}
791 
792 	if (no_update_pte)
793 		return 0;
794 
795 	ret = update_gpuvm_pte(adev, entry, sync);
796 	if (ret) {
797 		pr_err("update_gpuvm_pte() failed\n");
798 		goto update_gpuvm_pte_failed;
799 	}
800 
801 	return 0;
802 
803 update_gpuvm_pte_failed:
804 	unmap_bo_from_gpuvm(adev, entry, sync);
805 	return ret;
806 }
807 
808 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
809 {
810 	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
811 
812 	if (!sg)
813 		return NULL;
814 	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
815 		kfree(sg);
816 		return NULL;
817 	}
818 	sg->sgl->dma_address = addr;
819 	sg->sgl->length = size;
820 #ifdef CONFIG_NEED_SG_DMA_LENGTH
821 	sg->sgl->dma_length = size;
822 #endif
823 	return sg;
824 }
825 
826 static int process_validate_vms(struct amdkfd_process_info *process_info)
827 {
828 	struct amdgpu_vm *peer_vm;
829 	int ret;
830 
831 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
832 			    vm_list_node) {
833 		ret = vm_validate_pt_pd_bos(peer_vm);
834 		if (ret)
835 			return ret;
836 	}
837 
838 	return 0;
839 }
840 
841 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
842 				 struct amdgpu_sync *sync)
843 {
844 	struct amdgpu_vm *peer_vm;
845 	int ret;
846 
847 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
848 			    vm_list_node) {
849 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
850 
851 		ret = amdgpu_sync_resv(NULL,
852 					sync, pd->tbo.base.resv,
853 					AMDGPU_FENCE_OWNER_KFD, false);
854 		if (ret)
855 			return ret;
856 	}
857 
858 	return 0;
859 }
860 
861 static int process_update_pds(struct amdkfd_process_info *process_info,
862 			      struct amdgpu_sync *sync)
863 {
864 	struct amdgpu_vm *peer_vm;
865 	int ret;
866 
867 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
868 			    vm_list_node) {
869 		ret = vm_update_pds(peer_vm, sync);
870 		if (ret)
871 			return ret;
872 	}
873 
874 	return 0;
875 }
876 
877 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
878 		       struct dma_fence **ef)
879 {
880 	struct amdkfd_process_info *info = NULL;
881 	int ret;
882 
883 	if (!*process_info) {
884 		info = kzalloc(sizeof(*info), GFP_KERNEL);
885 		if (!info)
886 			return -ENOMEM;
887 
888 		mutex_init(&info->lock);
889 		INIT_LIST_HEAD(&info->vm_list_head);
890 		INIT_LIST_HEAD(&info->kfd_bo_list);
891 		INIT_LIST_HEAD(&info->userptr_valid_list);
892 		INIT_LIST_HEAD(&info->userptr_inval_list);
893 
894 		info->eviction_fence =
895 			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
896 						   current->mm);
897 		if (!info->eviction_fence) {
898 			pr_err("Failed to create eviction fence\n");
899 			ret = -ENOMEM;
900 			goto create_evict_fence_fail;
901 		}
902 
903 		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
904 		atomic_set(&info->evicted_bos, 0);
905 		INIT_DELAYED_WORK(&info->restore_userptr_work,
906 				  amdgpu_amdkfd_restore_userptr_worker);
907 
908 		*process_info = info;
909 		*ef = dma_fence_get(&info->eviction_fence->base);
910 	}
911 
912 	vm->process_info = *process_info;
913 
914 	/* Validate page directory and attach eviction fence */
915 	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
916 	if (ret)
917 		goto reserve_pd_fail;
918 	ret = vm_validate_pt_pd_bos(vm);
919 	if (ret) {
920 		pr_err("validate_pt_pd_bos() failed\n");
921 		goto validate_pd_fail;
922 	}
923 	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
924 				  AMDGPU_FENCE_OWNER_KFD, false);
925 	if (ret)
926 		goto wait_pd_fail;
927 	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
928 	if (ret)
929 		goto reserve_shared_fail;
930 	amdgpu_bo_fence(vm->root.base.bo,
931 			&vm->process_info->eviction_fence->base, true);
932 	amdgpu_bo_unreserve(vm->root.base.bo);
933 
934 	/* Update process info */
935 	mutex_lock(&vm->process_info->lock);
936 	list_add_tail(&vm->vm_list_node,
937 			&(vm->process_info->vm_list_head));
938 	vm->process_info->n_vms++;
939 	mutex_unlock(&vm->process_info->lock);
940 
941 	return 0;
942 
943 reserve_shared_fail:
944 wait_pd_fail:
945 validate_pd_fail:
946 	amdgpu_bo_unreserve(vm->root.base.bo);
947 reserve_pd_fail:
948 	vm->process_info = NULL;
949 	if (info) {
950 		/* Two fence references: one in info and one in *ef */
951 		dma_fence_put(&info->eviction_fence->base);
952 		dma_fence_put(*ef);
953 		*ef = NULL;
954 		*process_info = NULL;
955 		put_pid(info->pid);
956 create_evict_fence_fail:
957 		mutex_destroy(&info->lock);
958 		kfree(info);
959 	}
960 	return ret;
961 }
962 
963 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
964 					  void **vm, void **process_info,
965 					  struct dma_fence **ef)
966 {
967 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
968 	struct amdgpu_vm *new_vm;
969 	int ret;
970 
971 	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
972 	if (!new_vm)
973 		return -ENOMEM;
974 
975 	/* Initialize AMDGPU part of the VM */
976 	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
977 	if (ret) {
978 		pr_err("Failed init vm ret %d\n", ret);
979 		goto amdgpu_vm_init_fail;
980 	}
981 
982 	/* Initialize KFD part of the VM and process info */
983 	ret = init_kfd_vm(new_vm, process_info, ef);
984 	if (ret)
985 		goto init_kfd_vm_fail;
986 
987 	*vm = (void *) new_vm;
988 
989 	return 0;
990 
991 init_kfd_vm_fail:
992 	amdgpu_vm_fini(adev, new_vm);
993 amdgpu_vm_init_fail:
994 	kfree(new_vm);
995 	return ret;
996 }
997 
998 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
999 					   struct file *filp, unsigned int pasid,
1000 					   void **vm, void **process_info,
1001 					   struct dma_fence **ef)
1002 {
1003 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1004 	struct drm_file *drm_priv = filp->private_data;
1005 	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1006 	struct amdgpu_vm *avm = &drv_priv->vm;
1007 	int ret;
1008 
1009 	/* Already a compute VM? */
1010 	if (avm->process_info)
1011 		return -EINVAL;
1012 
1013 	/* Convert VM into a compute VM */
1014 	ret = amdgpu_vm_make_compute(adev, avm, pasid);
1015 	if (ret)
1016 		return ret;
1017 
1018 	/* Initialize KFD part of the VM and process info */
1019 	ret = init_kfd_vm(avm, process_info, ef);
1020 	if (ret)
1021 		return ret;
1022 
1023 	*vm = (void *)avm;
1024 
1025 	return 0;
1026 }
1027 
1028 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1029 				    struct amdgpu_vm *vm)
1030 {
1031 	struct amdkfd_process_info *process_info = vm->process_info;
1032 	struct amdgpu_bo *pd = vm->root.base.bo;
1033 
1034 	if (!process_info)
1035 		return;
1036 
1037 	/* Release eviction fence from PD */
1038 	amdgpu_bo_reserve(pd, false);
1039 	amdgpu_bo_fence(pd, NULL, false);
1040 	amdgpu_bo_unreserve(pd);
1041 
1042 	/* Update process info */
1043 	mutex_lock(&process_info->lock);
1044 	process_info->n_vms--;
1045 	list_del(&vm->vm_list_node);
1046 	mutex_unlock(&process_info->lock);
1047 
1048 	/* Release per-process resources when last compute VM is destroyed */
1049 	if (!process_info->n_vms) {
1050 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
1051 		WARN_ON(!list_empty(&process_info->userptr_valid_list));
1052 		WARN_ON(!list_empty(&process_info->userptr_inval_list));
1053 
1054 		dma_fence_put(&process_info->eviction_fence->base);
1055 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
1056 		put_pid(process_info->pid);
1057 		mutex_destroy(&process_info->lock);
1058 		kfree(process_info);
1059 	}
1060 }
1061 
1062 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1063 {
1064 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1065 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1066 
1067 	if (WARN_ON(!kgd || !vm))
1068 		return;
1069 
1070 	pr_debug("Destroying process vm %p\n", vm);
1071 
1072 	/* Release the VM context */
1073 	amdgpu_vm_fini(adev, avm);
1074 	kfree(vm);
1075 }
1076 
1077 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1078 {
1079 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Releasing process vm %p\n", vm);

	/* The original pasid of the amdgpu vm has already been
	 * released when the vm was converted to a compute vm.
	 * The current pasid is managed by KFD and will be
	 * released on KFD process destroy. Set the amdgpu pasid
	 * to 0 to avoid a duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
1094 }
1095 
1096 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1097 {
1098 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1099 	struct amdgpu_bo *pd = avm->root.base.bo;
1100 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1101 
1102 	if (adev->asic_type < CHIP_VEGA10)
1103 		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1104 	return avm->pd_phys_addr;
1105 }
1106 
1107 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1108 		struct kgd_dev *kgd, uint64_t va, uint64_t size,
1109 		void *vm, struct kgd_mem **mem,
1110 		uint64_t *offset, uint32_t flags)
1111 {
1112 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1113 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1114 	enum ttm_bo_type bo_type = ttm_bo_type_device;
1115 	struct sg_table *sg = NULL;
1116 	uint64_t user_addr = 0;
1117 	struct amdgpu_bo *bo;
1118 	struct amdgpu_bo_param bp;
1119 	u32 domain, alloc_domain;
1120 	u64 alloc_flags;
1121 	int ret;
1122 
1123 	/*
1124 	 * Check on which domain to allocate BO
1125 	 */
1126 	if (flags & ALLOC_MEM_FLAGS_VRAM) {
1127 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1128 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1129 		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1130 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1131 			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1132 	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
1133 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1134 		alloc_flags = 0;
1135 	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1136 		domain = AMDGPU_GEM_DOMAIN_GTT;
1137 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1138 		alloc_flags = 0;
1139 		if (!offset || !*offset)
1140 			return -EINVAL;
1141 		user_addr = untagged_addr(*offset);
1142 	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
1143 			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1144 		domain = AMDGPU_GEM_DOMAIN_GTT;
1145 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1146 		bo_type = ttm_bo_type_sg;
1147 		alloc_flags = 0;
1148 		if (size > UINT_MAX)
1149 			return -EINVAL;
1150 		sg = create_doorbell_sg(*offset, size);
1151 		if (!sg)
1152 			return -ENOMEM;
1153 	} else {
1154 		return -EINVAL;
1155 	}
1156 
1157 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1158 	if (!*mem) {
1159 		ret = -ENOMEM;
1160 		goto err;
1161 	}
1162 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1163 	mutex_init(&(*mem)->lock);
1164 	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1165 
1166 	/* Workaround for AQL queue wraparound bug. Map the same
1167 	 * memory twice. That means we only actually allocate half
1168 	 * the memory.
1169 	 */
1170 	if ((*mem)->aql_queue)
1171 		size = size >> 1;
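
	/* For example (illustrative): a 4 MB AQL queue allocation passes
	 * size = 4 MB, but only a 2 MB BO is created here; add_bo_to_vm()
	 * later maps that same BO at both va and va + 2 MB (is_aql = true).
	 */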
1172 
1173 	(*mem)->alloc_flags = flags;
1174 
1175 	amdgpu_sync_create(&(*mem)->sync);
1176 
1177 	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1178 	if (ret) {
1179 		pr_debug("Insufficient system memory\n");
1180 		goto err_reserve_limit;
1181 	}
1182 
1183 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1184 			va, size, domain_string(alloc_domain));
1185 
1186 	memset(&bp, 0, sizeof(bp));
1187 	bp.size = size;
1188 	bp.byte_align = 1;
1189 	bp.domain = alloc_domain;
1190 	bp.flags = alloc_flags;
1191 	bp.type = bo_type;
1192 	bp.resv = NULL;
1193 	ret = amdgpu_bo_create(adev, &bp, &bo);
1194 	if (ret) {
1195 		pr_debug("Failed to create BO on domain %s. ret %d\n",
1196 				domain_string(alloc_domain), ret);
1197 		goto err_bo_create;
1198 	}
1199 	if (bo_type == ttm_bo_type_sg) {
1200 		bo->tbo.sg = sg;
1201 		bo->tbo.ttm->sg = sg;
1202 	}
1203 	bo->kfd_bo = *mem;
1204 	(*mem)->bo = bo;
1205 	if (user_addr)
1206 		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1207 
1208 	(*mem)->va = va;
1209 	(*mem)->domain = domain;
1210 	(*mem)->mapped_to_gpu_memory = 0;
1211 	(*mem)->process_info = avm->process_info;
1212 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1213 
1214 	if (user_addr) {
1215 		ret = init_user_pages(*mem, current->mm, user_addr);
1216 		if (ret)
1217 			goto allocate_init_user_pages_failed;
1218 	}
1219 
1220 	if (offset)
1221 		*offset = amdgpu_bo_mmap_offset(bo);
1222 
1223 	return 0;
1224 
1225 allocate_init_user_pages_failed:
1226 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1227 	amdgpu_bo_unref(&bo);
1228 	/* Don't unreserve system mem limit twice */
1229 	goto err_reserve_limit;
1230 err_bo_create:
1231 	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1232 err_reserve_limit:
1233 	mutex_destroy(&(*mem)->lock);
1234 	kfree(*mem);
1235 err:
1236 	if (sg) {
1237 		sg_free_table(sg);
1238 		kfree(sg);
1239 	}
1240 	return ret;
1241 }
1242 
1243 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1244 		struct kgd_dev *kgd, struct kgd_mem *mem)
1245 {
1246 	struct amdkfd_process_info *process_info = mem->process_info;
1247 	unsigned long bo_size = mem->bo->tbo.mem.size;
1248 	struct kfd_bo_va_list *entry, *tmp;
1249 	struct bo_vm_reservation_context ctx;
1250 	struct ttm_validate_buffer *bo_list_entry;
1251 	int ret;
1252 
1253 	mutex_lock(&mem->lock);
1254 
1255 	if (mem->mapped_to_gpu_memory > 0) {
1256 		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1257 				mem->va, bo_size);
1258 		mutex_unlock(&mem->lock);
1259 		return -EBUSY;
1260 	}
1261 
1262 	mutex_unlock(&mem->lock);
1263 	/* lock is not needed after this, since mem is unused and will
1264 	 * be freed anyway
1265 	 */
1266 
1267 	/* No more MMU notifiers */
1268 	amdgpu_mn_unregister(mem->bo);
1269 
1270 	/* Make sure restore workers don't access the BO any more */
1271 	bo_list_entry = &mem->validate_list;
1272 	mutex_lock(&process_info->lock);
1273 	list_del(&bo_list_entry->head);
1274 	mutex_unlock(&process_info->lock);
1275 
1276 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1277 	if (unlikely(ret))
1278 		return ret;
1279 
1280 	/* The eviction fence should be removed by the last unmap.
1281 	 * TODO: Log an error condition if the bo still has the eviction fence
1282 	 * attached
1283 	 */
1284 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1285 					process_info->eviction_fence);
1286 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1287 		mem->va + bo_size * (1 + mem->aql_queue));
1288 
1289 	/* Remove from VM internal data structures */
1290 	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1291 		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1292 				entry, bo_size);
1293 
1294 	ret = unreserve_bo_and_vms(&ctx, false, false);
1295 
1296 	/* Free the sync object */
1297 	amdgpu_sync_free(&mem->sync);
1298 
1299 	/* If the SG is not NULL, it's one we created for a doorbell or mmio
1300 	 * remap BO. We need to free it.
1301 	 */
1302 	if (mem->bo->tbo.sg) {
1303 		sg_free_table(mem->bo->tbo.sg);
1304 		kfree(mem->bo->tbo.sg);
1305 	}
1306 
	/* Free the BO */
1308 	amdgpu_bo_unref(&mem->bo);
1309 	mutex_destroy(&mem->lock);
1310 	kfree(mem);
1311 
1312 	return ret;
1313 }
1314 
1315 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1316 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1317 {
1318 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1319 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1320 	int ret;
1321 	struct amdgpu_bo *bo;
1322 	uint32_t domain;
1323 	struct kfd_bo_va_list *entry;
1324 	struct bo_vm_reservation_context ctx;
1325 	struct kfd_bo_va_list *bo_va_entry = NULL;
1326 	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1327 	unsigned long bo_size;
1328 	bool is_invalid_userptr = false;
1329 
1330 	bo = mem->bo;
1331 	if (!bo) {
1332 		pr_err("Invalid BO when mapping memory to GPU\n");
1333 		return -EINVAL;
1334 	}
1335 
1336 	/* Make sure restore is not running concurrently. Since we
1337 	 * don't map invalid userptr BOs, we rely on the next restore
1338 	 * worker to do the mapping
1339 	 */
1340 	mutex_lock(&mem->process_info->lock);
1341 
1342 	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
1343 	 * sure that the MMU notifier is no longer running
1344 	 * concurrently and the queues are actually stopped
1345 	 */
1346 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1347 		down_write(&current->mm->mmap_sem);
1348 		is_invalid_userptr = atomic_read(&mem->invalid);
1349 		up_write(&current->mm->mmap_sem);
1350 	}
1351 
1352 	mutex_lock(&mem->lock);
1353 
1354 	domain = mem->domain;
1355 	bo_size = bo->tbo.mem.size;
1356 
1357 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1358 			mem->va,
1359 			mem->va + bo_size * (1 + mem->aql_queue),
1360 			vm, domain_string(domain));
1361 
1362 	ret = reserve_bo_and_vm(mem, vm, &ctx);
1363 	if (unlikely(ret))
1364 		goto out;
1365 
1366 	/* Userptr can be marked as "not invalid", but not actually be
1367 	 * validated yet (still in the system domain). In that case
1368 	 * the queues are still stopped and we can leave mapping for
1369 	 * the next restore worker
1370 	 */
1371 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1372 	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1373 		is_invalid_userptr = true;
1374 
1375 	if (check_if_add_bo_to_vm(avm, mem)) {
1376 		ret = add_bo_to_vm(adev, mem, avm, false,
1377 				&bo_va_entry);
1378 		if (ret)
1379 			goto add_bo_to_vm_failed;
1380 		if (mem->aql_queue) {
1381 			ret = add_bo_to_vm(adev, mem, avm,
1382 					true, &bo_va_entry_aql);
1383 			if (ret)
1384 				goto add_bo_to_vm_failed_aql;
1385 		}
1386 	} else {
1387 		ret = vm_validate_pt_pd_bos(avm);
1388 		if (unlikely(ret))
1389 			goto add_bo_to_vm_failed;
1390 	}
1391 
1392 	if (mem->mapped_to_gpu_memory == 0 &&
1393 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1394 		/* Validate BO only once. The eviction fence gets added to BO
1395 		 * the first time it is mapped. Validate will wait for all
1396 		 * background evictions to complete.
1397 		 */
1398 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1399 		if (ret) {
1400 			pr_debug("Validate failed\n");
1401 			goto map_bo_to_gpuvm_failed;
1402 		}
1403 	}
1404 
1405 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1406 		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1407 			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1408 					entry->va, entry->va + bo_size,
1409 					entry);
1410 
1411 			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1412 					      is_invalid_userptr);
1413 			if (ret) {
1414 				pr_err("Failed to map bo to gpuvm\n");
1415 				goto map_bo_to_gpuvm_failed;
1416 			}
1417 
1418 			ret = vm_update_pds(vm, ctx.sync);
1419 			if (ret) {
1420 				pr_err("Failed to update page directories\n");
1421 				goto map_bo_to_gpuvm_failed;
1422 			}
1423 
1424 			entry->is_mapped = true;
1425 			mem->mapped_to_gpu_memory++;
1426 			pr_debug("\t INC mapping count %d\n",
1427 					mem->mapped_to_gpu_memory);
1428 		}
1429 	}
1430 
1431 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1432 		amdgpu_bo_fence(bo,
1433 				&avm->process_info->eviction_fence->base,
1434 				true);
1435 	ret = unreserve_bo_and_vms(&ctx, false, false);
1436 
1437 	goto out;
1438 
1439 map_bo_to_gpuvm_failed:
1440 	if (bo_va_entry_aql)
1441 		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1442 add_bo_to_vm_failed_aql:
1443 	if (bo_va_entry)
1444 		remove_bo_from_vm(adev, bo_va_entry, bo_size);
1445 add_bo_to_vm_failed:
1446 	unreserve_bo_and_vms(&ctx, false, false);
1447 out:
1448 	mutex_unlock(&mem->process_info->lock);
1449 	mutex_unlock(&mem->lock);
1450 	return ret;
1451 }
1452 
1453 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1454 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1455 {
1456 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1457 	struct amdkfd_process_info *process_info =
1458 		((struct amdgpu_vm *)vm)->process_info;
1459 	unsigned long bo_size = mem->bo->tbo.mem.size;
1460 	struct kfd_bo_va_list *entry;
1461 	struct bo_vm_reservation_context ctx;
1462 	int ret;
1463 
1464 	mutex_lock(&mem->lock);
1465 
1466 	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1467 	if (unlikely(ret))
1468 		goto out;
1469 	/* If no VMs were reserved, it means the BO wasn't actually mapped */
1470 	if (ctx.n_vms == 0) {
1471 		ret = -EINVAL;
1472 		goto unreserve_out;
1473 	}
1474 
1475 	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1476 	if (unlikely(ret))
1477 		goto unreserve_out;
1478 
1479 	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1480 		mem->va,
1481 		mem->va + bo_size * (1 + mem->aql_queue),
1482 		vm);
1483 
1484 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1485 		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1486 			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1487 					entry->va,
1488 					entry->va + bo_size,
1489 					entry);
1490 
1491 			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1492 			if (ret == 0) {
1493 				entry->is_mapped = false;
1494 			} else {
1495 				pr_err("failed to unmap VA 0x%llx\n",
1496 						mem->va);
1497 				goto unreserve_out;
1498 			}
1499 
1500 			mem->mapped_to_gpu_memory--;
1501 			pr_debug("\t DEC mapping count %d\n",
1502 					mem->mapped_to_gpu_memory);
1503 		}
1504 	}
1505 
1506 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
1507 	 * required.
1508 	 */
1509 	if (mem->mapped_to_gpu_memory == 0 &&
1510 	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1511 		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1512 						process_info->eviction_fence);
1513 
1514 unreserve_out:
1515 	unreserve_bo_and_vms(&ctx, false, false);
1516 out:
1517 	mutex_unlock(&mem->lock);
1518 	return ret;
1519 }
1520 
1521 int amdgpu_amdkfd_gpuvm_sync_memory(
1522 		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1523 {
1524 	struct amdgpu_sync sync;
1525 	int ret;
1526 
1527 	amdgpu_sync_create(&sync);
1528 
1529 	mutex_lock(&mem->lock);
1530 	amdgpu_sync_clone(&mem->sync, &sync);
1531 	mutex_unlock(&mem->lock);
1532 
1533 	ret = amdgpu_sync_wait(&sync, intr);
1534 	amdgpu_sync_free(&sync);
1535 	return ret;
1536 }
1537 
1538 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1539 		struct kgd_mem *mem, void **kptr, uint64_t *size)
1540 {
1541 	int ret;
1542 	struct amdgpu_bo *bo = mem->bo;
1543 
1544 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1545 		pr_err("userptr can't be mapped to kernel\n");
1546 		return -EINVAL;
1547 	}
1548 
	/* Delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO during its restore after eviction.
	 */
1552 	mutex_lock(&mem->process_info->lock);
1553 
1554 	ret = amdgpu_bo_reserve(bo, true);
1555 	if (ret) {
1556 		pr_err("Failed to reserve bo. ret %d\n", ret);
1557 		goto bo_reserve_failed;
1558 	}
1559 
1560 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1561 	if (ret) {
1562 		pr_err("Failed to pin bo. ret %d\n", ret);
1563 		goto pin_failed;
1564 	}
1565 
1566 	ret = amdgpu_bo_kmap(bo, kptr);
1567 	if (ret) {
1568 		pr_err("Failed to map bo to kernel. ret %d\n", ret);
1569 		goto kmap_failed;
1570 	}
1571 
1572 	amdgpu_amdkfd_remove_eviction_fence(
1573 		bo, mem->process_info->eviction_fence);
1574 	list_del_init(&mem->validate_list.head);
1575 
1576 	if (size)
1577 		*size = amdgpu_bo_size(bo);
1578 
1579 	amdgpu_bo_unreserve(bo);
1580 
1581 	mutex_unlock(&mem->process_info->lock);
1582 	return 0;
1583 
1584 kmap_failed:
1585 	amdgpu_bo_unpin(bo);
1586 pin_failed:
1587 	amdgpu_bo_unreserve(bo);
1588 bo_reserve_failed:
1589 	mutex_unlock(&mem->process_info->lock);
1590 
1591 	return ret;
1592 }
1593 
1594 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1595 					      struct kfd_vm_fault_info *mem)
1596 {
1597 	struct amdgpu_device *adev;
1598 
1599 	adev = (struct amdgpu_device *)kgd;
1600 	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1601 		*mem = *adev->gmc.vm_fault_info;
1602 		mb();
1603 		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1604 	}
1605 	return 0;
1606 }
1607 
1608 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1609 				      struct dma_buf *dma_buf,
1610 				      uint64_t va, void *vm,
1611 				      struct kgd_mem **mem, uint64_t *size,
1612 				      uint64_t *mmap_offset)
1613 {
1614 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1615 	struct drm_gem_object *obj;
1616 	struct amdgpu_bo *bo;
1617 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1618 
1619 	if (dma_buf->ops != &amdgpu_dmabuf_ops)
1620 		/* Can't handle non-graphics buffers */
1621 		return -EINVAL;
1622 
1623 	obj = dma_buf->priv;
1624 	if (obj->dev->dev_private != adev)
1625 		/* Can't handle buffers from other devices */
1626 		return -EINVAL;
1627 
1628 	bo = gem_to_amdgpu_bo(obj);
1629 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1630 				    AMDGPU_GEM_DOMAIN_GTT)))
1631 		/* Only VRAM and GTT BOs are supported */
1632 		return -EINVAL;
1633 
1634 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1635 	if (!*mem)
1636 		return -ENOMEM;
1637 
1638 	if (size)
1639 		*size = amdgpu_bo_size(bo);
1640 
1641 	if (mmap_offset)
1642 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
1643 
1644 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1645 	mutex_init(&(*mem)->lock);
1646 	(*mem)->alloc_flags =
1647 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1648 		 ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
1649 		ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
1650 
1651 	(*mem)->bo = amdgpu_bo_ref(bo);
1652 	(*mem)->va = va;
1653 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1654 		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1655 	(*mem)->mapped_to_gpu_memory = 0;
1656 	(*mem)->process_info = avm->process_info;
1657 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1658 	amdgpu_sync_create(&(*mem)->sync);
1659 
1660 	return 0;
1661 }
1662 
1663 /* Evict a userptr BO by stopping the queues if necessary
1664  *
1665  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1666  * cannot do any memory allocations, and cannot take any locks that
1667  * are held elsewhere while allocating memory. Therefore this is as
1668  * simple as possible, using atomic counters.
1669  *
1670  * It doesn't do anything to the BO itself. The real work happens in
1671  * restore, where we get updated page addresses. This function only
1672  * ensures that GPU access to the BO is stopped.
1673  */
1674 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1675 				struct mm_struct *mm)
1676 {
1677 	struct amdkfd_process_info *process_info = mem->process_info;
1678 	int evicted_bos;
1679 	int r = 0;
1680 
1681 	atomic_inc(&mem->invalid);
1682 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1683 	if (evicted_bos == 1) {
1684 		/* First eviction, stop the queues */
1685 		r = kgd2kfd_quiesce_mm(mm);
1686 		if (r)
1687 			pr_err("Failed to quiesce KFD\n");
1688 		schedule_delayed_work(&process_info->restore_userptr_work,
1689 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1690 	}
1691 
1692 	return r;
1693 }
1694 
1695 /* Update invalid userptr BOs
1696  *
1697  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1698  * userptr_inval_list and updates user pages for all BOs that have
1699  * been invalidated since their last update.
1700  */
1701 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1702 				     struct mm_struct *mm)
1703 {
1704 	struct kgd_mem *mem, *tmp_mem;
1705 	struct amdgpu_bo *bo;
1706 	struct ttm_operation_ctx ctx = { false, false };
1707 	int invalid, ret;
1708 
1709 	/* Move all invalidated BOs to the userptr_inval_list and
1710 	 * release their user pages by migration to the CPU domain
1711 	 */
1712 	list_for_each_entry_safe(mem, tmp_mem,
1713 				 &process_info->userptr_valid_list,
1714 				 validate_list.head) {
1715 		if (!atomic_read(&mem->invalid))
1716 			continue; /* BO is still valid */
1717 
1718 		bo = mem->bo;
1719 
1720 		if (amdgpu_bo_reserve(bo, true))
1721 			return -EAGAIN;
1722 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1723 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1724 		amdgpu_bo_unreserve(bo);
1725 		if (ret) {
1726 			pr_err("%s: Failed to invalidate userptr BO\n",
1727 			       __func__);
1728 			return -EAGAIN;
1729 		}
1730 
1731 		list_move_tail(&mem->validate_list.head,
1732 			       &process_info->userptr_inval_list);
1733 	}
1734 
1735 	if (list_empty(&process_info->userptr_inval_list))
1736 		return 0; /* All evicted userptr BOs were freed */
1737 
1738 	/* Go through userptr_inval_list and update any invalid user_pages */
1739 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1740 			    validate_list.head) {
1741 		invalid = atomic_read(&mem->invalid);
1742 		if (!invalid)
1743 			/* BO hasn't been invalidated since the last
1744 			 * revalidation attempt. Keep its BO list.
1745 			 */
1746 			continue;
1747 
1748 		bo = mem->bo;
1749 
1750 		/* Get updated user pages */
1751 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1752 		if (ret) {
1753 			pr_debug("%s: Failed to get user pages: %d\n",
1754 				__func__, ret);
1755 
1756 			/* Return error -EBUSY or -ENOMEM, retry restore */
1757 			return ret;
1758 		}
1759 
1760 		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1761 
1762 		/* Mark the BO as valid unless it was invalidated
1763 		 * again concurrently.
1764 		 */
1765 		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1766 			return -EAGAIN;
1767 	}
1768 
1769 	return 0;
1770 }
1771 
1772 /* Validate invalid userptr BOs
1773  *
1774  * Validates BOs on the userptr_inval_list, and moves them back to the
1775  * userptr_valid_list. Also updates GPUVM page tables with new page
1776  * addresses and waits for the page table updates to complete.
1777  */
1778 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1779 {
1780 	struct amdgpu_bo_list_entry *pd_bo_list_entries;
1781 	struct list_head resv_list, duplicates;
1782 	struct ww_acquire_ctx ticket;
1783 	struct amdgpu_sync sync;
1784 
1785 	struct amdgpu_vm *peer_vm;
1786 	struct kgd_mem *mem, *tmp_mem;
1787 	struct amdgpu_bo *bo;
1788 	struct ttm_operation_ctx ctx = { false, false };
1789 	int i, ret;
1790 
1791 	pd_bo_list_entries = kcalloc(process_info->n_vms,
1792 				     sizeof(struct amdgpu_bo_list_entry),
1793 				     GFP_KERNEL);
1794 	if (!pd_bo_list_entries) {
1795 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1796 		ret = -ENOMEM;
1797 		goto out_no_mem;
1798 	}
1799 
1800 	INIT_LIST_HEAD(&resv_list);
1801 	INIT_LIST_HEAD(&duplicates);
1802 
1803 	/* Get all the page directory BOs that need to be reserved */
1804 	i = 0;
1805 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1806 			    vm_list_node)
1807 		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1808 				    &pd_bo_list_entries[i++]);
1809 	/* Add the userptr_inval_list entries to resv_list */
1810 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1811 			    validate_list.head) {
1812 		list_add_tail(&mem->resv_list.head, &resv_list);
1813 		mem->resv_list.bo = mem->validate_list.bo;
1814 		mem->resv_list.num_shared = mem->validate_list.num_shared;
1815 	}
1816 
1817 	/* Reserve all BOs and page tables for validation */
1818 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1819 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
1820 	if (ret)
1821 		goto out_free;
1822 
1823 	amdgpu_sync_create(&sync);
1824 
1825 	ret = process_validate_vms(process_info);
1826 	if (ret)
1827 		goto unreserve_out;
1828 
1829 	/* Validate BOs and update GPUVM page tables */
1830 	list_for_each_entry_safe(mem, tmp_mem,
1831 				 &process_info->userptr_inval_list,
1832 				 validate_list.head) {
1833 		struct kfd_bo_va_list *bo_va_entry;
1834 
1835 		bo = mem->bo;
1836 
1837 		/* Validate the BO if we got user pages */
1838 		if (bo->tbo.ttm->pages[0]) {
1839 			amdgpu_bo_placement_from_domain(bo, mem->domain);
1840 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1841 			if (ret) {
1842 				pr_err("%s: failed to validate BO\n", __func__);
1843 				goto unreserve_out;
1844 			}
1845 		}
1846 
1847 		list_move_tail(&mem->validate_list.head,
1848 			       &process_info->userptr_valid_list);
1849 
1850 		/* Update mapping. If the BO was not validated
1851 		 * (because we couldn't get user pages), this will
1852 		 * clear the page table entries, which will result in
1853 		 * VM faults if the GPU tries to access the invalid
1854 		 * memory.
1855 		 */
1856 		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1857 			if (!bo_va_entry->is_mapped)
1858 				continue;
1859 
1860 			ret = update_gpuvm_pte((struct amdgpu_device *)
1861 					       bo_va_entry->kgd_dev,
1862 					       bo_va_entry, &sync);
1863 			if (ret) {
1864 				pr_err("%s: update PTE failed\n", __func__);
1865 				/* make sure this gets validated again */
1866 				atomic_inc(&mem->invalid);
1867 				goto unreserve_out;
1868 			}
1869 		}
1870 	}
1871 
1872 	/* Update page directories */
1873 	ret = process_update_pds(process_info, &sync);
1874 
1875 unreserve_out:
1876 	ttm_eu_backoff_reservation(&ticket, &resv_list);
1877 	amdgpu_sync_wait(&sync, false);
1878 	amdgpu_sync_free(&sync);
1879 out_free:
1880 	kfree(pd_bo_list_entries);
1881 out_no_mem:
1882 
1883 	return ret;
1884 }
1885 
1886 /* Worker callback to restore evicted userptr BOs
1887  *
1888  * Tries to update and validate all userptr BOs. If successful and no
1889  * concurrent evictions happened, the queues are restarted. Otherwise,
1890  * the work is rescheduled for another attempt later.
1891  */
1892 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1893 {
1894 	struct delayed_work *dwork = to_delayed_work(work);
1895 	struct amdkfd_process_info *process_info =
1896 		container_of(dwork, struct amdkfd_process_info,
1897 			     restore_userptr_work);
1898 	struct task_struct *usertask;
1899 	struct mm_struct *mm;
1900 	int evicted_bos;
1901 
1902 	evicted_bos = atomic_read(&process_info->evicted_bos);
1903 	if (!evicted_bos)
1904 		return;
1905 
1906 	/* Reference task and mm in case of concurrent process termination */
1907 	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1908 	if (!usertask)
1909 		return;
1910 	mm = get_task_mm(usertask);
1911 	if (!mm) {
1912 		put_task_struct(usertask);
1913 		return;
1914 	}
1915 
1916 	mutex_lock(&process_info->lock);
1917 
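	/* If updating user pages fails, evicted_bos stays non-zero and the
	 * work is rescheduled below.
	 */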
1918 	if (update_invalid_user_pages(process_info, mm))
1919 		goto unlock_out;
1920 	/* userptr_inval_list can be empty if all evicted userptr BOs
1921 	 * have been freed. In that case there is nothing to validate
1922 	 * and we can just restart the queues.
1923 	 */
1924 	if (!list_empty(&process_info->userptr_inval_list)) {
1925 		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1926 			goto unlock_out; /* Concurrent eviction, try again */
1927 
1928 		if (validate_invalid_user_pages(process_info))
1929 			goto unlock_out;
1930 	}
1931 	/* Final check for concurrent eviction and atomic update. If
1932 	 * another eviction happens after the successful update, it will
1933 	 * count as a first eviction that calls quiesce_mm. The eviction
1934 	 * reference counting inside KFD will handle this case.
1935 	 */
1936 	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1937 	    evicted_bos)
1938 		goto unlock_out;
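	/* Restore succeeded: clear the local count so that the reschedule
	 * at the end of this function is skipped.
	 */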
1939 	evicted_bos = 0;
1940 	if (kgd2kfd_resume_mm(mm)) {
1941 		pr_err("%s: Failed to resume KFD\n", __func__);
1942 		/* No recovery from this failure. Probably the CP is
1943 		 * hanging. No point trying again.
1944 		 */
1945 	}
1946 
1947 unlock_out:
1948 	mutex_unlock(&process_info->lock);
1949 	mmput(mm);
1950 	put_task_struct(usertask);
1951 
1952 	/* If validation failed, reschedule another attempt */
1953 	if (evicted_bos)
1954 		schedule_delayed_work(&process_info->restore_userptr_work,
1955 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1956 }
1957 
1958 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1959  *   KFD process identified by process_info
1960  *
1961  * @process_info: amdkfd_process_info of the KFD process
1962  *
1963  * After memory eviction, the restore thread calls this function. It must
1964  * be called while the process is still valid. BO restore involves:
1965  *
1966  * 1.  Release the old eviction fence and create a new one
1967  * 2.  Get two copies of the PD BO list from all VMs; keep one as pd_bo_list.
1968  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1969  *     BOs that need to be reserved.
1970  * 4.  Reserve all the BOs
1971  * 5.  Validate PD and PT BOs.
1972  * 6.  Validate all KFD BOs using kfd_bo_list, map them and attach the new fence
1973  * 7.  Add the fence to all PD and PT BOs.
1974  * 8.  Unreserve all BOs
1975  */
1976 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1977 {
1978 	struct amdgpu_bo_list_entry *pd_bo_list;
1979 	struct amdkfd_process_info *process_info = info;
1980 	struct amdgpu_vm *peer_vm;
1981 	struct kgd_mem *mem;
1982 	struct bo_vm_reservation_context ctx;
1983 	struct amdgpu_amdkfd_fence *new_fence;
1984 	int ret = 0, i;
1985 	struct list_head duplicate_save;
1986 	struct amdgpu_sync sync_obj;
1987 
1988 	INIT_LIST_HEAD(&duplicate_save);
1989 	INIT_LIST_HEAD(&ctx.list);
1990 	INIT_LIST_HEAD(&ctx.duplicates);
1991 
1992 	pd_bo_list = kcalloc(process_info->n_vms,
1993 			     sizeof(struct amdgpu_bo_list_entry),
1994 			     GFP_KERNEL);
1995 	if (!pd_bo_list)
1996 		return -ENOMEM;
1997 
1998 	i = 0;
1999 	mutex_lock(&process_info->lock);
2000 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2001 			vm_list_node)
2002 		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2003 
2004 	/* Reserve all BOs and page tables/directory. Add all BOs from
2005 	 * kfd_bo_list to ctx.list
2006 	 */
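	/* Each kgd_mem carries a second list entry (resv_list) so that the
	 * BO can sit on ctx.list while it stays on kfd_bo_list.
	 */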
2007 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2008 			    validate_list.head) {
2009 
2010 		list_add_tail(&mem->resv_list.head, &ctx.list);
2011 		mem->resv_list.bo = mem->validate_list.bo;
2012 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2013 	}
2014 
2015 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2016 				     false, &duplicate_save);
2017 	if (ret) {
2018 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2019 		goto ttm_reserve_fail;
2020 	}
2021 
2022 	amdgpu_sync_create(&sync_obj);
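	/* sync_obj collects the validation and page-table update fences
	 * that are waited on below, before the new eviction fence is attached.
	 */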
2023 
2024 	/* Validate PDs and PTs */
2025 	ret = process_validate_vms(process_info);
2026 	if (ret)
2027 		goto validate_map_fail;
2028 
2029 	ret = process_sync_pds_resv(process_info, &sync_obj);
2030 	if (ret) {
2031 		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2032 		goto validate_map_fail;
2033 	}
2034 
2035 	/* Validate BOs and map them to GPUVM (update VM page tables). */
2036 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2037 			    validate_list.head) {
2038 
2039 		struct amdgpu_bo *bo = mem->bo;
2040 		uint32_t domain = mem->domain;
2041 		struct kfd_bo_va_list *bo_va_entry;
2042 
2043 		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2044 		if (ret) {
2045 			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2046 			goto validate_map_fail;
2047 		}
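		/* Add the BO's move fence to sync_obj so that the wait
		 * below also covers any pending migration of this BO.
		 */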
2048 		ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
2049 		if (ret) {
2050 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2051 			goto validate_map_fail;
2052 		}
2053 		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2054 				    bo_list) {
2055 			ret = update_gpuvm_pte((struct amdgpu_device *)
2056 					      bo_va_entry->kgd_dev,
2057 					      bo_va_entry,
2058 					      &sync_obj);
2059 			if (ret) {
2060 				pr_debug("Memory eviction: update PTE failed. Try again\n");
2061 				goto validate_map_fail;
2062 			}
2063 		}
2064 	}
2065 
2066 	/* Update page directories */
2067 	ret = process_update_pds(process_info, &sync_obj);
2068 	if (ret) {
2069 		pr_debug("Memory eviction: update PDs failed. Try again\n");
2070 		goto validate_map_fail;
2071 	}
2072 
2073 	/* Wait for validate and PT updates to finish */
2074 	amdgpu_sync_wait(&sync_obj, false);
2075 
2076 	/* Release the old eviction fence and create a new one: a fence only
2077 	 * goes from unsignaled to signaled, so it cannot be reused.
2078 	 * Use the context and mm from the old fence.
2079 	 */
2080 	new_fence = amdgpu_amdkfd_fence_create(
2081 				process_info->eviction_fence->base.context,
2082 				process_info->eviction_fence->mm);
2083 	if (!new_fence) {
2084 		pr_err("Failed to create eviction fence\n");
2085 		ret = -ENOMEM;
2086 		goto validate_map_fail;
2087 	}
2088 	dma_fence_put(&process_info->eviction_fence->base);
2089 	process_info->eviction_fence = new_fence;
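	/* Hand the caller its own reference to the new eviction fence */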
2090 	*ef = dma_fence_get(&new_fence->base);
2091 
2092 	/* Attach new eviction fence to all BOs */
2093 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2094 		validate_list.head)
2095 		amdgpu_bo_fence(mem->bo,
2096 			&process_info->eviction_fence->base, true);
2097 
2098 	/* Attach eviction fence to PD / PT BOs */
2099 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2100 			    vm_list_node) {
2101 		struct amdgpu_bo *bo = peer_vm->root.base.bo;
2102 
2103 		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2104 	}
2105 
2106 validate_map_fail:
2107 	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2108 	amdgpu_sync_free(&sync_obj);
2109 ttm_reserve_fail:
2110 	mutex_unlock(&process_info->lock);
2111 	kfree(pd_bo_list);
2112 	return ret;
2113 }
2114 
2115 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2116 {
2117 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2118 	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2119 	int ret;
2120 
2121 	if (!info || !gws)
2122 		return -EINVAL;
2123 
2124 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2125 	if (!*mem)
2126 		return -ENOMEM;
2127 
2128 	mutex_init(&(*mem)->lock);
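	/* Take a reference on the GWS BO, which is shared with amdgpu */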
2129 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
2130 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2131 	(*mem)->process_info = process_info;
2132 	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2133 	amdgpu_sync_create(&(*mem)->sync);
2134 
2135 
2136 	/* Validate gws bo the first time it is added to process */
2137 	mutex_lock(&(*mem)->process_info->lock);
2138 	ret = amdgpu_bo_reserve(gws_bo, false);
2139 	if (unlikely(ret)) {
2140 		pr_err("Reserve gws bo failed %d\n", ret);
2141 		goto bo_reservation_failure;
2142 	}
2143 
2144 	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2145 	if (ret) {
2146 		pr_err("GWS BO validate failed %d\n", ret);
2147 		goto bo_validation_failure;
2148 	}
2149 	/* The GWS resource is shared between amdgpu and amdkfd.
2150 	 * Add the process eviction fence to the BO so they can
2151 	 * evict each other.
2152 	 */
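	/* Reserve one shared fence slot for the eviction fence added below */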
2153 	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2154 	if (ret)
2155 		goto reserve_shared_fail;
2156 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2157 	amdgpu_bo_unreserve(gws_bo);
2158 	mutex_unlock(&(*mem)->process_info->lock);
2159 
2160 	return ret;
2161 
2162 reserve_shared_fail:
2163 bo_validation_failure:
2164 	amdgpu_bo_unreserve(gws_bo);
2165 bo_reservation_failure:
2166 	mutex_unlock(&(*mem)->process_info->lock);
2167 	amdgpu_sync_free(&(*mem)->sync);
2168 	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2169 	amdgpu_bo_unref(&gws_bo);
2170 	mutex_destroy(&(*mem)->lock);
2171 	kfree(*mem);
2172 	*mem = NULL;
2173 	return ret;
2174 }
2175 
2176 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2177 {
2178 	int ret;
2179 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2180 	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2181 	struct amdgpu_bo *gws_bo = kgd_mem->bo;
2182 
2183 	/* Remove BO from process's validate list so restore worker won't touch
2184 	 * it anymore
2185 	 */
2186 	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2187 
2188 	ret = amdgpu_bo_reserve(gws_bo, false);
2189 	if (unlikely(ret)) {
2190 		pr_err("Reserve gws bo failed %d\n", ret);
2191 		/* TODO: add BO back to validate_list? */
2192 		return ret;
2193 	}
2194 	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2195 			process_info->eviction_fence);
2196 	amdgpu_bo_unreserve(gws_bo);
2197 	amdgpu_sync_free(&kgd_mem->sync);
2198 	amdgpu_bo_unref(&gws_bo);
2199 	mutex_destroy(&kgd_mem->lock);
2200 	kfree(mem);
2201 	return 0;
2202 }
2203