1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27 
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_amdkfd.h"
31 #include "amdgpu_dma_buf.h"
32 
33 /* BO flag to indicate a KFD userptr BO */
34 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
35 
36 /* Userptr restore delay, just long enough to allow consecutive VM
37  * changes to accumulate
38  */
39 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40 
41 /* Impose limit on how much memory KFD can use */
42 static struct {
43 	uint64_t max_system_mem_limit;
44 	uint64_t max_ttm_mem_limit;
45 	int64_t system_mem_used;
46 	int64_t ttm_mem_used;
47 	spinlock_t mem_limit_lock;
48 } kfd_mem_limit;
49 
50 /* Struct used for amdgpu_amdkfd_bo_validate */
51 struct amdgpu_vm_parser {
52 	uint32_t        domain;
53 	bool            wait;
54 };
55 
56 static const char * const domain_bit_to_string[] = {
57 		"CPU",
58 		"GTT",
59 		"VRAM",
60 		"GDS",
61 		"GWS",
62 		"OA"
63 };
64 
65 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
66 
67 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
68 
69 
70 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
71 {
72 	return (struct amdgpu_device *)kgd;
73 }
74 
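/* Return true if the BO described by @mem has no bo_va entry for @avm yet,
 * i.e. it still needs to be added to that VM with add_bo_to_vm().
 */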
75 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
76 		struct kgd_mem *mem)
77 {
78 	struct kfd_bo_va_list *entry;
79 
80 	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
81 		if (entry->bo_va->base.vm == avm)
82 			return false;
83 
84 	return true;
85 }
86 
/* Set memory usage limits. Currently the limits are
88  *  System (TTM + userptr) memory - 3/4th System RAM
89  *  TTM memory - 3/8th System RAM
90  */
91 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
92 {
93 	struct sysinfo si;
94 	uint64_t mem;
95 
96 	si_meminfo(&si);
97 	mem = si.totalram - si.totalhigh;
98 	mem *= si.mem_unit;
99 
100 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
101 	kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
102 	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
103 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
104 		(kfd_mem_limit.max_system_mem_limit >> 20),
105 		(kfd_mem_limit.max_ttm_mem_limit >> 20));
106 }
107 
108 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
109 		uint64_t size, u32 domain, bool sg)
110 {
111 	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
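	/* reserved_for_pt is VRAM kept free for page tables: 1/512th of the
	 * total memory size. It is subtracted from the usable VRAM in the
	 * limit check below.
	 */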
112 	uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
113 	int ret = 0;
114 
115 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
116 				       sizeof(struct amdgpu_bo));
117 
118 	vram_needed = 0;
119 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
120 		/* TTM GTT memory */
121 		system_mem_needed = acc_size + size;
122 		ttm_mem_needed = acc_size + size;
123 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
124 		/* Userptr */
125 		system_mem_needed = acc_size + size;
126 		ttm_mem_needed = acc_size;
127 	} else {
128 		/* VRAM and SG */
129 		system_mem_needed = acc_size;
130 		ttm_mem_needed = acc_size;
131 		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
132 			vram_needed = size;
133 	}
134 
135 	spin_lock(&kfd_mem_limit.mem_limit_lock);
136 
137 	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
138 	     kfd_mem_limit.max_system_mem_limit) ||
139 	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
140 	     kfd_mem_limit.max_ttm_mem_limit) ||
141 	    (adev->kfd.vram_used + vram_needed >
142 	     adev->gmc.real_vram_size - reserved_for_pt)) {
143 		ret = -ENOMEM;
144 	} else {
145 		kfd_mem_limit.system_mem_used += system_mem_needed;
146 		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
147 		adev->kfd.vram_used += vram_needed;
148 	}
149 
150 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
151 	return ret;
152 }
153 
154 static void unreserve_mem_limit(struct amdgpu_device *adev,
155 		uint64_t size, u32 domain, bool sg)
156 {
157 	size_t acc_size;
158 
159 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
160 				       sizeof(struct amdgpu_bo));
161 
162 	spin_lock(&kfd_mem_limit.mem_limit_lock);
163 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
164 		kfd_mem_limit.system_mem_used -= (acc_size + size);
165 		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
166 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
167 		kfd_mem_limit.system_mem_used -= (acc_size + size);
168 		kfd_mem_limit.ttm_mem_used -= acc_size;
169 	} else {
170 		kfd_mem_limit.system_mem_used -= acc_size;
171 		kfd_mem_limit.ttm_mem_used -= acc_size;
172 		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
173 			adev->kfd.vram_used -= size;
174 			WARN_ONCE(adev->kfd.vram_used < 0,
175 				  "kfd VRAM memory accounting unbalanced");
176 		}
177 	}
178 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
179 		  "kfd system memory accounting unbalanced");
180 	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
181 		  "kfd TTM memory accounting unbalanced");
182 
183 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
184 }
185 
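/* Undo the limit accounting done in amdgpu_amdkfd_reserve_mem_limit() when a
 * KFD BO is released. Userptr BOs carry a flag so they can be told apart from
 * doorbell/MMIO SG BOs, which are also created in the CPU domain but are
 * accounted differently.
 */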
186 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
187 {
188 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
189 	u32 domain = bo->preferred_domains;
190 	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
191 
192 	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
193 		domain = AMDGPU_GEM_DOMAIN_CPU;
194 		sg = false;
195 	}
196 
197 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
198 }
199 
200 
201 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
202  *  reservation object.
203  *
204  * @bo: [IN] Remove eviction fence(s) from this BO
205  * @ef: [IN] This eviction fence is removed if it
206  *  is present in the shared list.
207  *
 * NOTE: Must be called with BO reserved, i.e. bo->tbo.base.resv->lock held.
209  */
210 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
211 					struct amdgpu_amdkfd_fence *ef)
212 {
213 	struct dma_resv *resv = bo->tbo.base.resv;
214 	struct dma_resv_list *old, *new;
215 	unsigned int i, j, k;
216 
217 	if (!ef)
218 		return -EINVAL;
219 
220 	old = dma_resv_get_list(resv);
221 	if (!old)
222 		return 0;
223 
224 	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
225 		      GFP_KERNEL);
226 	if (!new)
227 		return -ENOMEM;
228 
	/* Go through all the shared fences in the reservation object and sort
230 	 * the interesting ones to the end of the list.
231 	 */
232 	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
233 		struct dma_fence *f;
234 
235 		f = rcu_dereference_protected(old->shared[i],
236 					      dma_resv_held(resv));
237 
238 		if (f->context == ef->base.context)
239 			RCU_INIT_POINTER(new->shared[--j], f);
240 		else
241 			RCU_INIT_POINTER(new->shared[k++], f);
242 	}
243 	new->shared_max = old->shared_max;
244 	new->shared_count = k;
245 
246 	/* Install the new fence list, seqcount provides the barriers */
247 	preempt_disable();
248 	write_seqcount_begin(&resv->seq);
249 	RCU_INIT_POINTER(resv->fence, new);
250 	write_seqcount_end(&resv->seq);
251 	preempt_enable();
252 
	/* Drop the references to the fences that were removed from the list */
254 	for (i = j, k = 0; i < old->shared_count; ++i) {
255 		struct dma_fence *f;
256 
257 		f = rcu_dereference_protected(new->shared[i],
258 					      dma_resv_held(resv));
259 		dma_fence_put(f);
260 	}
261 	kfree_rcu(old, rcu);
262 
263 	return 0;
264 }
265 
266 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
267 				     bool wait)
268 {
269 	struct ttm_operation_ctx ctx = { false, false };
270 	int ret;
271 
272 	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
273 		 "Called with userptr BO"))
274 		return -EINVAL;
275 
276 	amdgpu_bo_placement_from_domain(bo, domain);
277 
278 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
279 	if (ret)
280 		goto validate_fail;
281 	if (wait)
282 		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
283 
284 validate_fail:
285 	return ret;
286 }
287 
288 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
289 {
290 	struct amdgpu_vm_parser *p = param;
291 
292 	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
293 }
294 
295 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
296  *
297  * Page directories are not updated here because huge page handling
298  * during page table updates can invalidate page directory entries
299  * again. Page directories are only updated after updating page
300  * tables.
301  */
302 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
303 {
304 	struct amdgpu_bo *pd = vm->root.base.bo;
305 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
306 	struct amdgpu_vm_parser param;
307 	int ret;
308 
309 	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
310 	param.wait = false;
311 
312 	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
313 					&param);
314 	if (ret) {
315 		pr_err("amdgpu: failed to validate PT BOs\n");
316 		return ret;
317 	}
318 
319 	ret = amdgpu_amdkfd_validate(&param, pd);
320 	if (ret) {
321 		pr_err("amdgpu: failed to validate PD\n");
322 		return ret;
323 	}
324 
325 	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
326 
327 	if (vm->use_cpu_for_update) {
328 		ret = amdgpu_bo_kmap(pd, NULL);
329 		if (ret) {
330 			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
331 			return ret;
332 		}
333 	}
334 
335 	return 0;
336 }
337 
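/* Update the VM's page directory entries and add the resulting last_update
 * fence to the sync object so callers can wait for the PD update to complete.
 */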
338 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
339 {
340 	struct amdgpu_bo *pd = vm->root.base.bo;
341 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
342 	int ret;
343 
344 	ret = amdgpu_vm_update_pdes(adev, vm, false);
345 	if (ret)
346 		return ret;
347 
348 	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
349 }
350 
351 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
352 {
353 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
354 	bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
355 	uint32_t mapping_flags;
356 
357 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
358 	if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
359 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
360 	if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
361 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
362 
363 	switch (adev->asic_type) {
364 	case CHIP_ARCTURUS:
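		/* On Arcturus, VRAM on the local GPU is mapped CC when
		 * coherent access is requested and RW otherwise; VRAM on a
		 * peer GPU is mapped UC. Other memory (GTT, userptr,
		 * doorbell/MMIO) is UC when coherent, NC otherwise.
		 */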
365 		if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
366 			if (bo_adev == adev)
367 				mapping_flags |= coherent ?
368 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
369 			else
370 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
371 		} else {
372 			mapping_flags |= coherent ?
373 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
374 		}
375 		break;
376 	default:
377 		mapping_flags |= coherent ?
378 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
379 	}
380 
381 	return amdgpu_gem_va_map_flags(adev, mapping_flags);
382 }
383 
384 /* add_bo_to_vm - Add a BO to a VM
385  *
 * Everything that needs to be done only once when a BO is first added
387  * to a VM. It can later be mapped and unmapped many times without
388  * repeating these steps.
389  *
390  * 1. Allocate and initialize BO VA entry data structure
391  * 2. Add BO to the VM
392  * 3. Determine ASIC-specific PTE flags
393  * 4. Alloc page tables and directories if needed
394  * 4a.  Validate new page tables and directories
395  */
396 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
397 		struct amdgpu_vm *vm, bool is_aql,
398 		struct kfd_bo_va_list **p_bo_va_entry)
399 {
400 	int ret;
401 	struct kfd_bo_va_list *bo_va_entry;
402 	struct amdgpu_bo *bo = mem->bo;
403 	uint64_t va = mem->va;
404 	struct list_head *list_bo_va = &mem->bo_va_list;
405 	unsigned long bo_size = bo->tbo.mem.size;
406 
407 	if (!va) {
408 		pr_err("Invalid VA when adding BO to VM\n");
409 		return -EINVAL;
410 	}
411 
412 	if (is_aql)
413 		va += bo_size;
414 
415 	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
416 	if (!bo_va_entry)
417 		return -ENOMEM;
418 
419 	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
420 			va + bo_size, vm);
421 
	/* Add BO to VM internal data structures */
423 	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
424 	if (!bo_va_entry->bo_va) {
425 		ret = -EINVAL;
		pr_err("Failed to add BO to VM, ret %d\n", ret);
428 		goto err_vmadd;
429 	}
430 
431 	bo_va_entry->va = va;
432 	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
433 	bo_va_entry->kgd_dev = (void *)adev;
434 	list_add(&bo_va_entry->bo_list, list_bo_va);
435 
436 	if (p_bo_va_entry)
437 		*p_bo_va_entry = bo_va_entry;
438 
	/* Allocate and validate page tables if needed */
440 	ret = vm_validate_pt_pd_bos(vm);
441 	if (ret) {
442 		pr_err("validate_pt_pd_bos() failed\n");
443 		goto err_alloc_pts;
444 	}
445 
446 	return 0;
447 
448 err_alloc_pts:
449 	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
450 	list_del(&bo_va_entry->bo_list);
451 err_vmadd:
452 	kfree(bo_va_entry);
453 	return ret;
454 }
455 
456 static void remove_bo_from_vm(struct amdgpu_device *adev,
457 		struct kfd_bo_va_list *entry, unsigned long size)
458 {
459 	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
460 			entry->va,
461 			entry->va + size, entry);
462 	amdgpu_vm_bo_rmv(adev, entry->bo_va);
463 	list_del(&entry->bo_list);
464 	kfree(entry);
465 }
466 
467 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
468 				struct amdkfd_process_info *process_info,
469 				bool userptr)
470 {
471 	struct ttm_validate_buffer *entry = &mem->validate_list;
472 	struct amdgpu_bo *bo = mem->bo;
473 
474 	INIT_LIST_HEAD(&entry->head);
475 	entry->num_shared = 1;
476 	entry->bo = &bo->tbo;
477 	mutex_lock(&process_info->lock);
478 	if (userptr)
479 		list_add_tail(&entry->head, &process_info->userptr_valid_list);
480 	else
481 		list_add_tail(&entry->head, &process_info->kfd_bo_list);
482 	mutex_unlock(&process_info->lock);
483 }
484 
485 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
486 		struct amdkfd_process_info *process_info)
487 {
488 	struct ttm_validate_buffer *bo_list_entry;
489 
490 	bo_list_entry = &mem->validate_list;
491 	mutex_lock(&process_info->lock);
492 	list_del(&bo_list_entry->head);
493 	mutex_unlock(&process_info->lock);
494 }
495 
496 /* Initializes user pages. It registers the MMU notifier and validates
497  * the userptr BO in the GTT domain.
498  *
499  * The BO must already be on the userptr_valid_list. Otherwise an
500  * eviction and restore may happen that leaves the new BO unmapped
501  * with the user mode queues running.
502  *
503  * Takes the process_info->lock to protect against concurrent restore
504  * workers.
505  *
506  * Returns 0 for success, negative errno for errors.
507  */
508 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
509 {
510 	struct amdkfd_process_info *process_info = mem->process_info;
511 	struct amdgpu_bo *bo = mem->bo;
512 	struct ttm_operation_ctx ctx = { true, false };
513 	int ret = 0;
514 
515 	mutex_lock(&process_info->lock);
516 
517 	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
518 	if (ret) {
519 		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
520 		goto out;
521 	}
522 
523 	ret = amdgpu_mn_register(bo, user_addr);
524 	if (ret) {
525 		pr_err("%s: Failed to register MMU notifier: %d\n",
526 		       __func__, ret);
527 		goto out;
528 	}
529 
530 	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
531 	if (ret) {
532 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
533 		goto unregister_out;
534 	}
535 
536 	ret = amdgpu_bo_reserve(bo, true);
537 	if (ret) {
538 		pr_err("%s: Failed to reserve BO\n", __func__);
539 		goto release_out;
540 	}
541 	amdgpu_bo_placement_from_domain(bo, mem->domain);
542 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
543 	if (ret)
544 		pr_err("%s: failed to validate BO\n", __func__);
545 	amdgpu_bo_unreserve(bo);
546 
547 release_out:
548 	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
549 unregister_out:
550 	if (ret)
551 		amdgpu_mn_unregister(bo);
552 out:
553 	mutex_unlock(&process_info->lock);
554 	return ret;
555 }
556 
557 /* Reserving a BO and its page table BOs must happen atomically to
558  * avoid deadlocks. Some operations update multiple VMs at once. Track
559  * all the reservation info in a context structure. Optionally a sync
560  * object can track VM updates.
561  */
562 struct bo_vm_reservation_context {
563 	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
564 	unsigned int n_vms;		    /* Number of VMs reserved	    */
565 	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
566 	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
567 	struct list_head list, duplicates;  /* BO lists			    */
568 	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
569 	bool reserved;			    /* Whether BOs are reserved	    */
570 };
571 
572 enum bo_vm_match {
573 	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
574 	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
575 	BO_VM_ALL,		/* Match all VMs a BO was added to    */
576 };
577 
578 /**
579  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
580  * @mem: KFD BO structure.
581  * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
584 static int reserve_bo_and_vm(struct kgd_mem *mem,
585 			      struct amdgpu_vm *vm,
586 			      struct bo_vm_reservation_context *ctx)
587 {
588 	struct amdgpu_bo *bo = mem->bo;
589 	int ret;
590 
591 	WARN_ON(!vm);
592 
593 	ctx->reserved = false;
594 	ctx->n_vms = 1;
595 	ctx->sync = &mem->sync;
596 
597 	INIT_LIST_HEAD(&ctx->list);
598 	INIT_LIST_HEAD(&ctx->duplicates);
599 
600 	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
601 	if (!ctx->vm_pd)
602 		return -ENOMEM;
603 
604 	ctx->kfd_bo.priority = 0;
605 	ctx->kfd_bo.tv.bo = &bo->tbo;
606 	ctx->kfd_bo.tv.num_shared = 1;
607 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
608 
609 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
610 
611 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
612 				     false, &ctx->duplicates);
613 	if (!ret)
614 		ctx->reserved = true;
615 	else {
616 		pr_err("Failed to reserve buffers in ttm\n");
617 		kfree(ctx->vm_pd);
618 		ctx->vm_pd = NULL;
619 	}
620 
621 	return ret;
622 }
623 
624 /**
625  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
626  * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
 * reserved. Otherwise, only the given VM is reserved.
629  * @map_type: the mapping status that will be used to filter the VMs.
630  * @ctx: the struct that will be used in unreserve_bo_and_vms().
631  *
632  * Returns 0 for success, negative for failure.
633  */
634 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
635 				struct amdgpu_vm *vm, enum bo_vm_match map_type,
636 				struct bo_vm_reservation_context *ctx)
637 {
638 	struct amdgpu_bo *bo = mem->bo;
639 	struct kfd_bo_va_list *entry;
640 	unsigned int i;
641 	int ret;
642 
643 	ctx->reserved = false;
644 	ctx->n_vms = 0;
645 	ctx->vm_pd = NULL;
646 	ctx->sync = &mem->sync;
647 
648 	INIT_LIST_HEAD(&ctx->list);
649 	INIT_LIST_HEAD(&ctx->duplicates);
650 
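	/* First pass: count the VMs that match, so the PD BO list entry
	 * array can be sized correctly.
	 */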
651 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
652 		if ((vm && vm != entry->bo_va->base.vm) ||
653 			(entry->is_mapped != map_type
654 			&& map_type != BO_VM_ALL))
655 			continue;
656 
657 		ctx->n_vms++;
658 	}
659 
660 	if (ctx->n_vms != 0) {
661 		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
662 				     GFP_KERNEL);
663 		if (!ctx->vm_pd)
664 			return -ENOMEM;
665 	}
666 
667 	ctx->kfd_bo.priority = 0;
668 	ctx->kfd_bo.tv.bo = &bo->tbo;
669 	ctx->kfd_bo.tv.num_shared = 1;
670 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
671 
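	/* Second pass: add the page directory BO of every matching VM to the
	 * reservation list.
	 */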
672 	i = 0;
673 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
674 		if ((vm && vm != entry->bo_va->base.vm) ||
675 			(entry->is_mapped != map_type
676 			&& map_type != BO_VM_ALL))
677 			continue;
678 
679 		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
680 				&ctx->vm_pd[i]);
681 		i++;
682 	}
683 
684 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
685 				     false, &ctx->duplicates);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	} else {
		ctx->reserved = true;
	}
695 
696 	return ret;
697 }
698 
699 /**
700  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
701  * @ctx: Reservation context to unreserve
702  * @wait: Optionally wait for a sync object representing pending VM updates
703  * @intr: Whether the wait is interruptible
704  *
705  * Also frees any resources allocated in
706  * reserve_bo_and_(cond_)vm(s). Returns the status from
707  * amdgpu_sync_wait.
708  */
709 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
710 				 bool wait, bool intr)
711 {
712 	int ret = 0;
713 
714 	if (wait)
715 		ret = amdgpu_sync_wait(ctx->sync, intr);
716 
717 	if (ctx->reserved)
718 		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
719 	kfree(ctx->vm_pd);
720 
721 	ctx->sync = NULL;
722 
723 	ctx->reserved = false;
724 	ctx->vm_pd = NULL;
725 
726 	return ret;
727 }
728 
729 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
730 				struct kfd_bo_va_list *entry,
731 				struct amdgpu_sync *sync)
732 {
733 	struct amdgpu_bo_va *bo_va = entry->bo_va;
734 	struct amdgpu_vm *vm = bo_va->base.vm;
735 
736 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
737 
738 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
739 
740 	amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
741 
742 	return 0;
743 }
744 
745 static int update_gpuvm_pte(struct amdgpu_device *adev,
746 		struct kfd_bo_va_list *entry,
747 		struct amdgpu_sync *sync)
748 {
749 	int ret;
750 	struct amdgpu_bo_va *bo_va = entry->bo_va;
751 
752 	/* Update the page tables  */
753 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
754 	if (ret) {
755 		pr_err("amdgpu_vm_bo_update failed\n");
756 		return ret;
757 	}
758 
759 	return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
760 }
761 
762 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
763 		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
764 		bool no_update_pte)
765 {
766 	int ret;
767 
768 	/* Set virtual address for the allocation */
769 	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
770 			       amdgpu_bo_size(entry->bo_va->base.bo),
771 			       entry->pte_flags);
772 	if (ret) {
773 		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
774 				entry->va, ret);
775 		return ret;
776 	}
777 
778 	if (no_update_pte)
779 		return 0;
780 
781 	ret = update_gpuvm_pte(adev, entry, sync);
782 	if (ret) {
783 		pr_err("update_gpuvm_pte() failed\n");
784 		goto update_gpuvm_pte_failed;
785 	}
786 
787 	return 0;
788 
789 update_gpuvm_pte_failed:
790 	unmap_bo_from_gpuvm(adev, entry, sync);
791 	return ret;
792 }
793 
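/* Build a one-entry sg table pointing at a doorbell or MMIO page so that the
 * address can be wrapped in a ttm_bo_type_sg BO and mapped into a GPUVM.
 */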
794 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
795 {
796 	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
797 
798 	if (!sg)
799 		return NULL;
800 	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
801 		kfree(sg);
802 		return NULL;
803 	}
804 	sg->sgl->dma_address = addr;
805 	sg->sgl->length = size;
806 #ifdef CONFIG_NEED_SG_DMA_LENGTH
807 	sg->sgl->dma_length = size;
808 #endif
809 	return sg;
810 }
811 
812 static int process_validate_vms(struct amdkfd_process_info *process_info)
813 {
814 	struct amdgpu_vm *peer_vm;
815 	int ret;
816 
817 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
818 			    vm_list_node) {
819 		ret = vm_validate_pt_pd_bos(peer_vm);
820 		if (ret)
821 			return ret;
822 	}
823 
824 	return 0;
825 }
826 
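/* Collect pending fences from the page-directory reservation objects of every
 * VM in the process into @sync, so callers can wait for outstanding PD updates.
 */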
827 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
828 				 struct amdgpu_sync *sync)
829 {
830 	struct amdgpu_vm *peer_vm;
831 	int ret;
832 
833 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
834 			    vm_list_node) {
835 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
836 
837 		ret = amdgpu_sync_resv(NULL,
838 					sync, pd->tbo.base.resv,
839 					AMDGPU_FENCE_OWNER_KFD, false);
840 		if (ret)
841 			return ret;
842 	}
843 
844 	return 0;
845 }
846 
847 static int process_update_pds(struct amdkfd_process_info *process_info,
848 			      struct amdgpu_sync *sync)
849 {
850 	struct amdgpu_vm *peer_vm;
851 	int ret;
852 
853 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
854 			    vm_list_node) {
855 		ret = vm_update_pds(peer_vm, sync);
856 		if (ret)
857 			return ret;
858 	}
859 
860 	return 0;
861 }
862 
863 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
864 		       struct dma_fence **ef)
865 {
866 	struct amdkfd_process_info *info = NULL;
867 	int ret;
868 
869 	if (!*process_info) {
870 		info = kzalloc(sizeof(*info), GFP_KERNEL);
871 		if (!info)
872 			return -ENOMEM;
873 
874 		mutex_init(&info->lock);
875 		INIT_LIST_HEAD(&info->vm_list_head);
876 		INIT_LIST_HEAD(&info->kfd_bo_list);
877 		INIT_LIST_HEAD(&info->userptr_valid_list);
878 		INIT_LIST_HEAD(&info->userptr_inval_list);
879 
880 		info->eviction_fence =
881 			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
882 						   current->mm);
883 		if (!info->eviction_fence) {
884 			pr_err("Failed to create eviction fence\n");
885 			ret = -ENOMEM;
886 			goto create_evict_fence_fail;
887 		}
888 
889 		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
890 		atomic_set(&info->evicted_bos, 0);
891 		INIT_DELAYED_WORK(&info->restore_userptr_work,
892 				  amdgpu_amdkfd_restore_userptr_worker);
893 
894 		*process_info = info;
895 		*ef = dma_fence_get(&info->eviction_fence->base);
896 	}
897 
898 	vm->process_info = *process_info;
899 
900 	/* Validate page directory and attach eviction fence */
901 	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
902 	if (ret)
903 		goto reserve_pd_fail;
904 	ret = vm_validate_pt_pd_bos(vm);
905 	if (ret) {
906 		pr_err("validate_pt_pd_bos() failed\n");
907 		goto validate_pd_fail;
908 	}
909 	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
910 				  AMDGPU_FENCE_OWNER_KFD, false);
911 	if (ret)
912 		goto wait_pd_fail;
913 	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
914 	if (ret)
915 		goto reserve_shared_fail;
916 	amdgpu_bo_fence(vm->root.base.bo,
917 			&vm->process_info->eviction_fence->base, true);
918 	amdgpu_bo_unreserve(vm->root.base.bo);
919 
920 	/* Update process info */
921 	mutex_lock(&vm->process_info->lock);
922 	list_add_tail(&vm->vm_list_node,
923 			&(vm->process_info->vm_list_head));
924 	vm->process_info->n_vms++;
925 	mutex_unlock(&vm->process_info->lock);
926 
927 	return 0;
928 
929 reserve_shared_fail:
930 wait_pd_fail:
931 validate_pd_fail:
932 	amdgpu_bo_unreserve(vm->root.base.bo);
933 reserve_pd_fail:
934 	vm->process_info = NULL;
935 	if (info) {
936 		/* Two fence references: one in info and one in *ef */
937 		dma_fence_put(&info->eviction_fence->base);
938 		dma_fence_put(*ef);
939 		*ef = NULL;
940 		*process_info = NULL;
941 		put_pid(info->pid);
942 create_evict_fence_fail:
943 		mutex_destroy(&info->lock);
944 		kfree(info);
945 	}
946 	return ret;
947 }
948 
949 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
950 					  void **vm, void **process_info,
951 					  struct dma_fence **ef)
952 {
953 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
954 	struct amdgpu_vm *new_vm;
955 	int ret;
956 
957 	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
958 	if (!new_vm)
959 		return -ENOMEM;
960 
961 	/* Initialize AMDGPU part of the VM */
962 	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
963 	if (ret) {
964 		pr_err("Failed init vm ret %d\n", ret);
965 		goto amdgpu_vm_init_fail;
966 	}
967 
968 	/* Initialize KFD part of the VM and process info */
969 	ret = init_kfd_vm(new_vm, process_info, ef);
970 	if (ret)
971 		goto init_kfd_vm_fail;
972 
973 	*vm = (void *) new_vm;
974 
975 	return 0;
976 
977 init_kfd_vm_fail:
978 	amdgpu_vm_fini(adev, new_vm);
979 amdgpu_vm_init_fail:
980 	kfree(new_vm);
981 	return ret;
982 }
983 
984 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
985 					   struct file *filp, unsigned int pasid,
986 					   void **vm, void **process_info,
987 					   struct dma_fence **ef)
988 {
989 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
990 	struct drm_file *drm_priv = filp->private_data;
991 	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
992 	struct amdgpu_vm *avm = &drv_priv->vm;
993 	int ret;
994 
995 	/* Already a compute VM? */
996 	if (avm->process_info)
997 		return -EINVAL;
998 
999 	/* Convert VM into a compute VM */
1000 	ret = amdgpu_vm_make_compute(adev, avm, pasid);
1001 	if (ret)
1002 		return ret;
1003 
1004 	/* Initialize KFD part of the VM and process info */
1005 	ret = init_kfd_vm(avm, process_info, ef);
1006 	if (ret)
1007 		return ret;
1008 
1009 	*vm = (void *)avm;
1010 
1011 	return 0;
1012 }
1013 
1014 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1015 				    struct amdgpu_vm *vm)
1016 {
1017 	struct amdkfd_process_info *process_info = vm->process_info;
1018 	struct amdgpu_bo *pd = vm->root.base.bo;
1019 
1020 	if (!process_info)
1021 		return;
1022 
1023 	/* Release eviction fence from PD */
1024 	amdgpu_bo_reserve(pd, false);
1025 	amdgpu_bo_fence(pd, NULL, false);
1026 	amdgpu_bo_unreserve(pd);
1027 
1028 	/* Update process info */
1029 	mutex_lock(&process_info->lock);
1030 	process_info->n_vms--;
1031 	list_del(&vm->vm_list_node);
1032 	mutex_unlock(&process_info->lock);
1033 
1034 	/* Release per-process resources when last compute VM is destroyed */
1035 	if (!process_info->n_vms) {
1036 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
1037 		WARN_ON(!list_empty(&process_info->userptr_valid_list));
1038 		WARN_ON(!list_empty(&process_info->userptr_inval_list));
1039 
1040 		dma_fence_put(&process_info->eviction_fence->base);
1041 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
1042 		put_pid(process_info->pid);
1043 		mutex_destroy(&process_info->lock);
1044 		kfree(process_info);
1045 	}
1046 }
1047 
1048 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1049 {
1050 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1051 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1052 
1053 	if (WARN_ON(!kgd || !vm))
1054 		return;
1055 
1056 	pr_debug("Destroying process vm %p\n", vm);
1057 
1058 	/* Release the VM context */
1059 	amdgpu_vm_fini(adev, avm);
1060 	kfree(vm);
1061 }
1062 
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Releasing process vm %p\n", vm);

	/* The original pasid of the amdgpu vm was already released when the
	 * amdgpu vm was converted to a compute vm. The current pasid is
	 * managed by KFD and will be released on KFD process destruction.
	 * Set the amdgpu pasid to 0 to avoid a duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}
1081 
1082 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1083 {
1084 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1085 	struct amdgpu_bo *pd = avm->root.base.bo;
1086 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1087 
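	/* ASICs before VEGA10 program the page directory base as a page frame
	 * number; VEGA10 and later use the address returned by
	 * amdgpu_gmc_pd_addr() unchanged.
	 */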
1088 	if (adev->asic_type < CHIP_VEGA10)
1089 		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1090 	return avm->pd_phys_addr;
1091 }
1092 
1093 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1094 		struct kgd_dev *kgd, uint64_t va, uint64_t size,
1095 		void *vm, struct kgd_mem **mem,
1096 		uint64_t *offset, uint32_t flags)
1097 {
1098 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1099 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1100 	enum ttm_bo_type bo_type = ttm_bo_type_device;
1101 	struct sg_table *sg = NULL;
1102 	uint64_t user_addr = 0;
1103 	struct amdgpu_bo *bo;
1104 	struct amdgpu_bo_param bp;
1105 	u32 domain, alloc_domain;
1106 	u64 alloc_flags;
1107 	int ret;
1108 
	/* Check on which domain to allocate the BO */
1112 	if (flags & ALLOC_MEM_FLAGS_VRAM) {
1113 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1114 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1115 		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1116 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1117 			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1118 	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
1119 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1120 		alloc_flags = 0;
1121 	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1122 		domain = AMDGPU_GEM_DOMAIN_GTT;
1123 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1124 		alloc_flags = 0;
1125 		if (!offset || !*offset)
1126 			return -EINVAL;
1127 		user_addr = untagged_addr(*offset);
1128 	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
1129 			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1130 		domain = AMDGPU_GEM_DOMAIN_GTT;
1131 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1132 		bo_type = ttm_bo_type_sg;
1133 		alloc_flags = 0;
1134 		if (size > UINT_MAX)
1135 			return -EINVAL;
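		/* The caller passes the address of the doorbell or MMIO page
		 * in *offset; wrap it in a single-entry SG table.
		 */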
1136 		sg = create_doorbell_sg(*offset, size);
1137 		if (!sg)
1138 			return -ENOMEM;
1139 	} else {
1140 		return -EINVAL;
1141 	}
1142 
1143 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1144 	if (!*mem) {
1145 		ret = -ENOMEM;
1146 		goto err;
1147 	}
1148 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1149 	mutex_init(&(*mem)->lock);
1150 	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1151 
1152 	/* Workaround for AQL queue wraparound bug. Map the same
1153 	 * memory twice. That means we only actually allocate half
1154 	 * the memory.
1155 	 */
1156 	if ((*mem)->aql_queue)
1157 		size = size >> 1;
1158 
1159 	(*mem)->alloc_flags = flags;
1160 
1161 	amdgpu_sync_create(&(*mem)->sync);
1162 
1163 	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1164 	if (ret) {
1165 		pr_debug("Insufficient system memory\n");
1166 		goto err_reserve_limit;
1167 	}
1168 
1169 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1170 			va, size, domain_string(alloc_domain));
1171 
1172 	memset(&bp, 0, sizeof(bp));
1173 	bp.size = size;
1174 	bp.byte_align = 1;
1175 	bp.domain = alloc_domain;
1176 	bp.flags = alloc_flags;
1177 	bp.type = bo_type;
1178 	bp.resv = NULL;
1179 	ret = amdgpu_bo_create(adev, &bp, &bo);
1180 	if (ret) {
1181 		pr_debug("Failed to create BO on domain %s. ret %d\n",
1182 				domain_string(alloc_domain), ret);
1183 		goto err_bo_create;
1184 	}
1185 	if (bo_type == ttm_bo_type_sg) {
1186 		bo->tbo.sg = sg;
1187 		bo->tbo.ttm->sg = sg;
1188 	}
1189 	bo->kfd_bo = *mem;
1190 	(*mem)->bo = bo;
1191 	if (user_addr)
1192 		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1193 
1194 	(*mem)->va = va;
1195 	(*mem)->domain = domain;
1196 	(*mem)->mapped_to_gpu_memory = 0;
1197 	(*mem)->process_info = avm->process_info;
1198 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1199 
1200 	if (user_addr) {
1201 		ret = init_user_pages(*mem, user_addr);
1202 		if (ret)
1203 			goto allocate_init_user_pages_failed;
1204 	}
1205 
1206 	if (offset)
1207 		*offset = amdgpu_bo_mmap_offset(bo);
1208 
1209 	return 0;
1210 
1211 allocate_init_user_pages_failed:
1212 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1213 	amdgpu_bo_unref(&bo);
1214 	/* Don't unreserve system mem limit twice */
1215 	goto err_reserve_limit;
1216 err_bo_create:
1217 	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1218 err_reserve_limit:
1219 	mutex_destroy(&(*mem)->lock);
1220 	kfree(*mem);
1221 err:
1222 	if (sg) {
1223 		sg_free_table(sg);
1224 		kfree(sg);
1225 	}
1226 	return ret;
1227 }
1228 
1229 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1230 		struct kgd_dev *kgd, struct kgd_mem *mem)
1231 {
1232 	struct amdkfd_process_info *process_info = mem->process_info;
1233 	unsigned long bo_size = mem->bo->tbo.mem.size;
1234 	struct kfd_bo_va_list *entry, *tmp;
1235 	struct bo_vm_reservation_context ctx;
1236 	struct ttm_validate_buffer *bo_list_entry;
1237 	int ret;
1238 
1239 	mutex_lock(&mem->lock);
1240 
1241 	if (mem->mapped_to_gpu_memory > 0) {
1242 		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1243 				mem->va, bo_size);
1244 		mutex_unlock(&mem->lock);
1245 		return -EBUSY;
1246 	}
1247 
1248 	mutex_unlock(&mem->lock);
1249 	/* lock is not needed after this, since mem is unused and will
1250 	 * be freed anyway
1251 	 */
1252 
1253 	/* No more MMU notifiers */
1254 	amdgpu_mn_unregister(mem->bo);
1255 
1256 	/* Make sure restore workers don't access the BO any more */
1257 	bo_list_entry = &mem->validate_list;
1258 	mutex_lock(&process_info->lock);
1259 	list_del(&bo_list_entry->head);
1260 	mutex_unlock(&process_info->lock);
1261 
1262 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1263 	if (unlikely(ret))
1264 		return ret;
1265 
1266 	/* The eviction fence should be removed by the last unmap.
1267 	 * TODO: Log an error condition if the bo still has the eviction fence
1268 	 * attached
1269 	 */
1270 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1271 					process_info->eviction_fence);
1272 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1273 		mem->va + bo_size * (1 + mem->aql_queue));
1274 
1275 	/* Remove from VM internal data structures */
1276 	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1277 		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1278 				entry, bo_size);
1279 
1280 	ret = unreserve_bo_and_vms(&ctx, false, false);
1281 
1282 	/* Free the sync object */
1283 	amdgpu_sync_free(&mem->sync);
1284 
1285 	/* If the SG is not NULL, it's one we created for a doorbell or mmio
1286 	 * remap BO. We need to free it.
1287 	 */
1288 	if (mem->bo->tbo.sg) {
1289 		sg_free_table(mem->bo->tbo.sg);
1290 		kfree(mem->bo->tbo.sg);
1291 	}
1292 
	/* Free the BO */
1294 	amdgpu_bo_unref(&mem->bo);
1295 	mutex_destroy(&mem->lock);
1296 	kfree(mem);
1297 
1298 	return ret;
1299 }
1300 
1301 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1302 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1303 {
1304 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1305 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1306 	int ret;
1307 	struct amdgpu_bo *bo;
1308 	uint32_t domain;
1309 	struct kfd_bo_va_list *entry;
1310 	struct bo_vm_reservation_context ctx;
1311 	struct kfd_bo_va_list *bo_va_entry = NULL;
1312 	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1313 	unsigned long bo_size;
1314 	bool is_invalid_userptr = false;
1315 
1316 	bo = mem->bo;
1317 	if (!bo) {
1318 		pr_err("Invalid BO when mapping memory to GPU\n");
1319 		return -EINVAL;
1320 	}
1321 
1322 	/* Make sure restore is not running concurrently. Since we
1323 	 * don't map invalid userptr BOs, we rely on the next restore
1324 	 * worker to do the mapping
1325 	 */
1326 	mutex_lock(&mem->process_info->lock);
1327 
1328 	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
1329 	 * sure that the MMU notifier is no longer running
1330 	 * concurrently and the queues are actually stopped
1331 	 */
1332 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1333 		down_write(&current->mm->mmap_sem);
1334 		is_invalid_userptr = atomic_read(&mem->invalid);
1335 		up_write(&current->mm->mmap_sem);
1336 	}
1337 
1338 	mutex_lock(&mem->lock);
1339 
1340 	domain = mem->domain;
1341 	bo_size = bo->tbo.mem.size;
1342 
1343 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1344 			mem->va,
1345 			mem->va + bo_size * (1 + mem->aql_queue),
1346 			vm, domain_string(domain));
1347 
1348 	ret = reserve_bo_and_vm(mem, vm, &ctx);
1349 	if (unlikely(ret))
1350 		goto out;
1351 
1352 	/* Userptr can be marked as "not invalid", but not actually be
1353 	 * validated yet (still in the system domain). In that case
1354 	 * the queues are still stopped and we can leave mapping for
1355 	 * the next restore worker
1356 	 */
1357 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1358 	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1359 		is_invalid_userptr = true;
1360 
1361 	if (check_if_add_bo_to_vm(avm, mem)) {
1362 		ret = add_bo_to_vm(adev, mem, avm, false,
1363 				&bo_va_entry);
1364 		if (ret)
1365 			goto add_bo_to_vm_failed;
1366 		if (mem->aql_queue) {
1367 			ret = add_bo_to_vm(adev, mem, avm,
1368 					true, &bo_va_entry_aql);
1369 			if (ret)
1370 				goto add_bo_to_vm_failed_aql;
1371 		}
1372 	} else {
1373 		ret = vm_validate_pt_pd_bos(avm);
1374 		if (unlikely(ret))
1375 			goto add_bo_to_vm_failed;
1376 	}
1377 
1378 	if (mem->mapped_to_gpu_memory == 0 &&
1379 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1380 		/* Validate BO only once. The eviction fence gets added to BO
1381 		 * the first time it is mapped. Validate will wait for all
1382 		 * background evictions to complete.
1383 		 */
1384 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1385 		if (ret) {
1386 			pr_debug("Validate failed\n");
1387 			goto map_bo_to_gpuvm_failed;
1388 		}
1389 	}
1390 
1391 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1392 		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1393 			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1394 					entry->va, entry->va + bo_size,
1395 					entry);
1396 
1397 			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1398 					      is_invalid_userptr);
1399 			if (ret) {
1400 				pr_err("Failed to map bo to gpuvm\n");
1401 				goto map_bo_to_gpuvm_failed;
1402 			}
1403 
1404 			ret = vm_update_pds(vm, ctx.sync);
1405 			if (ret) {
1406 				pr_err("Failed to update page directories\n");
1407 				goto map_bo_to_gpuvm_failed;
1408 			}
1409 
1410 			entry->is_mapped = true;
1411 			mem->mapped_to_gpu_memory++;
1412 			pr_debug("\t INC mapping count %d\n",
1413 					mem->mapped_to_gpu_memory);
1414 		}
1415 	}
1416 
1417 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1418 		amdgpu_bo_fence(bo,
1419 				&avm->process_info->eviction_fence->base,
1420 				true);
1421 	ret = unreserve_bo_and_vms(&ctx, false, false);
1422 
1423 	goto out;
1424 
1425 map_bo_to_gpuvm_failed:
1426 	if (bo_va_entry_aql)
1427 		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1428 add_bo_to_vm_failed_aql:
1429 	if (bo_va_entry)
1430 		remove_bo_from_vm(adev, bo_va_entry, bo_size);
1431 add_bo_to_vm_failed:
1432 	unreserve_bo_and_vms(&ctx, false, false);
1433 out:
1434 	mutex_unlock(&mem->process_info->lock);
1435 	mutex_unlock(&mem->lock);
1436 	return ret;
1437 }
1438 
1439 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1440 		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1441 {
1442 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1443 	struct amdkfd_process_info *process_info =
1444 		((struct amdgpu_vm *)vm)->process_info;
1445 	unsigned long bo_size = mem->bo->tbo.mem.size;
1446 	struct kfd_bo_va_list *entry;
1447 	struct bo_vm_reservation_context ctx;
1448 	int ret;
1449 
1450 	mutex_lock(&mem->lock);
1451 
1452 	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1453 	if (unlikely(ret))
1454 		goto out;
1455 	/* If no VMs were reserved, it means the BO wasn't actually mapped */
1456 	if (ctx.n_vms == 0) {
1457 		ret = -EINVAL;
1458 		goto unreserve_out;
1459 	}
1460 
1461 	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1462 	if (unlikely(ret))
1463 		goto unreserve_out;
1464 
1465 	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1466 		mem->va,
1467 		mem->va + bo_size * (1 + mem->aql_queue),
1468 		vm);
1469 
1470 	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1471 		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1472 			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1473 					entry->va,
1474 					entry->va + bo_size,
1475 					entry);
1476 
1477 			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1478 			if (ret == 0) {
1479 				entry->is_mapped = false;
1480 			} else {
1481 				pr_err("failed to unmap VA 0x%llx\n",
1482 						mem->va);
1483 				goto unreserve_out;
1484 			}
1485 
1486 			mem->mapped_to_gpu_memory--;
1487 			pr_debug("\t DEC mapping count %d\n",
1488 					mem->mapped_to_gpu_memory);
1489 		}
1490 	}
1491 
1492 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
1493 	 * required.
1494 	 */
1495 	if (mem->mapped_to_gpu_memory == 0 &&
1496 	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1497 		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1498 						process_info->eviction_fence);
1499 
1500 unreserve_out:
1501 	unreserve_bo_and_vms(&ctx, false, false);
1502 out:
1503 	mutex_unlock(&mem->lock);
1504 	return ret;
1505 }
1506 
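/* Wait for all pending VM updates tracked in the BO's sync object. The sync
 * object is cloned under mem->lock so the wait itself happens without holding
 * the lock.
 */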
1507 int amdgpu_amdkfd_gpuvm_sync_memory(
1508 		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1509 {
1510 	struct amdgpu_sync sync;
1511 	int ret;
1512 
1513 	amdgpu_sync_create(&sync);
1514 
1515 	mutex_lock(&mem->lock);
1516 	amdgpu_sync_clone(&mem->sync, &sync);
1517 	mutex_unlock(&mem->lock);
1518 
1519 	ret = amdgpu_sync_wait(&sync, intr);
1520 	amdgpu_sync_free(&sync);
1521 	return ret;
1522 }
1523 
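/* Pin a GTT BO and map it into the kernel address space. The BO is removed
 * from the KFD BO list and its eviction fence is dropped so it will not be
 * evicted or re-validated while the kernel mapping exists.
 */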
1524 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1525 		struct kgd_mem *mem, void **kptr, uint64_t *size)
1526 {
1527 	int ret;
1528 	struct amdgpu_bo *bo = mem->bo;
1529 
1530 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1531 		pr_err("userptr can't be mapped to kernel\n");
1532 		return -EINVAL;
1533 	}
1534 
	/* Remove this kgd_mem from the KFD BO list so that the BO is not
	 * re-validated by the restore worker after an eviction while it is
	 * pinned and mapped in the kernel.
	 */
1538 	mutex_lock(&mem->process_info->lock);
1539 
1540 	ret = amdgpu_bo_reserve(bo, true);
1541 	if (ret) {
1542 		pr_err("Failed to reserve bo. ret %d\n", ret);
1543 		goto bo_reserve_failed;
1544 	}
1545 
1546 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1547 	if (ret) {
1548 		pr_err("Failed to pin bo. ret %d\n", ret);
1549 		goto pin_failed;
1550 	}
1551 
1552 	ret = amdgpu_bo_kmap(bo, kptr);
1553 	if (ret) {
1554 		pr_err("Failed to map bo to kernel. ret %d\n", ret);
1555 		goto kmap_failed;
1556 	}
1557 
1558 	amdgpu_amdkfd_remove_eviction_fence(
1559 		bo, mem->process_info->eviction_fence);
1560 	list_del_init(&mem->validate_list.head);
1561 
1562 	if (size)
1563 		*size = amdgpu_bo_size(bo);
1564 
1565 	amdgpu_bo_unreserve(bo);
1566 
1567 	mutex_unlock(&mem->process_info->lock);
1568 	return 0;
1569 
1570 kmap_failed:
1571 	amdgpu_bo_unpin(bo);
1572 pin_failed:
1573 	amdgpu_bo_unreserve(bo);
1574 bo_reserve_failed:
1575 	mutex_unlock(&mem->process_info->lock);
1576 
1577 	return ret;
1578 }
1579 
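/* Copy the most recent VM fault information recorded by the GMC interrupt
 * handler and clear the updated flag so the next fault can be captured.
 */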
1580 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1581 					      struct kfd_vm_fault_info *mem)
1582 {
1583 	struct amdgpu_device *adev;
1584 
1585 	adev = (struct amdgpu_device *)kgd;
1586 	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1587 		*mem = *adev->gmc.vm_fault_info;
1588 		mb();
1589 		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1590 	}
1591 	return 0;
1592 }
1593 
1594 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1595 				      struct dma_buf *dma_buf,
1596 				      uint64_t va, void *vm,
1597 				      struct kgd_mem **mem, uint64_t *size,
1598 				      uint64_t *mmap_offset)
1599 {
1600 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1601 	struct drm_gem_object *obj;
1602 	struct amdgpu_bo *bo;
1603 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1604 
1605 	if (dma_buf->ops != &amdgpu_dmabuf_ops)
1606 		/* Can't handle non-graphics buffers */
1607 		return -EINVAL;
1608 
1609 	obj = dma_buf->priv;
1610 	if (obj->dev->dev_private != adev)
1611 		/* Can't handle buffers from other devices */
1612 		return -EINVAL;
1613 
1614 	bo = gem_to_amdgpu_bo(obj);
1615 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1616 				    AMDGPU_GEM_DOMAIN_GTT)))
1617 		/* Only VRAM and GTT BOs are supported */
1618 		return -EINVAL;
1619 
1620 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1621 	if (!*mem)
1622 		return -ENOMEM;
1623 
1624 	if (size)
1625 		*size = amdgpu_bo_size(bo);
1626 
1627 	if (mmap_offset)
1628 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
1629 
1630 	INIT_LIST_HEAD(&(*mem)->bo_va_list);
1631 	mutex_init(&(*mem)->lock);
1632 	(*mem)->alloc_flags =
1633 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1634 		 ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
1635 		ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
1636 
1637 	(*mem)->bo = amdgpu_bo_ref(bo);
1638 	(*mem)->va = va;
1639 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1640 		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1641 	(*mem)->mapped_to_gpu_memory = 0;
1642 	(*mem)->process_info = avm->process_info;
1643 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1644 	amdgpu_sync_create(&(*mem)->sync);
1645 
1646 	return 0;
1647 }
1648 
1649 /* Evict a userptr BO by stopping the queues if necessary
1650  *
1651  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1652  * cannot do any memory allocations, and cannot take any locks that
1653  * are held elsewhere while allocating memory. Therefore this is as
1654  * simple as possible, using atomic counters.
1655  *
1656  * It doesn't do anything to the BO itself. The real work happens in
1657  * restore, where we get updated page addresses. This function only
1658  * ensures that GPU access to the BO is stopped.
1659  */
1660 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1661 				struct mm_struct *mm)
1662 {
1663 	struct amdkfd_process_info *process_info = mem->process_info;
1664 	int invalid, evicted_bos;
1665 	int r = 0;
1666 
1667 	invalid = atomic_inc_return(&mem->invalid);
1668 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1669 	if (evicted_bos == 1) {
1670 		/* First eviction, stop the queues */
1671 		r = kgd2kfd_quiesce_mm(mm);
1672 		if (r)
1673 			pr_err("Failed to quiesce KFD\n");
1674 		schedule_delayed_work(&process_info->restore_userptr_work,
1675 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1676 	}
1677 
1678 	return r;
1679 }
1680 
1681 /* Update invalid userptr BOs
1682  *
1683  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1684  * userptr_inval_list and updates user pages for all BOs that have
1685  * been invalidated since their last update.
1686  */
1687 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1688 				     struct mm_struct *mm)
1689 {
1690 	struct kgd_mem *mem, *tmp_mem;
1691 	struct amdgpu_bo *bo;
1692 	struct ttm_operation_ctx ctx = { false, false };
1693 	int invalid, ret;
1694 
1695 	/* Move all invalidated BOs to the userptr_inval_list and
1696 	 * release their user pages by migration to the CPU domain
1697 	 */
1698 	list_for_each_entry_safe(mem, tmp_mem,
1699 				 &process_info->userptr_valid_list,
1700 				 validate_list.head) {
1701 		if (!atomic_read(&mem->invalid))
1702 			continue; /* BO is still valid */
1703 
1704 		bo = mem->bo;
1705 
1706 		if (amdgpu_bo_reserve(bo, true))
1707 			return -EAGAIN;
1708 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1709 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1710 		amdgpu_bo_unreserve(bo);
1711 		if (ret) {
1712 			pr_err("%s: Failed to invalidate userptr BO\n",
1713 			       __func__);
1714 			return -EAGAIN;
1715 		}
1716 
1717 		list_move_tail(&mem->validate_list.head,
1718 			       &process_info->userptr_inval_list);
1719 	}
1720 
1721 	if (list_empty(&process_info->userptr_inval_list))
1722 		return 0; /* All evicted userptr BOs were freed */
1723 
1724 	/* Go through userptr_inval_list and update any invalid user_pages */
1725 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1726 			    validate_list.head) {
1727 		invalid = atomic_read(&mem->invalid);
1728 		if (!invalid)
1729 			/* BO hasn't been invalidated since the last
1730 			 * revalidation attempt. Keep its BO list.
1731 			 */
1732 			continue;
1733 
1734 		bo = mem->bo;
1735 
1736 		/* Get updated user pages */
1737 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1738 		if (ret) {
1739 			pr_debug("%s: Failed to get user pages: %d\n",
1740 				__func__, ret);
1741 
1742 			/* Return error -EBUSY or -ENOMEM, retry restore */
1743 			return ret;
1744 		}
1745 
1746 		/*
1747 		 * FIXME: Cannot ignore the return code, must hold
1748 		 * notifier_lock
1749 		 */
1750 		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1751 
1752 		/* Mark the BO as valid unless it was invalidated
1753 		 * again concurrently.
1754 		 */
1755 		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1756 			return -EAGAIN;
1757 	}
1758 
1759 	return 0;
1760 }
1761 
1762 /* Validate invalid userptr BOs
1763  *
1764  * Validates BOs on the userptr_inval_list, and moves them back to the
1765  * userptr_valid_list. Also updates GPUVM page tables with new page
1766  * addresses and waits for the page table updates to complete.
1767  */
1768 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1769 {
1770 	struct amdgpu_bo_list_entry *pd_bo_list_entries;
1771 	struct list_head resv_list, duplicates;
1772 	struct ww_acquire_ctx ticket;
1773 	struct amdgpu_sync sync;
1774 
1775 	struct amdgpu_vm *peer_vm;
1776 	struct kgd_mem *mem, *tmp_mem;
1777 	struct amdgpu_bo *bo;
1778 	struct ttm_operation_ctx ctx = { false, false };
1779 	int i, ret;
1780 
1781 	pd_bo_list_entries = kcalloc(process_info->n_vms,
1782 				     sizeof(struct amdgpu_bo_list_entry),
1783 				     GFP_KERNEL);
1784 	if (!pd_bo_list_entries) {
1785 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1786 		ret = -ENOMEM;
1787 		goto out_no_mem;
1788 	}
1789 
1790 	INIT_LIST_HEAD(&resv_list);
1791 	INIT_LIST_HEAD(&duplicates);
1792 
1793 	/* Get all the page directory BOs that need to be reserved */
1794 	i = 0;
1795 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1796 			    vm_list_node)
1797 		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1798 				    &pd_bo_list_entries[i++]);
1799 	/* Add the userptr_inval_list entries to resv_list */
1800 	list_for_each_entry(mem, &process_info->userptr_inval_list,
1801 			    validate_list.head) {
1802 		list_add_tail(&mem->resv_list.head, &resv_list);
1803 		mem->resv_list.bo = mem->validate_list.bo;
1804 		mem->resv_list.num_shared = mem->validate_list.num_shared;
1805 	}
1806 
1807 	/* Reserve all BOs and page tables for validation */
1808 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1809 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
1810 	if (ret)
1811 		goto out_free;
1812 
	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_bo_va_list *bo_va_entry;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
			if (!bo_va_entry->is_mapped)
				continue;

			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:

	return ret;
}

/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

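	/* Nothing to do if no userptr BOs are currently evicted */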
	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
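	/* Success: clear the local count so we don't reschedule below */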
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}

/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 * @ef: Updated to hold a new reference to the process's eviction fence
 *
 * After memory eviction, the restore thread calls this function. It must be
 * called while the process is still valid. BO restore involves:
 *
 * 1.  Release the old eviction fence and create a new one
 * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_bo_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
 * 7.  Add the fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

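	/* sync_obj collects the fences of all validations and page table
	 * updates below, so they can be waited on together before the new
	 * eviction fence is attached.
	 */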
	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_bo_va_list *bo_va_entry;

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
			goto validate_map_fail;
		}
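		/* Add the BO's move fence (if any) to sync_obj, so the
		 * restore also waits for any pending migration of this BO.
		 */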
		ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
				    bo_list) {
			ret = update_gpuvm_pte((struct amdgpu_device *)
					      bo_va_entry->kgd_dev,
					      bo_va_entry,
					      &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release the old eviction fence and create a new one. A fence can
	 * only go from unsignaled to signaled once, so it cannot be reused.
	 * Use the context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);
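	/* The caller gets its own reference to the new eviction fence */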

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
		validate_list.head)
		amdgpu_bo_fence(mem->bo,
			&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}

int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

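	/* Wrap the GWS BO in a kgd_mem so it is tracked on the process's
	 * kfd_bo_list and handled like other KFD BOs during restore.
	 */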
	mutex_init(&(*mem)->lock);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* GWS resource is shared between amdgpu and amdkfd. Add the process
	 * eviction fence to the BO so that they can evict each other.
	 */
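	/* Make room for one more shared fence before attaching it */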
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}

int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		//TODO add BO back to validate_list?
		return ret;
	}
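	/* Detach the process eviction fence before dropping our references */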
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(mem);
	return 0;
}