1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27 
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34 #include "amdgpu_xgmi.h"
35 
36 /* Userptr restore delay, just long enough to allow consecutive VM
37  * changes to accumulate
38  */
39 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40 
41 /* Impose limit on how much memory KFD can use */
42 static struct {
43 	uint64_t max_system_mem_limit;
44 	uint64_t max_ttm_mem_limit;
45 	int64_t system_mem_used;
46 	int64_t ttm_mem_used;
47 	spinlock_t mem_limit_lock;
48 } kfd_mem_limit;
49 
50 /* Struct used for amdgpu_amdkfd_bo_validate */
51 struct amdgpu_vm_parser {
52 	uint32_t        domain;
53 	bool            wait;
54 };
55 
56 static const char * const domain_bit_to_string[] = {
57 		"CPU",
58 		"GTT",
59 		"VRAM",
60 		"GDS",
61 		"GWS",
62 		"OA"
63 };
64 
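/* ffs() returns the 1-based index of the lowest set bit, so a single-bit
 * domain mask indexes directly into the name table above.
 */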
65 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
66 
67 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
68 
69 
70 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
71 {
72 	return (struct amdgpu_device *)kgd;
73 }
74 
75 static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
76 		struct kgd_mem *mem)
77 {
78 	struct kfd_mem_attachment *entry;
79 
80 	list_for_each_entry(entry, &mem->attachments, list)
81 		if (entry->bo_va->base.vm == avm)
82 			return true;
83 
84 	return false;
85 }
86 
/* Set memory usage limits. Currently, the limits are:
88  *  System (TTM + userptr) memory - 15/16th System RAM
89  *  TTM memory - 3/8th System RAM
90  */
91 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
92 {
93 	struct sysinfo si;
94 	uint64_t mem;
95 
96 	si_meminfo(&si);
97 	mem = si.freeram - si.freehigh;
98 	mem *= si.mem_unit;
99 
100 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
101 	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
102 	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
103 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
104 		(kfd_mem_limit.max_system_mem_limit >> 20),
105 		(kfd_mem_limit.max_ttm_mem_limit >> 20));
106 }
107 
108 void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
109 {
110 	kfd_mem_limit.system_mem_used += size;
111 }
112 
113 /* Estimate page table size needed to represent a given memory size
114  *
115  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
116  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
117  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
118  * for 2MB pages for TLB efficiency. However, small allocations and
119  * fragmented system memory still need some 4KB pages. We choose a
120  * compromise that should work in most cases without reserving too
121  * much memory for page tables unnecessarily (factor 16K, >> 14).
122  */
123 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
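/* Example: with 64 GB of total memory this reserves 64 GB >> 14 = 4 MB. */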
124 
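/* Account for the driver-internal overhead of a BO: the amdgpu_bo and
 * ttm_tt structures plus the per-page dma_addr_t and page pointer arrays
 * that TTM allocates for it.
 */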
125 static size_t amdgpu_amdkfd_acc_size(uint64_t size)
126 {
127 	size >>= PAGE_SHIFT;
128 	size *= sizeof(dma_addr_t) + sizeof(void *);
129 
130 	return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
131 		__roundup_pow_of_two(sizeof(struct ttm_tt)) +
132 		PAGE_ALIGN(size);
133 }
134 
135 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
136 		uint64_t size, u32 domain, bool sg)
137 {
138 	uint64_t reserved_for_pt =
139 		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
140 	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
141 	int ret = 0;
142 
143 	acc_size = amdgpu_amdkfd_acc_size(size);
144 
145 	vram_needed = 0;
146 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
147 		/* TTM GTT memory */
148 		system_mem_needed = acc_size + size;
149 		ttm_mem_needed = acc_size + size;
150 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
151 		/* Userptr */
152 		system_mem_needed = acc_size + size;
153 		ttm_mem_needed = acc_size;
154 	} else {
155 		/* VRAM and SG */
156 		system_mem_needed = acc_size;
157 		ttm_mem_needed = acc_size;
158 		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
159 			vram_needed = size;
160 	}
161 
162 	spin_lock(&kfd_mem_limit.mem_limit_lock);
163 
164 	if (kfd_mem_limit.system_mem_used + system_mem_needed >
165 	    kfd_mem_limit.max_system_mem_limit)
166 		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
167 
168 	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
169 	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
170 	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
171 	     kfd_mem_limit.max_ttm_mem_limit) ||
172 	    (adev->kfd.vram_used + vram_needed >
173 	     adev->gmc.real_vram_size - reserved_for_pt)) {
174 		ret = -ENOMEM;
175 	} else {
176 		kfd_mem_limit.system_mem_used += system_mem_needed;
177 		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
178 		adev->kfd.vram_used += vram_needed;
179 	}
180 
181 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
182 	return ret;
183 }
184 
185 static void unreserve_mem_limit(struct amdgpu_device *adev,
186 		uint64_t size, u32 domain, bool sg)
187 {
188 	size_t acc_size;
189 
190 	acc_size = amdgpu_amdkfd_acc_size(size);
191 
192 	spin_lock(&kfd_mem_limit.mem_limit_lock);
193 	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
194 		kfd_mem_limit.system_mem_used -= (acc_size + size);
195 		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
196 	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
197 		kfd_mem_limit.system_mem_used -= (acc_size + size);
198 		kfd_mem_limit.ttm_mem_used -= acc_size;
199 	} else {
200 		kfd_mem_limit.system_mem_used -= acc_size;
201 		kfd_mem_limit.ttm_mem_used -= acc_size;
202 		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
203 			adev->kfd.vram_used -= size;
204 			WARN_ONCE(adev->kfd.vram_used < 0,
205 				  "kfd VRAM memory accounting unbalanced");
206 		}
207 	}
208 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
209 		  "kfd system memory accounting unbalanced");
210 	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
211 		  "kfd TTM memory accounting unbalanced");
212 
213 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
214 }
215 
216 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
217 {
218 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
219 	u32 domain = bo->preferred_domains;
220 	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
221 
222 	if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
223 		domain = AMDGPU_GEM_DOMAIN_CPU;
224 		sg = false;
225 	}
226 
227 	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
228 }
229 
230 
231 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
232  *  reservation object.
233  *
234  * @bo: [IN] Remove eviction fence(s) from this BO
235  * @ef: [IN] This eviction fence is removed if it
236  *  is present in the shared list.
237  *
 * NOTE: Must be called with the BO reserved, i.e. bo->tbo.base.resv held.
239  */
240 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
241 					struct amdgpu_amdkfd_fence *ef)
242 {
243 	struct dma_resv *resv = bo->tbo.base.resv;
244 	struct dma_resv_list *old, *new;
245 	unsigned int i, j, k;
246 
247 	if (!ef)
248 		return -EINVAL;
249 
250 	old = dma_resv_get_list(resv);
251 	if (!old)
252 		return 0;
253 
254 	new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
255 	if (!new)
256 		return -ENOMEM;
257 
	/* Go through all the shared fences in the reservation object and sort
259 	 * the interesting ones to the end of the list.
260 	 */
261 	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
262 		struct dma_fence *f;
263 
264 		f = rcu_dereference_protected(old->shared[i],
265 					      dma_resv_held(resv));
266 
267 		if (f->context == ef->base.context)
268 			RCU_INIT_POINTER(new->shared[--j], f);
269 		else
270 			RCU_INIT_POINTER(new->shared[k++], f);
271 	}
272 	new->shared_max = old->shared_max;
273 	new->shared_count = k;
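	/* At this point k == j: entries [0, k) are the fences we keep and
	 * entries [j, old->shared_count) are the eviction fences to drop.
	 */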
274 
275 	/* Install the new fence list, seqcount provides the barriers */
276 	write_seqcount_begin(&resv->seq);
277 	RCU_INIT_POINTER(resv->fence, new);
278 	write_seqcount_end(&resv->seq);
279 
280 	/* Drop the references to the removed fences or move them to ef_list */
281 	for (i = j; i < old->shared_count; ++i) {
282 		struct dma_fence *f;
283 
284 		f = rcu_dereference_protected(new->shared[i],
285 					      dma_resv_held(resv));
286 		dma_fence_put(f);
287 	}
288 	kfree_rcu(old, rcu);
289 
290 	return 0;
291 }
292 
293 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
294 {
295 	struct amdgpu_bo *root = bo;
296 	struct amdgpu_vm_bo_base *vm_bo;
297 	struct amdgpu_vm *vm;
298 	struct amdkfd_process_info *info;
299 	struct amdgpu_amdkfd_fence *ef;
300 	int ret;
301 
	/* We can always get vm_bo from the root PD BO. */
303 	while (root->parent)
304 		root = root->parent;
305 
306 	vm_bo = root->vm_bo;
307 	if (!vm_bo)
308 		return 0;
309 
310 	vm = vm_bo->vm;
311 	if (!vm)
312 		return 0;
313 
314 	info = vm->process_info;
315 	if (!info || !info->eviction_fence)
316 		return 0;
317 
318 	ef = container_of(dma_fence_get(&info->eviction_fence->base),
319 			struct amdgpu_amdkfd_fence, base);
320 
321 	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
322 	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
323 	dma_resv_unlock(bo->tbo.base.resv);
324 
325 	dma_fence_put(&ef->base);
326 	return ret;
327 }
328 
329 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
330 				     bool wait)
331 {
332 	struct ttm_operation_ctx ctx = { false, false };
333 	int ret;
334 
335 	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
336 		 "Called with userptr BO"))
337 		return -EINVAL;
338 
339 	amdgpu_bo_placement_from_domain(bo, domain);
340 
341 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
342 	if (ret)
343 		goto validate_fail;
344 	if (wait)
345 		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
346 
347 validate_fail:
348 	return ret;
349 }
350 
351 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
352 {
353 	struct amdgpu_vm_parser *p = param;
354 
355 	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
356 }
357 
358 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
359  *
360  * Page directories are not updated here because huge page handling
361  * during page table updates can invalidate page directory entries
362  * again. Page directories are only updated after updating page
363  * tables.
364  */
365 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
366 {
367 	struct amdgpu_bo *pd = vm->root.base.bo;
368 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
369 	struct amdgpu_vm_parser param;
370 	int ret;
371 
372 	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
373 	param.wait = false;
374 
375 	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
376 					&param);
377 	if (ret) {
378 		pr_err("failed to validate PT BOs\n");
379 		return ret;
380 	}
381 
382 	ret = amdgpu_amdkfd_validate(&param, pd);
383 	if (ret) {
384 		pr_err("failed to validate PD\n");
385 		return ret;
386 	}
387 
388 	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
389 
390 	if (vm->use_cpu_for_update) {
391 		ret = amdgpu_bo_kmap(pd, NULL);
392 		if (ret) {
393 			pr_err("failed to kmap PD, ret=%d\n", ret);
394 			return ret;
395 		}
396 	}
397 
398 	return 0;
399 }
400 
401 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
402 {
403 	struct amdgpu_bo *pd = vm->root.base.bo;
404 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
405 	int ret;
406 
407 	ret = amdgpu_vm_update_pdes(adev, vm, false);
408 	if (ret)
409 		return ret;
410 
411 	return amdgpu_sync_fence(sync, vm->last_update);
412 }
413 
414 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
415 {
416 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
417 	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
418 	bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
419 	uint32_t mapping_flags;
420 	uint64_t pte_flags;
421 	bool snoop = false;
422 
423 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
424 	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
425 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
426 	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
427 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
428 
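	/* Pick the ASIC-specific memory type (MTYPE): local VRAM can use
	 * cache-coherent (CC) or read-write (RW) mappings, while remote VRAM
	 * and system memory fall back to uncached (UC) or non-coherent (NC).
	 */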
429 	switch (adev->asic_type) {
430 	case CHIP_ARCTURUS:
431 		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
432 			if (bo_adev == adev)
433 				mapping_flags |= coherent ?
434 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
435 			else
436 				mapping_flags |= coherent ?
437 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
438 		} else {
439 			mapping_flags |= coherent ?
440 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
441 		}
442 		break;
443 	case CHIP_ALDEBARAN:
444 		if (coherent && uncached) {
445 			if (adev->gmc.xgmi.connected_to_cpu ||
446 				!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
447 				snoop = true;
448 			mapping_flags |= AMDGPU_VM_MTYPE_UC;
449 		} else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
450 			if (bo_adev == adev) {
451 				mapping_flags |= coherent ?
452 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
453 				if (adev->gmc.xgmi.connected_to_cpu)
454 					snoop = true;
455 			} else {
456 				mapping_flags |= coherent ?
457 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
458 				if (amdgpu_xgmi_same_hive(adev, bo_adev))
459 					snoop = true;
460 			}
461 		} else {
462 			snoop = true;
463 			mapping_flags |= coherent ?
464 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
465 		}
466 		break;
467 	default:
468 		mapping_flags |= coherent ?
469 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
470 	}
471 
472 	pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
473 	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
474 
475 	return pte_flags;
476 }
477 
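/* Create an sg_table from the source userptr BO's pages, DMA-map it for
 * the attachment's device and validate the SG BO into the GTT domain.
 */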
478 static int
479 kfd_mem_dmamap_userptr(struct kgd_mem *mem,
480 		       struct kfd_mem_attachment *attachment)
481 {
482 	enum dma_data_direction direction =
483 		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
484 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
485 	struct ttm_operation_ctx ctx = {.interruptible = true};
486 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
487 	struct amdgpu_device *adev = attachment->adev;
488 	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
489 	struct ttm_tt *ttm = bo->tbo.ttm;
490 	int ret;
491 
	/* Check the page count first so an early error does not leak ttm->sg */
	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
		return -EINVAL;

	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;
498 
499 	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
500 	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
501 					ttm->num_pages, 0,
502 					(u64)ttm->num_pages << PAGE_SHIFT,
503 					GFP_KERNEL);
504 	if (unlikely(ret))
505 		goto free_sg;
506 
507 	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
508 	if (unlikely(ret))
509 		goto release_sg;
510 
511 	drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
512 				       ttm->num_pages);
513 
514 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
515 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
516 	if (ret)
517 		goto unmap_sg;
518 
519 	return 0;
520 
521 unmap_sg:
522 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
523 release_sg:
524 	pr_err("DMA map userptr failed: %d\n", ret);
525 	sg_free_table(ttm->sg);
526 free_sg:
527 	kfree(ttm->sg);
528 	ttm->sg = NULL;
529 	return ret;
530 }
531 
532 static int
533 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
534 {
535 	struct ttm_operation_ctx ctx = {.interruptible = true};
536 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
537 
538 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
539 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
540 }
541 
542 static int
543 kfd_mem_dmamap_attachment(struct kgd_mem *mem,
544 			  struct kfd_mem_attachment *attachment)
545 {
546 	switch (attachment->type) {
547 	case KFD_MEM_ATT_SHARED:
548 		return 0;
549 	case KFD_MEM_ATT_USERPTR:
550 		return kfd_mem_dmamap_userptr(mem, attachment);
551 	case KFD_MEM_ATT_DMABUF:
552 		return kfd_mem_dmamap_dmabuf(attachment);
553 	default:
554 		WARN_ON_ONCE(1);
555 	}
556 	return -EINVAL;
557 }
558 
559 static void
560 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
561 			 struct kfd_mem_attachment *attachment)
562 {
563 	enum dma_data_direction direction =
564 		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
565 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
566 	struct ttm_operation_ctx ctx = {.interruptible = false};
567 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
568 	struct amdgpu_device *adev = attachment->adev;
569 	struct ttm_tt *ttm = bo->tbo.ttm;
570 
571 	if (unlikely(!ttm->sg))
572 		return;
573 
574 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
575 	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
576 
577 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
578 	sg_free_table(ttm->sg);
579 	ttm->sg = NULL;
580 }
581 
582 static void
583 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
584 {
585 	struct ttm_operation_ctx ctx = {.interruptible = true};
586 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
587 
588 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
589 	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
590 }
591 
592 static void
593 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
594 			    struct kfd_mem_attachment *attachment)
595 {
596 	switch (attachment->type) {
597 	case KFD_MEM_ATT_SHARED:
598 		break;
599 	case KFD_MEM_ATT_USERPTR:
600 		kfd_mem_dmaunmap_userptr(mem, attachment);
601 		break;
602 	case KFD_MEM_ATT_DMABUF:
603 		kfd_mem_dmaunmap_dmabuf(attachment);
604 		break;
605 	default:
606 		WARN_ON_ONCE(1);
607 	}
608 }
609 
610 static int
611 kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
612 		       struct amdgpu_bo **bo)
613 {
614 	unsigned long bo_size = mem->bo->tbo.base.size;
615 	struct drm_gem_object *gobj;
616 	int ret;
617 
618 	ret = amdgpu_bo_reserve(mem->bo, false);
619 	if (ret)
620 		return ret;
621 
622 	ret = amdgpu_gem_object_create(adev, bo_size, 1,
623 				       AMDGPU_GEM_DOMAIN_CPU,
624 				       AMDGPU_GEM_CREATE_PREEMPTIBLE,
625 				       ttm_bo_type_sg, mem->bo->tbo.base.resv,
626 				       &gobj);
627 	amdgpu_bo_unreserve(mem->bo);
628 	if (ret)
629 		return ret;
630 
631 	*bo = gem_to_amdgpu_bo(gobj);
632 	(*bo)->parent = amdgpu_bo_ref(mem->bo);
633 
634 	return 0;
635 }
636 
637 static int
638 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
639 		      struct amdgpu_bo **bo)
640 {
641 	struct drm_gem_object *gobj;
642 	int ret;
643 
644 	if (!mem->dmabuf) {
645 		mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
646 			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
647 				DRM_RDWR : 0);
648 		if (IS_ERR(mem->dmabuf)) {
649 			ret = PTR_ERR(mem->dmabuf);
650 			mem->dmabuf = NULL;
651 			return ret;
652 		}
653 	}
654 
655 	gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf);
656 	if (IS_ERR(gobj))
657 		return PTR_ERR(gobj);
658 
659 	/* Import takes an extra reference on the dmabuf. Drop it now to
660 	 * avoid leaking it. We only need the one reference in
661 	 * kgd_mem->dmabuf.
662 	 */
663 	dma_buf_put(mem->dmabuf);
664 
665 	*bo = gem_to_amdgpu_bo(gobj);
666 	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
667 	(*bo)->parent = amdgpu_bo_ref(mem->bo);
668 
669 	return 0;
670 }
671 
672 /* kfd_mem_attach - Add a BO to a VM
673  *
 * Everything that needs to be done only once when a BO is first added
675  * to a VM. It can later be mapped and unmapped many times without
676  * repeating these steps.
677  *
678  * 0. Create BO for DMA mapping, if needed
679  * 1. Allocate and initialize BO VA entry data structure
680  * 2. Add BO to the VM
681  * 3. Determine ASIC-specific PTE flags
682  * 4. Alloc page tables and directories if needed
683  * 4a.  Validate new page tables and directories
684  */
685 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
686 		struct amdgpu_vm *vm, bool is_aql)
687 {
688 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
689 	unsigned long bo_size = mem->bo->tbo.base.size;
690 	uint64_t va = mem->va;
691 	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
692 	struct amdgpu_bo *bo[2] = {NULL, NULL};
693 	int i, ret;
694 
695 	if (!va) {
696 		pr_err("Invalid VA when adding BO to VM\n");
697 		return -EINVAL;
698 	}
699 
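	/* AQL queues are mapped twice (see the wraparound workaround in
	 * amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu), so up to two attachments
	 * are created here, the second one at va + bo_size.
	 */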
700 	for (i = 0; i <= is_aql; i++) {
701 		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
702 		if (unlikely(!attachment[i])) {
703 			ret = -ENOMEM;
704 			goto unwind;
705 		}
706 
707 		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
708 			 va + bo_size, vm);
709 
710 		if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
711 					amdgpu_xgmi_same_hive(adev, bo_adev))) {
712 			/* Mappings on the local GPU and VRAM mappings in the
713 			 * local hive share the original BO
714 			 */
715 			attachment[i]->type = KFD_MEM_ATT_SHARED;
716 			bo[i] = mem->bo;
717 			drm_gem_object_get(&bo[i]->tbo.base);
718 		} else if (i > 0) {
719 			/* Multiple mappings on the same GPU share the BO */
720 			attachment[i]->type = KFD_MEM_ATT_SHARED;
721 			bo[i] = bo[0];
722 			drm_gem_object_get(&bo[i]->tbo.base);
723 		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
724 			/* Create an SG BO to DMA-map userptrs on other GPUs */
725 			attachment[i]->type = KFD_MEM_ATT_USERPTR;
726 			ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
727 			if (ret)
728 				goto unwind;
729 		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
730 			   mem->bo->tbo.type != ttm_bo_type_sg) {
731 			/* GTT BOs use DMA-mapping ability of dynamic-attach
732 			 * DMA bufs. TODO: The same should work for VRAM on
733 			 * large-BAR GPUs.
734 			 */
735 			attachment[i]->type = KFD_MEM_ATT_DMABUF;
736 			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
737 			if (ret)
738 				goto unwind;
739 		} else {
740 			/* FIXME: Need to DMA-map other BO types:
741 			 * large-BAR VRAM, doorbells, MMIO remap
742 			 */
743 			attachment[i]->type = KFD_MEM_ATT_SHARED;
744 			bo[i] = mem->bo;
745 			drm_gem_object_get(&bo[i]->tbo.base);
746 		}
747 
748 		/* Add BO to VM internal data structures */
749 		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
750 		if (unlikely(!attachment[i]->bo_va)) {
751 			ret = -ENOMEM;
752 			pr_err("Failed to add BO object to VM. ret == %d\n",
753 			       ret);
754 			goto unwind;
755 		}
756 
757 		attachment[i]->va = va;
758 		attachment[i]->pte_flags = get_pte_flags(adev, mem);
759 		attachment[i]->adev = adev;
760 		list_add(&attachment[i]->list, &mem->attachments);
761 
762 		va += bo_size;
763 	}
764 
765 	return 0;
766 
767 unwind:
768 	for (; i >= 0; i--) {
769 		if (!attachment[i])
770 			continue;
771 		if (attachment[i]->bo_va) {
772 			amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
773 			list_del(&attachment[i]->list);
774 		}
775 		if (bo[i])
776 			drm_gem_object_put(&bo[i]->tbo.base);
777 		kfree(attachment[i]);
778 	}
779 	return ret;
780 }
781 
782 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
783 {
784 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
785 
786 	pr_debug("\t remove VA 0x%llx in entry %p\n",
787 			attachment->va, attachment);
788 	amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
789 	drm_gem_object_put(&bo->tbo.base);
790 	list_del(&attachment->list);
791 	kfree(attachment);
792 }
793 
794 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
795 				struct amdkfd_process_info *process_info,
796 				bool userptr)
797 {
798 	struct ttm_validate_buffer *entry = &mem->validate_list;
799 	struct amdgpu_bo *bo = mem->bo;
800 
801 	INIT_LIST_HEAD(&entry->head);
802 	entry->num_shared = 1;
803 	entry->bo = &bo->tbo;
804 	mutex_lock(&process_info->lock);
805 	if (userptr)
806 		list_add_tail(&entry->head, &process_info->userptr_valid_list);
807 	else
808 		list_add_tail(&entry->head, &process_info->kfd_bo_list);
809 	mutex_unlock(&process_info->lock);
810 }
811 
812 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
813 		struct amdkfd_process_info *process_info)
814 {
815 	struct ttm_validate_buffer *bo_list_entry;
816 
817 	bo_list_entry = &mem->validate_list;
818 	mutex_lock(&process_info->lock);
819 	list_del(&bo_list_entry->head);
820 	mutex_unlock(&process_info->lock);
821 }
822 
823 /* Initializes user pages. It registers the MMU notifier and validates
824  * the userptr BO in the GTT domain.
825  *
826  * The BO must already be on the userptr_valid_list. Otherwise an
827  * eviction and restore may happen that leaves the new BO unmapped
828  * with the user mode queues running.
829  *
830  * Takes the process_info->lock to protect against concurrent restore
831  * workers.
832  *
833  * Returns 0 for success, negative errno for errors.
834  */
835 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
836 {
837 	struct amdkfd_process_info *process_info = mem->process_info;
838 	struct amdgpu_bo *bo = mem->bo;
839 	struct ttm_operation_ctx ctx = { true, false };
840 	int ret = 0;
841 
842 	mutex_lock(&process_info->lock);
843 
844 	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
845 	if (ret) {
846 		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
847 		goto out;
848 	}
849 
850 	ret = amdgpu_mn_register(bo, user_addr);
851 	if (ret) {
852 		pr_err("%s: Failed to register MMU notifier: %d\n",
853 		       __func__, ret);
854 		goto out;
855 	}
856 
857 	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
858 	if (ret) {
859 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
860 		goto unregister_out;
861 	}
862 
863 	ret = amdgpu_bo_reserve(bo, true);
864 	if (ret) {
865 		pr_err("%s: Failed to reserve BO\n", __func__);
866 		goto release_out;
867 	}
868 	amdgpu_bo_placement_from_domain(bo, mem->domain);
869 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
870 	if (ret)
871 		pr_err("%s: failed to validate BO\n", __func__);
872 	amdgpu_bo_unreserve(bo);
873 
874 release_out:
875 	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
876 unregister_out:
877 	if (ret)
878 		amdgpu_mn_unregister(bo);
879 out:
880 	mutex_unlock(&process_info->lock);
881 	return ret;
882 }
883 
884 /* Reserving a BO and its page table BOs must happen atomically to
885  * avoid deadlocks. Some operations update multiple VMs at once. Track
886  * all the reservation info in a context structure. Optionally a sync
887  * object can track VM updates.
888  */
889 struct bo_vm_reservation_context {
890 	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
891 	unsigned int n_vms;		    /* Number of VMs reserved	    */
892 	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
893 	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
894 	struct list_head list, duplicates;  /* BO lists			    */
895 	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
896 	bool reserved;			    /* Whether BOs are reserved	    */
897 };
898 
899 enum bo_vm_match {
900 	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
901 	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
902 	BO_VM_ALL,		/* Match all VMs a BO was added to    */
903 };
904 
905 /**
906  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
907  * @mem: KFD BO structure.
908  * @vm: the VM to reserve.
909  * @ctx: the struct that will be used in unreserve_bo_and_vms().
910  */
911 static int reserve_bo_and_vm(struct kgd_mem *mem,
912 			      struct amdgpu_vm *vm,
913 			      struct bo_vm_reservation_context *ctx)
914 {
915 	struct amdgpu_bo *bo = mem->bo;
916 	int ret;
917 
918 	WARN_ON(!vm);
919 
920 	ctx->reserved = false;
921 	ctx->n_vms = 1;
922 	ctx->sync = &mem->sync;
923 
924 	INIT_LIST_HEAD(&ctx->list);
925 	INIT_LIST_HEAD(&ctx->duplicates);
926 
927 	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
928 	if (!ctx->vm_pd)
929 		return -ENOMEM;
930 
931 	ctx->kfd_bo.priority = 0;
932 	ctx->kfd_bo.tv.bo = &bo->tbo;
933 	ctx->kfd_bo.tv.num_shared = 1;
934 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
935 
936 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
937 
938 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
939 				     false, &ctx->duplicates);
940 	if (ret) {
941 		pr_err("Failed to reserve buffers in ttm.\n");
942 		kfree(ctx->vm_pd);
943 		ctx->vm_pd = NULL;
944 		return ret;
945 	}
946 
947 	ctx->reserved = true;
948 	return 0;
949 }
950 
951 /**
952  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
953  * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
 * reserved. Otherwise, only the given VM is reserved.
956  * @map_type: the mapping status that will be used to filter the VMs.
957  * @ctx: the struct that will be used in unreserve_bo_and_vms().
958  *
959  * Returns 0 for success, negative for failure.
960  */
961 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
962 				struct amdgpu_vm *vm, enum bo_vm_match map_type,
963 				struct bo_vm_reservation_context *ctx)
964 {
965 	struct amdgpu_bo *bo = mem->bo;
966 	struct kfd_mem_attachment *entry;
967 	unsigned int i;
968 	int ret;
969 
970 	ctx->reserved = false;
971 	ctx->n_vms = 0;
972 	ctx->vm_pd = NULL;
973 	ctx->sync = &mem->sync;
974 
975 	INIT_LIST_HEAD(&ctx->list);
976 	INIT_LIST_HEAD(&ctx->duplicates);
977 
978 	list_for_each_entry(entry, &mem->attachments, list) {
979 		if ((vm && vm != entry->bo_va->base.vm) ||
980 			(entry->is_mapped != map_type
981 			&& map_type != BO_VM_ALL))
982 			continue;
983 
984 		ctx->n_vms++;
985 	}
986 
987 	if (ctx->n_vms != 0) {
988 		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
989 				     GFP_KERNEL);
990 		if (!ctx->vm_pd)
991 			return -ENOMEM;
992 	}
993 
994 	ctx->kfd_bo.priority = 0;
995 	ctx->kfd_bo.tv.bo = &bo->tbo;
996 	ctx->kfd_bo.tv.num_shared = 1;
997 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
998 
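	/* Second pass: add the PD BOs of every matching VM to the
	 * reservation list.
	 */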
999 	i = 0;
1000 	list_for_each_entry(entry, &mem->attachments, list) {
1001 		if ((vm && vm != entry->bo_va->base.vm) ||
1002 			(entry->is_mapped != map_type
1003 			&& map_type != BO_VM_ALL))
1004 			continue;
1005 
1006 		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
1007 				&ctx->vm_pd[i]);
1008 		i++;
1009 	}
1010 
1011 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1012 				     false, &ctx->duplicates);
1013 	if (ret) {
1014 		pr_err("Failed to reserve buffers in ttm.\n");
1015 		kfree(ctx->vm_pd);
1016 		ctx->vm_pd = NULL;
1017 		return ret;
1018 	}
1019 
1020 	ctx->reserved = true;
1021 	return 0;
1022 }
1023 
1024 /**
1025  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1026  * @ctx: Reservation context to unreserve
1027  * @wait: Optionally wait for a sync object representing pending VM updates
1028  * @intr: Whether the wait is interruptible
1029  *
1030  * Also frees any resources allocated in
1031  * reserve_bo_and_(cond_)vm(s). Returns the status from
1032  * amdgpu_sync_wait.
1033  */
1034 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1035 				 bool wait, bool intr)
1036 {
1037 	int ret = 0;
1038 
1039 	if (wait)
1040 		ret = amdgpu_sync_wait(ctx->sync, intr);
1041 
1042 	if (ctx->reserved)
1043 		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1044 	kfree(ctx->vm_pd);
1045 
1046 	ctx->sync = NULL;
1047 
1048 	ctx->reserved = false;
1049 	ctx->vm_pd = NULL;
1050 
1051 	return ret;
1052 }
1053 
1054 static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1055 				struct kfd_mem_attachment *entry,
1056 				struct amdgpu_sync *sync)
1057 {
1058 	struct amdgpu_bo_va *bo_va = entry->bo_va;
1059 	struct amdgpu_device *adev = entry->adev;
1060 	struct amdgpu_vm *vm = bo_va->base.vm;
1061 
1062 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1063 
1064 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1065 
1066 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
1067 
1068 	kfd_mem_dmaunmap_attachment(mem, entry);
1069 }
1070 
1071 static int update_gpuvm_pte(struct kgd_mem *mem,
1072 			    struct kfd_mem_attachment *entry,
1073 			    struct amdgpu_sync *sync,
1074 			    bool *table_freed)
1075 {
1076 	struct amdgpu_bo_va *bo_va = entry->bo_va;
1077 	struct amdgpu_device *adev = entry->adev;
1078 	int ret;
1079 
1080 	ret = kfd_mem_dmamap_attachment(mem, entry);
1081 	if (ret)
1082 		return ret;
1083 
1084 	/* Update the page tables  */
1085 	ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
1086 	if (ret) {
1087 		pr_err("amdgpu_vm_bo_update failed\n");
1088 		return ret;
1089 	}
1090 
1091 	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1092 }
1093 
1094 static int map_bo_to_gpuvm(struct kgd_mem *mem,
1095 			   struct kfd_mem_attachment *entry,
1096 			   struct amdgpu_sync *sync,
1097 			   bool no_update_pte,
1098 			   bool *table_freed)
1099 {
1100 	int ret;
1101 
1102 	/* Set virtual address for the allocation */
1103 	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1104 			       amdgpu_bo_size(entry->bo_va->base.bo),
1105 			       entry->pte_flags);
1106 	if (ret) {
1107 		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1108 				entry->va, ret);
1109 		return ret;
1110 	}
1111 
1112 	if (no_update_pte)
1113 		return 0;
1114 
1115 	ret = update_gpuvm_pte(mem, entry, sync, table_freed);
1116 	if (ret) {
1117 		pr_err("update_gpuvm_pte() failed\n");
1118 		goto update_gpuvm_pte_failed;
1119 	}
1120 
1121 	return 0;
1122 
1123 update_gpuvm_pte_failed:
1124 	unmap_bo_from_gpuvm(mem, entry, sync);
1125 	return ret;
1126 }
1127 
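/* Build a single-entry sg_table whose dma_address points directly at the
 * given doorbell/MMIO address; there are no backing struct pages.
 */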
1128 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
1129 {
1130 	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1131 
1132 	if (!sg)
1133 		return NULL;
1134 	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
1135 		kfree(sg);
1136 		return NULL;
1137 	}
1138 	sg->sgl->dma_address = addr;
1139 	sg->sgl->length = size;
1140 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1141 	sg->sgl->dma_length = size;
1142 #endif
1143 	return sg;
1144 }
1145 
1146 static int process_validate_vms(struct amdkfd_process_info *process_info)
1147 {
1148 	struct amdgpu_vm *peer_vm;
1149 	int ret;
1150 
1151 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1152 			    vm_list_node) {
1153 		ret = vm_validate_pt_pd_bos(peer_vm);
1154 		if (ret)
1155 			return ret;
1156 	}
1157 
1158 	return 0;
1159 }
1160 
1161 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1162 				 struct amdgpu_sync *sync)
1163 {
1164 	struct amdgpu_vm *peer_vm;
1165 	int ret;
1166 
1167 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1168 			    vm_list_node) {
1169 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
1170 
1171 		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1172 				       AMDGPU_SYNC_NE_OWNER,
1173 				       AMDGPU_FENCE_OWNER_KFD);
1174 		if (ret)
1175 			return ret;
1176 	}
1177 
1178 	return 0;
1179 }
1180 
1181 static int process_update_pds(struct amdkfd_process_info *process_info,
1182 			      struct amdgpu_sync *sync)
1183 {
1184 	struct amdgpu_vm *peer_vm;
1185 	int ret;
1186 
1187 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1188 			    vm_list_node) {
1189 		ret = vm_update_pds(peer_vm, sync);
1190 		if (ret)
1191 			return ret;
1192 	}
1193 
1194 	return 0;
1195 }
1196 
1197 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1198 		       struct dma_fence **ef)
1199 {
1200 	struct amdkfd_process_info *info = NULL;
1201 	int ret;
1202 
1203 	if (!*process_info) {
1204 		info = kzalloc(sizeof(*info), GFP_KERNEL);
1205 		if (!info)
1206 			return -ENOMEM;
1207 
1208 		mutex_init(&info->lock);
1209 		INIT_LIST_HEAD(&info->vm_list_head);
1210 		INIT_LIST_HEAD(&info->kfd_bo_list);
1211 		INIT_LIST_HEAD(&info->userptr_valid_list);
1212 		INIT_LIST_HEAD(&info->userptr_inval_list);
1213 
1214 		info->eviction_fence =
1215 			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1216 						   current->mm,
1217 						   NULL);
1218 		if (!info->eviction_fence) {
1219 			pr_err("Failed to create eviction fence\n");
1220 			ret = -ENOMEM;
1221 			goto create_evict_fence_fail;
1222 		}
1223 
1224 		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1225 		atomic_set(&info->evicted_bos, 0);
1226 		INIT_DELAYED_WORK(&info->restore_userptr_work,
1227 				  amdgpu_amdkfd_restore_userptr_worker);
1228 
1229 		*process_info = info;
1230 		*ef = dma_fence_get(&info->eviction_fence->base);
1231 	}
1232 
1233 	vm->process_info = *process_info;
1234 
1235 	/* Validate page directory and attach eviction fence */
1236 	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
1237 	if (ret)
1238 		goto reserve_pd_fail;
1239 	ret = vm_validate_pt_pd_bos(vm);
1240 	if (ret) {
1241 		pr_err("validate_pt_pd_bos() failed\n");
1242 		goto validate_pd_fail;
1243 	}
1244 	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
1245 				  AMDGPU_FENCE_OWNER_KFD, false);
1246 	if (ret)
1247 		goto wait_pd_fail;
1248 	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
1249 	if (ret)
1250 		goto reserve_shared_fail;
1251 	amdgpu_bo_fence(vm->root.base.bo,
1252 			&vm->process_info->eviction_fence->base, true);
1253 	amdgpu_bo_unreserve(vm->root.base.bo);
1254 
1255 	/* Update process info */
1256 	mutex_lock(&vm->process_info->lock);
1257 	list_add_tail(&vm->vm_list_node,
1258 			&(vm->process_info->vm_list_head));
1259 	vm->process_info->n_vms++;
1260 	mutex_unlock(&vm->process_info->lock);
1261 
1262 	return 0;
1263 
1264 reserve_shared_fail:
1265 wait_pd_fail:
1266 validate_pd_fail:
1267 	amdgpu_bo_unreserve(vm->root.base.bo);
1268 reserve_pd_fail:
1269 	vm->process_info = NULL;
1270 	if (info) {
1271 		/* Two fence references: one in info and one in *ef */
1272 		dma_fence_put(&info->eviction_fence->base);
1273 		dma_fence_put(*ef);
1274 		*ef = NULL;
1275 		*process_info = NULL;
1276 		put_pid(info->pid);
1277 create_evict_fence_fail:
1278 		mutex_destroy(&info->lock);
1279 		kfree(info);
1280 	}
1281 	return ret;
1282 }
1283 
1284 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1285 					   struct file *filp, u32 pasid,
1286 					   void **process_info,
1287 					   struct dma_fence **ef)
1288 {
1289 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1290 	struct amdgpu_fpriv *drv_priv;
1291 	struct amdgpu_vm *avm;
1292 	int ret;
1293 
1294 	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1295 	if (ret)
1296 		return ret;
1297 	avm = &drv_priv->vm;
1298 
1299 	/* Already a compute VM? */
1300 	if (avm->process_info)
1301 		return -EINVAL;
1302 
1303 	/* Convert VM into a compute VM */
1304 	ret = amdgpu_vm_make_compute(adev, avm, pasid);
1305 	if (ret)
1306 		return ret;
1307 
1308 	/* Initialize KFD part of the VM and process info */
1309 	ret = init_kfd_vm(avm, process_info, ef);
1310 	if (ret)
1311 		return ret;
1312 
1313 	amdgpu_vm_set_task_info(avm);
1314 
1315 	return 0;
1316 }
1317 
1318 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1319 				    struct amdgpu_vm *vm)
1320 {
1321 	struct amdkfd_process_info *process_info = vm->process_info;
1322 	struct amdgpu_bo *pd = vm->root.base.bo;
1323 
1324 	if (!process_info)
1325 		return;
1326 
1327 	/* Release eviction fence from PD */
1328 	amdgpu_bo_reserve(pd, false);
1329 	amdgpu_bo_fence(pd, NULL, false);
1330 	amdgpu_bo_unreserve(pd);
1331 
1332 	/* Update process info */
1333 	mutex_lock(&process_info->lock);
1334 	process_info->n_vms--;
1335 	list_del(&vm->vm_list_node);
1336 	mutex_unlock(&process_info->lock);
1337 
1338 	vm->process_info = NULL;
1339 
1340 	/* Release per-process resources when last compute VM is destroyed */
1341 	if (!process_info->n_vms) {
1342 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
1343 		WARN_ON(!list_empty(&process_info->userptr_valid_list));
1344 		WARN_ON(!list_empty(&process_info->userptr_inval_list));
1345 
1346 		dma_fence_put(&process_info->eviction_fence->base);
1347 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
1348 		put_pid(process_info->pid);
1349 		mutex_destroy(&process_info->lock);
1350 		kfree(process_info);
1351 	}
1352 }
1353 
1354 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
1355 {
1356 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1357 	struct amdgpu_vm *avm;
1358 
1359 	if (WARN_ON(!kgd || !drm_priv))
1360 		return;
1361 
1362 	avm = drm_priv_to_vm(drm_priv);
1363 
1364 	pr_debug("Releasing process vm %p\n", avm);
1365 
	/* The original pasid of the amdgpu vm has already been
	 * released during the conversion to a compute VM.
	 * The current pasid is managed by KFD and will be
	 * released on KFD process destruction. Set the amdgpu
	 * pasid to 0 to avoid a duplicate release.
	 */
1372 	amdgpu_vm_release_compute(adev, avm);
1373 }
1374 
1375 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1376 {
1377 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1378 	struct amdgpu_bo *pd = avm->root.base.bo;
1379 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1380 
1381 	if (adev->asic_type < CHIP_VEGA10)
1382 		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1383 	return avm->pd_phys_addr;
1384 }
1385 
1386 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1387 		struct kgd_dev *kgd, uint64_t va, uint64_t size,
1388 		void *drm_priv, struct kgd_mem **mem,
1389 		uint64_t *offset, uint32_t flags)
1390 {
1391 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1392 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1393 	enum ttm_bo_type bo_type = ttm_bo_type_device;
1394 	struct sg_table *sg = NULL;
1395 	uint64_t user_addr = 0;
1396 	struct amdgpu_bo *bo;
1397 	struct drm_gem_object *gobj;
1398 	u32 domain, alloc_domain;
1399 	u64 alloc_flags;
1400 	int ret;
1401 
1402 	/*
1403 	 * Check on which domain to allocate BO
1404 	 */
1405 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1406 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1407 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1408 		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1409 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1410 			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1411 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1412 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1413 		alloc_flags = 0;
1414 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1415 		domain = AMDGPU_GEM_DOMAIN_GTT;
1416 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
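		/* Userptr BOs are allocated in the CPU domain and only
		 * migrated to GTT once the user pages are available; see
		 * init_user_pages().
		 */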
1417 		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1418 		if (!offset || !*offset)
1419 			return -EINVAL;
1420 		user_addr = untagged_addr(*offset);
1421 	} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1422 			KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1423 		domain = AMDGPU_GEM_DOMAIN_GTT;
1424 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1425 		bo_type = ttm_bo_type_sg;
1426 		alloc_flags = 0;
1427 		if (size > UINT_MAX)
1428 			return -EINVAL;
1429 		sg = create_doorbell_sg(*offset, size);
1430 		if (!sg)
1431 			return -ENOMEM;
1432 	} else {
1433 		return -EINVAL;
1434 	}
1435 
1436 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1437 	if (!*mem) {
1438 		ret = -ENOMEM;
1439 		goto err;
1440 	}
1441 	INIT_LIST_HEAD(&(*mem)->attachments);
1442 	mutex_init(&(*mem)->lock);
1443 	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1444 
1445 	/* Workaround for AQL queue wraparound bug. Map the same
1446 	 * memory twice. That means we only actually allocate half
1447 	 * the memory.
1448 	 */
1449 	if ((*mem)->aql_queue)
1450 		size = size >> 1;
1451 
1452 	(*mem)->alloc_flags = flags;
1453 
1454 	amdgpu_sync_create(&(*mem)->sync);
1455 
1456 	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1457 	if (ret) {
1458 		pr_debug("Insufficient memory\n");
1459 		goto err_reserve_limit;
1460 	}
1461 
1462 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1463 			va, size, domain_string(alloc_domain));
1464 
1465 	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1466 				       bo_type, NULL, &gobj);
1467 	if (ret) {
1468 		pr_debug("Failed to create BO on domain %s. ret %d\n",
1469 			 domain_string(alloc_domain), ret);
1470 		goto err_bo_create;
1471 	}
1472 	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1473 	if (ret) {
1474 		pr_debug("Failed to allow vma node access. ret %d\n", ret);
1475 		goto err_node_allow;
1476 	}
1477 	bo = gem_to_amdgpu_bo(gobj);
1478 	if (bo_type == ttm_bo_type_sg) {
1479 		bo->tbo.sg = sg;
1480 		bo->tbo.ttm->sg = sg;
1481 	}
1482 	bo->kfd_bo = *mem;
1483 	(*mem)->bo = bo;
1484 	if (user_addr)
1485 		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1486 
1487 	(*mem)->va = va;
1488 	(*mem)->domain = domain;
1489 	(*mem)->mapped_to_gpu_memory = 0;
1490 	(*mem)->process_info = avm->process_info;
1491 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1492 
1493 	if (user_addr) {
1494 		ret = init_user_pages(*mem, user_addr);
1495 		if (ret)
1496 			goto allocate_init_user_pages_failed;
1497 	}
1498 
1499 	if (offset)
1500 		*offset = amdgpu_bo_mmap_offset(bo);
1501 
1502 	return 0;
1503 
1504 allocate_init_user_pages_failed:
1505 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1506 	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1507 err_node_allow:
1508 	amdgpu_bo_unref(&bo);
1509 	/* Don't unreserve system mem limit twice */
1510 	goto err_reserve_limit;
1511 err_bo_create:
1512 	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1513 err_reserve_limit:
1514 	mutex_destroy(&(*mem)->lock);
1515 	kfree(*mem);
1516 err:
1517 	if (sg) {
1518 		sg_free_table(sg);
1519 		kfree(sg);
1520 	}
1521 	return ret;
1522 }
1523 
1524 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1525 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
1526 		uint64_t *size)
1527 {
1528 	struct amdkfd_process_info *process_info = mem->process_info;
1529 	unsigned long bo_size = mem->bo->tbo.base.size;
1530 	struct kfd_mem_attachment *entry, *tmp;
1531 	struct bo_vm_reservation_context ctx;
1532 	struct ttm_validate_buffer *bo_list_entry;
1533 	unsigned int mapped_to_gpu_memory;
1534 	int ret;
1535 	bool is_imported = false;
1536 
1537 	mutex_lock(&mem->lock);
1538 	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1539 	is_imported = mem->is_imported;
1540 	mutex_unlock(&mem->lock);
1541 	/* lock is not needed after this, since mem is unused and will
1542 	 * be freed anyway
1543 	 */
1544 
1545 	if (mapped_to_gpu_memory > 0) {
1546 		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1547 				mem->va, bo_size);
1548 		return -EBUSY;
1549 	}
1550 
1551 	/* Make sure restore workers don't access the BO any more */
1552 	bo_list_entry = &mem->validate_list;
1553 	mutex_lock(&process_info->lock);
1554 	list_del(&bo_list_entry->head);
1555 	mutex_unlock(&process_info->lock);
1556 
1557 	/* No more MMU notifiers */
1558 	amdgpu_mn_unregister(mem->bo);
1559 
1560 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1561 	if (unlikely(ret))
1562 		return ret;
1563 
1564 	/* The eviction fence should be removed by the last unmap.
1565 	 * TODO: Log an error condition if the bo still has the eviction fence
1566 	 * attached
1567 	 */
1568 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1569 					process_info->eviction_fence);
1570 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1571 		mem->va + bo_size * (1 + mem->aql_queue));
1572 
1573 	ret = unreserve_bo_and_vms(&ctx, false, false);
1574 
1575 	/* Remove from VM internal data structures */
1576 	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1577 		kfd_mem_detach(entry);
1578 
1579 	/* Free the sync object */
1580 	amdgpu_sync_free(&mem->sync);
1581 
1582 	/* If the SG is not NULL, it's one we created for a doorbell or mmio
1583 	 * remap BO. We need to free it.
1584 	 */
1585 	if (mem->bo->tbo.sg) {
1586 		sg_free_table(mem->bo->tbo.sg);
1587 		kfree(mem->bo->tbo.sg);
1588 	}
1589 
1590 	/* Update the size of the BO being freed if it was allocated from
1591 	 * VRAM and is not imported.
1592 	 */
1593 	if (size) {
1594 		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1595 		    (!is_imported))
1596 			*size = bo_size;
1597 		else
1598 			*size = 0;
1599 	}
1600 
	/* Free the BO */
1602 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1603 	if (mem->dmabuf)
1604 		dma_buf_put(mem->dmabuf);
1605 	drm_gem_object_put(&mem->bo->tbo.base);
1606 	mutex_destroy(&mem->lock);
1607 	kfree(mem);
1608 
1609 	return ret;
1610 }
1611 
1612 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1613 		struct kgd_dev *kgd, struct kgd_mem *mem,
1614 		void *drm_priv, bool *table_freed)
1615 {
1616 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
1617 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1618 	int ret;
1619 	struct amdgpu_bo *bo;
1620 	uint32_t domain;
1621 	struct kfd_mem_attachment *entry;
1622 	struct bo_vm_reservation_context ctx;
1623 	unsigned long bo_size;
1624 	bool is_invalid_userptr = false;
1625 
1626 	bo = mem->bo;
1627 	if (!bo) {
1628 		pr_err("Invalid BO when mapping memory to GPU\n");
1629 		return -EINVAL;
1630 	}
1631 
1632 	/* Make sure restore is not running concurrently. Since we
1633 	 * don't map invalid userptr BOs, we rely on the next restore
1634 	 * worker to do the mapping
1635 	 */
1636 	mutex_lock(&mem->process_info->lock);
1637 
1638 	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
1639 	 * sure that the MMU notifier is no longer running
1640 	 * concurrently and the queues are actually stopped
1641 	 */
1642 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1643 		mmap_write_lock(current->mm);
1644 		is_invalid_userptr = atomic_read(&mem->invalid);
1645 		mmap_write_unlock(current->mm);
1646 	}
1647 
1648 	mutex_lock(&mem->lock);
1649 
1650 	domain = mem->domain;
1651 	bo_size = bo->tbo.base.size;
1652 
1653 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1654 			mem->va,
1655 			mem->va + bo_size * (1 + mem->aql_queue),
1656 			avm, domain_string(domain));
1657 
1658 	if (!kfd_mem_is_attached(avm, mem)) {
1659 		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1660 		if (ret)
1661 			goto out;
1662 	}
1663 
1664 	ret = reserve_bo_and_vm(mem, avm, &ctx);
1665 	if (unlikely(ret))
1666 		goto out;
1667 
1668 	/* Userptr can be marked as "not invalid", but not actually be
1669 	 * validated yet (still in the system domain). In that case
1670 	 * the queues are still stopped and we can leave mapping for
1671 	 * the next restore worker
1672 	 */
1673 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1674 	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1675 		is_invalid_userptr = true;
1676 
1677 	ret = vm_validate_pt_pd_bos(avm);
1678 	if (unlikely(ret))
1679 		goto out_unreserve;
1680 
1681 	if (mem->mapped_to_gpu_memory == 0 &&
1682 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1683 		/* Validate BO only once. The eviction fence gets added to BO
1684 		 * the first time it is mapped. Validate will wait for all
1685 		 * background evictions to complete.
1686 		 */
1687 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1688 		if (ret) {
1689 			pr_debug("Validate failed\n");
1690 			goto out_unreserve;
1691 		}
1692 	}
1693 
1694 	list_for_each_entry(entry, &mem->attachments, list) {
1695 		if (entry->bo_va->base.vm != avm || entry->is_mapped)
1696 			continue;
1697 
1698 		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1699 			 entry->va, entry->va + bo_size, entry);
1700 
1701 		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1702 				      is_invalid_userptr, table_freed);
1703 		if (ret) {
1704 			pr_err("Failed to map bo to gpuvm\n");
1705 			goto out_unreserve;
1706 		}
1707 
1708 		ret = vm_update_pds(avm, ctx.sync);
1709 		if (ret) {
1710 			pr_err("Failed to update page directories\n");
1711 			goto out_unreserve;
1712 		}
1713 
1714 		entry->is_mapped = true;
1715 		mem->mapped_to_gpu_memory++;
1716 		pr_debug("\t INC mapping count %d\n",
1717 			 mem->mapped_to_gpu_memory);
1718 	}
1719 
1720 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1721 		amdgpu_bo_fence(bo,
1722 				&avm->process_info->eviction_fence->base,
1723 				true);
1724 	ret = unreserve_bo_and_vms(&ctx, false, false);
1725 
1726 	goto out;
1727 
1728 out_unreserve:
1729 	unreserve_bo_and_vms(&ctx, false, false);
1730 out:
1731 	mutex_unlock(&mem->process_info->lock);
1732 	mutex_unlock(&mem->lock);
1733 	return ret;
1734 }
1735 
1736 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1737 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1738 {
1739 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1740 	struct amdkfd_process_info *process_info = avm->process_info;
1741 	unsigned long bo_size = mem->bo->tbo.base.size;
1742 	struct kfd_mem_attachment *entry;
1743 	struct bo_vm_reservation_context ctx;
1744 	int ret;
1745 
1746 	mutex_lock(&mem->lock);
1747 
1748 	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1749 	if (unlikely(ret))
1750 		goto out;
1751 	/* If no VMs were reserved, it means the BO wasn't actually mapped */
1752 	if (ctx.n_vms == 0) {
1753 		ret = -EINVAL;
1754 		goto unreserve_out;
1755 	}
1756 
1757 	ret = vm_validate_pt_pd_bos(avm);
1758 	if (unlikely(ret))
1759 		goto unreserve_out;
1760 
1761 	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1762 		mem->va,
1763 		mem->va + bo_size * (1 + mem->aql_queue),
1764 		avm);
1765 
1766 	list_for_each_entry(entry, &mem->attachments, list) {
1767 		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
1768 			continue;
1769 
1770 		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1771 			 entry->va, entry->va + bo_size, entry);
1772 
1773 		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1774 		entry->is_mapped = false;
1775 
1776 		mem->mapped_to_gpu_memory--;
1777 		pr_debug("\t DEC mapping count %d\n",
1778 			 mem->mapped_to_gpu_memory);
1779 	}
1780 
1781 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
1782 	 * required.
1783 	 */
1784 	if (mem->mapped_to_gpu_memory == 0 &&
1785 	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1786 	    !mem->bo->tbo.pin_count)
1787 		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1788 						process_info->eviction_fence);
1789 
1790 unreserve_out:
1791 	unreserve_bo_and_vms(&ctx, false, false);
1792 out:
1793 	mutex_unlock(&mem->lock);
1794 	return ret;
1795 }
1796 
1797 int amdgpu_amdkfd_gpuvm_sync_memory(
1798 		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1799 {
1800 	struct amdgpu_sync sync;
1801 	int ret;
1802 
1803 	amdgpu_sync_create(&sync);
1804 
1805 	mutex_lock(&mem->lock);
1806 	amdgpu_sync_clone(&mem->sync, &sync);
1807 	mutex_unlock(&mem->lock);
1808 
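	/* Wait on a clone taken under the lock so that mem->lock is not
	 * held across the potentially blocking wait.
	 */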
1809 	ret = amdgpu_sync_wait(&sync, intr);
1810 	amdgpu_sync_free(&sync);
1811 	return ret;
1812 }
1813 
1814 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1815 		struct kgd_mem *mem, void **kptr, uint64_t *size)
1816 {
1817 	int ret;
1818 	struct amdgpu_bo *bo = mem->bo;
1819 
1820 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1821 		pr_err("userptr can't be mapped to kernel\n");
1822 		return -EINVAL;
1823 	}
1824 
	/* Remove kgd_mem from the kfd_bo_list so this BO is not re-validated
	 * by the restore worker after an eviction.
	 */
1828 	mutex_lock(&mem->process_info->lock);
1829 
1830 	ret = amdgpu_bo_reserve(bo, true);
1831 	if (ret) {
1832 		pr_err("Failed to reserve bo. ret %d\n", ret);
1833 		goto bo_reserve_failed;
1834 	}
1835 
1836 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1837 	if (ret) {
1838 		pr_err("Failed to pin bo. ret %d\n", ret);
1839 		goto pin_failed;
1840 	}
1841 
1842 	ret = amdgpu_bo_kmap(bo, kptr);
1843 	if (ret) {
1844 		pr_err("Failed to map bo to kernel. ret %d\n", ret);
1845 		goto kmap_failed;
1846 	}
1847 
1848 	amdgpu_amdkfd_remove_eviction_fence(
1849 		bo, mem->process_info->eviction_fence);
1850 	list_del_init(&mem->validate_list.head);
1851 
1852 	if (size)
1853 		*size = amdgpu_bo_size(bo);
1854 
1855 	amdgpu_bo_unreserve(bo);
1856 
1857 	mutex_unlock(&mem->process_info->lock);
1858 	return 0;
1859 
1860 kmap_failed:
1861 	amdgpu_bo_unpin(bo);
1862 pin_failed:
1863 	amdgpu_bo_unreserve(bo);
1864 bo_reserve_failed:
1865 	mutex_unlock(&mem->process_info->lock);
1866 
1867 	return ret;
1868 }
1869 
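/* Copy the most recent VM fault information for this device to the
 * caller and clear the updated flag, so each fault is only reported
 * once.
 */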
1870 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1871 					      struct kfd_vm_fault_info *mem)
1872 {
1873 	struct amdgpu_device *adev;
1874 
1875 	adev = (struct amdgpu_device *)kgd;
1876 	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1877 		*mem = *adev->gmc.vm_fault_info;
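		/* Make sure the fault info is completely read before
		 * clearing the updated flag, which allows the info to
		 * be overwritten by the next fault.
		 */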
1878 		mb();
1879 		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1880 	}
1881 	return 0;
1882 }
1883 
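/* Import a dma-buf that was exported by amdgpu for the same device and
 * wrap it in a kgd_mem so KFD can map it. Only VRAM and GTT BOs from
 * this device are accepted; the imported BO starts out unmapped.
 */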
1884 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1885 				      struct dma_buf *dma_buf,
1886 				      uint64_t va, void *drm_priv,
1887 				      struct kgd_mem **mem, uint64_t *size,
1888 				      uint64_t *mmap_offset)
1889 {
1890 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1891 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1892 	struct drm_gem_object *obj;
1893 	struct amdgpu_bo *bo;
1894 	int ret;
1895 
1896 	if (dma_buf->ops != &amdgpu_dmabuf_ops)
1897 		/* Can't handle non-graphics buffers */
1898 		return -EINVAL;
1899 
1900 	obj = dma_buf->priv;
1901 	if (drm_to_adev(obj->dev) != adev)
1902 		/* Can't handle buffers from other devices */
1903 		return -EINVAL;
1904 
1905 	bo = gem_to_amdgpu_bo(obj);
1906 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1907 				    AMDGPU_GEM_DOMAIN_GTT)))
1908 		/* Only VRAM and GTT BOs are supported */
1909 		return -EINVAL;
1910 
1911 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1912 	if (!*mem)
1913 		return -ENOMEM;
1914 
1915 	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
1916 	if (ret) {
		kfree(*mem);
		*mem = NULL;
1918 		return ret;
1919 	}
1920 
1921 	if (size)
1922 		*size = amdgpu_bo_size(bo);
1923 
1924 	if (mmap_offset)
1925 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
1926 
1927 	INIT_LIST_HEAD(&(*mem)->attachments);
1928 	mutex_init(&(*mem)->lock);
1929 
1930 	(*mem)->alloc_flags =
1931 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1932 		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1933 		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1934 		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1935 
1936 	drm_gem_object_get(&bo->tbo.base);
1937 	(*mem)->bo = bo;
1938 	(*mem)->va = va;
1939 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1940 		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1941 	(*mem)->mapped_to_gpu_memory = 0;
1942 	(*mem)->process_info = avm->process_info;
1943 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1944 	amdgpu_sync_create(&(*mem)->sync);
1945 	(*mem)->is_imported = true;
1946 
1947 	return 0;
1948 }
1949 
1950 /* Evict a userptr BO by stopping the queues if necessary
1951  *
1952  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1953  * cannot do any memory allocations, and cannot take any locks that
1954  * are held elsewhere while allocating memory. Therefore this is as
1955  * simple as possible, using atomic counters.
1956  *
1957  * It doesn't do anything to the BO itself. The real work happens in
1958  * restore, where we get updated page addresses. This function only
1959  * ensures that GPU access to the BO is stopped.
1960  */
1961 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1962 				struct mm_struct *mm)
1963 {
1964 	struct amdkfd_process_info *process_info = mem->process_info;
1965 	int evicted_bos;
1966 	int r = 0;
1967 
1968 	atomic_inc(&mem->invalid);
1969 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1970 	if (evicted_bos == 1) {
1971 		/* First eviction, stop the queues */
1972 		r = kgd2kfd_quiesce_mm(mm);
1973 		if (r)
1974 			pr_err("Failed to quiesce KFD\n");
1975 		schedule_delayed_work(&process_info->restore_userptr_work,
1976 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1977 	}
1978 
1979 	return r;
1980 }
1981 
1982 /* Update invalid userptr BOs
1983  *
1984  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1985  * userptr_inval_list and updates user pages for all BOs that have
1986  * been invalidated since their last update.
1987  */
1988 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1989 				     struct mm_struct *mm)
1990 {
1991 	struct kgd_mem *mem, *tmp_mem;
1992 	struct amdgpu_bo *bo;
1993 	struct ttm_operation_ctx ctx = { false, false };
1994 	int invalid, ret;
1995 
1996 	/* Move all invalidated BOs to the userptr_inval_list and
1997 	 * release their user pages by migration to the CPU domain
1998 	 */
1999 	list_for_each_entry_safe(mem, tmp_mem,
2000 				 &process_info->userptr_valid_list,
2001 				 validate_list.head) {
2002 		if (!atomic_read(&mem->invalid))
2003 			continue; /* BO is still valid */
2004 
2005 		bo = mem->bo;
2006 
2007 		if (amdgpu_bo_reserve(bo, true))
2008 			return -EAGAIN;
2009 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2010 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2011 		amdgpu_bo_unreserve(bo);
2012 		if (ret) {
2013 			pr_err("%s: Failed to invalidate userptr BO\n",
2014 			       __func__);
2015 			return -EAGAIN;
2016 		}
2017 
2018 		list_move_tail(&mem->validate_list.head,
2019 			       &process_info->userptr_inval_list);
2020 	}
2021 
2022 	if (list_empty(&process_info->userptr_inval_list))
2023 		return 0; /* All evicted userptr BOs were freed */
2024 
2025 	/* Go through userptr_inval_list and update any invalid user_pages */
2026 	list_for_each_entry(mem, &process_info->userptr_inval_list,
2027 			    validate_list.head) {
2028 		invalid = atomic_read(&mem->invalid);
2029 		if (!invalid)
2030 			/* BO hasn't been invalidated since the last
2031 			 * revalidation attempt. Keep its BO list.
2032 			 */
2033 			continue;
2034 
2035 		bo = mem->bo;
2036 
2037 		/* Get updated user pages */
2038 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2039 		if (ret) {
2040 			pr_debug("%s: Failed to get user pages: %d\n",
2041 				__func__, ret);
2042 
2043 			/* Return error -EBUSY or -ENOMEM, retry restore */
2044 			return ret;
2045 		}
2046 
2047 		/*
2048 		 * FIXME: Cannot ignore the return code, must hold
2049 		 * notifier_lock
2050 		 */
2051 		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2052 
2053 		/* Mark the BO as valid unless it was invalidated
2054 		 * again concurrently.
2055 		 */
2056 		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2057 			return -EAGAIN;
2058 	}
2059 
2060 	return 0;
2061 }
2062 
2063 /* Validate invalid userptr BOs
2064  *
2065  * Validates BOs on the userptr_inval_list, and moves them back to the
2066  * userptr_valid_list. Also updates GPUVM page tables with new page
2067  * addresses and waits for the page table updates to complete.
2068  */
2069 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2070 {
2071 	struct amdgpu_bo_list_entry *pd_bo_list_entries;
2072 	struct list_head resv_list, duplicates;
2073 	struct ww_acquire_ctx ticket;
2074 	struct amdgpu_sync sync;
2075 
2076 	struct amdgpu_vm *peer_vm;
2077 	struct kgd_mem *mem, *tmp_mem;
2078 	struct amdgpu_bo *bo;
2079 	struct ttm_operation_ctx ctx = { false, false };
2080 	int i, ret;
2081 
2082 	pd_bo_list_entries = kcalloc(process_info->n_vms,
2083 				     sizeof(struct amdgpu_bo_list_entry),
2084 				     GFP_KERNEL);
2085 	if (!pd_bo_list_entries) {
2086 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2087 		ret = -ENOMEM;
2088 		goto out_no_mem;
2089 	}
2090 
2091 	INIT_LIST_HEAD(&resv_list);
2092 	INIT_LIST_HEAD(&duplicates);
2093 
2094 	/* Get all the page directory BOs that need to be reserved */
2095 	i = 0;
2096 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2097 			    vm_list_node)
2098 		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2099 				    &pd_bo_list_entries[i++]);
2100 	/* Add the userptr_inval_list entries to resv_list */
2101 	list_for_each_entry(mem, &process_info->userptr_inval_list,
2102 			    validate_list.head) {
2103 		list_add_tail(&mem->resv_list.head, &resv_list);
2104 		mem->resv_list.bo = mem->validate_list.bo;
2105 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2106 	}
2107 
2108 	/* Reserve all BOs and page tables for validation */
2109 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2110 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
2111 	if (ret)
2112 		goto out_free;
2113 
2114 	amdgpu_sync_create(&sync);
2115 
2116 	ret = process_validate_vms(process_info);
2117 	if (ret)
2118 		goto unreserve_out;
2119 
2120 	/* Validate BOs and update GPUVM page tables */
2121 	list_for_each_entry_safe(mem, tmp_mem,
2122 				 &process_info->userptr_inval_list,
2123 				 validate_list.head) {
2124 		struct kfd_mem_attachment *attachment;
2125 
2126 		bo = mem->bo;
2127 
2128 		/* Validate the BO if we got user pages */
2129 		if (bo->tbo.ttm->pages[0]) {
2130 			amdgpu_bo_placement_from_domain(bo, mem->domain);
2131 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2132 			if (ret) {
2133 				pr_err("%s: failed to validate BO\n", __func__);
2134 				goto unreserve_out;
2135 			}
2136 		}
2137 
2138 		list_move_tail(&mem->validate_list.head,
2139 			       &process_info->userptr_valid_list);
2140 
2141 		/* Update mapping. If the BO was not validated
2142 		 * (because we couldn't get user pages), this will
2143 		 * clear the page table entries, which will result in
2144 		 * VM faults if the GPU tries to access the invalid
2145 		 * memory.
2146 		 */
2147 		list_for_each_entry(attachment, &mem->attachments, list) {
2148 			if (!attachment->is_mapped)
2149 				continue;
2150 
2151 			kfd_mem_dmaunmap_attachment(mem, attachment);
2152 			ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
2153 			if (ret) {
2154 				pr_err("%s: update PTE failed\n", __func__);
2155 				/* make sure this gets validated again */
2156 				atomic_inc(&mem->invalid);
2157 				goto unreserve_out;
2158 			}
2159 		}
2160 	}
2161 
2162 	/* Update page directories */
2163 	ret = process_update_pds(process_info, &sync);
2164 
2165 unreserve_out:
2166 	ttm_eu_backoff_reservation(&ticket, &resv_list);
2167 	amdgpu_sync_wait(&sync, false);
2168 	amdgpu_sync_free(&sync);
2169 out_free:
2170 	kfree(pd_bo_list_entries);
2171 out_no_mem:
2172 
2173 	return ret;
2174 }
2175 
2176 /* Worker callback to restore evicted userptr BOs
2177  *
2178  * Tries to update and validate all userptr BOs. If successful and no
2179  * concurrent evictions happened, the queues are restarted. Otherwise,
2180  * reschedule for another attempt later.
2181  */
2182 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2183 {
2184 	struct delayed_work *dwork = to_delayed_work(work);
2185 	struct amdkfd_process_info *process_info =
2186 		container_of(dwork, struct amdkfd_process_info,
2187 			     restore_userptr_work);
2188 	struct task_struct *usertask;
2189 	struct mm_struct *mm;
2190 	int evicted_bos;
2191 
2192 	evicted_bos = atomic_read(&process_info->evicted_bos);
2193 	if (!evicted_bos)
2194 		return;
2195 
2196 	/* Reference task and mm in case of concurrent process termination */
2197 	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2198 	if (!usertask)
2199 		return;
2200 	mm = get_task_mm(usertask);
2201 	if (!mm) {
2202 		put_task_struct(usertask);
2203 		return;
2204 	}
2205 
2206 	mutex_lock(&process_info->lock);
2207 
2208 	if (update_invalid_user_pages(process_info, mm))
2209 		goto unlock_out;
2210 	/* userptr_inval_list can be empty if all evicted userptr BOs
2211 	 * have been freed. In that case there is nothing to validate
2212 	 * and we can just restart the queues.
2213 	 */
2214 	if (!list_empty(&process_info->userptr_inval_list)) {
2215 		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2216 			goto unlock_out; /* Concurrent eviction, try again */
2217 
2218 		if (validate_invalid_user_pages(process_info))
2219 			goto unlock_out;
2220 	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after the successful update, it will
	 * be treated as a first eviction that calls quiesce_mm. The
	 * eviction reference counting inside KFD will handle this case.
	 */
2226 	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2227 	    evicted_bos)
2228 		goto unlock_out;
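	/* All known evictions were consumed by the cmpxchg above, so
	 * clear the local counter copy to skip the reschedule below.
	 */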
2229 	evicted_bos = 0;
2230 	if (kgd2kfd_resume_mm(mm)) {
2231 		pr_err("%s: Failed to resume KFD\n", __func__);
2232 		/* No recovery from this failure. Probably the CP is
2233 		 * hanging. No point trying again.
2234 		 */
2235 	}
2236 
2237 unlock_out:
2238 	mutex_unlock(&process_info->lock);
2239 	mmput(mm);
2240 	put_task_struct(usertask);
2241 
2242 	/* If validation failed, reschedule another attempt */
2243 	if (evicted_bos)
2244 		schedule_delayed_work(&process_info->restore_userptr_work,
2245 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2246 }
2247 
/**
 * amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 * @ef: returns the new eviction fence created for the restored BOs
 *
 * After memory eviction, the restore thread calls this function. It must
 * be called while the process is still valid. BO restore involves:
 *
 * 1.  Release the old eviction fence and create a new one
 * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as
 *     pd_list
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved
 * 4.  Reserve all the BOs
 * 5.  Validate the PD and PT BOs
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
 * 7.  Add the fence to all PD and PT BOs
 * 8.  Unreserve all BOs
 */
2266 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2267 {
2268 	struct amdgpu_bo_list_entry *pd_bo_list;
2269 	struct amdkfd_process_info *process_info = info;
2270 	struct amdgpu_vm *peer_vm;
2271 	struct kgd_mem *mem;
2272 	struct bo_vm_reservation_context ctx;
2273 	struct amdgpu_amdkfd_fence *new_fence;
2274 	int ret = 0, i;
2275 	struct list_head duplicate_save;
2276 	struct amdgpu_sync sync_obj;
2277 	unsigned long failed_size = 0;
2278 	unsigned long total_size = 0;
2279 
2280 	INIT_LIST_HEAD(&duplicate_save);
2281 	INIT_LIST_HEAD(&ctx.list);
2282 	INIT_LIST_HEAD(&ctx.duplicates);
2283 
2284 	pd_bo_list = kcalloc(process_info->n_vms,
2285 			     sizeof(struct amdgpu_bo_list_entry),
2286 			     GFP_KERNEL);
2287 	if (!pd_bo_list)
2288 		return -ENOMEM;
2289 
2290 	i = 0;
2291 	mutex_lock(&process_info->lock);
2292 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2293 			vm_list_node)
2294 		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2295 
2296 	/* Reserve all BOs and page tables/directory. Add all BOs from
2297 	 * kfd_bo_list to ctx.list
2298 	 */
2299 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2300 			    validate_list.head) {
2301 
2302 		list_add_tail(&mem->resv_list.head, &ctx.list);
2303 		mem->resv_list.bo = mem->validate_list.bo;
2304 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2305 	}
2306 
2307 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2308 				     false, &duplicate_save);
2309 	if (ret) {
2310 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2311 		goto ttm_reserve_fail;
2312 	}
2313 
2314 	amdgpu_sync_create(&sync_obj);
2315 
2316 	/* Validate PDs and PTs */
2317 	ret = process_validate_vms(process_info);
2318 	if (ret)
2319 		goto validate_map_fail;
2320 
2321 	ret = process_sync_pds_resv(process_info, &sync_obj);
2322 	if (ret) {
2323 		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2324 		goto validate_map_fail;
2325 	}
2326 
2327 	/* Validate BOs and map them to GPUVM (update VM page tables). */
2328 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2329 			    validate_list.head) {
2330 
2331 		struct amdgpu_bo *bo = mem->bo;
2332 		uint32_t domain = mem->domain;
2333 		struct kfd_mem_attachment *attachment;
2334 
2335 		total_size += amdgpu_bo_size(bo);
2336 
2337 		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2338 		if (ret) {
2339 			pr_debug("Memory eviction: Validate BOs failed\n");
2340 			failed_size += amdgpu_bo_size(bo);
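			/* Fall back to GTT so that restore can still make
			 * progress when the preferred domain is exhausted.
			 */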
2341 			ret = amdgpu_amdkfd_bo_validate(bo,
2342 						AMDGPU_GEM_DOMAIN_GTT, false);
2343 			if (ret) {
2344 				pr_debug("Memory eviction: Try again\n");
2345 				goto validate_map_fail;
2346 			}
2347 		}
2348 		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2349 		if (ret) {
2350 			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2351 			goto validate_map_fail;
2352 		}
2353 		list_for_each_entry(attachment, &mem->attachments, list) {
2354 			if (!attachment->is_mapped)
2355 				continue;
2356 
2357 			kfd_mem_dmaunmap_attachment(mem, attachment);
2358 			ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
2359 			if (ret) {
2360 				pr_debug("Memory eviction: update PTE failed. Try again\n");
2361 				goto validate_map_fail;
2362 			}
2363 		}
2364 	}
2365 
2366 	if (failed_size)
2367 		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2368 
2369 	/* Update page directories */
2370 	ret = process_update_pds(process_info, &sync_obj);
2371 	if (ret) {
2372 		pr_debug("Memory eviction: update PDs failed. Try again\n");
2373 		goto validate_map_fail;
2374 	}
2375 
2376 	/* Wait for validate and PT updates to finish */
2377 	amdgpu_sync_wait(&sync_obj, false);
2378 
	/* Release the old eviction fence and create a new one. A fence can
	 * only go from unsignaled to signaled, so it cannot be reused.
	 * Use the context and mm from the old fence.
	 */
2383 	new_fence = amdgpu_amdkfd_fence_create(
2384 				process_info->eviction_fence->base.context,
2385 				process_info->eviction_fence->mm,
2386 				NULL);
2387 	if (!new_fence) {
2388 		pr_err("Failed to create eviction fence\n");
2389 		ret = -ENOMEM;
2390 		goto validate_map_fail;
2391 	}
2392 	dma_fence_put(&process_info->eviction_fence->base);
2393 	process_info->eviction_fence = new_fence;
2394 	*ef = dma_fence_get(&new_fence->base);
2395 
2396 	/* Attach new eviction fence to all BOs */
2397 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2398 		validate_list.head)
2399 		amdgpu_bo_fence(mem->bo,
2400 			&process_info->eviction_fence->base, true);
2401 
2402 	/* Attach eviction fence to PD / PT BOs */
2403 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2404 			    vm_list_node) {
2405 		struct amdgpu_bo *bo = peer_vm->root.base.bo;
2406 
2407 		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2408 	}
2409 
2410 validate_map_fail:
2411 	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2412 	amdgpu_sync_free(&sync_obj);
2413 ttm_reserve_fail:
2414 	mutex_unlock(&process_info->lock);
2415 	kfree(pd_bo_list);
2416 	return ret;
2417 }
2418 
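/* Wrap the device's GWS BO in a kgd_mem for this process, validate it
 * in the GWS domain and attach the process eviction fence so that
 * amdgpu and amdkfd can evict each other.
 */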
2419 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2420 {
2421 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2422 	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2423 	int ret;
2424 
2425 	if (!info || !gws)
2426 		return -EINVAL;
2427 
2428 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2429 	if (!*mem)
2430 		return -ENOMEM;
2431 
2432 	mutex_init(&(*mem)->lock);
2433 	INIT_LIST_HEAD(&(*mem)->attachments);
2434 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
2435 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2436 	(*mem)->process_info = process_info;
2437 	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2438 	amdgpu_sync_create(&(*mem)->sync);
2439 
2441 	/* Validate gws bo the first time it is added to process */
2442 	mutex_lock(&(*mem)->process_info->lock);
2443 	ret = amdgpu_bo_reserve(gws_bo, false);
2444 	if (unlikely(ret)) {
2445 		pr_err("Reserve gws bo failed %d\n", ret);
2446 		goto bo_reservation_failure;
2447 	}
2448 
2449 	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2450 	if (ret) {
2451 		pr_err("GWS BO validate failed %d\n", ret);
2452 		goto bo_validation_failure;
2453 	}
	/* The GWS resource is shared between amdgpu and amdkfd. Add the
	 * process eviction fence to the BO so that they can evict each
	 * other.
	 */
2458 	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2459 	if (ret)
2460 		goto reserve_shared_fail;
2461 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2462 	amdgpu_bo_unreserve(gws_bo);
2463 	mutex_unlock(&(*mem)->process_info->lock);
2464 
2465 	return ret;
2466 
2467 reserve_shared_fail:
2468 bo_validation_failure:
2469 	amdgpu_bo_unreserve(gws_bo);
2470 bo_reservation_failure:
2471 	mutex_unlock(&(*mem)->process_info->lock);
2472 	amdgpu_sync_free(&(*mem)->sync);
2473 	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2474 	amdgpu_bo_unref(&gws_bo);
2475 	mutex_destroy(&(*mem)->lock);
2476 	kfree(*mem);
2477 	*mem = NULL;
2478 	return ret;
2479 }
2480 
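/* Undo amdgpu_amdkfd_add_gws_to_process: remove the GWS kgd_mem from
 * the process bookkeeping, detach the eviction fence and drop the
 * references taken when it was added.
 */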
2481 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2482 {
2483 	int ret;
2484 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2485 	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2486 	struct amdgpu_bo *gws_bo = kgd_mem->bo;
2487 
2488 	/* Remove BO from process's validate list so restore worker won't touch
2489 	 * it anymore
2490 	 */
2491 	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2492 
2493 	ret = amdgpu_bo_reserve(gws_bo, false);
2494 	if (unlikely(ret)) {
2495 		pr_err("Reserve gws bo failed %d\n", ret);
		/* TODO: add BO back to validate_list? */
2497 		return ret;
2498 	}
2499 	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2500 			process_info->eviction_fence);
2501 	amdgpu_bo_unreserve(gws_bo);
2502 	amdgpu_sync_free(&kgd_mem->sync);
2503 	amdgpu_bo_unref(&gws_bo);
2504 	mutex_destroy(&kgd_mem->lock);
2505 	kfree(mem);
2506 	return 0;
2507 }
2508 
2509 /* Returns GPU-specific tiling mode information */
2510 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2511 				struct tile_config *config)
2512 {
2513 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2514 
2515 	config->gb_addr_config = adev->gfx.config.gb_addr_config;
2516 	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2517 	config->num_tile_configs =
2518 			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2519 	config->macro_tile_config_ptr =
2520 			adev->gfx.config.macrotile_mode_array;
2521 	config->num_macro_tile_configs =
2522 			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2523 
2524 	/* Those values are not set from GFX9 onwards */
2525 	config->num_banks = adev->gfx.config.num_banks;
2526 	config->num_ranks = adev->gfx.config.num_ranks;
2527 
2528 	return 0;
2529 }
2530