1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27 
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34 #include "amdgpu_xgmi.h"
35 #include "kfd_smi_events.h"
36 
37 /* Userptr restore delay, just long enough to allow consecutive VM
38  * changes to accumulate
39  */
40 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
41 
42 /*
43  * Align VRAM allocations to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
44  * BO chunk
45  */
46 #define VRAM_ALLOCATION_ALIGN (1 << 21)
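
/*
 * For example (simple accounting arithmetic): a 4 KiB VRAM request is
 * charged as ALIGN(4096, VRAM_ALLOCATION_ALIGN) == 2 MiB, and a 3 MiB
 * request as 4 MiB; see amdgpu_amdkfd_reserve_mem_limit() below.
 */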
47 
48 /* Impose limit on how much memory KFD can use */
49 static struct {
50 	uint64_t max_system_mem_limit;
51 	uint64_t max_ttm_mem_limit;
52 	int64_t system_mem_used;
53 	int64_t ttm_mem_used;
54 	spinlock_t mem_limit_lock;
55 } kfd_mem_limit;
56 
57 static const char * const domain_bit_to_string[] = {
58 		"CPU",
59 		"GTT",
60 		"VRAM",
61 		"GDS",
62 		"GWS",
63 		"OA"
64 };
65 
66 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
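
/*
 * Example (a sketch, assuming the usual GEM domain bit values, e.g.
 * AMDGPU_GEM_DOMAIN_VRAM == 0x4):
 *
 *	domain_string(AMDGPU_GEM_DOMAIN_VRAM)
 *		== domain_bit_to_string[ffs(0x4) - 1]
 *		== domain_bit_to_string[2]
 *		== "VRAM"
 */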
67 
68 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
69 
70 static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
71 		struct kgd_mem *mem)
72 {
73 	struct kfd_mem_attachment *entry;
74 
75 	list_for_each_entry(entry, &mem->attachments, list)
76 		if (entry->bo_va->base.vm == avm)
77 			return true;
78 
79 	return false;
80 }
81 
/* Set memory usage limits. Currently, the limits are
83  *  System (TTM + userptr) memory - 15/16th System RAM
84  *  TTM memory - 3/8th System RAM
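 *
 * For example, with 64 GiB of usable low memory this works out to roughly
 * 60 GiB (15/16) for system memory and 24 GiB (3/8) for TTM memory.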
85  */
86 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
87 {
88 	struct sysinfo si;
89 	uint64_t mem;
90 
91 	si_meminfo(&si);
92 	mem = si.freeram - si.freehigh;
93 	mem *= si.mem_unit;
94 
95 	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
96 	kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
97 	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
98 	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
99 		(kfd_mem_limit.max_system_mem_limit >> 20),
100 		(kfd_mem_limit.max_ttm_mem_limit >> 20));
101 }
102 
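/* Account memory reserved for KFD outside of the regular allocation paths.
 * Note: this updates system_mem_used without taking mem_limit_lock; the
 * caller is assumed to invoke it only where no concurrent accounting occurs.
 */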
103 void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
104 {
105 	kfd_mem_limit.system_mem_used += size;
106 }
107 
108 /* Estimate page table size needed to represent a given memory size
109  *
110  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
111  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
112  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
113  * for 2MB pages for TLB efficiency. However, small allocations and
114  * fragmented system memory still need some 4KB pages. We choose a
115  * compromise that should work in most cases without reserving too
116  * much memory for page tables unnecessarily (factor 16K, >> 14).
117  */
118 
119 #define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
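
/*
 * For example, mapping 1 TiB of memory reserves 1 TiB >> 14 = 64 MiB for
 * page tables; for small sizes the AMDGPU_VM_RESERVED_VRAM floor dominates.
 */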
120 
121 /**
122  * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
123  * of buffer.
124  *
125  * @adev: Device to which allocated BO belongs to
 * @size: Size of buffer, in bytes, encapsulated by BO. This should be
127  * equivalent to amdgpu_bo_size(BO)
128  * @alloc_flag: Flag used in allocating a BO as noted above
129  *
 * Return: 0 on success, -ENOMEM otherwise.
131  */
132 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
133 		uint64_t size, u32 alloc_flag)
134 {
135 	uint64_t reserved_for_pt =
136 		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
137 	size_t system_mem_needed, ttm_mem_needed, vram_needed;
138 	int ret = 0;
139 
140 	system_mem_needed = 0;
141 	ttm_mem_needed = 0;
142 	vram_needed = 0;
143 	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
144 		system_mem_needed = size;
145 		ttm_mem_needed = size;
146 	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
147 		/*
148 		 * Conservatively round up the allocation requirement to 2 MB
149 		 * to avoid fragmentation caused by 4K allocations in the tail
150 		 * 2M BO chunk.
151 		 */
152 		vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
153 	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
154 		system_mem_needed = size;
155 	} else if (!(alloc_flag &
156 				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
157 				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
158 		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
159 		return -ENOMEM;
160 	}
161 
162 	spin_lock(&kfd_mem_limit.mem_limit_lock);
163 
164 	if (kfd_mem_limit.system_mem_used + system_mem_needed >
165 	    kfd_mem_limit.max_system_mem_limit)
166 		pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
167 
168 	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
169 	     kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
170 	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
171 	     kfd_mem_limit.max_ttm_mem_limit) ||
172 	    (adev->kfd.vram_used + vram_needed >
173 	     adev->gmc.real_vram_size -
174 	     atomic64_read(&adev->vram_pin_size) -
175 	     reserved_for_pt)) {
176 		ret = -ENOMEM;
177 		goto release;
178 	}
179 
180 	/* Update memory accounting by decreasing available system
181 	 * memory, TTM memory and GPU memory as computed above
182 	 */
183 	adev->kfd.vram_used += vram_needed;
184 	kfd_mem_limit.system_mem_used += system_mem_needed;
185 	kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
186 
187 release:
188 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
189 	return ret;
190 }
191 
192 static void unreserve_mem_limit(struct amdgpu_device *adev,
193 		uint64_t size, u32 alloc_flag)
194 {
195 	spin_lock(&kfd_mem_limit.mem_limit_lock);
196 
197 	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
198 		kfd_mem_limit.system_mem_used -= size;
199 		kfd_mem_limit.ttm_mem_used -= size;
200 	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
201 		adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
202 	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
203 		kfd_mem_limit.system_mem_used -= size;
204 	} else if (!(alloc_flag &
205 				(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
206 				 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
207 		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
208 		goto release;
209 	}
210 
211 	WARN_ONCE(adev->kfd.vram_used < 0,
212 		  "KFD VRAM memory accounting unbalanced");
213 	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
214 		  "KFD TTM memory accounting unbalanced");
215 	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
216 		  "KFD system memory accounting unbalanced");
217 
218 release:
219 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
220 }
221 
222 void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
223 {
224 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
225 	u32 alloc_flags = bo->kfd_bo->alloc_flags;
226 	u64 size = amdgpu_bo_size(bo);
227 
228 	unreserve_mem_limit(adev, size, alloc_flags);
229 
230 	kfree(bo->kfd_bo);
231 }
232 
233 /**
 * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
 * about a USERPTR, DOORBELL or MMIO BO.
236  * @adev: Device for which dmamap BO is being created
237  * @mem: BO of peer device that is being DMA mapped. Provides parameters
238  *	 in building the dmamap BO
239  * @bo_out: Output parameter updated with handle of dmamap BO
240  */
241 static int
242 create_dmamap_sg_bo(struct amdgpu_device *adev,
243 		 struct kgd_mem *mem, struct amdgpu_bo **bo_out)
244 {
245 	struct drm_gem_object *gem_obj;
246 	int ret, align;
247 
248 	ret = amdgpu_bo_reserve(mem->bo, false);
249 	if (ret)
250 		return ret;
251 
252 	align = 1;
253 	ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, align,
254 			AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE,
255 			ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj);
256 
257 	amdgpu_bo_unreserve(mem->bo);
258 
259 	if (ret) {
260 		pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
261 		return -EINVAL;
262 	}
263 
264 	*bo_out = gem_to_amdgpu_bo(gem_obj);
265 	(*bo_out)->parent = amdgpu_bo_ref(mem->bo);
266 	return ret;
267 }
268 
269 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
270  *  reservation object.
271  *
272  * @bo: [IN] Remove eviction fence(s) from this BO
273  * @ef: [IN] This eviction fence is removed if it
274  *  is present in the shared list.
275  *
 * NOTE: Must be called with BO reserved, i.e. bo->tbo.base.resv->lock held.
277  */
278 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
279 					struct amdgpu_amdkfd_fence *ef)
280 {
281 	struct dma_fence *replacement;
282 
283 	if (!ef)
284 		return -EINVAL;
285 
	/* TODO: Instead of blocking here, we should use the fence of the page
	 * table update and TLB flush directly.
288 	 */
289 	replacement = dma_fence_get_stub();
290 	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
291 				replacement, DMA_RESV_USAGE_READ);
292 	dma_fence_put(replacement);
293 	return 0;
294 }
295 
296 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
297 {
298 	struct amdgpu_bo *root = bo;
299 	struct amdgpu_vm_bo_base *vm_bo;
300 	struct amdgpu_vm *vm;
301 	struct amdkfd_process_info *info;
302 	struct amdgpu_amdkfd_fence *ef;
303 	int ret;
304 
	/* We can always get vm_bo from the root PD BO. */
306 	while (root->parent)
307 		root = root->parent;
308 
309 	vm_bo = root->vm_bo;
310 	if (!vm_bo)
311 		return 0;
312 
313 	vm = vm_bo->vm;
314 	if (!vm)
315 		return 0;
316 
317 	info = vm->process_info;
318 	if (!info || !info->eviction_fence)
319 		return 0;
320 
321 	ef = container_of(dma_fence_get(&info->eviction_fence->base),
322 			struct amdgpu_amdkfd_fence, base);
323 
324 	BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
325 	ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
326 	dma_resv_unlock(bo->tbo.base.resv);
327 
328 	dma_fence_put(&ef->base);
329 	return ret;
330 }
331 
332 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
333 				     bool wait)
334 {
335 	struct ttm_operation_ctx ctx = { false, false };
336 	int ret;
337 
338 	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
339 		 "Called with userptr BO"))
340 		return -EINVAL;
341 
342 	amdgpu_bo_placement_from_domain(bo, domain);
343 
344 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
345 	if (ret)
346 		goto validate_fail;
347 	if (wait)
348 		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
349 
350 validate_fail:
351 	return ret;
352 }
353 
354 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
355 {
356 	return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
357 }
358 
359 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
360  *
361  * Page directories are not updated here because huge page handling
362  * during page table updates can invalidate page directory entries
363  * again. Page directories are only updated after updating page
364  * tables.
365  */
366 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
367 {
368 	struct amdgpu_bo *pd = vm->root.bo;
369 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
370 	int ret;
371 
372 	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
373 	if (ret) {
374 		pr_err("failed to validate PT BOs\n");
375 		return ret;
376 	}
377 
378 	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
379 
380 	return 0;
381 }
382 
383 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
384 {
385 	struct amdgpu_bo *pd = vm->root.bo;
386 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
387 	int ret;
388 
389 	ret = amdgpu_vm_update_pdes(adev, vm, false);
390 	if (ret)
391 		return ret;
392 
393 	return amdgpu_sync_fence(sync, vm->last_update);
394 }
395 
396 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
397 {
398 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
399 	bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
400 	bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
401 	uint32_t mapping_flags;
402 	uint64_t pte_flags;
403 	bool snoop = false;
404 
405 	mapping_flags = AMDGPU_VM_PAGE_READABLE;
406 	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
407 		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
408 	if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
409 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
410 
411 	switch (adev->asic_type) {
412 	case CHIP_ARCTURUS:
413 	case CHIP_ALDEBARAN:
414 		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
415 			if (bo_adev == adev) {
416 				if (uncached)
417 					mapping_flags |= AMDGPU_VM_MTYPE_UC;
418 				else if (coherent)
419 					mapping_flags |= AMDGPU_VM_MTYPE_CC;
420 				else
421 					mapping_flags |= AMDGPU_VM_MTYPE_RW;
422 				if (adev->asic_type == CHIP_ALDEBARAN &&
423 				    adev->gmc.xgmi.connected_to_cpu)
424 					snoop = true;
425 			} else {
426 				if (uncached || coherent)
427 					mapping_flags |= AMDGPU_VM_MTYPE_UC;
428 				else
429 					mapping_flags |= AMDGPU_VM_MTYPE_NC;
430 				if (amdgpu_xgmi_same_hive(adev, bo_adev))
431 					snoop = true;
432 			}
433 		} else {
434 			if (uncached || coherent)
435 				mapping_flags |= AMDGPU_VM_MTYPE_UC;
436 			else
437 				mapping_flags |= AMDGPU_VM_MTYPE_NC;
438 			snoop = true;
439 		}
440 		break;
441 	default:
442 		if (uncached || coherent)
443 			mapping_flags |= AMDGPU_VM_MTYPE_UC;
444 		else
445 			mapping_flags |= AMDGPU_VM_MTYPE_NC;
446 
447 		if (!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
448 			snoop = true;
449 	}
450 
451 	pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
452 	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
453 
454 	return pte_flags;
455 }
456 
457 /**
458  * create_sg_table() - Create an sg_table for a contiguous DMA addr range
459  * @addr: The starting address to point to
460  * @size: Size of memory area in bytes being pointed to
461  *
462  * Allocates an instance of sg_table and initializes it to point to memory
 * area specified by the input parameters. The address used to build the
 * table is assumed to already be DMA mapped, if needed.
465  *
466  * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
467  * because they are physically contiguous.
468  *
469  * Return: Initialized instance of SG Table or NULL
470  */
471 static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
472 {
473 	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
474 
475 	if (!sg)
476 		return NULL;
477 	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
478 		kfree(sg);
479 		return NULL;
480 	}
481 	sg_dma_address(sg->sgl) = addr;
482 	sg->sgl->length = size;
483 #ifdef CONFIG_NEED_SG_DMA_LENGTH
484 	sg->sgl->dma_length = size;
485 #endif
486 	return sg;
487 }
488 
489 static int
490 kfd_mem_dmamap_userptr(struct kgd_mem *mem,
491 		       struct kfd_mem_attachment *attachment)
492 {
493 	enum dma_data_direction direction =
494 		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
495 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
496 	struct ttm_operation_ctx ctx = {.interruptible = true};
497 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
498 	struct amdgpu_device *adev = attachment->adev;
499 	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
500 	struct ttm_tt *ttm = bo->tbo.ttm;
501 	int ret;
502 
503 	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
504 	if (unlikely(!ttm->sg))
505 		return -ENOMEM;
506 
	if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) {
		ret = -EINVAL;
		goto free_sg;
	}
509 
510 	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
511 	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
512 					ttm->num_pages, 0,
513 					(u64)ttm->num_pages << PAGE_SHIFT,
514 					GFP_KERNEL);
515 	if (unlikely(ret))
516 		goto free_sg;
517 
518 	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
519 	if (unlikely(ret))
520 		goto release_sg;
521 
522 	drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
523 				       ttm->num_pages);
524 
525 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
526 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
527 	if (ret)
528 		goto unmap_sg;
529 
530 	return 0;
531 
532 unmap_sg:
533 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
534 release_sg:
535 	pr_err("DMA map userptr failed: %d\n", ret);
536 	sg_free_table(ttm->sg);
537 free_sg:
538 	kfree(ttm->sg);
539 	ttm->sg = NULL;
540 	return ret;
541 }
542 
543 static int
544 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
545 {
546 	struct ttm_operation_ctx ctx = {.interruptible = true};
547 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
548 
549 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
550 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
551 }
552 
553 /**
554  * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
555  * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
556  * @attachment: Virtual address attachment of the BO on accessing device
557  *
 * An access request from the device that owns the DOORBELL does not require
 * DMA mapping, because the request does not go through the PCIe root complex;
 * it simply loops back. DMA mapping is only needed when accessing a peer
 * device's DOORBELL.
561  *
562  * In contrast, all access requests for MMIO need to be DMA mapped without regard to
563  * device ownership. This is because access requests for MMIO go through PCIe root
564  * complex.
565  *
566  * This is accomplished in two steps:
567  *   - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
568  *         in updating requesting device's page table
569  *   - Signal TTM to mark memory pointed to by requesting device's BO as GPU
570  *         accessible. This allows an update of requesting device's page table
 *         with entries associated with DOORBELL or MMIO memory
572  *
573  * This method is invoked in the following contexts:
574  *   - Mapping of DOORBELL or MMIO BO of same or peer device
 *   - Validating an evicted DOORBELL or MMIO BO on the device seeking access
576  *
577  * Return: ZERO if successful, NON-ZERO otherwise
578  */
579 static int
580 kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
581 		     struct kfd_mem_attachment *attachment)
582 {
583 	struct ttm_operation_ctx ctx = {.interruptible = true};
584 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
585 	struct amdgpu_device *adev = attachment->adev;
586 	struct ttm_tt *ttm = bo->tbo.ttm;
587 	enum dma_data_direction dir;
588 	dma_addr_t dma_addr;
589 	bool mmio;
590 	int ret;
591 
	/* Expect SG Table of dmamap BO to be NULL */
593 	mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
594 	if (unlikely(ttm->sg)) {
595 		pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
596 		return -EINVAL;
597 	}
598 
599 	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
600 			DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
601 	dma_addr = mem->bo->tbo.sg->sgl->dma_address;
602 	pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
603 	pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
604 	dma_addr = dma_map_resource(adev->dev, dma_addr,
605 			mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
606 	ret = dma_mapping_error(adev->dev, dma_addr);
607 	if (unlikely(ret))
608 		return ret;
609 	pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);
610 
611 	ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
612 	if (unlikely(!ttm->sg)) {
613 		ret = -ENOMEM;
614 		goto unmap_sg;
615 	}
616 
617 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
618 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
619 	if (unlikely(ret))
620 		goto free_sg;
621 
622 	return ret;
623 
624 free_sg:
625 	sg_free_table(ttm->sg);
626 	kfree(ttm->sg);
627 	ttm->sg = NULL;
628 unmap_sg:
629 	dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
630 			   dir, DMA_ATTR_SKIP_CPU_SYNC);
631 	return ret;
632 }
633 
634 static int
635 kfd_mem_dmamap_attachment(struct kgd_mem *mem,
636 			  struct kfd_mem_attachment *attachment)
637 {
638 	switch (attachment->type) {
639 	case KFD_MEM_ATT_SHARED:
640 		return 0;
641 	case KFD_MEM_ATT_USERPTR:
642 		return kfd_mem_dmamap_userptr(mem, attachment);
643 	case KFD_MEM_ATT_DMABUF:
644 		return kfd_mem_dmamap_dmabuf(attachment);
645 	case KFD_MEM_ATT_SG:
646 		return kfd_mem_dmamap_sg_bo(mem, attachment);
647 	default:
648 		WARN_ON_ONCE(1);
649 	}
650 	return -EINVAL;
651 }
652 
653 static void
654 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
655 			 struct kfd_mem_attachment *attachment)
656 {
657 	enum dma_data_direction direction =
658 		mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
659 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
660 	struct ttm_operation_ctx ctx = {.interruptible = false};
661 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
662 	struct amdgpu_device *adev = attachment->adev;
663 	struct ttm_tt *ttm = bo->tbo.ttm;
664 
665 	if (unlikely(!ttm->sg))
666 		return;
667 
668 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
669 	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
670 
671 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
672 	sg_free_table(ttm->sg);
673 	kfree(ttm->sg);
674 	ttm->sg = NULL;
675 }
676 
677 static void
678 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
679 {
680 	struct ttm_operation_ctx ctx = {.interruptible = true};
681 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
682 
683 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
684 	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
685 }
686 
687 /**
688  * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
689  * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
690  * @attachment: Virtual address attachment of the BO on accessing device
691  *
 * The method performs the following steps:
693  *   - Signal TTM to mark memory pointed to by BO as GPU inaccessible
694  *   - Free SG Table that is used to encapsulate DMA mapped memory of
695  *          peer device's DOORBELL or MMIO memory
696  *
697  * This method is invoked in the following contexts:
 *     Unmapping of DOORBELL or MMIO BO on a device having access to its memory
 *     Eviction of DOORBELL or MMIO BO on a device having access to its memory
700  *
701  * Return: void
702  */
703 static void
704 kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
705 		       struct kfd_mem_attachment *attachment)
706 {
707 	struct ttm_operation_ctx ctx = {.interruptible = true};
708 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
709 	struct amdgpu_device *adev = attachment->adev;
710 	struct ttm_tt *ttm = bo->tbo.ttm;
711 	enum dma_data_direction dir;
712 
713 	if (unlikely(!ttm->sg)) {
714 		pr_err("SG Table of BO is UNEXPECTEDLY NULL");
715 		return;
716 	}
717 
718 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
719 	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
720 
721 	dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
722 				DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
723 	dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
724 			ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
725 	sg_free_table(ttm->sg);
726 	kfree(ttm->sg);
727 	ttm->sg = NULL;
728 	bo->tbo.sg = NULL;
729 }
730 
731 static void
732 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
733 			    struct kfd_mem_attachment *attachment)
734 {
735 	switch (attachment->type) {
736 	case KFD_MEM_ATT_SHARED:
737 		break;
738 	case KFD_MEM_ATT_USERPTR:
739 		kfd_mem_dmaunmap_userptr(mem, attachment);
740 		break;
741 	case KFD_MEM_ATT_DMABUF:
742 		kfd_mem_dmaunmap_dmabuf(attachment);
743 		break;
744 	case KFD_MEM_ATT_SG:
745 		kfd_mem_dmaunmap_sg_bo(mem, attachment);
746 		break;
747 	default:
748 		WARN_ON_ONCE(1);
749 	}
750 }
751 
752 static int
753 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
754 		      struct amdgpu_bo **bo)
755 {
756 	struct drm_gem_object *gobj;
757 	int ret;
758 
759 	if (!mem->dmabuf) {
760 		mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
761 			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
762 				DRM_RDWR : 0);
763 		if (IS_ERR(mem->dmabuf)) {
764 			ret = PTR_ERR(mem->dmabuf);
765 			mem->dmabuf = NULL;
766 			return ret;
767 		}
768 	}
769 
770 	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
771 	if (IS_ERR(gobj))
772 		return PTR_ERR(gobj);
773 
774 	*bo = gem_to_amdgpu_bo(gobj);
775 	(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
776 	(*bo)->parent = amdgpu_bo_ref(mem->bo);
777 
778 	return 0;
779 }
780 
781 /* kfd_mem_attach - Add a BO to a VM
782  *
 * Everything that needs to be done only once when a BO is first added
784  * to a VM. It can later be mapped and unmapped many times without
785  * repeating these steps.
786  *
787  * 0. Create BO for DMA mapping, if needed
788  * 1. Allocate and initialize BO VA entry data structure
789  * 2. Add BO to the VM
790  * 3. Determine ASIC-specific PTE flags
791  * 4. Alloc page tables and directories if needed
792  * 4a.  Validate new page tables and directories
793  */
794 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
795 		struct amdgpu_vm *vm, bool is_aql)
796 {
797 	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
798 	unsigned long bo_size = mem->bo->tbo.base.size;
799 	uint64_t va = mem->va;
800 	struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
801 	struct amdgpu_bo *bo[2] = {NULL, NULL};
802 	bool same_hive = false;
803 	int i, ret;
804 
805 	if (!va) {
806 		pr_err("Invalid VA when adding BO to VM\n");
807 		return -EINVAL;
808 	}
809 
810 	/* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
811 	 *
	 * The access path of MMIO and DOORBELL BOs is always over PCIe.
	 * In contrast, the access path of VRAM BOs depends on the type of
	 * link that connects the peer device. Access over PCIe is allowed
	 * if the peer device has a large BAR, whereas access over xGMI is
	 * allowed for both small and large BAR configurations of the peer
	 * device.
817 	 */
818 	if ((adev != bo_adev) &&
819 	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
820 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
821 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
822 		if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
823 			same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
824 		if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
825 			return -EINVAL;
826 	}
827 
828 	for (i = 0; i <= is_aql; i++) {
829 		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
830 		if (unlikely(!attachment[i])) {
831 			ret = -ENOMEM;
832 			goto unwind;
833 		}
834 
835 		pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
836 			 va + bo_size, vm);
837 
838 		if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
839 		    (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) ||
840 		    same_hive) {
841 			/* Mappings on the local GPU, or VRAM mappings in the
			 * local hive, or userptr mappings in IOMMU direct map mode
843 			 * share the original BO
844 			 */
845 			attachment[i]->type = KFD_MEM_ATT_SHARED;
846 			bo[i] = mem->bo;
847 			drm_gem_object_get(&bo[i]->tbo.base);
848 		} else if (i > 0) {
849 			/* Multiple mappings on the same GPU share the BO */
850 			attachment[i]->type = KFD_MEM_ATT_SHARED;
851 			bo[i] = bo[0];
852 			drm_gem_object_get(&bo[i]->tbo.base);
853 		} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
854 			/* Create an SG BO to DMA-map userptrs on other GPUs */
855 			attachment[i]->type = KFD_MEM_ATT_USERPTR;
856 			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
857 			if (ret)
858 				goto unwind;
859 		/* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
860 		} else if (mem->bo->tbo.type == ttm_bo_type_sg) {
861 			WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
862 				    mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
				  "Handling invalid SG BO in ATTACH request");
864 			attachment[i]->type = KFD_MEM_ATT_SG;
865 			ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
866 			if (ret)
867 				goto unwind;
		/* Enable access to GTT and VRAM BOs of peer devices */
869 		} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
870 			   mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
871 			attachment[i]->type = KFD_MEM_ATT_DMABUF;
872 			ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
873 			if (ret)
874 				goto unwind;
875 			pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
876 		} else {
877 			WARN_ONCE(true, "Handling invalid ATTACH request");
878 			ret = -EINVAL;
879 			goto unwind;
880 		}
881 
882 		/* Add BO to VM internal data structures */
883 		ret = amdgpu_bo_reserve(bo[i], false);
884 		if (ret) {
			pr_debug("Unable to reserve BO during memory attach\n");
886 			goto unwind;
887 		}
888 		attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
889 		amdgpu_bo_unreserve(bo[i]);
890 		if (unlikely(!attachment[i]->bo_va)) {
891 			ret = -ENOMEM;
892 			pr_err("Failed to add BO object to VM. ret == %d\n",
893 			       ret);
894 			goto unwind;
895 		}
896 		attachment[i]->va = va;
897 		attachment[i]->pte_flags = get_pte_flags(adev, mem);
898 		attachment[i]->adev = adev;
899 		list_add(&attachment[i]->list, &mem->attachments);
900 
901 		va += bo_size;
902 	}
903 
904 	return 0;
905 
906 unwind:
907 	for (; i >= 0; i--) {
908 		if (!attachment[i])
909 			continue;
910 		if (attachment[i]->bo_va) {
911 			amdgpu_bo_reserve(bo[i], true);
912 			amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
913 			amdgpu_bo_unreserve(bo[i]);
914 			list_del(&attachment[i]->list);
915 		}
916 		if (bo[i])
917 			drm_gem_object_put(&bo[i]->tbo.base);
918 		kfree(attachment[i]);
919 	}
920 	return ret;
921 }
922 
923 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
924 {
925 	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
926 
927 	pr_debug("\t remove VA 0x%llx in entry %p\n",
928 			attachment->va, attachment);
929 	amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
930 	drm_gem_object_put(&bo->tbo.base);
931 	list_del(&attachment->list);
932 	kfree(attachment);
933 }
934 
935 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
936 				struct amdkfd_process_info *process_info,
937 				bool userptr)
938 {
939 	struct ttm_validate_buffer *entry = &mem->validate_list;
940 	struct amdgpu_bo *bo = mem->bo;
941 
942 	INIT_LIST_HEAD(&entry->head);
943 	entry->num_shared = 1;
944 	entry->bo = &bo->tbo;
945 	mutex_lock(&process_info->lock);
946 	if (userptr)
947 		list_add_tail(&entry->head, &process_info->userptr_valid_list);
948 	else
949 		list_add_tail(&entry->head, &process_info->kfd_bo_list);
950 	mutex_unlock(&process_info->lock);
951 }
952 
953 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
954 		struct amdkfd_process_info *process_info)
955 {
956 	struct ttm_validate_buffer *bo_list_entry;
957 
958 	bo_list_entry = &mem->validate_list;
959 	mutex_lock(&process_info->lock);
960 	list_del(&bo_list_entry->head);
961 	mutex_unlock(&process_info->lock);
962 }
963 
964 /* Initializes user pages. It registers the MMU notifier and validates
965  * the userptr BO in the GTT domain.
966  *
967  * The BO must already be on the userptr_valid_list. Otherwise an
968  * eviction and restore may happen that leaves the new BO unmapped
969  * with the user mode queues running.
970  *
971  * Takes the process_info->lock to protect against concurrent restore
972  * workers.
973  *
974  * Returns 0 for success, negative errno for errors.
975  */
976 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
977 			   bool criu_resume)
978 {
979 	struct amdkfd_process_info *process_info = mem->process_info;
980 	struct amdgpu_bo *bo = mem->bo;
981 	struct ttm_operation_ctx ctx = { true, false };
982 	int ret = 0;
983 
984 	mutex_lock(&process_info->lock);
985 
986 	ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
987 	if (ret) {
988 		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
989 		goto out;
990 	}
991 
992 	ret = amdgpu_mn_register(bo, user_addr);
993 	if (ret) {
994 		pr_err("%s: Failed to register MMU notifier: %d\n",
995 		       __func__, ret);
996 		goto out;
997 	}
998 
999 	if (criu_resume) {
1000 		/*
1001 		 * During a CRIU restore operation, the userptr buffer objects
1002 		 * will be validated in the restore_userptr_work worker at a
1003 		 * later stage when it is scheduled by another ioctl called by
1004 		 * CRIU master process for the target pid for restore.
1005 		 */
1006 		atomic_inc(&mem->invalid);
1007 		mutex_unlock(&process_info->lock);
1008 		return 0;
1009 	}
1010 
1011 	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1012 	if (ret) {
1013 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
1014 		goto unregister_out;
1015 	}
1016 
1017 	ret = amdgpu_bo_reserve(bo, true);
1018 	if (ret) {
1019 		pr_err("%s: Failed to reserve BO\n", __func__);
1020 		goto release_out;
1021 	}
1022 	amdgpu_bo_placement_from_domain(bo, mem->domain);
1023 	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1024 	if (ret)
1025 		pr_err("%s: failed to validate BO\n", __func__);
1026 	amdgpu_bo_unreserve(bo);
1027 
1028 release_out:
1029 	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1030 unregister_out:
1031 	if (ret)
1032 		amdgpu_mn_unregister(bo);
1033 out:
1034 	mutex_unlock(&process_info->lock);
1035 	return ret;
1036 }
1037 
1038 /* Reserving a BO and its page table BOs must happen atomically to
1039  * avoid deadlocks. Some operations update multiple VMs at once. Track
1040  * all the reservation info in a context structure. Optionally a sync
1041  * object can track VM updates.
1042  */
1043 struct bo_vm_reservation_context {
1044 	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
1045 	unsigned int n_vms;		    /* Number of VMs reserved	    */
1046 	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
1047 	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
1048 	struct list_head list, duplicates;  /* BO lists			    */
1049 	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
1050 	bool reserved;			    /* Whether BOs are reserved	    */
1051 };
1052 
1053 enum bo_vm_match {
1054 	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
1055 	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
1056 	BO_VM_ALL,		/* Match all VMs a BO was added to    */
1057 };
1058 
1059 /**
1060  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
1061  * @mem: KFD BO structure.
1062  * @vm: the VM to reserve.
1063  * @ctx: the struct that will be used in unreserve_bo_and_vms().
1064  */
1065 static int reserve_bo_and_vm(struct kgd_mem *mem,
1066 			      struct amdgpu_vm *vm,
1067 			      struct bo_vm_reservation_context *ctx)
1068 {
1069 	struct amdgpu_bo *bo = mem->bo;
1070 	int ret;
1071 
1072 	WARN_ON(!vm);
1073 
1074 	ctx->reserved = false;
1075 	ctx->n_vms = 1;
1076 	ctx->sync = &mem->sync;
1077 
1078 	INIT_LIST_HEAD(&ctx->list);
1079 	INIT_LIST_HEAD(&ctx->duplicates);
1080 
1081 	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
1082 	if (!ctx->vm_pd)
1083 		return -ENOMEM;
1084 
1085 	ctx->kfd_bo.priority = 0;
1086 	ctx->kfd_bo.tv.bo = &bo->tbo;
1087 	ctx->kfd_bo.tv.num_shared = 1;
1088 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
1089 
1090 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
1091 
1092 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1093 				     false, &ctx->duplicates);
1094 	if (ret) {
1095 		pr_err("Failed to reserve buffers in ttm.\n");
1096 		kfree(ctx->vm_pd);
1097 		ctx->vm_pd = NULL;
1098 		return ret;
1099 	}
1100 
1101 	ctx->reserved = true;
1102 	return 0;
1103 }
1104 
1105 /**
1106  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
1107  * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO are
 * reserved. Otherwise, only the given VM associated with the BO is reserved.
1110  * @map_type: the mapping status that will be used to filter the VMs.
1111  * @ctx: the struct that will be used in unreserve_bo_and_vms().
1112  *
1113  * Returns 0 for success, negative for failure.
1114  */
1115 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
1116 				struct amdgpu_vm *vm, enum bo_vm_match map_type,
1117 				struct bo_vm_reservation_context *ctx)
1118 {
1119 	struct amdgpu_bo *bo = mem->bo;
1120 	struct kfd_mem_attachment *entry;
1121 	unsigned int i;
1122 	int ret;
1123 
1124 	ctx->reserved = false;
1125 	ctx->n_vms = 0;
1126 	ctx->vm_pd = NULL;
1127 	ctx->sync = &mem->sync;
1128 
1129 	INIT_LIST_HEAD(&ctx->list);
1130 	INIT_LIST_HEAD(&ctx->duplicates);
1131 
1132 	list_for_each_entry(entry, &mem->attachments, list) {
1133 		if ((vm && vm != entry->bo_va->base.vm) ||
1134 			(entry->is_mapped != map_type
1135 			&& map_type != BO_VM_ALL))
1136 			continue;
1137 
1138 		ctx->n_vms++;
1139 	}
1140 
1141 	if (ctx->n_vms != 0) {
1142 		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
1143 				     GFP_KERNEL);
1144 		if (!ctx->vm_pd)
1145 			return -ENOMEM;
1146 	}
1147 
1148 	ctx->kfd_bo.priority = 0;
1149 	ctx->kfd_bo.tv.bo = &bo->tbo;
1150 	ctx->kfd_bo.tv.num_shared = 1;
1151 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
1152 
1153 	i = 0;
1154 	list_for_each_entry(entry, &mem->attachments, list) {
1155 		if ((vm && vm != entry->bo_va->base.vm) ||
1156 			(entry->is_mapped != map_type
1157 			&& map_type != BO_VM_ALL))
1158 			continue;
1159 
1160 		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
1161 				&ctx->vm_pd[i]);
1162 		i++;
1163 	}
1164 
1165 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1166 				     false, &ctx->duplicates);
1167 	if (ret) {
1168 		pr_err("Failed to reserve buffers in ttm.\n");
1169 		kfree(ctx->vm_pd);
1170 		ctx->vm_pd = NULL;
1171 		return ret;
1172 	}
1173 
1174 	ctx->reserved = true;
1175 	return 0;
1176 }
1177 
1178 /**
1179  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1180  * @ctx: Reservation context to unreserve
1181  * @wait: Optionally wait for a sync object representing pending VM updates
1182  * @intr: Whether the wait is interruptible
1183  *
1184  * Also frees any resources allocated in
1185  * reserve_bo_and_(cond_)vm(s). Returns the status from
1186  * amdgpu_sync_wait.
1187  */
1188 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1189 				 bool wait, bool intr)
1190 {
1191 	int ret = 0;
1192 
1193 	if (wait)
1194 		ret = amdgpu_sync_wait(ctx->sync, intr);
1195 
1196 	if (ctx->reserved)
1197 		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1198 	kfree(ctx->vm_pd);
1199 
1200 	ctx->sync = NULL;
1201 
1202 	ctx->reserved = false;
1203 	ctx->vm_pd = NULL;
1204 
1205 	return ret;
1206 }
1207 
1208 static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1209 				struct kfd_mem_attachment *entry,
1210 				struct amdgpu_sync *sync)
1211 {
1212 	struct amdgpu_bo_va *bo_va = entry->bo_va;
1213 	struct amdgpu_device *adev = entry->adev;
1214 	struct amdgpu_vm *vm = bo_va->base.vm;
1215 
1216 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1217 
1218 	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1219 
1220 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
1221 
1222 	kfd_mem_dmaunmap_attachment(mem, entry);
1223 }
1224 
1225 static int update_gpuvm_pte(struct kgd_mem *mem,
1226 			    struct kfd_mem_attachment *entry,
1227 			    struct amdgpu_sync *sync)
1228 {
1229 	struct amdgpu_bo_va *bo_va = entry->bo_va;
1230 	struct amdgpu_device *adev = entry->adev;
1231 	int ret;
1232 
1233 	ret = kfd_mem_dmamap_attachment(mem, entry);
1234 	if (ret)
1235 		return ret;
1236 
1237 	/* Update the page tables  */
1238 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
1239 	if (ret) {
1240 		pr_err("amdgpu_vm_bo_update failed\n");
1241 		return ret;
1242 	}
1243 
1244 	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1245 }
1246 
1247 static int map_bo_to_gpuvm(struct kgd_mem *mem,
1248 			   struct kfd_mem_attachment *entry,
1249 			   struct amdgpu_sync *sync,
1250 			   bool no_update_pte)
1251 {
1252 	int ret;
1253 
1254 	/* Set virtual address for the allocation */
1255 	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1256 			       amdgpu_bo_size(entry->bo_va->base.bo),
1257 			       entry->pte_flags);
1258 	if (ret) {
1259 		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1260 				entry->va, ret);
1261 		return ret;
1262 	}
1263 
1264 	if (no_update_pte)
1265 		return 0;
1266 
1267 	ret = update_gpuvm_pte(mem, entry, sync);
1268 	if (ret) {
1269 		pr_err("update_gpuvm_pte() failed\n");
1270 		goto update_gpuvm_pte_failed;
1271 	}
1272 
1273 	return 0;
1274 
1275 update_gpuvm_pte_failed:
1276 	unmap_bo_from_gpuvm(mem, entry, sync);
1277 	return ret;
1278 }
1279 
1280 static int process_validate_vms(struct amdkfd_process_info *process_info)
1281 {
1282 	struct amdgpu_vm *peer_vm;
1283 	int ret;
1284 
1285 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1286 			    vm_list_node) {
1287 		ret = vm_validate_pt_pd_bos(peer_vm);
1288 		if (ret)
1289 			return ret;
1290 	}
1291 
1292 	return 0;
1293 }
1294 
1295 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1296 				 struct amdgpu_sync *sync)
1297 {
1298 	struct amdgpu_vm *peer_vm;
1299 	int ret;
1300 
1301 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1302 			    vm_list_node) {
1303 		struct amdgpu_bo *pd = peer_vm->root.bo;
1304 
1305 		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1306 				       AMDGPU_SYNC_NE_OWNER,
1307 				       AMDGPU_FENCE_OWNER_KFD);
1308 		if (ret)
1309 			return ret;
1310 	}
1311 
1312 	return 0;
1313 }
1314 
1315 static int process_update_pds(struct amdkfd_process_info *process_info,
1316 			      struct amdgpu_sync *sync)
1317 {
1318 	struct amdgpu_vm *peer_vm;
1319 	int ret;
1320 
1321 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
1322 			    vm_list_node) {
1323 		ret = vm_update_pds(peer_vm, sync);
1324 		if (ret)
1325 			return ret;
1326 	}
1327 
1328 	return 0;
1329 }
1330 
1331 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1332 		       struct dma_fence **ef)
1333 {
1334 	struct amdkfd_process_info *info = NULL;
1335 	int ret;
1336 
1337 	if (!*process_info) {
1338 		info = kzalloc(sizeof(*info), GFP_KERNEL);
1339 		if (!info)
1340 			return -ENOMEM;
1341 
1342 		mutex_init(&info->lock);
1343 		INIT_LIST_HEAD(&info->vm_list_head);
1344 		INIT_LIST_HEAD(&info->kfd_bo_list);
1345 		INIT_LIST_HEAD(&info->userptr_valid_list);
1346 		INIT_LIST_HEAD(&info->userptr_inval_list);
1347 
1348 		info->eviction_fence =
1349 			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1350 						   current->mm,
1351 						   NULL);
1352 		if (!info->eviction_fence) {
1353 			pr_err("Failed to create eviction fence\n");
1354 			ret = -ENOMEM;
1355 			goto create_evict_fence_fail;
1356 		}
1357 
1358 		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1359 		atomic_set(&info->evicted_bos, 0);
1360 		INIT_DELAYED_WORK(&info->restore_userptr_work,
1361 				  amdgpu_amdkfd_restore_userptr_worker);
1362 
1363 		*process_info = info;
1364 		*ef = dma_fence_get(&info->eviction_fence->base);
1365 	}
1366 
1367 	vm->process_info = *process_info;
1368 
1369 	/* Validate page directory and attach eviction fence */
1370 	ret = amdgpu_bo_reserve(vm->root.bo, true);
1371 	if (ret)
1372 		goto reserve_pd_fail;
1373 	ret = vm_validate_pt_pd_bos(vm);
1374 	if (ret) {
1375 		pr_err("validate_pt_pd_bos() failed\n");
1376 		goto validate_pd_fail;
1377 	}
1378 	ret = amdgpu_bo_sync_wait(vm->root.bo,
1379 				  AMDGPU_FENCE_OWNER_KFD, false);
1380 	if (ret)
1381 		goto wait_pd_fail;
1382 	ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
1383 	if (ret)
1384 		goto reserve_shared_fail;
1385 	amdgpu_bo_fence(vm->root.bo,
1386 			&vm->process_info->eviction_fence->base, true);
1387 	amdgpu_bo_unreserve(vm->root.bo);
1388 
1389 	/* Update process info */
1390 	mutex_lock(&vm->process_info->lock);
1391 	list_add_tail(&vm->vm_list_node,
1392 			&(vm->process_info->vm_list_head));
1393 	vm->process_info->n_vms++;
1394 	mutex_unlock(&vm->process_info->lock);
1395 
1396 	return 0;
1397 
1398 reserve_shared_fail:
1399 wait_pd_fail:
1400 validate_pd_fail:
1401 	amdgpu_bo_unreserve(vm->root.bo);
1402 reserve_pd_fail:
1403 	vm->process_info = NULL;
1404 	if (info) {
1405 		/* Two fence references: one in info and one in *ef */
1406 		dma_fence_put(&info->eviction_fence->base);
1407 		dma_fence_put(*ef);
1408 		*ef = NULL;
1409 		*process_info = NULL;
1410 		put_pid(info->pid);
1411 create_evict_fence_fail:
1412 		mutex_destroy(&info->lock);
1413 		kfree(info);
1414 	}
1415 	return ret;
1416 }
1417 
1418 /**
 * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
1420  * @bo: Handle of buffer object being pinned
1421  * @domain: Domain into which BO should be pinned
1422  *
1423  *   - USERPTR BOs are UNPINNABLE and will return error
1424  *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1425  *     PIN count incremented. It is valid to PIN a BO multiple times
1426  *
1427  * Return: ZERO if successful in pinning, Non-Zero in case of error.
1428  */
1429 static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
1430 {
1431 	int ret = 0;
1432 
1433 	ret = amdgpu_bo_reserve(bo, false);
1434 	if (unlikely(ret))
1435 		return ret;
1436 
1437 	ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
1438 	if (ret)
1439 		pr_err("Error in Pinning BO to domain: %d\n", domain);
1440 
1441 	amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
1442 	amdgpu_bo_unreserve(bo);
1443 
1444 	return ret;
1445 }
1446 
1447 /**
1448  * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins BO using following criteria
1449  * @bo: Handle of buffer object being unpinned
1450  *
 *   - Is an illegal request for USERPTR BOs and is ignored
1452  *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1453  *     PIN count decremented. Calls to UNPIN must balance calls to PIN
1454  */
1455 static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
1456 {
1457 	int ret = 0;
1458 
1459 	ret = amdgpu_bo_reserve(bo, false);
1460 	if (unlikely(ret))
1461 		return;
1462 
1463 	amdgpu_bo_unpin(bo);
1464 	amdgpu_bo_unreserve(bo);
1465 }
1466 
1467 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
1468 					   struct file *filp, u32 pasid,
1469 					   void **process_info,
1470 					   struct dma_fence **ef)
1471 {
1472 	struct amdgpu_fpriv *drv_priv;
1473 	struct amdgpu_vm *avm;
1474 	int ret;
1475 
1476 	ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1477 	if (ret)
1478 		return ret;
1479 	avm = &drv_priv->vm;
1480 
1481 	/* Already a compute VM? */
1482 	if (avm->process_info)
1483 		return -EINVAL;
1484 
	/* Free the original amdgpu-allocated pasid; it will be
	 * replaced with a kfd-allocated pasid.
1487 	 */
1488 	if (avm->pasid) {
1489 		amdgpu_pasid_free(avm->pasid);
1490 		amdgpu_vm_set_pasid(adev, avm, 0);
1491 	}
1492 
1493 	/* Convert VM into a compute VM */
1494 	ret = amdgpu_vm_make_compute(adev, avm);
1495 	if (ret)
1496 		return ret;
1497 
1498 	ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1499 	if (ret)
1500 		return ret;
1501 	/* Initialize KFD part of the VM and process info */
1502 	ret = init_kfd_vm(avm, process_info, ef);
1503 	if (ret)
1504 		return ret;
1505 
1506 	amdgpu_vm_set_task_info(avm);
1507 
1508 	return 0;
1509 }
1510 
1511 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1512 				    struct amdgpu_vm *vm)
1513 {
1514 	struct amdkfd_process_info *process_info = vm->process_info;
1515 
1516 	if (!process_info)
1517 		return;
1518 
1519 	/* Update process info */
1520 	mutex_lock(&process_info->lock);
1521 	process_info->n_vms--;
1522 	list_del(&vm->vm_list_node);
1523 	mutex_unlock(&process_info->lock);
1524 
1525 	vm->process_info = NULL;
1526 
1527 	/* Release per-process resources when last compute VM is destroyed */
1528 	if (!process_info->n_vms) {
1529 		WARN_ON(!list_empty(&process_info->kfd_bo_list));
1530 		WARN_ON(!list_empty(&process_info->userptr_valid_list));
1531 		WARN_ON(!list_empty(&process_info->userptr_inval_list));
1532 
1533 		dma_fence_put(&process_info->eviction_fence->base);
1534 		cancel_delayed_work_sync(&process_info->restore_userptr_work);
1535 		put_pid(process_info->pid);
1536 		mutex_destroy(&process_info->lock);
1537 		kfree(process_info);
1538 	}
1539 }
1540 
1541 void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
1542 					    void *drm_priv)
1543 {
1544 	struct amdgpu_vm *avm;
1545 
1546 	if (WARN_ON(!adev || !drm_priv))
1547 		return;
1548 
1549 	avm = drm_priv_to_vm(drm_priv);
1550 
1551 	pr_debug("Releasing process vm %p\n", avm);
1552 
	/* The original pasid of the amdgpu vm has already been
	 * released when the amdgpu vm was converted to a compute vm.
	 * The current pasid is managed by kfd and will be released
	 * on kfd process destroy. Set the amdgpu pasid to 0 to avoid
	 * a duplicate release.
1558 	 */
1559 	amdgpu_vm_release_compute(adev, avm);
1560 }
1561 
1562 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1563 {
1564 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1565 	struct amdgpu_bo *pd = avm->root.bo;
1566 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1567 
1568 	if (adev->asic_type < CHIP_VEGA10)
1569 		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1570 	return avm->pd_phys_addr;
1571 }
1572 
1573 void amdgpu_amdkfd_block_mmu_notifications(void *p)
1574 {
1575 	struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1576 
1577 	mutex_lock(&pinfo->lock);
1578 	WRITE_ONCE(pinfo->block_mmu_notifications, true);
1579 	mutex_unlock(&pinfo->lock);
1580 }
1581 
1582 int amdgpu_amdkfd_criu_resume(void *p)
1583 {
1584 	int ret = 0;
1585 	struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1586 
1587 	mutex_lock(&pinfo->lock);
1588 	pr_debug("scheduling work\n");
1589 	atomic_inc(&pinfo->evicted_bos);
1590 	if (!READ_ONCE(pinfo->block_mmu_notifications)) {
1591 		ret = -EINVAL;
1592 		goto out_unlock;
1593 	}
1594 	WRITE_ONCE(pinfo->block_mmu_notifications, false);
1595 	schedule_delayed_work(&pinfo->restore_userptr_work, 0);
1596 
1597 out_unlock:
1598 	mutex_unlock(&pinfo->lock);
1599 	return ret;
1600 }
1601 
1602 size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
1603 {
1604 	uint64_t reserved_for_pt =
1605 		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
1606 	size_t available;
1607 
1608 	spin_lock(&kfd_mem_limit.mem_limit_lock);
1609 	available = adev->gmc.real_vram_size
1610 		- adev->kfd.vram_used
1611 		- atomic64_read(&adev->vram_pin_size)
1612 		- reserved_for_pt;
1613 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
1614 
1615 	return ALIGN_DOWN(available, VRAM_ALLOCATION_ALIGN);
1616 }
1617 
1618 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1619 		struct amdgpu_device *adev, uint64_t va, uint64_t size,
1620 		void *drm_priv, struct kgd_mem **mem,
1621 		uint64_t *offset, uint32_t flags, bool criu_resume)
1622 {
1623 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1624 	enum ttm_bo_type bo_type = ttm_bo_type_device;
1625 	struct sg_table *sg = NULL;
1626 	uint64_t user_addr = 0;
1627 	struct amdgpu_bo *bo;
1628 	struct drm_gem_object *gobj = NULL;
1629 	u32 domain, alloc_domain;
1630 	u64 alloc_flags;
1631 	int ret;
1632 
1633 	/*
1634 	 * Check on which domain to allocate BO
1635 	 */
1636 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1637 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1638 		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1639 		alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1640 			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1641 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1642 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1643 		alloc_flags = 0;
1644 	} else {
1645 		domain = AMDGPU_GEM_DOMAIN_GTT;
1646 		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1647 		alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1648 
1649 		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1650 			if (!offset || !*offset)
1651 				return -EINVAL;
1652 			user_addr = untagged_addr(*offset);
1653 		} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1654 				    KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1655 			bo_type = ttm_bo_type_sg;
1656 			if (size > UINT_MAX)
1657 				return -EINVAL;
1658 			sg = create_sg_table(*offset, size);
1659 			if (!sg)
1660 				return -ENOMEM;
1661 		} else {
1662 			return -EINVAL;
1663 		}
1664 	}
1665 
1666 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1667 	if (!*mem) {
1668 		ret = -ENOMEM;
1669 		goto err;
1670 	}
1671 	INIT_LIST_HEAD(&(*mem)->attachments);
1672 	mutex_init(&(*mem)->lock);
1673 	(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1674 
1675 	/* Workaround for AQL queue wraparound bug. Map the same
1676 	 * memory twice. That means we only actually allocate half
1677 	 * the memory.
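	 *
	 * E.g. an AQL allocation request of size S ends up as a BO of size
	 * S/2 which kfd_mem_attach() maps twice, at va and at va + S/2.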
1678 	 */
1679 	if ((*mem)->aql_queue)
1680 		size = size >> 1;
1681 
1682 	(*mem)->alloc_flags = flags;
1683 
1684 	amdgpu_sync_create(&(*mem)->sync);
1685 
1686 	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
1687 	if (ret) {
1688 		pr_debug("Insufficient memory\n");
1689 		goto err_reserve_limit;
1690 	}
1691 
1692 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1693 			va, size, domain_string(alloc_domain));
1694 
1695 	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1696 				       bo_type, NULL, &gobj);
1697 	if (ret) {
1698 		pr_debug("Failed to create BO on domain %s. ret %d\n",
1699 			 domain_string(alloc_domain), ret);
1700 		goto err_bo_create;
1701 	}
1702 	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1703 	if (ret) {
1704 		pr_debug("Failed to allow vma node access. ret %d\n", ret);
1705 		goto err_node_allow;
1706 	}
1707 	bo = gem_to_amdgpu_bo(gobj);
1708 	if (bo_type == ttm_bo_type_sg) {
1709 		bo->tbo.sg = sg;
1710 		bo->tbo.ttm->sg = sg;
1711 	}
1712 	bo->kfd_bo = *mem;
1713 	(*mem)->bo = bo;
1714 	if (user_addr)
1715 		bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1716 
1717 	(*mem)->va = va;
1718 	(*mem)->domain = domain;
1719 	(*mem)->mapped_to_gpu_memory = 0;
1720 	(*mem)->process_info = avm->process_info;
1721 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1722 
1723 	if (user_addr) {
1724 		pr_debug("creating userptr BO for user_addr = %llu\n", user_addr);
1725 		ret = init_user_pages(*mem, user_addr, criu_resume);
1726 		if (ret)
1727 			goto allocate_init_user_pages_failed;
1728 	} else  if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1729 				KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1730 		ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
1731 		if (ret) {
1732 			pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
1733 			goto err_pin_bo;
1734 		}
1735 		bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
1736 		bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
1737 	}
1738 
1739 	if (offset)
1740 		*offset = amdgpu_bo_mmap_offset(bo);
1741 
1742 	return 0;
1743 
1744 allocate_init_user_pages_failed:
1745 err_pin_bo:
1746 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1747 	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1748 err_node_allow:
1749 	/* Don't unreserve system mem limit twice */
1750 	goto err_reserve_limit;
1751 err_bo_create:
1752 	unreserve_mem_limit(adev, size, flags);
1753 err_reserve_limit:
1754 	mutex_destroy(&(*mem)->lock);
1755 	if (gobj)
1756 		drm_gem_object_put(gobj);
1757 	else
1758 		kfree(*mem);
1759 err:
1760 	if (sg) {
1761 		sg_free_table(sg);
1762 		kfree(sg);
1763 	}
1764 	return ret;
1765 }
1766 
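/* Free a BO allocated by amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu or imported
 * via amdgpu_amdkfd_gpuvm_import_dmabuf. Fails with -EBUSY while the BO is
 * still mapped on any GPU. If size is non-NULL, it returns the amount of
 * VRAM freed (0 for imported or non-VRAM BOs).
 */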
1767 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1768 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
1769 		uint64_t *size)
1770 {
1771 	struct amdkfd_process_info *process_info = mem->process_info;
1772 	unsigned long bo_size = mem->bo->tbo.base.size;
1773 	struct kfd_mem_attachment *entry, *tmp;
1774 	struct bo_vm_reservation_context ctx;
1775 	struct ttm_validate_buffer *bo_list_entry;
1776 	unsigned int mapped_to_gpu_memory;
1777 	int ret;
1778 	bool is_imported = false;
1779 
1780 	mutex_lock(&mem->lock);
1781 
1782 	/* Unpin MMIO/DOORBELL BO's that were pinned during allocation */
1783 	if (mem->alloc_flags &
1784 	    (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1785 	     KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1786 		amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
1787 	}
1788 
1789 	mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1790 	is_imported = mem->is_imported;
1791 	mutex_unlock(&mem->lock);
1792 	/* lock is not needed after this, since mem is unused and will
1793 	 * be freed anyway
1794 	 */
1795 
1796 	if (mapped_to_gpu_memory > 0) {
1797 		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1798 				mem->va, bo_size);
1799 		return -EBUSY;
1800 	}
1801 
1802 	/* Make sure restore workers don't access the BO any more */
1803 	bo_list_entry = &mem->validate_list;
1804 	mutex_lock(&process_info->lock);
1805 	list_del(&bo_list_entry->head);
1806 	mutex_unlock(&process_info->lock);
1807 
1808 	/* No more MMU notifiers */
1809 	amdgpu_mn_unregister(mem->bo);
1810 
1811 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1812 	if (unlikely(ret))
1813 		return ret;
1814 
1815 	/* The eviction fence should be removed by the last unmap.
1816 	 * TODO: Log an error condition if the bo still has the eviction fence
1817 	 * attached
1818 	 */
1819 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1820 					process_info->eviction_fence);
1821 	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1822 		mem->va + bo_size * (1 + mem->aql_queue));
1823 
1824 	/* Remove from VM internal data structures */
1825 	list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1826 		kfd_mem_detach(entry);
1827 
1828 	ret = unreserve_bo_and_vms(&ctx, false, false);
1829 
1830 	/* Free the sync object */
1831 	amdgpu_sync_free(&mem->sync);
1832 
1833 	/* If the SG is not NULL, it's one we created for a doorbell or mmio
1834 	 * remap BO. We need to free it.
1835 	 */
1836 	if (mem->bo->tbo.sg) {
1837 		sg_free_table(mem->bo->tbo.sg);
1838 		kfree(mem->bo->tbo.sg);
1839 	}
1840 
1841 	/* Update the size of the BO being freed if it was allocated from
1842 	 * VRAM and is not imported.
1843 	 */
1844 	if (size) {
1845 		if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1846 		    (!is_imported))
1847 			*size = bo_size;
1848 		else
1849 			*size = 0;
1850 	}
1851 
	/* Free the BO */
1853 	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1854 	if (mem->dmabuf)
1855 		dma_buf_put(mem->dmabuf);
1856 	mutex_destroy(&mem->lock);
1857 
1858 	/* If this releases the last reference, it will end up calling
1859 	 * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
1860 	 * this needs to be the last call here.
1861 	 */
1862 	drm_gem_object_put(&mem->bo->tbo.base);
1863 
1864 	return ret;
1865 }
1866 
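/* Map a KFD BO into the GPUVM of the process VM identified by drm_priv.
 * Attaches the BO to the VM on first use, validates it in its allocation
 * domain, updates the page tables and adds the eviction fence. Invalid
 * userptr BOs are not mapped here; they are left to the restore worker once
 * their pages have been revalidated.
 */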
1867 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1868 		struct amdgpu_device *adev, struct kgd_mem *mem,
1869 		void *drm_priv)
1870 {
1871 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1872 	int ret;
1873 	struct amdgpu_bo *bo;
1874 	uint32_t domain;
1875 	struct kfd_mem_attachment *entry;
1876 	struct bo_vm_reservation_context ctx;
1877 	unsigned long bo_size;
1878 	bool is_invalid_userptr = false;
1879 
1880 	bo = mem->bo;
1881 	if (!bo) {
1882 		pr_err("Invalid BO when mapping memory to GPU\n");
1883 		return -EINVAL;
1884 	}
1885 
1886 	/* Make sure restore is not running concurrently. Since we
1887 	 * don't map invalid userptr BOs, we rely on the next restore
1888 	 * worker to do the mapping
1889 	 */
1890 	mutex_lock(&mem->process_info->lock);
1891 
1892 	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
1893 	 * sure that the MMU notifier is no longer running
1894 	 * concurrently and the queues are actually stopped
1895 	 */
1896 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1897 		mmap_write_lock(current->mm);
1898 		is_invalid_userptr = atomic_read(&mem->invalid);
1899 		mmap_write_unlock(current->mm);
1900 	}
1901 
1902 	mutex_lock(&mem->lock);
1903 
1904 	domain = mem->domain;
1905 	bo_size = bo->tbo.base.size;
1906 
1907 	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1908 			mem->va,
1909 			mem->va + bo_size * (1 + mem->aql_queue),
1910 			avm, domain_string(domain));
1911 
1912 	if (!kfd_mem_is_attached(avm, mem)) {
1913 		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1914 		if (ret)
1915 			goto out;
1916 	}
1917 
1918 	ret = reserve_bo_and_vm(mem, avm, &ctx);
1919 	if (unlikely(ret))
1920 		goto out;
1921 
	/* A userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave the mapping to
	 * the next restore worker.
1926 	 */
1927 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1928 	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1929 		is_invalid_userptr = true;
1930 
1931 	ret = vm_validate_pt_pd_bos(avm);
1932 	if (unlikely(ret))
1933 		goto out_unreserve;
1934 
1935 	if (mem->mapped_to_gpu_memory == 0 &&
1936 	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1937 		/* Validate BO only once. The eviction fence gets added to BO
1938 		 * the first time it is mapped. Validate will wait for all
1939 		 * background evictions to complete.
1940 		 */
1941 		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1942 		if (ret) {
1943 			pr_debug("Validate failed\n");
1944 			goto out_unreserve;
1945 		}
1946 	}
1947 
1948 	list_for_each_entry(entry, &mem->attachments, list) {
1949 		if (entry->bo_va->base.vm != avm || entry->is_mapped)
1950 			continue;
1951 
1952 		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1953 			 entry->va, entry->va + bo_size, entry);
1954 
1955 		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1956 				      is_invalid_userptr);
1957 		if (ret) {
1958 			pr_err("Failed to map bo to gpuvm\n");
1959 			goto out_unreserve;
1960 		}
1961 
1962 		ret = vm_update_pds(avm, ctx.sync);
1963 		if (ret) {
1964 			pr_err("Failed to update page directories\n");
1965 			goto out_unreserve;
1966 		}
1967 
1968 		entry->is_mapped = true;
1969 		mem->mapped_to_gpu_memory++;
1970 		pr_debug("\t INC mapping count %d\n",
1971 			 mem->mapped_to_gpu_memory);
1972 	}
1973 
1974 	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1975 		amdgpu_bo_fence(bo,
1976 				&avm->process_info->eviction_fence->base,
1977 				true);
1978 	ret = unreserve_bo_and_vms(&ctx, false, false);
1979 
1980 	goto out;
1981 
1982 out_unreserve:
1983 	unreserve_bo_and_vms(&ctx, false, false);
1984 out:
1985 	mutex_unlock(&mem->process_info->lock);
1986 	mutex_unlock(&mem->lock);
1987 	return ret;
1988 }
1989 
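/* Unmap a KFD BO from the GPUVM identified by drm_priv. When the last GPU
 * mapping of the BO goes away, the eviction fence is removed so the BO can be
 * evicted again. Returns -EINVAL if the BO was not mapped in this VM.
 */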
1990 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1991 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
1992 {
1993 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1994 	struct amdkfd_process_info *process_info = avm->process_info;
1995 	unsigned long bo_size = mem->bo->tbo.base.size;
1996 	struct kfd_mem_attachment *entry;
1997 	struct bo_vm_reservation_context ctx;
1998 	int ret;
1999 
2000 	mutex_lock(&mem->lock);
2001 
2002 	ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
2003 	if (unlikely(ret))
2004 		goto out;
2005 	/* If no VMs were reserved, it means the BO wasn't actually mapped */
2006 	if (ctx.n_vms == 0) {
2007 		ret = -EINVAL;
2008 		goto unreserve_out;
2009 	}
2010 
2011 	ret = vm_validate_pt_pd_bos(avm);
2012 	if (unlikely(ret))
2013 		goto unreserve_out;
2014 
2015 	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
2016 		mem->va,
2017 		mem->va + bo_size * (1 + mem->aql_queue),
2018 		avm);
2019 
2020 	list_for_each_entry(entry, &mem->attachments, list) {
2021 		if (entry->bo_va->base.vm != avm || !entry->is_mapped)
2022 			continue;
2023 
2024 		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
2025 			 entry->va, entry->va + bo_size, entry);
2026 
2027 		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
2028 		entry->is_mapped = false;
2029 
2030 		mem->mapped_to_gpu_memory--;
2031 		pr_debug("\t DEC mapping count %d\n",
2032 			 mem->mapped_to_gpu_memory);
2033 	}
2034 
2035 	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
2036 	 * required.
2037 	 */
2038 	if (mem->mapped_to_gpu_memory == 0 &&
2039 	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
2040 	    !mem->bo->tbo.pin_count)
2041 		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
2042 						process_info->eviction_fence);
2043 
2044 unreserve_out:
2045 	unreserve_bo_and_vms(&ctx, false, false);
2046 out:
2047 	mutex_unlock(&mem->lock);
2048 	return ret;
2049 }
2050 
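/* Wait for the fences accumulated in the BO's sync object (for example page
 * table updates from map and unmap) to complete. intr selects an
 * interruptible wait.
 */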
2051 int amdgpu_amdkfd_gpuvm_sync_memory(
2052 		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
2053 {
2054 	struct amdgpu_sync sync;
2055 	int ret;
2056 
2057 	amdgpu_sync_create(&sync);
2058 
2059 	mutex_lock(&mem->lock);
2060 	amdgpu_sync_clone(&mem->sync, &sync);
2061 	mutex_unlock(&mem->lock);
2062 
2063 	ret = amdgpu_sync_wait(&sync, intr);
2064 	amdgpu_sync_free(&sync);
2065 	return ret;
2066 }
2067 
2068 /**
2069  * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
2070  * @adev: Device to which allocated BO belongs
2071  * @bo: Buffer object to be mapped
2072  *
 * Before returning, the bo reference count is incremented. To release the
 * reference and unpin/unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
 *
 * Return: 0 on success, error code on failure
2075  */
2076 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo)
2077 {
2078 	int ret;
2079 
2080 	ret = amdgpu_bo_reserve(bo, true);
2081 	if (ret) {
2082 		pr_err("Failed to reserve bo. ret %d\n", ret);
2083 		goto err_reserve_bo_failed;
2084 	}
2085 
2086 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2087 	if (ret) {
2088 		pr_err("Failed to pin bo. ret %d\n", ret);
2089 		goto err_pin_bo_failed;
2090 	}
2091 
2092 	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
2093 	if (ret) {
2094 		pr_err("Failed to bind bo to GART. ret %d\n", ret);
2095 		goto err_map_bo_gart_failed;
2096 	}
2097 
2098 	amdgpu_amdkfd_remove_eviction_fence(
2099 		bo, bo->kfd_bo->process_info->eviction_fence);
2100 
2101 	amdgpu_bo_unreserve(bo);
2102 
2103 	bo = amdgpu_bo_ref(bo);
2104 
2105 	return 0;
2106 
2107 err_map_bo_gart_failed:
2108 	amdgpu_bo_unpin(bo);
2109 err_pin_bo_failed:
2110 	amdgpu_bo_unreserve(bo);
2111 err_reserve_bo_failed:
2112 
2113 	return ret;
2114 }
2115 
2116 /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
2117  *
2118  * @mem: Buffer object to be mapped for CPU access
2119  * @kptr[out]: pointer in kernel CPU address space
2120  * @size[out]: size of the buffer
2121  *
2122  * Pins the BO and maps it for kernel CPU access. The eviction fence is removed
 * from the BO, since pinned BOs cannot be evicted. The BO must remain on the
 * validate_list so that the GPU mapping can be restored after a page table
 * eviction.
2126  *
2127  * Return: 0 on success, error code on failure
2128  */
2129 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
2130 					     void **kptr, uint64_t *size)
2131 {
2132 	int ret;
2133 	struct amdgpu_bo *bo = mem->bo;
2134 
2135 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2136 		pr_err("userptr can't be mapped to kernel\n");
2137 		return -EINVAL;
2138 	}
2139 
2140 	mutex_lock(&mem->process_info->lock);
2141 
2142 	ret = amdgpu_bo_reserve(bo, true);
2143 	if (ret) {
2144 		pr_err("Failed to reserve bo. ret %d\n", ret);
2145 		goto bo_reserve_failed;
2146 	}
2147 
2148 	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2149 	if (ret) {
2150 		pr_err("Failed to pin bo. ret %d\n", ret);
2151 		goto pin_failed;
2152 	}
2153 
2154 	ret = amdgpu_bo_kmap(bo, kptr);
2155 	if (ret) {
2156 		pr_err("Failed to map bo to kernel. ret %d\n", ret);
2157 		goto kmap_failed;
2158 	}
2159 
2160 	amdgpu_amdkfd_remove_eviction_fence(
2161 		bo, mem->process_info->eviction_fence);
2162 
2163 	if (size)
2164 		*size = amdgpu_bo_size(bo);
2165 
2166 	amdgpu_bo_unreserve(bo);
2167 
2168 	mutex_unlock(&mem->process_info->lock);
2169 	return 0;
2170 
2171 kmap_failed:
2172 	amdgpu_bo_unpin(bo);
2173 pin_failed:
2174 	amdgpu_bo_unreserve(bo);
2175 bo_reserve_failed:
2176 	mutex_unlock(&mem->process_info->lock);
2177 
2178 	return ret;
2179 }
2180 
/** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
2182  *
2183  * @mem: Buffer object to be unmapped for CPU access
2184  *
2185  * Removes the kernel CPU mapping and unpins the BO. It does not restore the
2186  * eviction fence, so this function should only be used for cleanup before the
2187  * BO is destroyed.
2188  */
2189 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
2190 {
2191 	struct amdgpu_bo *bo = mem->bo;
2192 
2193 	amdgpu_bo_reserve(bo, true);
2194 	amdgpu_bo_kunmap(bo);
2195 	amdgpu_bo_unpin(bo);
2196 	amdgpu_bo_unreserve(bo);
2197 }
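
/*
 * A minimal, hypothetical usage sketch for the two helpers above. It assumes
 * "mem" is a struct kgd_mem pointer for a GTT BO obtained elsewhere; the
 * local names and the memset() are illustrative only, not taken from a real
 * caller:
 *
 *	void *cpu_ptr;
 *	uint64_t buf_size;
 *	int r;
 *
 *	r = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &cpu_ptr, &buf_size);
 *	if (!r) {
 *		memset(cpu_ptr, 0, buf_size);
 *		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
 *	}
 */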
2198 
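/* Copy the most recent VM fault information for this device to mem if a new
 * fault was recorded, and clear the "updated" flag so the same fault is not
 * reported twice. The memory barrier orders the copy against clearing the
 * flag.
 */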
2199 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
2200 					  struct kfd_vm_fault_info *mem)
2201 {
2202 	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
2203 		*mem = *adev->gmc.vm_fault_info;
2204 		mb();
2205 		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
2206 	}
2207 	return 0;
2208 }
2209 
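/* Import a dmabuf exported by amdgpu on the same device and wrap it in a
 * kgd_mem so that KFD can map it. Only VRAM and GTT BOs are accepted. The BO
 * itself is not re-allocated; a GEM reference is taken and new KFD
 * bookkeeping is created with the imported flag set.
 */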
2210 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
2211 				      struct dma_buf *dma_buf,
2212 				      uint64_t va, void *drm_priv,
2213 				      struct kgd_mem **mem, uint64_t *size,
2214 				      uint64_t *mmap_offset)
2215 {
2216 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2217 	struct drm_gem_object *obj;
2218 	struct amdgpu_bo *bo;
2219 	int ret;
2220 
2221 	if (dma_buf->ops != &amdgpu_dmabuf_ops)
2222 		/* Can't handle non-graphics buffers */
2223 		return -EINVAL;
2224 
2225 	obj = dma_buf->priv;
2226 	if (drm_to_adev(obj->dev) != adev)
2227 		/* Can't handle buffers from other devices */
2228 		return -EINVAL;
2229 
2230 	bo = gem_to_amdgpu_bo(obj);
2231 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
2232 				    AMDGPU_GEM_DOMAIN_GTT)))
2233 		/* Only VRAM and GTT BOs are supported */
2234 		return -EINVAL;
2235 
2236 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2237 	if (!*mem)
2238 		return -ENOMEM;
2239 
2240 	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
2241 	if (ret) {
		kfree(*mem);
2243 		return ret;
2244 	}
2245 
2246 	if (size)
2247 		*size = amdgpu_bo_size(bo);
2248 
2249 	if (mmap_offset)
2250 		*mmap_offset = amdgpu_bo_mmap_offset(bo);
2251 
2252 	INIT_LIST_HEAD(&(*mem)->attachments);
2253 	mutex_init(&(*mem)->lock);
2254 
2255 	(*mem)->alloc_flags =
2256 		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2257 		KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
2258 		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
2259 		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
2260 
2261 	drm_gem_object_get(&bo->tbo.base);
2262 	(*mem)->bo = bo;
2263 	(*mem)->va = va;
2264 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2265 		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
2266 	(*mem)->mapped_to_gpu_memory = 0;
2267 	(*mem)->process_info = avm->process_info;
2268 	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
2269 	amdgpu_sync_create(&(*mem)->sync);
2270 	(*mem)->is_imported = true;
2271 
2272 	return 0;
2273 }
2274 
2275 /* Evict a userptr BO by stopping the queues if necessary
2276  *
2277  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
2278  * cannot do any memory allocations, and cannot take any locks that
2279  * are held elsewhere while allocating memory. Therefore this is as
2280  * simple as possible, using atomic counters.
2281  *
2282  * It doesn't do anything to the BO itself. The real work happens in
2283  * restore, where we get updated page addresses. This function only
2284  * ensures that GPU access to the BO is stopped.
2285  */
2286 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
2287 				struct mm_struct *mm)
2288 {
2289 	struct amdkfd_process_info *process_info = mem->process_info;
2290 	int evicted_bos;
2291 	int r = 0;
2292 
2293 	/* Do not process MMU notifications until stage-4 IOCTL is received */
2294 	if (READ_ONCE(process_info->block_mmu_notifications))
2295 		return 0;
2296 
2297 	atomic_inc(&mem->invalid);
2298 	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
2299 	if (evicted_bos == 1) {
2300 		/* First eviction, stop the queues */
2301 		r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
2302 		if (r)
2303 			pr_err("Failed to quiesce KFD\n");
2304 		schedule_delayed_work(&process_info->restore_userptr_work,
2305 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2306 	}
2307 
2308 	return r;
2309 }
2310 
2311 /* Update invalid userptr BOs
2312  *
2313  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
2314  * userptr_inval_list and updates user pages for all BOs that have
2315  * been invalidated since their last update.
2316  */
2317 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
2318 				     struct mm_struct *mm)
2319 {
2320 	struct kgd_mem *mem, *tmp_mem;
2321 	struct amdgpu_bo *bo;
2322 	struct ttm_operation_ctx ctx = { false, false };
2323 	int invalid, ret;
2324 
2325 	/* Move all invalidated BOs to the userptr_inval_list and
2326 	 * release their user pages by migration to the CPU domain
2327 	 */
2328 	list_for_each_entry_safe(mem, tmp_mem,
2329 				 &process_info->userptr_valid_list,
2330 				 validate_list.head) {
2331 		if (!atomic_read(&mem->invalid))
2332 			continue; /* BO is still valid */
2333 
2334 		bo = mem->bo;
2335 
2336 		if (amdgpu_bo_reserve(bo, true))
2337 			return -EAGAIN;
2338 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2339 		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2340 		amdgpu_bo_unreserve(bo);
2341 		if (ret) {
2342 			pr_err("%s: Failed to invalidate userptr BO\n",
2343 			       __func__);
2344 			return -EAGAIN;
2345 		}
2346 
2347 		list_move_tail(&mem->validate_list.head,
2348 			       &process_info->userptr_inval_list);
2349 	}
2350 
2351 	if (list_empty(&process_info->userptr_inval_list))
2352 		return 0; /* All evicted userptr BOs were freed */
2353 
2354 	/* Go through userptr_inval_list and update any invalid user_pages */
2355 	list_for_each_entry(mem, &process_info->userptr_inval_list,
2356 			    validate_list.head) {
2357 		invalid = atomic_read(&mem->invalid);
2358 		if (!invalid)
2359 			/* BO hasn't been invalidated since the last
2360 			 * revalidation attempt. Keep its BO list.
2361 			 */
2362 			continue;
2363 
2364 		bo = mem->bo;
2365 
2366 		/* Get updated user pages */
2367 		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2368 		if (ret) {
			pr_debug("Failed to get user pages. ret %d\n", ret);
2370 
			/* Treat -EFAULT (bad address) as success. The mapping
			 * will fail later with a VM fault if the GPU tries to
			 * access it, which is better than hanging indefinitely
			 * with stalled user mode queues.
			 *
			 * Return other errors (-EBUSY or -ENOMEM) so the
			 * restore is retried.
			 */
2378 			if (ret != -EFAULT)
2379 				return ret;
		} else {
2382 			/*
2383 			 * FIXME: Cannot ignore the return code, must hold
2384 			 * notifier_lock
2385 			 */
2386 			amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2387 		}
2388 
2389 		/* Mark the BO as valid unless it was invalidated
2390 		 * again concurrently.
2391 		 */
2392 		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2393 			return -EAGAIN;
2394 	}
2395 
2396 	return 0;
2397 }
2398 
2399 /* Validate invalid userptr BOs
2400  *
2401  * Validates BOs on the userptr_inval_list, and moves them back to the
2402  * userptr_valid_list. Also updates GPUVM page tables with new page
2403  * addresses and waits for the page table updates to complete.
2404  */
2405 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2406 {
2407 	struct amdgpu_bo_list_entry *pd_bo_list_entries;
2408 	struct list_head resv_list, duplicates;
2409 	struct ww_acquire_ctx ticket;
2410 	struct amdgpu_sync sync;
2411 
2412 	struct amdgpu_vm *peer_vm;
2413 	struct kgd_mem *mem, *tmp_mem;
2414 	struct amdgpu_bo *bo;
2415 	struct ttm_operation_ctx ctx = { false, false };
2416 	int i, ret;
2417 
2418 	pd_bo_list_entries = kcalloc(process_info->n_vms,
2419 				     sizeof(struct amdgpu_bo_list_entry),
2420 				     GFP_KERNEL);
2421 	if (!pd_bo_list_entries) {
2422 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2423 		ret = -ENOMEM;
2424 		goto out_no_mem;
2425 	}
2426 
2427 	INIT_LIST_HEAD(&resv_list);
2428 	INIT_LIST_HEAD(&duplicates);
2429 
2430 	/* Get all the page directory BOs that need to be reserved */
2431 	i = 0;
2432 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2433 			    vm_list_node)
2434 		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2435 				    &pd_bo_list_entries[i++]);
2436 	/* Add the userptr_inval_list entries to resv_list */
2437 	list_for_each_entry(mem, &process_info->userptr_inval_list,
2438 			    validate_list.head) {
2439 		list_add_tail(&mem->resv_list.head, &resv_list);
2440 		mem->resv_list.bo = mem->validate_list.bo;
2441 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2442 	}
2443 
2444 	/* Reserve all BOs and page tables for validation */
2445 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2446 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
2447 	if (ret)
2448 		goto out_free;
2449 
2450 	amdgpu_sync_create(&sync);
2451 
2452 	ret = process_validate_vms(process_info);
2453 	if (ret)
2454 		goto unreserve_out;
2455 
2456 	/* Validate BOs and update GPUVM page tables */
2457 	list_for_each_entry_safe(mem, tmp_mem,
2458 				 &process_info->userptr_inval_list,
2459 				 validate_list.head) {
2460 		struct kfd_mem_attachment *attachment;
2461 
2462 		bo = mem->bo;
2463 
2464 		/* Validate the BO if we got user pages */
2465 		if (bo->tbo.ttm->pages[0]) {
2466 			amdgpu_bo_placement_from_domain(bo, mem->domain);
2467 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2468 			if (ret) {
2469 				pr_err("%s: failed to validate BO\n", __func__);
2470 				goto unreserve_out;
2471 			}
2472 		}
2473 
2474 		list_move_tail(&mem->validate_list.head,
2475 			       &process_info->userptr_valid_list);
2476 
2477 		/* Update mapping. If the BO was not validated
2478 		 * (because we couldn't get user pages), this will
2479 		 * clear the page table entries, which will result in
2480 		 * VM faults if the GPU tries to access the invalid
2481 		 * memory.
2482 		 */
2483 		list_for_each_entry(attachment, &mem->attachments, list) {
2484 			if (!attachment->is_mapped)
2485 				continue;
2486 
2487 			kfd_mem_dmaunmap_attachment(mem, attachment);
2488 			ret = update_gpuvm_pte(mem, attachment, &sync);
2489 			if (ret) {
2490 				pr_err("%s: update PTE failed\n", __func__);
2491 				/* make sure this gets validated again */
2492 				atomic_inc(&mem->invalid);
2493 				goto unreserve_out;
2494 			}
2495 		}
2496 	}
2497 
2498 	/* Update page directories */
2499 	ret = process_update_pds(process_info, &sync);
2500 
2501 unreserve_out:
2502 	ttm_eu_backoff_reservation(&ticket, &resv_list);
2503 	amdgpu_sync_wait(&sync, false);
2504 	amdgpu_sync_free(&sync);
2505 out_free:
2506 	kfree(pd_bo_list_entries);
2507 out_no_mem:
2508 
2509 	return ret;
2510 }
2511 
2512 /* Worker callback to restore evicted userptr BOs
2513  *
2514  * Tries to update and validate all userptr BOs. If successful and no
2515  * concurrent evictions happened, the queues are restarted. Otherwise,
2516  * reschedule for another attempt later.
2517  */
2518 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2519 {
2520 	struct delayed_work *dwork = to_delayed_work(work);
2521 	struct amdkfd_process_info *process_info =
2522 		container_of(dwork, struct amdkfd_process_info,
2523 			     restore_userptr_work);
2524 	struct task_struct *usertask;
2525 	struct mm_struct *mm;
2526 	int evicted_bos;
2527 
2528 	evicted_bos = atomic_read(&process_info->evicted_bos);
2529 	if (!evicted_bos)
2530 		return;
2531 
2532 	/* Reference task and mm in case of concurrent process termination */
2533 	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2534 	if (!usertask)
2535 		return;
2536 	mm = get_task_mm(usertask);
2537 	if (!mm) {
2538 		put_task_struct(usertask);
2539 		return;
2540 	}
2541 
2542 	mutex_lock(&process_info->lock);
2543 
2544 	if (update_invalid_user_pages(process_info, mm))
2545 		goto unlock_out;
2546 	/* userptr_inval_list can be empty if all evicted userptr BOs
2547 	 * have been freed. In that case there is nothing to validate
2548 	 * and we can just restart the queues.
2549 	 */
2550 	if (!list_empty(&process_info->userptr_inval_list)) {
2551 		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2552 			goto unlock_out; /* Concurrent eviction, try again */
2553 
2554 		if (validate_invalid_user_pages(process_info))
2555 			goto unlock_out;
2556 	}
	/* Final check for a concurrent eviction and atomic update. If
	 * another eviction happens after the successful update, it will
	 * be the first eviction again, and quiesce_mm will be called. The
	 * eviction reference counting inside KFD will handle this case.
2561 	 */
2562 	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2563 	    evicted_bos)
2564 		goto unlock_out;
2565 	evicted_bos = 0;
2566 	if (kgd2kfd_resume_mm(mm)) {
2567 		pr_err("%s: Failed to resume KFD\n", __func__);
2568 		/* No recovery from this failure. Probably the CP is
2569 		 * hanging. No point trying again.
2570 		 */
2571 	}
2572 
2573 unlock_out:
2574 	mutex_unlock(&process_info->lock);
2575 
2576 	/* If validation failed, reschedule another attempt */
2577 	if (evicted_bos) {
2578 		schedule_delayed_work(&process_info->restore_userptr_work,
2579 			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2580 
2581 		kfd_smi_event_queue_restore_rescheduled(mm);
2582 	}
2583 	mmput(mm);
2584 	put_task_struct(usertask);
2585 }
2586 
/** amdgpu_amdkfd_gpuvm_restore_process_bos() - Restore all BOs for the given
 *   KFD process identified by @info
 *
 * @info: amdkfd_process_info of the KFD process
 * @ef: Output pointer for the new eviction fence
 *
 * The restore thread calls this function after memory eviction. It must be
 * called while the process is still valid. BO restore involves:
 *
 * 1.  Release the old eviction fence and create a new one
 * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as
 *     pd_bo_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
 * 7.  Add the fence to all PD and PT BOs
 * 8.  Unreserve all BOs
 */
2605 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2606 {
2607 	struct amdgpu_bo_list_entry *pd_bo_list;
2608 	struct amdkfd_process_info *process_info = info;
2609 	struct amdgpu_vm *peer_vm;
2610 	struct kgd_mem *mem;
2611 	struct bo_vm_reservation_context ctx;
2612 	struct amdgpu_amdkfd_fence *new_fence;
2613 	int ret = 0, i;
2614 	struct list_head duplicate_save;
2615 	struct amdgpu_sync sync_obj;
2616 	unsigned long failed_size = 0;
2617 	unsigned long total_size = 0;
2618 
2619 	INIT_LIST_HEAD(&duplicate_save);
2620 	INIT_LIST_HEAD(&ctx.list);
2621 	INIT_LIST_HEAD(&ctx.duplicates);
2622 
2623 	pd_bo_list = kcalloc(process_info->n_vms,
2624 			     sizeof(struct amdgpu_bo_list_entry),
2625 			     GFP_KERNEL);
2626 	if (!pd_bo_list)
2627 		return -ENOMEM;
2628 
2629 	i = 0;
2630 	mutex_lock(&process_info->lock);
2631 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2632 			vm_list_node)
2633 		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2634 
2635 	/* Reserve all BOs and page tables/directory. Add all BOs from
2636 	 * kfd_bo_list to ctx.list
2637 	 */
2638 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2639 			    validate_list.head) {
2640 
2641 		list_add_tail(&mem->resv_list.head, &ctx.list);
2642 		mem->resv_list.bo = mem->validate_list.bo;
2643 		mem->resv_list.num_shared = mem->validate_list.num_shared;
2644 	}
2645 
2646 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2647 				     false, &duplicate_save);
2648 	if (ret) {
2649 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2650 		goto ttm_reserve_fail;
2651 	}
2652 
2653 	amdgpu_sync_create(&sync_obj);
2654 
2655 	/* Validate PDs and PTs */
2656 	ret = process_validate_vms(process_info);
2657 	if (ret)
2658 		goto validate_map_fail;
2659 
2660 	ret = process_sync_pds_resv(process_info, &sync_obj);
2661 	if (ret) {
2662 		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2663 		goto validate_map_fail;
2664 	}
2665 
2666 	/* Validate BOs and map them to GPUVM (update VM page tables). */
2667 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2668 			    validate_list.head) {
2669 
2670 		struct amdgpu_bo *bo = mem->bo;
2671 		uint32_t domain = mem->domain;
2672 		struct kfd_mem_attachment *attachment;
2673 		struct dma_resv_iter cursor;
2674 		struct dma_fence *fence;
2675 
2676 		total_size += amdgpu_bo_size(bo);
2677 
2678 		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2679 		if (ret) {
2680 			pr_debug("Memory eviction: Validate BOs failed\n");
2681 			failed_size += amdgpu_bo_size(bo);
2682 			ret = amdgpu_amdkfd_bo_validate(bo,
2683 						AMDGPU_GEM_DOMAIN_GTT, false);
2684 			if (ret) {
2685 				pr_debug("Memory eviction: Try again\n");
2686 				goto validate_map_fail;
2687 			}
2688 		}
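		/* Add the kernel-usage fences from the BO's reservation object
		 * (for example the move triggered by the validation above) to
		 * sync_obj, so the page table updates below wait for them.
		 */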
2689 		dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
2690 					DMA_RESV_USAGE_KERNEL, fence) {
2691 			ret = amdgpu_sync_fence(&sync_obj, fence);
2692 			if (ret) {
2693 				pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2694 				goto validate_map_fail;
2695 			}
2696 		}
2697 		list_for_each_entry(attachment, &mem->attachments, list) {
2698 			if (!attachment->is_mapped)
2699 				continue;
2700 
2701 			kfd_mem_dmaunmap_attachment(mem, attachment);
2702 			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2703 			if (ret) {
2704 				pr_debug("Memory eviction: update PTE failed. Try again\n");
2705 				goto validate_map_fail;
2706 			}
2707 		}
2708 	}
2709 
2710 	if (failed_size)
2711 		pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2712 
2713 	/* Update page directories */
2714 	ret = process_update_pds(process_info, &sync_obj);
2715 	if (ret) {
2716 		pr_debug("Memory eviction: update PDs failed. Try again\n");
2717 		goto validate_map_fail;
2718 	}
2719 
2720 	/* Wait for validate and PT updates to finish */
2721 	amdgpu_sync_wait(&sync_obj, false);
2722 
	/* Release the old eviction fence and create a new one. A fence only
	 * goes from unsignaled to signaled, so it cannot be reused.
	 * Use the context and mm from the old fence.
	 */
2727 	new_fence = amdgpu_amdkfd_fence_create(
2728 				process_info->eviction_fence->base.context,
2729 				process_info->eviction_fence->mm,
2730 				NULL);
2731 	if (!new_fence) {
2732 		pr_err("Failed to create eviction fence\n");
2733 		ret = -ENOMEM;
2734 		goto validate_map_fail;
2735 	}
2736 	dma_fence_put(&process_info->eviction_fence->base);
2737 	process_info->eviction_fence = new_fence;
2738 	*ef = dma_fence_get(&new_fence->base);
2739 
2740 	/* Attach new eviction fence to all BOs except pinned ones */
2741 	list_for_each_entry(mem, &process_info->kfd_bo_list,
2742 		validate_list.head) {
2743 		if (mem->bo->tbo.pin_count)
2744 			continue;
2745 
2746 		amdgpu_bo_fence(mem->bo,
2747 			&process_info->eviction_fence->base, true);
2748 	}
2749 	/* Attach eviction fence to PD / PT BOs */
2750 	list_for_each_entry(peer_vm, &process_info->vm_list_head,
2751 			    vm_list_node) {
2752 		struct amdgpu_bo *bo = peer_vm->root.bo;
2753 
2754 		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2755 	}
2756 
2757 validate_map_fail:
2758 	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2759 	amdgpu_sync_free(&sync_obj);
2760 ttm_reserve_fail:
2761 	mutex_unlock(&process_info->lock);
2762 	kfree(pd_bo_list);
2763 	return ret;
2764 }
2765 
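/* Wrap the amdgpu GWS BO in a kgd_mem and add it to the process's KFD BO
 * list. The BO is validated in the GWS domain and the process eviction fence
 * is attached so that amdgpu and amdkfd can evict each other.
 */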
2766 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2767 {
2768 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2769 	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2770 	int ret;
2771 
2772 	if (!info || !gws)
2773 		return -EINVAL;
2774 
2775 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2776 	if (!*mem)
2777 		return -ENOMEM;
2778 
2779 	mutex_init(&(*mem)->lock);
2780 	INIT_LIST_HEAD(&(*mem)->attachments);
2781 	(*mem)->bo = amdgpu_bo_ref(gws_bo);
2782 	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2783 	(*mem)->process_info = process_info;
2784 	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate the GWS BO the first time it is added to the process */
2789 	mutex_lock(&(*mem)->process_info->lock);
2790 	ret = amdgpu_bo_reserve(gws_bo, false);
2791 	if (unlikely(ret)) {
2792 		pr_err("Reserve gws bo failed %d\n", ret);
2793 		goto bo_reservation_failure;
2794 	}
2795 
2796 	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2797 	if (ret) {
2798 		pr_err("GWS BO validate failed %d\n", ret);
2799 		goto bo_validation_failure;
2800 	}
	/* The GWS resource is shared between amdgpu and amdkfd. Add the
	 * process eviction fence to the BO so they can evict each other.
	 */
2805 	ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
2806 	if (ret)
2807 		goto reserve_shared_fail;
2808 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2809 	amdgpu_bo_unreserve(gws_bo);
2810 	mutex_unlock(&(*mem)->process_info->lock);
2811 
2812 	return ret;
2813 
2814 reserve_shared_fail:
2815 bo_validation_failure:
2816 	amdgpu_bo_unreserve(gws_bo);
2817 bo_reservation_failure:
2818 	mutex_unlock(&(*mem)->process_info->lock);
2819 	amdgpu_sync_free(&(*mem)->sync);
2820 	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2821 	amdgpu_bo_unref(&gws_bo);
2822 	mutex_destroy(&(*mem)->lock);
2823 	kfree(*mem);
2824 	*mem = NULL;
2825 	return ret;
2826 }
2827 
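/* Undo amdgpu_amdkfd_add_gws_to_process: remove the kgd_mem wrapper from the
 * process's KFD BO list, drop the eviction fence and release the GWS BO
 * reference.
 */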
2828 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2829 {
2830 	int ret;
2831 	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2832 	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2833 	struct amdgpu_bo *gws_bo = kgd_mem->bo;
2834 
2835 	/* Remove BO from process's validate list so restore worker won't touch
2836 	 * it anymore
2837 	 */
2838 	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2839 
2840 	ret = amdgpu_bo_reserve(gws_bo, false);
2841 	if (unlikely(ret)) {
2842 		pr_err("Reserve gws bo failed %d\n", ret);
		/* TODO: add BO back to validate_list? */
2844 		return ret;
2845 	}
2846 	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2847 			process_info->eviction_fence);
2848 	amdgpu_bo_unreserve(gws_bo);
2849 	amdgpu_sync_free(&kgd_mem->sync);
2850 	amdgpu_bo_unref(&gws_bo);
2851 	mutex_destroy(&kgd_mem->lock);
2852 	kfree(mem);
2853 	return 0;
2854 }
2855 
2856 /* Returns GPU-specific tiling mode information */
2857 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
2858 				struct tile_config *config)
2859 {
2860 	config->gb_addr_config = adev->gfx.config.gb_addr_config;
2861 	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2862 	config->num_tile_configs =
2863 			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2864 	config->macro_tile_config_ptr =
2865 			adev->gfx.config.macrotile_mode_array;
2866 	config->num_macro_tile_configs =
2867 			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2868 
2869 	/* Those values are not set from GFX9 onwards */
2870 	config->num_banks = adev->gfx.config.num_banks;
2871 	config->num_ranks = adev->gfx.config.num_ranks;
2872 
2873 	return 0;
2874 }
2875 
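/* Return true if mem currently has at least one GPU mapping on adev */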
2876 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
2877 {
2878 	struct kfd_mem_attachment *entry;
2879 
2880 	list_for_each_entry(entry, &mem->attachments, list) {
2881 		if (entry->is_mapped && entry->adev == adev)
2882 			return true;
2883 	}
2884 	return false;
2885 }
2886