xref: /openbmc/linux/drivers/gpu/drm/amd/amdkfd/kfd_svm.c (revision 799dce6f)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2020-2021 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include "amdgpu_sync.h"
27 #include "amdgpu_object.h"
28 #include "amdgpu_vm.h"
29 #include "amdgpu_mn.h"
30 #include "amdgpu.h"
31 #include "amdgpu_xgmi.h"
32 #include "kfd_priv.h"
33 #include "kfd_svm.h"
34 #include "kfd_migrate.h"
35 
36 #ifdef dev_fmt
37 #undef dev_fmt
38 #endif
39 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
40 
41 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
42 
43 /* Long enough to ensure no retry fault comes in after the svm range is
44  * restored and the page table is updated.
45  */
46 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	2000
47 
48 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
49 static bool
50 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
51 				    const struct mmu_notifier_range *range,
52 				    unsigned long cur_seq);
53 static int
54 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
55 		   uint64_t *bo_s, uint64_t *bo_l);
56 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
57 	.invalidate = svm_range_cpu_invalidate_pagetables,
58 };
59 
60 /**
61  * svm_range_unlink - unlink svm_range from lists and interval tree
62  * @prange: svm range structure to be removed
63  *
64  * Remove the svm_range from the svms and svm_bo lists and the svms
65  * interval tree.
66  *
67  * Context: The caller must hold svms->lock
68  */
69 static void svm_range_unlink(struct svm_range *prange)
70 {
71 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
72 		 prange, prange->start, prange->last);
73 
74 	if (prange->svm_bo) {
75 		spin_lock(&prange->svm_bo->list_lock);
76 		list_del(&prange->svm_bo_list);
77 		spin_unlock(&prange->svm_bo->list_lock);
78 	}
79 
80 	list_del(&prange->list);
81 	if (prange->it_node.start != 0 && prange->it_node.last != 0)
82 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
83 }
84 
85 static void
86 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
87 {
88 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
89 		 prange, prange->start, prange->last);
90 
91 	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
92 				     prange->start << PAGE_SHIFT,
93 				     prange->npages << PAGE_SHIFT,
94 				     &svm_range_mn_ops);
95 }
96 
97 /**
98  * svm_range_add_to_svms - add svm range to svms
99  * @prange: svm range structure to be added
100  *
101  * Add the svm range to the svms interval tree and linked list
102  *
103  * Context: The caller must hold svms->lock
104  */
105 static void svm_range_add_to_svms(struct svm_range *prange)
106 {
107 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
108 		 prange, prange->start, prange->last);
109 
110 	list_add_tail(&prange->list, &prange->svms->list);
111 	prange->it_node.start = prange->start;
112 	prange->it_node.last = prange->last;
113 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
114 }
115 
116 static void svm_range_remove_notifier(struct svm_range *prange)
117 {
118 	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
119 		 prange->svms, prange,
120 		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
121 		 prange->notifier.interval_tree.last >> PAGE_SHIFT);
122 
123 	if (prange->notifier.interval_tree.start != 0 &&
124 	    prange->notifier.interval_tree.last != 0)
125 		mmu_interval_notifier_remove(&prange->notifier);
126 }
127 
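/* A dma_addr entry refers to a valid system-memory DMA mapping only if it is
 * non-zero, not a DMA mapping error, and not tagged with SVM_RANGE_VRAM_DOMAIN
 * (which marks VRAM addresses rather than DMA mappings).
 */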
128 static bool
129 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
130 {
131 	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
132 	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
133 }
134 
135 static int
136 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
137 		      unsigned long offset, unsigned long npages,
138 		      unsigned long *hmm_pfns, uint32_t gpuidx)
139 {
140 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
141 	dma_addr_t *addr = prange->dma_addr[gpuidx];
142 	struct device *dev = adev->dev;
143 	struct page *page;
144 	int i, r;
145 
146 	if (!addr) {
147 		addr = kvmalloc_array(prange->npages, sizeof(*addr),
148 				      GFP_KERNEL | __GFP_ZERO);
149 		if (!addr)
150 			return -ENOMEM;
151 		prange->dma_addr[gpuidx] = addr;
152 	}
153 
154 	addr += offset;
155 	for (i = 0; i < npages; i++) {
156 		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
157 			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
158 
159 		page = hmm_pfn_to_page(hmm_pfns[i]);
160 		if (is_zone_device_page(page)) {
161 			struct amdgpu_device *bo_adev =
162 					amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
163 
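			/* Device-private (VRAM) page: convert the pfn to an
			 * address in the owning GPU's VRAM by removing the
			 * pgmap range offset and adding the VRAM base offset,
			 * then tag it so it is not treated as a DMA mapping.
			 */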
164 			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
165 				   bo_adev->vm_manager.vram_base_offset -
166 				   bo_adev->kfd.dev->pgmap.range.start;
167 			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
168 			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
169 			continue;
170 		}
171 		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
172 		r = dma_mapping_error(dev, addr[i]);
173 		if (r) {
174 			dev_err(dev, "failed %d dma_map_page\n", r);
175 			return r;
176 		}
177 		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
178 				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
179 	}
180 	return 0;
181 }
182 
183 static int
184 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
185 		  unsigned long offset, unsigned long npages,
186 		  unsigned long *hmm_pfns)
187 {
188 	struct kfd_process *p;
189 	uint32_t gpuidx;
190 	int r;
191 
192 	p = container_of(prange->svms, struct kfd_process, svms);
193 
194 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
195 		struct kfd_process_device *pdd;
196 
197 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
198 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
199 		if (!pdd) {
200 			pr_debug("failed to find device idx %d\n", gpuidx);
201 			return -EINVAL;
202 		}
203 
204 		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
205 					  hmm_pfns, gpuidx);
206 		if (r)
207 			break;
208 	}
209 
210 	return r;
211 }
212 
213 void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
214 			 unsigned long offset, unsigned long npages)
215 {
216 	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
217 	int i;
218 
219 	if (!dma_addr)
220 		return;
221 
222 	for (i = offset; i < offset + npages; i++) {
223 		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
224 			continue;
225 		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
226 		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
227 		dma_addr[i] = 0;
228 	}
229 }
230 
231 void svm_range_free_dma_mappings(struct svm_range *prange)
232 {
233 	struct kfd_process_device *pdd;
234 	dma_addr_t *dma_addr;
235 	struct device *dev;
236 	struct kfd_process *p;
237 	uint32_t gpuidx;
238 
239 	p = container_of(prange->svms, struct kfd_process, svms);
240 
241 	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
242 		dma_addr = prange->dma_addr[gpuidx];
243 		if (!dma_addr)
244 			continue;
245 
246 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
247 		if (!pdd) {
248 			pr_debug("failed to find device idx %d\n", gpuidx);
249 			continue;
250 		}
251 		dev = &pdd->dev->pdev->dev;
252 		svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
253 		kvfree(dma_addr);
254 		prange->dma_addr[gpuidx] = NULL;
255 	}
256 }
257 
258 static void svm_range_free(struct svm_range *prange)
259 {
260 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
261 		 prange->start, prange->last);
262 
263 	svm_range_vram_node_free(prange);
264 	svm_range_free_dma_mappings(prange);
265 	mutex_destroy(&prange->lock);
266 	mutex_destroy(&prange->migrate_mutex);
267 	kfree(prange);
268 }
269 
270 static void
271 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
272 				 uint8_t *granularity, uint32_t *flags)
273 {
274 	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
275 	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
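	/* Default migration granularity is 2^9 = 512 pages, i.e. 2MB with
	 * 4KB pages.
	 */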
276 	*granularity = 9;
277 	*flags =
278 		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
279 }
280 
281 static struct
282 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
283 			 uint64_t last)
284 {
285 	uint64_t size = last - start + 1;
286 	struct svm_range *prange;
287 	struct kfd_process *p;
288 
289 	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
290 	if (!prange)
291 		return NULL;
292 	prange->npages = size;
293 	prange->svms = svms;
294 	prange->start = start;
295 	prange->last = last;
296 	INIT_LIST_HEAD(&prange->list);
297 	INIT_LIST_HEAD(&prange->update_list);
298 	INIT_LIST_HEAD(&prange->remove_list);
299 	INIT_LIST_HEAD(&prange->insert_list);
300 	INIT_LIST_HEAD(&prange->svm_bo_list);
301 	INIT_LIST_HEAD(&prange->deferred_list);
302 	INIT_LIST_HEAD(&prange->child_list);
303 	atomic_set(&prange->invalid, 0);
304 	prange->validate_timestamp = 0;
305 	mutex_init(&prange->migrate_mutex);
306 	mutex_init(&prange->lock);
307 
308 	p = container_of(svms, struct kfd_process, svms);
309 	if (p->xnack_enabled)
310 		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
311 			    MAX_GPU_INSTANCE);
312 
313 	svm_range_set_default_attributes(&prange->preferred_loc,
314 					 &prange->prefetch_loc,
315 					 &prange->granularity, &prange->flags);
316 
317 	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
318 
319 	return prange;
320 }
321 
322 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
323 {
324 	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
325 		return false;
326 
327 	return true;
328 }
329 
330 static void svm_range_bo_release(struct kref *kref)
331 {
332 	struct svm_range_bo *svm_bo;
333 
334 	svm_bo = container_of(kref, struct svm_range_bo, kref);
335 	pr_debug("svm_bo 0x%p\n", svm_bo);
336 
337 	spin_lock(&svm_bo->list_lock);
338 	while (!list_empty(&svm_bo->range_list)) {
339 		struct svm_range *prange =
340 				list_first_entry(&svm_bo->range_list,
341 						struct svm_range, svm_bo_list);
342 		/* list_del_init tells a concurrent svm_range_vram_node_new when
343 		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
344 		 */
345 		list_del_init(&prange->svm_bo_list);
346 		spin_unlock(&svm_bo->list_lock);
347 
348 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
349 			 prange->start, prange->last);
350 		mutex_lock(&prange->lock);
351 		prange->svm_bo = NULL;
352 		mutex_unlock(&prange->lock);
353 
354 		spin_lock(&svm_bo->list_lock);
355 	}
356 	spin_unlock(&svm_bo->list_lock);
357 	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
358 		/* We're not in the eviction worker.
359 		 * Signal the fence and synchronize with any
360 		 * pending eviction work.
361 		 */
362 		dma_fence_signal(&svm_bo->eviction_fence->base);
363 		cancel_work_sync(&svm_bo->eviction_work);
364 	}
365 	dma_fence_put(&svm_bo->eviction_fence->base);
366 	amdgpu_bo_unref(&svm_bo->bo);
367 	kfree(svm_bo);
368 }
369 
370 static void svm_range_bo_wq_release(struct work_struct *work)
371 {
372 	struct svm_range_bo *svm_bo;
373 
374 	svm_bo = container_of(work, struct svm_range_bo, release_work);
375 	svm_range_bo_release(&svm_bo->kref);
376 }
377 
378 static void svm_range_bo_release_async(struct kref *kref)
379 {
380 	struct svm_range_bo *svm_bo;
381 
382 	svm_bo = container_of(kref, struct svm_range_bo, kref);
383 	pr_debug("svm_bo 0x%p\n", svm_bo);
384 	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
385 	schedule_work(&svm_bo->release_work);
386 }
387 
388 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
389 {
390 	kref_put(&svm_bo->kref, svm_range_bo_release_async);
391 }
392 
393 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
394 {
395 	if (svm_bo)
396 		kref_put(&svm_bo->kref, svm_range_bo_release);
397 }
398 
399 static bool
400 svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
401 {
402 	struct amdgpu_device *bo_adev;
403 
404 	mutex_lock(&prange->lock);
405 	if (!prange->svm_bo) {
406 		mutex_unlock(&prange->lock);
407 		return false;
408 	}
409 	if (prange->ttm_res) {
410 		/* We still have a reference, all is well */
411 		mutex_unlock(&prange->lock);
412 		return true;
413 	}
414 	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
415 		/*
416 		 * Migrate from GPU to GPU, remove range from source bo_adev
417 		 * svm_bo range list, and return false to allocate svm_bo from
418 		 * destination adev.
419 		 */
420 		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
421 		if (bo_adev != adev) {
422 			mutex_unlock(&prange->lock);
423 
424 			spin_lock(&prange->svm_bo->list_lock);
425 			list_del_init(&prange->svm_bo_list);
426 			spin_unlock(&prange->svm_bo->list_lock);
427 
428 			svm_range_bo_unref(prange->svm_bo);
429 			return false;
430 		}
431 		if (READ_ONCE(prange->svm_bo->evicting)) {
432 			struct dma_fence *f;
433 			struct svm_range_bo *svm_bo;
434 			/* The BO is getting evicted,
435 			 * we need to get a new one
436 			 */
437 			mutex_unlock(&prange->lock);
438 			svm_bo = prange->svm_bo;
439 			f = dma_fence_get(&svm_bo->eviction_fence->base);
440 			svm_range_bo_unref(prange->svm_bo);
441 			/* wait for the fence to avoid long spin-loop
442 			 * at list_empty_careful
443 			 */
444 			dma_fence_wait(f, false);
445 			dma_fence_put(f);
446 		} else {
447 			/* The BO was still around and we got
448 			 * a new reference to it
449 			 */
450 			mutex_unlock(&prange->lock);
451 			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
452 				 prange->svms, prange->start, prange->last);
453 
454 			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
455 			return true;
456 		}
457 
458 	} else {
459 		mutex_unlock(&prange->lock);
460 	}
461 
462 	/* We need a new svm_bo. Spin-loop to wait for concurrent
463 	 * svm_range_bo_release to finish removing this range from
464 	 * its range list. After this, it is safe to reuse the
465 	 * svm_bo pointer and svm_bo_list head.
466 	 */
467 	while (!list_empty_careful(&prange->svm_bo_list))
468 		;
469 
470 	return false;
471 }
472 
473 static struct svm_range_bo *svm_range_bo_new(void)
474 {
475 	struct svm_range_bo *svm_bo;
476 
477 	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
478 	if (!svm_bo)
479 		return NULL;
480 
481 	kref_init(&svm_bo->kref);
482 	INIT_LIST_HEAD(&svm_bo->range_list);
483 	spin_lock_init(&svm_bo->list_lock);
484 
485 	return svm_bo;
486 }
487 
488 int
489 svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
490 			bool clear)
491 {
492 	struct amdgpu_bo_param bp;
493 	struct svm_range_bo *svm_bo;
494 	struct amdgpu_bo_user *ubo;
495 	struct amdgpu_bo *bo;
496 	struct kfd_process *p;
497 	struct mm_struct *mm;
498 	int r;
499 
500 	p = container_of(prange->svms, struct kfd_process, svms);
501 	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
502 		 prange->start, prange->last);
503 
504 	if (svm_range_validate_svm_bo(adev, prange))
505 		return 0;
506 
507 	svm_bo = svm_range_bo_new();
508 	if (!svm_bo) {
509 		pr_debug("failed to alloc svm bo\n");
510 		return -ENOMEM;
511 	}
512 	mm = get_task_mm(p->lead_thread);
513 	if (!mm) {
514 		pr_debug("failed to get mm\n");
515 		kfree(svm_bo);
516 		return -ESRCH;
517 	}
518 	svm_bo->svms = prange->svms;
519 	svm_bo->eviction_fence =
520 		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
521 					   mm,
522 					   svm_bo);
523 	mmput(mm);
524 	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
525 	svm_bo->evicting = 0;
526 	memset(&bp, 0, sizeof(bp));
527 	bp.size = prange->npages * PAGE_SIZE;
528 	bp.byte_align = PAGE_SIZE;
529 	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
530 	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
531 	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
532 	bp.flags |= AMDGPU_AMDKFD_CREATE_SVM_BO;
533 	bp.type = ttm_bo_type_device;
534 	bp.resv = NULL;
535 
536 	r = amdgpu_bo_create_user(adev, &bp, &ubo);
537 	if (r) {
538 		pr_debug("failed %d to create bo\n", r);
539 		goto create_bo_failed;
540 	}
541 	bo = &ubo->bo;
542 	r = amdgpu_bo_reserve(bo, true);
543 	if (r) {
544 		pr_debug("failed %d to reserve bo\n", r);
545 		goto reserve_bo_failed;
546 	}
547 
548 	r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
549 	if (r) {
550 		pr_debug("failed %d to reserve bo\n", r);
551 		amdgpu_bo_unreserve(bo);
552 		goto reserve_bo_failed;
553 	}
554 	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
555 
556 	amdgpu_bo_unreserve(bo);
557 
558 	svm_bo->bo = bo;
559 	prange->svm_bo = svm_bo;
560 	prange->ttm_res = bo->tbo.resource;
561 	prange->offset = 0;
562 
563 	spin_lock(&svm_bo->list_lock);
564 	list_add(&prange->svm_bo_list, &svm_bo->range_list);
565 	spin_unlock(&svm_bo->list_lock);
566 
567 	return 0;
568 
569 reserve_bo_failed:
570 	amdgpu_bo_unref(&bo);
571 create_bo_failed:
572 	dma_fence_put(&svm_bo->eviction_fence->base);
573 	kfree(svm_bo);
574 	prange->ttm_res = NULL;
575 
576 	return r;
577 }
578 
579 void svm_range_vram_node_free(struct svm_range *prange)
580 {
581 	svm_range_bo_unref(prange->svm_bo);
582 	prange->ttm_res = NULL;
583 }
584 
585 struct amdgpu_device *
586 svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
587 {
588 	struct kfd_process_device *pdd;
589 	struct kfd_process *p;
590 	int32_t gpu_idx;
591 
592 	p = container_of(prange->svms, struct kfd_process, svms);
593 
594 	gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
595 	if (gpu_idx < 0) {
596 		pr_debug("failed to get device by id 0x%x\n", gpu_id);
597 		return NULL;
598 	}
599 	pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
600 	if (!pdd) {
601 		pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
602 		return NULL;
603 	}
604 
605 	return pdd->dev->adev;
606 }
607 
608 struct kfd_process_device *
609 svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
610 {
611 	struct kfd_process *p;
612 	int32_t gpu_idx, gpuid;
613 	int r;
614 
615 	p = container_of(prange->svms, struct kfd_process, svms);
616 
617 	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
618 	if (r) {
619 		pr_debug("failed to get device id by adev %p\n", adev);
620 		return NULL;
621 	}
622 
623 	return kfd_process_device_from_gpuidx(p, gpu_idx);
624 }
625 
626 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
627 {
628 	struct ttm_operation_ctx ctx = { false, false };
629 
630 	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
631 
632 	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
633 }
634 
635 static int
636 svm_range_check_attr(struct kfd_process *p,
637 		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
638 {
639 	uint32_t i;
640 
641 	for (i = 0; i < nattr; i++) {
642 		uint32_t val = attrs[i].value;
643 		int gpuidx = MAX_GPU_INSTANCE;
644 
645 		switch (attrs[i].type) {
646 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
647 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
648 			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
649 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
650 			break;
651 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
652 			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
653 				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
654 			break;
655 		case KFD_IOCTL_SVM_ATTR_ACCESS:
656 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
657 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
658 			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
659 			break;
660 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
661 			break;
662 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
663 			break;
664 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
665 			break;
666 		default:
667 			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
668 			return -EINVAL;
669 		}
670 
671 		if (gpuidx < 0) {
672 			pr_debug("no GPU 0x%x found\n", val);
673 			return -EINVAL;
674 		} else if (gpuidx < MAX_GPU_INSTANCE &&
675 			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
676 			pr_debug("GPU 0x%x not supported\n", val);
677 			return -EINVAL;
678 		}
679 	}
680 
681 	return 0;
682 }
683 
684 static void
685 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
686 		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
687 {
688 	uint32_t i;
689 	int gpuidx;
690 
691 	for (i = 0; i < nattr; i++) {
692 		switch (attrs[i].type) {
693 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
694 			prange->preferred_loc = attrs[i].value;
695 			break;
696 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
697 			prange->prefetch_loc = attrs[i].value;
698 			break;
699 		case KFD_IOCTL_SVM_ATTR_ACCESS:
700 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
701 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
702 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
703 							       attrs[i].value);
704 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
705 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
706 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
707 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
708 				bitmap_set(prange->bitmap_access, gpuidx, 1);
709 				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
710 			} else {
711 				bitmap_clear(prange->bitmap_access, gpuidx, 1);
712 				bitmap_set(prange->bitmap_aip, gpuidx, 1);
713 			}
714 			break;
715 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
716 			prange->flags |= attrs[i].value;
717 			break;
718 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
719 			prange->flags &= ~attrs[i].value;
720 			break;
721 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
722 			prange->granularity = attrs[i].value;
723 			break;
724 		default:
725 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
726 		}
727 	}
728 }
729 
730 static bool
731 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
732 			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
733 {
734 	uint32_t i;
735 	int gpuidx;
736 
737 	for (i = 0; i < nattr; i++) {
738 		switch (attrs[i].type) {
739 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
740 			if (prange->preferred_loc != attrs[i].value)
741 				return false;
742 			break;
743 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
744 			/* Prefetch should always trigger a migration even
745 			 * if the value of the attribute didn't change.
746 			 */
747 			return false;
748 		case KFD_IOCTL_SVM_ATTR_ACCESS:
749 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
750 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
751 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
752 							       attrs[i].value);
753 			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
754 				if (test_bit(gpuidx, prange->bitmap_access) ||
755 				    test_bit(gpuidx, prange->bitmap_aip))
756 					return false;
757 			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
758 				if (!test_bit(gpuidx, prange->bitmap_access))
759 					return false;
760 			} else {
761 				if (!test_bit(gpuidx, prange->bitmap_aip))
762 					return false;
763 			}
764 			break;
765 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
766 			if ((prange->flags & attrs[i].value) != attrs[i].value)
767 				return false;
768 			break;
769 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
770 			if ((prange->flags & attrs[i].value) != 0)
771 				return false;
772 			break;
773 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
774 			if (prange->granularity != attrs[i].value)
775 				return false;
776 			break;
777 		default:
778 			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
779 		}
780 	}
781 
782 	return true;
783 }
784 
785 /**
786  * svm_range_debug_dump - print all range information from svms
787  * @svms: svm range list header
788  *
789  * Debug output of svm range start, end and actual location from the svms
790  * interval tree and linked list
791  *
792  * Context: The caller must hold svms->lock
793  */
794 static void svm_range_debug_dump(struct svm_range_list *svms)
795 {
796 	struct interval_tree_node *node;
797 	struct svm_range *prange;
798 
799 	pr_debug("dump svms 0x%p list\n", svms);
800 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
801 
802 	list_for_each_entry(prange, &svms->list, list) {
803 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
804 			 prange, prange->start, prange->npages,
805 			 prange->start + prange->npages - 1,
806 			 prange->actual_loc);
807 	}
808 
809 	pr_debug("dump svms 0x%p interval tree\n", svms);
810 	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
811 	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
812 	while (node) {
813 		prange = container_of(node, struct svm_range, it_node);
814 		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
815 			 prange, prange->start, prange->npages,
816 			 prange->start + prange->npages - 1,
817 			 prange->actual_loc);
818 		node = interval_tree_iter_next(node, 0, ~0ULL);
819 	}
820 }
821 
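/* svm_range_split_array - split a per-page metadata array (e.g. dma_addr)
 *
 * Allocates a new array of new_n elements for the new range, copied from the
 * old array starting at element (new_start - old_start), and a trimmed array
 * of old_n elements for what remains of the old range. On success the old
 * array is freed, *ppold points to the trimmed copy and *ppnew to the new
 * array. Does nothing if the old range has no array allocated.
 */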
822 static int
823 svm_range_split_array(void *ppnew, void *ppold, size_t size,
824 		      uint64_t old_start, uint64_t old_n,
825 		      uint64_t new_start, uint64_t new_n)
826 {
827 	unsigned char *new, *old, *pold;
828 	uint64_t d;
829 
830 	if (!ppold)
831 		return 0;
832 	pold = *(unsigned char **)ppold;
833 	if (!pold)
834 		return 0;
835 
836 	new = kvmalloc_array(new_n, size, GFP_KERNEL);
837 	if (!new)
838 		return -ENOMEM;
839 
840 	d = (new_start - old_start) * size;
841 	memcpy(new, pold + d, new_n * size);
842 
843 	old = kvmalloc_array(old_n, size, GFP_KERNEL);
844 	if (!old) {
845 		kvfree(new);
846 		return -ENOMEM;
847 	}
848 
849 	d = (new_start == old_start) ? new_n * size : 0;
850 	memcpy(old, pold + d, old_n * size);
851 
852 	kvfree(pold);
853 	*(void **)ppold = old;
854 	*(void **)ppnew = new;
855 
856 	return 0;
857 }
858 
859 static int
860 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
861 		      uint64_t start, uint64_t last)
862 {
863 	uint64_t npages = last - start + 1;
864 	int i, r;
865 
866 	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
867 		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
868 					  sizeof(*old->dma_addr[i]), old->start,
869 					  npages, new->start, new->npages);
870 		if (r)
871 			return r;
872 	}
873 
874 	return 0;
875 }
876 
877 static int
878 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
879 		      uint64_t start, uint64_t last)
880 {
881 	uint64_t npages = last - start + 1;
882 
883 	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
884 		 new->svms, new, new->start, start, last);
885 
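	/* Both ranges keep sharing the old svm_bo; only the offsets into the
	 * BO differ: whichever piece becomes the head keeps old->offset, and
	 * the tail piece's offset is advanced by the head's size in pages.
	 */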
886 	if (new->start == old->start) {
887 		new->offset = old->offset;
888 		old->offset += new->npages;
889 	} else {
890 		new->offset = old->offset + npages;
891 	}
892 
893 	new->svm_bo = svm_range_bo_ref(old->svm_bo);
894 	new->ttm_res = old->ttm_res;
895 
896 	spin_lock(&new->svm_bo->list_lock);
897 	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
898 	spin_unlock(&new->svm_bo->list_lock);
899 
900 	return 0;
901 }
902 
903 /**
904  * svm_range_split_adjust - split range and adjust
905  *
906  * @new: new range
907  * @old: the old range
908  * @start: the start address in pages to which the old range is adjusted
909  * @last: the last address in pages to which the old range is adjusted
910  *
911  * Copy system memory dma_addr or vram ttm_res from the old range to the new
912  * range, from new_start up to size new->npages. The remaining old range runs
913  * from start to last.
914  *
915  * Return:
916  * 0 - OK, -ENOMEM - out of memory
917  */
918 static int
919 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
920 		      uint64_t start, uint64_t last)
921 {
922 	int r;
923 
924 	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
925 		 new->svms, new->start, old->start, old->last, start, last);
926 
927 	if (new->start < old->start ||
928 	    new->last > old->last) {
929 		WARN_ONCE(1, "invalid new range start or last\n");
930 		return -EINVAL;
931 	}
932 
933 	r = svm_range_split_pages(new, old, start, last);
934 	if (r)
935 		return r;
936 
937 	if (old->actual_loc && old->ttm_res) {
938 		r = svm_range_split_nodes(new, old, start, last);
939 		if (r)
940 			return r;
941 	}
942 
943 	old->npages = last - start + 1;
944 	old->start = start;
945 	old->last = last;
946 	new->flags = old->flags;
947 	new->preferred_loc = old->preferred_loc;
948 	new->prefetch_loc = old->prefetch_loc;
949 	new->actual_loc = old->actual_loc;
950 	new->granularity = old->granularity;
951 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
952 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
953 
954 	return 0;
955 }
956 
957 /**
958  * svm_range_split - split a range into 2 ranges
959  *
960  * @prange: the svm range to split
961  * @start: the remaining range start address in pages
962  * @last: the remaining range last address in pages
963  * @new: the result new range generated
964  *
965  * Two cases only:
966  * case 1: if start == prange->start
967  *         prange ==> prange[start, last]
968  *         new range [last + 1, prange->last]
969  *
970  * case 2: if last == prange->last
971  *         prange ==> prange[start, last]
972  *         new range [prange->start, start - 1]
973  *
974  * Return:
975  * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
976  */
977 static int
978 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
979 		struct svm_range **new)
980 {
981 	uint64_t old_start = prange->start;
982 	uint64_t old_last = prange->last;
983 	struct svm_range_list *svms;
984 	int r = 0;
985 
986 	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
987 		 old_start, old_last, start, last);
988 
989 	if (old_start != start && old_last != last)
990 		return -EINVAL;
991 	if (start < old_start || last > old_last)
992 		return -EINVAL;
993 
994 	svms = prange->svms;
995 	if (old_start == start)
996 		*new = svm_range_new(svms, last + 1, old_last);
997 	else
998 		*new = svm_range_new(svms, old_start, start - 1);
999 	if (!*new)
1000 		return -ENOMEM;
1001 
1002 	r = svm_range_split_adjust(*new, prange, start, last);
1003 	if (r) {
1004 		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1005 			 r, old_start, old_last, start, last);
1006 		svm_range_free(*new);
1007 		*new = NULL;
1008 	}
1009 
1010 	return r;
1011 }
1012 
1013 static int
1014 svm_range_split_tail(struct svm_range *prange,
1015 		     uint64_t new_last, struct list_head *insert_list)
1016 {
1017 	struct svm_range *tail;
1018 	int r = svm_range_split(prange, prange->start, new_last, &tail);
1019 
1020 	if (!r)
1021 		list_add(&tail->insert_list, insert_list);
1022 	return r;
1023 }
1024 
1025 static int
1026 svm_range_split_head(struct svm_range *prange,
1027 		     uint64_t new_start, struct list_head *insert_list)
1028 {
1029 	struct svm_range *head;
1030 	int r = svm_range_split(prange, new_start, prange->last, &head);
1031 
1032 	if (!r)
1033 		list_add(&head->insert_list, insert_list);
1034 	return r;
1035 }
1036 
1037 static void
1038 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1039 		    struct svm_range *pchild, enum svm_work_list_ops op)
1040 {
1041 	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1042 		 pchild, pchild->start, pchild->last, prange, op);
1043 
1044 	pchild->work_item.mm = mm;
1045 	pchild->work_item.op = op;
1046 	list_add_tail(&pchild->child_list, &prange->child_list);
1047 }
1048 
1049 /**
1050  * svm_range_split_by_granularity - split a range at granularity boundaries
1051  *
1052  * @p: the process with svms list
1053  * @mm: mm structure
1054  * @addr: the vm fault address in pages, to split the prange
1055  * @parent: parent range if prange is from child list
1056  * @prange: prange to split
1057  *
1058  * Trims @prange to be a single aligned block of prange->granularity if
1059  * possible. The head and tail are added to the child_list in @parent.
1060  *
1061  * Context: caller must hold mmap_read_lock and prange->lock
1062  *
1063  * Return:
1064  * 0 - OK, otherwise error code
1065  */
1066 int
1067 svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
1068 			       unsigned long addr, struct svm_range *parent,
1069 			       struct svm_range *prange)
1070 {
1071 	struct svm_range *head, *tail;
1072 	unsigned long start, last, size;
1073 	int r;
1074 
1075 	/* Align the split range start and size to the granularity size, so a
1076 	 * single PTE can be used for the whole range. This reduces the number
1077 	 * of PTEs updated and the L1 TLB space used for translation.
1078 	 */
1079 	size = 1UL << prange->granularity;
1080 	start = ALIGN_DOWN(addr, size);
1081 	last = ALIGN(addr + 1, size) - 1;
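	/* For example, with the default granularity of 9 (512 pages = 0x200),
	 * a fault at page 0x12345 yields start = 0x12200 and last = 0x123ff.
	 */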
1082 
1083 	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
1084 		 prange->svms, prange->start, prange->last, start, last, size);
1085 
1086 	if (start > prange->start) {
1087 		r = svm_range_split(prange, start, prange->last, &head);
1088 		if (r)
1089 			return r;
1090 		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
1091 	}
1092 
1093 	if (last < prange->last) {
1094 		r = svm_range_split(prange, prange->start, last, &tail);
1095 		if (r)
1096 			return r;
1097 		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
1098 	}
1099 
1100 	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
1101 	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
1102 		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
1103 		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
1104 			 prange, prange->start, prange->last,
1105 			 SVM_OP_ADD_RANGE_AND_MAP);
1106 	}
1107 	return 0;
1108 }
1109 
1110 static uint64_t
1111 svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
1112 			int domain)
1113 {
1114 	struct amdgpu_device *bo_adev;
1115 	uint32_t flags = prange->flags;
1116 	uint32_t mapping_flags = 0;
1117 	uint64_t pte_flags;
1118 	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1119 	bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
1120 
1121 	if (domain == SVM_RANGE_VRAM_DOMAIN)
1122 		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
1123 
1124 	switch (KFD_GC_VERSION(adev->kfd.dev)) {
1125 	case IP_VERSION(9, 4, 1):
1126 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1127 			if (bo_adev == adev) {
1128 				mapping_flags |= coherent ?
1129 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1130 			} else {
1131 				mapping_flags |= coherent ?
1132 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1133 				if (amdgpu_xgmi_same_hive(adev, bo_adev))
1134 					snoop = true;
1135 			}
1136 		} else {
1137 			mapping_flags |= coherent ?
1138 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1139 		}
1140 		break;
1141 	case IP_VERSION(9, 4, 2):
1142 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
1143 			if (bo_adev == adev) {
1144 				mapping_flags |= coherent ?
1145 					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1146 				if (adev->gmc.xgmi.connected_to_cpu)
1147 					snoop = true;
1148 			} else {
1149 				mapping_flags |= coherent ?
1150 					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1151 				if (amdgpu_xgmi_same_hive(adev, bo_adev))
1152 					snoop = true;
1153 			}
1154 		} else {
1155 			mapping_flags |= coherent ?
1156 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1157 		}
1158 		break;
1159 	default:
1160 		mapping_flags |= coherent ?
1161 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1162 	}
1163 
1164 	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1165 
1166 	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1167 		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1168 	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1169 		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1170 
1171 	pte_flags = AMDGPU_PTE_VALID;
1172 	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1173 	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1174 
1175 	pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
1176 	return pte_flags;
1177 }
1178 
1179 static int
1180 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1181 			 uint64_t start, uint64_t last,
1182 			 struct dma_fence **fence)
1183 {
1184 	uint64_t init_pte_value = 0;
1185 
1186 	pr_debug("[0x%llx 0x%llx]\n", start, last);
1187 
1188 	return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
1189 					   start, last, init_pte_value, 0,
1190 					   NULL, NULL, fence, NULL);
1191 }
1192 
1193 static int
1194 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1195 			  unsigned long last)
1196 {
1197 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1198 	struct kfd_process_device *pdd;
1199 	struct dma_fence *fence = NULL;
1200 	struct kfd_process *p;
1201 	uint32_t gpuidx;
1202 	int r = 0;
1203 
1204 	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1205 		  MAX_GPU_INSTANCE);
1206 	p = container_of(prange->svms, struct kfd_process, svms);
1207 
1208 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1209 		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1210 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1211 		if (!pdd) {
1212 			pr_debug("failed to find device idx %d\n", gpuidx);
1213 			return -EINVAL;
1214 		}
1215 
1216 		r = svm_range_unmap_from_gpu(pdd->dev->adev,
1217 					     drm_priv_to_vm(pdd->drm_priv),
1218 					     start, last, &fence);
1219 		if (r)
1220 			break;
1221 
1222 		if (fence) {
1223 			r = dma_fence_wait(fence, false);
1224 			dma_fence_put(fence);
1225 			fence = NULL;
1226 			if (r)
1227 				break;
1228 		}
1229 		amdgpu_amdkfd_flush_gpu_tlb_pasid(pdd->dev->adev,
1230 					p->pasid, TLB_FLUSH_HEAVYWEIGHT);
1231 	}
1232 
1233 	return r;
1234 }
1235 
1236 static int
1237 svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1238 		     struct svm_range *prange, unsigned long offset,
1239 		     unsigned long npages, bool readonly, dma_addr_t *dma_addr,
1240 		     struct amdgpu_device *bo_adev, struct dma_fence **fence)
1241 {
1242 	bool table_freed = false;
1243 	uint64_t pte_flags;
1244 	unsigned long last_start;
1245 	int last_domain;
1246 	int r = 0;
1247 	int64_t i, j;
1248 
1249 	last_start = prange->start + offset;
1250 
1251 	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1252 		 last_start, last_start + npages - 1, readonly);
1253 
1254 	for (i = offset; i < offset + npages; i++) {
1255 		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1256 		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1257 
1258 		/* Collect all pages in the same address range and memory domain
1259 		 * that can be mapped with a single call to update mapping.
1260 		 */
1261 		if (i < offset + npages - 1 &&
1262 		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1263 			continue;
1264 
1265 		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1266 			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1267 
1268 		pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
1269 		if (readonly)
1270 			pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1271 
1272 		pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1273 			 prange->svms, last_start, prange->start + i,
1274 			 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1275 			 pte_flags);
1276 
1277 		r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
1278 						NULL, last_start,
1279 						prange->start + i, pte_flags,
1280 						last_start - prange->start,
1281 						NULL, dma_addr,
1282 						&vm->last_update,
1283 						&table_freed);
1284 
1285 		for (j = last_start - prange->start; j <= i; j++)
1286 			dma_addr[j] |= last_domain;
1287 
1288 		if (r) {
1289 			pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1290 			goto out;
1291 		}
1292 		last_start = prange->start + i + 1;
1293 	}
1294 
1295 	r = amdgpu_vm_update_pdes(adev, vm, false);
1296 	if (r) {
1297 		pr_debug("failed %d to update directories 0x%lx\n", r,
1298 			 prange->start);
1299 		goto out;
1300 	}
1301 
1302 	if (fence)
1303 		*fence = dma_fence_get(vm->last_update);
1304 
1305 	if (table_freed) {
1306 		struct kfd_process *p;
1307 
1308 		p = container_of(prange->svms, struct kfd_process, svms);
1309 		amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, p->pasid, TLB_FLUSH_LEGACY);
1310 	}
1311 out:
1312 	return r;
1313 }
1314 
1315 static int
1316 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1317 		      unsigned long npages, bool readonly,
1318 		      unsigned long *bitmap, bool wait)
1319 {
1320 	struct kfd_process_device *pdd;
1321 	struct amdgpu_device *bo_adev;
1322 	struct kfd_process *p;
1323 	struct dma_fence *fence = NULL;
1324 	uint32_t gpuidx;
1325 	int r = 0;
1326 
1327 	if (prange->svm_bo && prange->ttm_res)
1328 		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
1329 	else
1330 		bo_adev = NULL;
1331 
1332 	p = container_of(prange->svms, struct kfd_process, svms);
1333 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1334 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1335 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1336 		if (!pdd) {
1337 			pr_debug("failed to find device idx %d\n", gpuidx);
1338 			return -EINVAL;
1339 		}
1340 
1341 		pdd = kfd_bind_process_to_device(pdd->dev, p);
1342 		if (IS_ERR(pdd))
1343 			return -EINVAL;
1344 
1345 		if (bo_adev && pdd->dev->adev != bo_adev &&
1346 		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1347 			pr_debug("cannot map to device idx %d\n", gpuidx);
1348 			continue;
1349 		}
1350 
1351 		r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv),
1352 					 prange, offset, npages, readonly,
1353 					 prange->dma_addr[gpuidx],
1354 					 bo_adev, wait ? &fence : NULL);
1355 		if (r)
1356 			break;
1357 
1358 		if (fence) {
1359 			r = dma_fence_wait(fence, false);
1360 			dma_fence_put(fence);
1361 			fence = NULL;
1362 			if (r) {
1363 				pr_debug("failed %d to dma fence wait\n", r);
1364 				break;
1365 			}
1366 		}
1367 	}
1368 
1369 	return r;
1370 }
1371 
1372 struct svm_validate_context {
1373 	struct kfd_process *process;
1374 	struct svm_range *prange;
1375 	bool intr;
1376 	unsigned long bitmap[MAX_GPU_INSTANCE];
1377 	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
1378 	struct list_head validate_list;
1379 	struct ww_acquire_ctx ticket;
1380 };
1381 
1382 static int svm_range_reserve_bos(struct svm_validate_context *ctx)
1383 {
1384 	struct kfd_process_device *pdd;
1385 	struct amdgpu_vm *vm;
1386 	uint32_t gpuidx;
1387 	int r;
1388 
1389 	INIT_LIST_HEAD(&ctx->validate_list);
1390 	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1391 		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1392 		if (!pdd) {
1393 			pr_debug("failed to find device idx %d\n", gpuidx);
1394 			return -EINVAL;
1395 		}
1396 		vm = drm_priv_to_vm(pdd->drm_priv);
1397 
1398 		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
1399 		ctx->tv[gpuidx].num_shared = 4;
1400 		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
1401 	}
1402 
1403 	r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
1404 				   ctx->intr, NULL);
1405 	if (r) {
1406 		pr_debug("failed %d to reserve bo\n", r);
1407 		return r;
1408 	}
1409 
1410 	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1411 		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1412 		if (!pdd) {
1413 			pr_debug("failed to find device idx %d\n", gpuidx);
1414 			r = -EINVAL;
1415 			goto unreserve_out;
1416 		}
1417 
1418 		r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
1419 					      drm_priv_to_vm(pdd->drm_priv),
1420 					      svm_range_bo_validate, NULL);
1421 		if (r) {
1422 			pr_debug("failed %d validate pt bos\n", r);
1423 			goto unreserve_out;
1424 		}
1425 	}
1426 
1427 	return 0;
1428 
1429 unreserve_out:
1430 	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1431 	return r;
1432 }
1433 
1434 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1435 {
1436 	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1437 }
1438 
1439 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1440 {
1441 	struct kfd_process_device *pdd;
1442 
1443 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1444 
1445 	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1446 }
1447 
1448 /*
1449  * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1450  *
1451  * To prevent concurrent destruction or change of range attributes, the
1452  * svm_read_lock must be held. The caller must not hold the svm_write_lock
1453  * because that would block concurrent evictions and lead to deadlocks. To
1454  * serialize concurrent migrations or validations of the same range, the
1455  * prange->migrate_mutex must be held.
1456  *
1457  * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1458  * eviction fence).
1459  *
1460  * The following sequence ensures race-free validation and GPU mapping:
1461  *
1462  * 1. Reserve page table (and SVM BO if range is in VRAM)
1463  * 2. hmm_range_fault to get page addresses (if system memory)
1464  * 3. DMA-map pages (if system memory)
1465  * 4-a. Take notifier lock
1466  * 4-b. Check that pages still valid (mmu_interval_read_retry)
1467  * 4-c. Check that the range was not split or otherwise invalidated
1468  * 4-d. Update GPU page table
1469  * 4-e. Release notifier lock
1470  * 5. Release page table (and SVM BO) reservation
1471  */
1472 static int svm_range_validate_and_map(struct mm_struct *mm,
1473 				      struct svm_range *prange,
1474 				      int32_t gpuidx, bool intr, bool wait)
1475 {
1476 	struct svm_validate_context ctx;
1477 	unsigned long start, end, addr;
1478 	struct kfd_process *p;
1479 	void *owner;
1480 	int32_t idx;
1481 	int r = 0;
1482 
1483 	ctx.process = container_of(prange->svms, struct kfd_process, svms);
1484 	ctx.prange = prange;
1485 	ctx.intr = intr;
1486 
1487 	if (gpuidx < MAX_GPU_INSTANCE) {
1488 		bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
1489 		bitmap_set(ctx.bitmap, gpuidx, 1);
1490 	} else if (ctx.process->xnack_enabled) {
1491 		bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1492 
1493 		/* If the range was prefetched to a GPU, or a GPU retry fault
1494 		 * migrated the range to a GPU with the ACCESS attribute for
1495 		 * the range, create the mapping on that GPU.
1496 		 */
1497 		if (prange->actual_loc) {
1498 			gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
1499 							prange->actual_loc);
1500 			if (gpuidx < 0) {
1501 				WARN_ONCE(1, "failed get device by id 0x%x\n",
1502 					 prange->actual_loc);
1503 				return -EINVAL;
1504 			}
1505 			if (test_bit(gpuidx, prange->bitmap_access))
1506 				bitmap_set(ctx.bitmap, gpuidx, 1);
1507 		}
1508 	} else {
1509 		bitmap_or(ctx.bitmap, prange->bitmap_access,
1510 			  prange->bitmap_aip, MAX_GPU_INSTANCE);
1511 	}
1512 
1513 	if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE))
1514 		return 0;
1515 
1516 	if (prange->actual_loc && !prange->ttm_res) {
1517 		/* This should never happen. actual_loc gets set by
1518 		 * svm_migrate_ram_to_vram after allocating a BO.
1519 		 */
1520 		WARN_ONCE(1, "VRAM BO missing during validation\n");
1521 		return -EINVAL;
1522 	}
1523 
1524 	svm_range_reserve_bos(&ctx);
1525 
1526 	p = container_of(prange->svms, struct kfd_process, svms);
1527 	owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
1528 						MAX_GPU_INSTANCE));
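	/* hmm_range_fault only returns device-private pages whose pgmap owner
	 * matches the passed owner; if the selected GPUs do not share one
	 * owner, fall back to NULL so VRAM pages are migrated back to system
	 * memory before mapping.
	 */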
1529 	for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
1530 		if (kfd_svm_page_owner(p, idx) != owner) {
1531 			owner = NULL;
1532 			break;
1533 		}
1534 	}
1535 
1536 	start = prange->start << PAGE_SHIFT;
1537 	end = (prange->last + 1) << PAGE_SHIFT;
1538 	for (addr = start; addr < end && !r; ) {
1539 		struct hmm_range *hmm_range;
1540 		struct vm_area_struct *vma;
1541 		unsigned long next;
1542 		unsigned long offset;
1543 		unsigned long npages;
1544 		bool readonly;
1545 
1546 		vma = find_vma(mm, addr);
1547 		if (!vma || addr < vma->vm_start) {
1548 			r = -EFAULT;
1549 			goto unreserve_out;
1550 		}
1551 		readonly = !(vma->vm_flags & VM_WRITE);
1552 
1553 		next = min(vma->vm_end, end);
1554 		npages = (next - addr) >> PAGE_SHIFT;
1555 		WRITE_ONCE(p->svms.faulting_task, current);
1556 		r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
1557 					       addr, npages, &hmm_range,
1558 					       readonly, true, owner);
1559 		WRITE_ONCE(p->svms.faulting_task, NULL);
1560 		if (r) {
1561 			pr_debug("failed %d to get svm range pages\n", r);
1562 			goto unreserve_out;
1563 		}
1564 
1565 		offset = (addr - start) >> PAGE_SHIFT;
1566 		r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
1567 				      hmm_range->hmm_pfns);
1568 		if (r) {
1569 			pr_debug("failed %d to dma map range\n", r);
1570 			goto unreserve_out;
1571 		}
1572 
1573 		svm_range_lock(prange);
1574 		if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
1575 			pr_debug("hmm update the range, need validate again\n");
1576 			r = -EAGAIN;
1577 			goto unlock_out;
1578 		}
1579 		if (!list_empty(&prange->child_list)) {
1580 			pr_debug("range split by unmap in parallel, validate again\n");
1581 			r = -EAGAIN;
1582 			goto unlock_out;
1583 		}
1584 
1585 		r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1586 					  ctx.bitmap, wait);
1587 
1588 unlock_out:
1589 		svm_range_unlock(prange);
1590 
1591 		addr = next;
1592 	}
1593 
1594 	if (addr == end)
1595 		prange->validated_once = true;
1596 
1597 unreserve_out:
1598 	svm_range_unreserve_bos(&ctx);
1599 
1600 	if (!r)
1601 		prange->validate_timestamp = ktime_to_us(ktime_get());
1602 
1603 	return r;
1604 }
1605 
1606 /**
1607  * svm_range_list_lock_and_flush_work - flush pending deferred work
1608  *
1609  * @svms: the svm range list
1610  * @mm: the mm structure
1611  *
1612  * Context: Returns with mmap write lock held, pending deferred work flushed
1613  *
1614  */
1615 void
1616 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1617 				   struct mm_struct *mm)
1618 {
1619 retry_flush_work:
1620 	flush_work(&svms->deferred_list_work);
1621 	mmap_write_lock(mm);
1622 
1623 	if (list_empty(&svms->deferred_range_list))
1624 		return;
1625 	mmap_write_unlock(mm);
1626 	pr_debug("retry flush\n");
1627 	goto retry_flush_work;
1628 }
1629 
1630 static void svm_range_restore_work(struct work_struct *work)
1631 {
1632 	struct delayed_work *dwork = to_delayed_work(work);
1633 	struct svm_range_list *svms;
1634 	struct svm_range *prange;
1635 	struct kfd_process *p;
1636 	struct mm_struct *mm;
1637 	int evicted_ranges;
1638 	int invalid;
1639 	int r;
1640 
1641 	svms = container_of(dwork, struct svm_range_list, restore_work);
1642 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1643 	if (!evicted_ranges)
1644 		return;
1645 
1646 	pr_debug("restore svm ranges\n");
1647 
1648 	/* kfd_process_notifier_release destroys this worker thread. So during
1649 	 * the lifetime of this thread, kfd_process and mm will be valid.
1650 	 */
1651 	p = container_of(svms, struct kfd_process, svms);
1652 	mm = p->mm;
1653 	if (!mm)
1654 		return;
1655 
1656 	svm_range_list_lock_and_flush_work(svms, mm);
1657 	mutex_lock(&svms->lock);
1658 
1659 	evicted_ranges = atomic_read(&svms->evicted_ranges);
1660 
1661 	list_for_each_entry(prange, &svms->list, list) {
1662 		invalid = atomic_read(&prange->invalid);
1663 		if (!invalid)
1664 			continue;
1665 
1666 		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1667 			 prange->svms, prange, prange->start, prange->last,
1668 			 invalid);
1669 
1670 		/*
1671 		 * If the range is migrating, wait until the migration is done.
1672 		 */
1673 		mutex_lock(&prange->migrate_mutex);
1674 
1675 		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1676 					       false, true);
1677 		if (r)
1678 			pr_debug("failed %d to map 0x%lx to gpus\n", r,
1679 				 prange->start);
1680 
1681 		mutex_unlock(&prange->migrate_mutex);
1682 		if (r)
1683 			goto out_reschedule;
1684 
1685 		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1686 			goto out_reschedule;
1687 	}
1688 
1689 	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1690 	    evicted_ranges)
1691 		goto out_reschedule;
1692 
1693 	evicted_ranges = 0;
1694 
1695 	r = kgd2kfd_resume_mm(mm);
1696 	if (r) {
1697 		/* No recovery from this failure. Probably the CP is
1698 		 * hanging. No point trying again.
1699 		 */
1700 		pr_debug("failed %d to resume KFD\n", r);
1701 	}
1702 
1703 	pr_debug("restore svm ranges successfully\n");
1704 
1705 out_reschedule:
1706 	mutex_unlock(&svms->lock);
1707 	mmap_write_unlock(mm);
1708 
1709 	/* If validation failed, reschedule another attempt */
1710 	if (evicted_ranges) {
1711 		pr_debug("reschedule to restore svm range\n");
1712 		schedule_delayed_work(&svms->restore_work,
1713 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1714 	}
1715 }
1716 
1717 /**
1718  * svm_range_evict - evict svm range
1719  * @prange: svm range structure
1720  * @mm: current process mm_struct
1721  * @start: first page of the invalidated range, in pages
1722  * @last: last page of the invalidated range, in pages
1723  *
1724  * Stop all queues of the process to ensure the GPU doesn't access the memory,
1725  * then return to let the CPU evict the buffer and proceed with the CPU page
1726  * table update.
1727  *
1728  * No lock is needed to sync CPU page table invalidation with GPU execution.
1729  * If an invalidation happens while the restore work is running, the restore
1730  * work restarts to map the latest CPU pages to the GPU and then starts the queues.
1731  */
1732 static int
1733 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1734 		unsigned long start, unsigned long last)
1735 {
1736 	struct svm_range_list *svms = prange->svms;
1737 	struct svm_range *pchild;
1738 	struct kfd_process *p;
1739 	int r = 0;
1740 
1741 	p = container_of(svms, struct kfd_process, svms);
1742 
1743 	pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1744 		 svms, prange->start, prange->last, start, last);
1745 
1746 	if (!p->xnack_enabled) {
1747 		int evicted_ranges;
1748 
1749 		list_for_each_entry(pchild, &prange->child_list, child_list) {
1750 			mutex_lock_nested(&pchild->lock, 1);
1751 			if (pchild->start <= last && pchild->last >= start) {
1752 				pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1753 					 pchild->start, pchild->last);
1754 				atomic_inc(&pchild->invalid);
1755 			}
1756 			mutex_unlock(&pchild->lock);
1757 		}
1758 
1759 		if (prange->start <= last && prange->last >= start)
1760 			atomic_inc(&prange->invalid);
1761 
1762 		evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1763 		if (evicted_ranges != 1)
1764 			return r;
1765 
1766 		pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1767 			 prange->svms, prange->start, prange->last);
1768 
1769 		/* First eviction, stop the queues */
1770 		r = kgd2kfd_quiesce_mm(mm);
1771 		if (r)
1772 			pr_debug("failed to quiesce KFD\n");
1773 
1774 		pr_debug("schedule to restore svm %p ranges\n", svms);
1775 		schedule_delayed_work(&svms->restore_work,
1776 			msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1777 	} else {
1778 		unsigned long s, l;
1779 
1780 		pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1781 			 prange->svms, start, last);
1782 		list_for_each_entry(pchild, &prange->child_list, child_list) {
1783 			mutex_lock_nested(&pchild->lock, 1);
1784 			s = max(start, pchild->start);
1785 			l = min(last, pchild->last);
1786 			if (l >= s)
1787 				svm_range_unmap_from_gpus(pchild, s, l);
1788 			mutex_unlock(&pchild->lock);
1789 		}
1790 		s = max(start, prange->start);
1791 		l = min(last, prange->last);
1792 		if (l >= s)
1793 			svm_range_unmap_from_gpus(prange, s, l);
1794 	}
1795 
1796 	return r;
1797 }
1798 
1799 static struct svm_range *svm_range_clone(struct svm_range *old)
1800 {
1801 	struct svm_range *new;
1802 
1803 	new = svm_range_new(old->svms, old->start, old->last);
1804 	if (!new)
1805 		return NULL;
1806 
1807 	if (old->svm_bo) {
1808 		new->ttm_res = old->ttm_res;
1809 		new->offset = old->offset;
1810 		new->svm_bo = svm_range_bo_ref(old->svm_bo);
1811 		spin_lock(&new->svm_bo->list_lock);
1812 		list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1813 		spin_unlock(&new->svm_bo->list_lock);
1814 	}
1815 	new->flags = old->flags;
1816 	new->preferred_loc = old->preferred_loc;
1817 	new->prefetch_loc = old->prefetch_loc;
1818 	new->actual_loc = old->actual_loc;
1819 	new->granularity = old->granularity;
1820 	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1821 	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1822 
1823 	return new;
1824 }
1825 
1826 /**
1827  * svm_range_add - add svm range and handle overlap
1828  * @p: the process whose svms the range is added to
1829  * @start: range start address, in pages
1830  * @size: range size, in pages
1831  * @nattr: number of attributes
1832  * @attrs: array of attributes
1833  * @update_list: output, the ranges that need validation and GPU mapping update
1834  * @insert_list: output, the ranges that need to be inserted into the svms
1835  * @remove_list: output, the ranges that are replaced and need to be removed from the svms
1836  *
1837  * Check if the virtual address range has overlap with any existing ranges,
1838  * split partly overlapping ranges and add new ranges in the gaps. All changes
1839  * should be applied to the range_list and interval tree transactionally. If
1840  * any range split or allocation fails, the entire update fails. Therefore any
1841  * existing overlapping svm_ranges are cloned and the original svm_ranges are
1842  * left unchanged.
1843  *
1844  * If the transaction succeeds, the caller can update and insert clones and
1845  * new ranges, then free the originals.
1846  *
1847  * Otherwise the caller can free the clones and new ranges, while the old
1848  * svm_ranges remain unchanged.
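 *
 * Worked example (illustrative page numbers, attributes assumed to differ):
 * with an existing range [0x1000 0x1fff] and an update covering
 * [0x1800 0x27ff], the old range goes on remove_list and is cloned; the
 * clone is split into [0x1000 0x17ff] and [0x1800 0x1fff], and a new range
 * [0x2000 0x27ff] is created for the uncovered tail. All three pieces go on
 * insert_list, and the two pieces inside the update range go on update_list.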
1849  *
1850  * Context: Process context, caller must hold svms->lock
1851  *
1852  * Return:
1853  * 0 - OK, otherwise error code
1854  */
1855 static int
1856 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
1857 	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
1858 	      struct list_head *update_list, struct list_head *insert_list,
1859 	      struct list_head *remove_list)
1860 {
1861 	unsigned long last = start + size - 1UL;
1862 	struct svm_range_list *svms = &p->svms;
1863 	struct interval_tree_node *node;
1864 	struct svm_range *prange;
1865 	struct svm_range *tmp;
1866 	int r = 0;
1867 
1868 	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
1869 
1870 	INIT_LIST_HEAD(update_list);
1871 	INIT_LIST_HEAD(insert_list);
1872 	INIT_LIST_HEAD(remove_list);
1873 
1874 	node = interval_tree_iter_first(&svms->objects, start, last);
1875 	while (node) {
1876 		struct interval_tree_node *next;
1877 		unsigned long next_start;
1878 
1879 		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
1880 			 node->last);
1881 
1882 		prange = container_of(node, struct svm_range, it_node);
1883 		next = interval_tree_iter_next(node, start, last);
1884 		next_start = min(node->last, last) + 1;
1885 
1886 		if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
1887 			/* nothing to do */
1888 		} else if (node->start < start || node->last > last) {
1889 			/* node intersects the update range and its attributes
1890 			 * will change. Clone and split it, apply updates only
1891 			 * to the overlapping part
1892 			 */
1893 			struct svm_range *old = prange;
1894 
1895 			prange = svm_range_clone(old);
1896 			if (!prange) {
1897 				r = -ENOMEM;
1898 				goto out;
1899 			}
1900 
1901 			list_add(&old->remove_list, remove_list);
1902 			list_add(&prange->insert_list, insert_list);
1903 			list_add(&prange->update_list, update_list);
1904 
1905 			if (node->start < start) {
1906 				pr_debug("change old range start\n");
1907 				r = svm_range_split_head(prange, start,
1908 							 insert_list);
1909 				if (r)
1910 					goto out;
1911 			}
1912 			if (node->last > last) {
1913 				pr_debug("change old range last\n");
1914 				r = svm_range_split_tail(prange, last,
1915 							 insert_list);
1916 				if (r)
1917 					goto out;
1918 			}
1919 		} else {
1920 			/* The node is contained within start..last,
1921 			 * just update it
1922 			 */
1923 			list_add(&prange->update_list, update_list);
1924 		}
1925 
1926 		/* insert a new node if needed */
1927 		if (node->start > start) {
1928 			prange = svm_range_new(svms, start, node->start - 1);
1929 			if (!prange) {
1930 				r = -ENOMEM;
1931 				goto out;
1932 			}
1933 
1934 			list_add(&prange->insert_list, insert_list);
1935 			list_add(&prange->update_list, update_list);
1936 		}
1937 
1938 		node = next;
1939 		start = next_start;
1940 	}
1941 
1942 	/* add a final range at the end if needed */
1943 	if (start <= last) {
1944 		prange = svm_range_new(svms, start, last);
1945 		if (!prange) {
1946 			r = -ENOMEM;
1947 			goto out;
1948 		}
1949 		list_add(&prange->insert_list, insert_list);
1950 		list_add(&prange->update_list, update_list);
1951 	}
1952 
1953 out:
1954 	if (r)
1955 		list_for_each_entry_safe(prange, tmp, insert_list, insert_list)
1956 			svm_range_free(prange);
1957 
1958 	return r;
1959 }
1960 
1961 static void
1962 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
1963 					    struct svm_range *prange)
1964 {
1965 	unsigned long start;
1966 	unsigned long last;
1967 
1968 	start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
1969 	last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
1970 
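	/* start/last reflect what the notifier currently covers; if a deferred
	 * split or unmap changed prange->start/last, re-register the interval
	 * tree node and the notifier with the new bounds below.
	 */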
1971 	if (prange->start == start && prange->last == last)
1972 		return;
1973 
1974 	pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1975 		  prange->svms, prange, start, last, prange->start,
1976 		  prange->last);
1977 
1978 	if (start != 0 && last != 0) {
1979 		interval_tree_remove(&prange->it_node, &prange->svms->objects);
1980 		svm_range_remove_notifier(prange);
1981 	}
1982 	prange->it_node.start = prange->start;
1983 	prange->it_node.last = prange->last;
1984 
1985 	interval_tree_insert(&prange->it_node, &prange->svms->objects);
1986 	svm_range_add_notifier_locked(mm, prange);
1987 }
1988 
1989 static void
1990 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
1991 {
1992 	struct mm_struct *mm = prange->work_item.mm;
1993 
1994 	switch (prange->work_item.op) {
1995 	case SVM_OP_NULL:
1996 		pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
1997 			 svms, prange, prange->start, prange->last);
1998 		break;
1999 	case SVM_OP_UNMAP_RANGE:
2000 		pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2001 			 svms, prange, prange->start, prange->last);
2002 		svm_range_unlink(prange);
2003 		svm_range_remove_notifier(prange);
2004 		svm_range_free(prange);
2005 		break;
2006 	case SVM_OP_UPDATE_RANGE_NOTIFIER:
2007 		pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2008 			 svms, prange, prange->start, prange->last);
2009 		svm_range_update_notifier_and_interval_tree(mm, prange);
2010 		break;
2011 	case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2012 		pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2013 			 svms, prange, prange->start, prange->last);
2014 		svm_range_update_notifier_and_interval_tree(mm, prange);
2015 		/* TODO: implement deferred validation and mapping */
2016 		break;
2017 	case SVM_OP_ADD_RANGE:
2018 		pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2019 			 prange->start, prange->last);
2020 		svm_range_add_to_svms(prange);
2021 		svm_range_add_notifier_locked(mm, prange);
2022 		break;
2023 	case SVM_OP_ADD_RANGE_AND_MAP:
2024 		pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2025 			 prange, prange->start, prange->last);
2026 		svm_range_add_to_svms(prange);
2027 		svm_range_add_notifier_locked(mm, prange);
2028 		/* TODO: implement deferred validation and mapping */
2029 		break;
2030 	default:
2031 		WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2032 			 prange->work_item.op);
2033 	}
2034 }
2035 
2036 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2037 {
2038 	struct kfd_process_device *pdd;
2039 	struct kfd_process *p;
2040 	int drain;
2041 	uint32_t i;
2042 
2043 	p = container_of(svms, struct kfd_process, svms);
2044 
2045 restart:
2046 	drain = atomic_read(&svms->drain_pagefaults);
2047 	if (!drain)
2048 		return;
2049 
2050 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2051 		pdd = p->pdds[i];
2052 		if (!pdd)
2053 			continue;
2054 
2055 		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2056 
2057 		amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2058 						     &pdd->dev->adev->irq.ih1);
2059 		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2060 	}
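	/* If more drain requests arrived while draining, the counter no longer
	 * matches the value sampled above; go around again.
	 */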
2061 	if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
2062 		goto restart;
2063 }
2064 
2065 static void svm_range_deferred_list_work(struct work_struct *work)
2066 {
2067 	struct svm_range_list *svms;
2068 	struct svm_range *prange;
2069 	struct mm_struct *mm;
2070 	struct kfd_process *p;
2071 
2072 	svms = container_of(work, struct svm_range_list, deferred_list_work);
2073 	pr_debug("enter svms 0x%p\n", svms);
2074 
2075 	p = container_of(svms, struct kfd_process, svms);
2076 	/* Take an mm reference so mm doesn't go away while inserting the mmu notifier */
2077 	mm = get_task_mm(p->lead_thread);
2078 	if (!mm) {
2079 		pr_debug("svms 0x%p process mm gone\n", svms);
2080 		return;
2081 	}
2082 retry:
2083 	mmap_write_lock(mm);
2084 
2085 	/* Checking for the need to drain retry faults must be inside
2086 	 * mmap write lock to serialize with munmap notifiers.
2087 	 */
2088 	if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2089 		mmap_write_unlock(mm);
2090 		svm_range_drain_retry_fault(svms);
2091 		goto retry;
2092 	}
2093 
2094 	spin_lock(&svms->deferred_list_lock);
2095 	while (!list_empty(&svms->deferred_range_list)) {
2096 		prange = list_first_entry(&svms->deferred_range_list,
2097 					  struct svm_range, deferred_list);
2098 		list_del_init(&prange->deferred_list);
2099 		spin_unlock(&svms->deferred_list_lock);
2100 
2101 		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2102 			 prange->start, prange->last, prange->work_item.op);
2103 
2104 		mutex_lock(&svms->lock);
2105 		mutex_lock(&prange->migrate_mutex);
2106 		while (!list_empty(&prange->child_list)) {
2107 			struct svm_range *pchild;
2108 
2109 			pchild = list_first_entry(&prange->child_list,
2110 						struct svm_range, child_list);
2111 			pr_debug("child prange 0x%p op %d\n", pchild,
2112 				 pchild->work_item.op);
2113 			list_del_init(&pchild->child_list);
2114 			svm_range_handle_list_op(svms, pchild);
2115 		}
2116 		mutex_unlock(&prange->migrate_mutex);
2117 
2118 		svm_range_handle_list_op(svms, prange);
2119 		mutex_unlock(&svms->lock);
2120 
2121 		spin_lock(&svms->deferred_list_lock);
2122 	}
2123 	spin_unlock(&svms->deferred_list_lock);
2124 
2125 	mmap_write_unlock(mm);
2126 	mmput(mm);
2127 	pr_debug("exit svms 0x%p\n", svms);
2128 }
2129 
2130 void
2131 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2132 			struct mm_struct *mm, enum svm_work_list_ops op)
2133 {
2134 	spin_lock(&svms->deferred_list_lock);
2135 	/* if prange is on the deferred list */
2136 	if (!list_empty(&prange->deferred_list)) {
2137 		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2138 		WARN_ONCE(prange->work_item.mm != mm, "unmatched mm\n");
2139 		if (op != SVM_OP_NULL &&
2140 		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
2141 			prange->work_item.op = op;
2142 	} else {
2143 		prange->work_item.op = op;
2144 		prange->work_item.mm = mm;
2145 		list_add_tail(&prange->deferred_list,
2146 			      &prange->svms->deferred_range_list);
2147 		pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2148 			 prange, prange->start, prange->last, op);
2149 	}
2150 	spin_unlock(&svms->deferred_list_lock);
2151 }
2152 
2153 void schedule_deferred_list_work(struct svm_range_list *svms)
2154 {
2155 	spin_lock(&svms->deferred_list_lock);
2156 	if (!list_empty(&svms->deferred_range_list))
2157 		schedule_work(&svms->deferred_list_work);
2158 	spin_unlock(&svms->deferred_list_lock);
2159 }
2160 
2161 static void
2162 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2163 		      struct svm_range *prange, unsigned long start,
2164 		      unsigned long last)
2165 {
2166 	struct svm_range *head;
2167 	struct svm_range *tail;
2168 
2169 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2170 		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2171 			 prange->start, prange->last);
2172 		return;
2173 	}
2174 	if (start > prange->last || last < prange->start)
2175 		return;
2176 
2177 	head = tail = prange;
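	/* Carve the unmapped pages out of prange: after the splits below, the
	 * piece of prange that falls inside [start, last] becomes an
	 * SVM_OP_UNMAP_RANGE child, and if the range was split in the middle,
	 * the upper remainder is re-added as an SVM_OP_ADD_RANGE child.
	 */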
2178 	if (start > prange->start)
2179 		svm_range_split(prange, prange->start, start - 1, &tail);
2180 	if (last < tail->last)
2181 		svm_range_split(tail, last + 1, tail->last, &head);
2182 
2183 	if (head != prange && tail != prange) {
2184 		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2185 		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2186 	} else if (tail != prange) {
2187 		svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2188 	} else if (head != prange) {
2189 		svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2190 	} else if (parent != prange) {
2191 		prange->work_item.op = SVM_OP_UNMAP_RANGE;
2192 	}
2193 }
2194 
2195 static void
2196 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2197 			 unsigned long start, unsigned long last)
2198 {
2199 	struct svm_range_list *svms;
2200 	struct svm_range *pchild;
2201 	struct kfd_process *p;
2202 	unsigned long s, l;
2203 	bool unmap_parent;
2204 
2205 	p = kfd_lookup_process_by_mm(mm);
2206 	if (!p)
2207 		return;
2208 	svms = &p->svms;
2209 
2210 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2211 		 prange, prange->start, prange->last, start, last);
2212 
2213 	/* Make sure pending page faults are drained in the deferred worker
2214 	 * before the range is freed to avoid straggler interrupts on
2215 	 * unmapped memory causing "phantom faults".
2216 	 */
2217 	atomic_inc(&svms->drain_pagefaults);
2218 
2219 	unmap_parent = start <= prange->start && last >= prange->last;
2220 
2221 	list_for_each_entry(pchild, &prange->child_list, child_list) {
2222 		mutex_lock_nested(&pchild->lock, 1);
2223 		s = max(start, pchild->start);
2224 		l = min(last, pchild->last);
2225 		if (l >= s)
2226 			svm_range_unmap_from_gpus(pchild, s, l);
2227 		svm_range_unmap_split(mm, prange, pchild, start, last);
2228 		mutex_unlock(&pchild->lock);
2229 	}
2230 	s = max(start, prange->start);
2231 	l = min(last, prange->last);
2232 	if (l >= s)
2233 		svm_range_unmap_from_gpus(prange, s, l);
2234 	svm_range_unmap_split(mm, prange, prange, start, last);
2235 
2236 	if (unmap_parent)
2237 		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2238 	else
2239 		svm_range_add_list_work(svms, prange, mm,
2240 					SVM_OP_UPDATE_RANGE_NOTIFIER);
2241 	schedule_deferred_list_work(svms);
2242 
2243 	kfd_unref_process(p);
2244 }
2245 
2246 /**
2247  * svm_range_cpu_invalidate_pagetables - interval notifier callback
2248  * @mni: mmu_interval_notifier struct
2249  * @range: mmu_notifier_range struct
2250  * @cur_seq: value to pass to mmu_interval_set_seq()
2251  *
2252  * If the event is MMU_NOTIFY_UNMAP, this is from a CPU unmap of the range;
2253  * otherwise it is from migration or a CPU page invalidation callback.
2254  *
2255  * For an unmap event, unmap the range from GPUs, remove the prange from the
2256  * svms in a deferred work thread, and split the prange if only part of it is unmapped.
2257  *
2258  * For an invalidation event, if GPU retry fault is not enabled, evict the queues,
2259  * then schedule svm_range_restore_work to update the GPU mapping and resume queues.
2260  * If GPU retry fault is enabled, unmap the svm range from the GPU; the retry
2261  * fault will update the GPU mapping to recover.
2262  *
2263  * Context: mmap lock, notifier_invalidate_start lock are held
2264  *          for invalidate event, prange lock is held if this is from migration
2265  */
2266 static bool
2267 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2268 				    const struct mmu_notifier_range *range,
2269 				    unsigned long cur_seq)
2270 {
2271 	struct svm_range *prange;
2272 	unsigned long start;
2273 	unsigned long last;
2274 
2275 	if (range->event == MMU_NOTIFY_RELEASE)
2276 		return true;
2277 
2278 	start = mni->interval_tree.start;
2279 	last = mni->interval_tree.last;
2280 	start = max(start, range->start) >> PAGE_SHIFT;
2281 	last = min(last, range->end - 1) >> PAGE_SHIFT;
2282 	pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2283 		 start, last, range->start >> PAGE_SHIFT,
2284 		 (range->end - 1) >> PAGE_SHIFT,
2285 		 mni->interval_tree.start >> PAGE_SHIFT,
2286 		 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2287 
2288 	prange = container_of(mni, struct svm_range, notifier);
2289 
2290 	svm_range_lock(prange);
2291 	mmu_interval_set_seq(mni, cur_seq);
2292 
2293 	switch (range->event) {
2294 	case MMU_NOTIFY_UNMAP:
2295 		svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2296 		break;
2297 	default:
2298 		svm_range_evict(prange, mni->mm, start, last);
2299 		break;
2300 	}
2301 
2302 	svm_range_unlock(prange);
2303 
2304 	return true;
2305 }
2306 
2307 /**
2308  * svm_range_from_addr - find svm range from fault address
2309  * @svms: svm range list header
2310  * @addr: address to search range interval tree, in pages
2311  * @parent: parent range if range is on child list
2312  *
2313  * Context: The caller must hold svms->lock
2314  *
2315  * Return: the svm_range found or NULL
2316  */
2317 struct svm_range *
2318 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2319 		    struct svm_range **parent)
2320 {
2321 	struct interval_tree_node *node;
2322 	struct svm_range *prange;
2323 	struct svm_range *pchild;
2324 
2325 	node = interval_tree_iter_first(&svms->objects, addr, addr);
2326 	if (!node)
2327 		return NULL;
2328 
2329 	prange = container_of(node, struct svm_range, it_node);
2330 	pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2331 		 addr, prange->start, prange->last, node->start, node->last);
2332 
2333 	if (addr >= prange->start && addr <= prange->last) {
2334 		if (parent)
2335 			*parent = prange;
2336 		return prange;
2337 	}
2338 	list_for_each_entry(pchild, &prange->child_list, child_list)
2339 		if (addr >= pchild->start && addr <= pchild->last) {
2340 			pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2341 				 addr, pchild->start, pchild->last);
2342 			if (parent)
2343 				*parent = prange;
2344 			return pchild;
2345 		}
2346 
2347 	return NULL;
2348 }
2349 
2350 /* svm_range_best_restore_location - decide the best fault restore location
2351  * @prange: svm range structure
2352  * @adev: the GPU on which vm fault happened
2353  *
2354  * This is only called when xnack is on, to decide the best location to restore
2355  * the range mapping after a GPU vm fault. The caller uses the best location to
2356  * migrate the range if the actual location is not the best location, then
2357  * updates the GPU page table mapping to the best location.
2358  *
2359  * If the preferred loc is accessible by the faulting GPU, use the preferred loc.
2360  * If the faulting gpu idx is set in the range ACCESSIBLE bitmap, best_loc is the faulting gpu.
2361  * If the faulting gpu idx is set in the range ACCESSIBLE_IN_PLACE bitmap, then
2362  *    if the range actual loc is cpu, best_loc is cpu;
2363  *    if the faulting gpu is in the same xgmi hive as the range actual loc gpu,
2364  *    best_loc is the range actual loc.
2365  * Otherwise the faulting GPU has no access and best_loc is -1.
2366  *
2367  * Return:
2368  * -1 if the faulting GPU has no access to the range
2369  * 0 for CPU, or the GPU id
2370  */
2371 static int32_t
2372 svm_range_best_restore_location(struct svm_range *prange,
2373 				struct amdgpu_device *adev,
2374 				int32_t *gpuidx)
2375 {
2376 	struct amdgpu_device *bo_adev, *preferred_adev;
2377 	struct kfd_process *p;
2378 	uint32_t gpuid;
2379 	int r;
2380 
2381 	p = container_of(prange->svms, struct kfd_process, svms);
2382 
2383 	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
2384 	if (r < 0) {
2385 		pr_debug("failed to get gpuid from kgd\n");
2386 		return -1;
2387 	}
2388 
2389 	if (prange->preferred_loc == gpuid ||
2390 	    prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2391 		return prange->preferred_loc;
2392 	} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2393 		preferred_adev = svm_range_get_adev_by_id(prange,
2394 							prange->preferred_loc);
2395 		if (amdgpu_xgmi_same_hive(adev, preferred_adev))
2396 			return prange->preferred_loc;
2397 		/* fall through */
2398 	}
2399 
2400 	if (test_bit(*gpuidx, prange->bitmap_access))
2401 		return gpuid;
2402 
2403 	if (test_bit(*gpuidx, prange->bitmap_aip)) {
2404 		if (!prange->actual_loc)
2405 			return 0;
2406 
2407 		bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
2408 		if (amdgpu_xgmi_same_hive(adev, bo_adev))
2409 			return prange->actual_loc;
2410 		else
2411 			return 0;
2412 	}
2413 
2414 	return -1;
2415 }
2416 
2417 static int
2418 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2419 			       unsigned long *start, unsigned long *last,
2420 			       bool *is_heap_stack)
2421 {
2422 	struct vm_area_struct *vma;
2423 	struct interval_tree_node *node;
2424 	unsigned long start_limit, end_limit;
2425 
2426 	vma = find_vma(p->mm, addr << PAGE_SHIFT);
2427 	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2428 		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2429 		return -EFAULT;
2430 	}
2431 
2432 	*is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
2433 			  vma->vm_end >= vma->vm_mm->start_brk) ||
2434 			 (vma->vm_start <= vma->vm_mm->start_stack &&
2435 			  vma->vm_end >= vma->vm_mm->start_stack);
2436 
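	/* Confine the new range to the containing VMA and to the 512-page
	 * (2 MiB with 4 KiB pages) aligned window around the faulting address;
	 * it is clipped further by neighbouring svm ranges below.
	 */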
2437 	start_limit = max(vma->vm_start >> PAGE_SHIFT,
2438 		      (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2439 	end_limit = min(vma->vm_end >> PAGE_SHIFT,
2440 		    (unsigned long)ALIGN(addr + 1, 2UL << 8));
2441 	/* First range that starts after the fault address */
2442 	node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2443 	if (node) {
2444 		end_limit = min(end_limit, node->start);
2445 		/* Last range that ends before the fault address */
2446 		node = container_of(rb_prev(&node->rb),
2447 				    struct interval_tree_node, rb);
2448 	} else {
2449 		/* Last range must end before addr because
2450 		 * there was no range after addr
2451 		 */
2452 		node = container_of(rb_last(&p->svms.objects.rb_root),
2453 				    struct interval_tree_node, rb);
2454 	}
2455 	if (node) {
2456 		if (node->last >= addr) {
2457 			WARN(1, "Overlap with prev node and page fault addr\n");
2458 			return -EFAULT;
2459 		}
2460 		start_limit = max(start_limit, node->last + 1);
2461 	}
2462 
2463 	*start = start_limit;
2464 	*last = end_limit - 1;
2465 
2466 	pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2467 		 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2468 		 *start, *last, *is_heap_stack);
2469 
2470 	return 0;
2471 }
2472 
2473 static int
2474 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2475 			   uint64_t *bo_s, uint64_t *bo_l)
2476 {
2477 	struct amdgpu_bo_va_mapping *mapping;
2478 	struct interval_tree_node *node;
2479 	struct amdgpu_bo *bo = NULL;
2480 	unsigned long userptr;
2481 	uint32_t i;
2482 	int r;
2483 
2484 	for (i = 0; i < p->n_pdds; i++) {
2485 		struct amdgpu_vm *vm;
2486 
2487 		if (!p->pdds[i]->drm_priv)
2488 			continue;
2489 
2490 		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2491 		r = amdgpu_bo_reserve(vm->root.bo, false);
2492 		if (r)
2493 			return r;
2494 
2495 		/* Check userptr by searching entire vm->va interval tree */
2496 		node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2497 		while (node) {
2498 			mapping = container_of((struct rb_node *)node,
2499 					       struct amdgpu_bo_va_mapping, rb);
2500 			bo = mapping->bo_va->base.bo;
2501 
2502 			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2503 							 start << PAGE_SHIFT,
2504 							 last << PAGE_SHIFT,
2505 							 &userptr)) {
2506 				node = interval_tree_iter_next(node, 0, ~0ULL);
2507 				continue;
2508 			}
2509 
2510 			pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2511 				 start, last);
2512 			if (bo_s && bo_l) {
2513 				*bo_s = userptr >> PAGE_SHIFT;
2514 				*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2515 			}
2516 			amdgpu_bo_unreserve(vm->root.bo);
2517 			return -EADDRINUSE;
2518 		}
2519 		amdgpu_bo_unreserve(vm->root.bo);
2520 	}
2521 	return 0;
2522 }
2523 
2524 static struct
2525 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
2526 						struct kfd_process *p,
2527 						struct mm_struct *mm,
2528 						int64_t addr)
2529 {
2530 	struct svm_range *prange = NULL;
2531 	unsigned long start, last;
2532 	uint32_t gpuid, gpuidx;
2533 	bool is_heap_stack;
2534 	uint64_t bo_s = 0;
2535 	uint64_t bo_l = 0;
2536 	int r;
2537 
2538 	if (svm_range_get_range_boundaries(p, addr, &start, &last,
2539 					   &is_heap_stack))
2540 		return NULL;
2541 
2542 	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2543 	if (r != -EADDRINUSE)
2544 		r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2545 
2546 	if (r == -EADDRINUSE) {
2547 		if (addr >= bo_s && addr <= bo_l)
2548 			return NULL;
2549 
2550 		/* Create a one-page svm range if the 2MB range overlaps an existing mapping */
2551 		start = addr;
2552 		last = addr;
2553 	}
2554 
2555 	prange = svm_range_new(&p->svms, start, last);
2556 	if (!prange) {
2557 		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2558 		return NULL;
2559 	}
2560 	if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
2561 		pr_debug("failed to get gpuid from kgd\n");
2562 		svm_range_free(prange);
2563 		return NULL;
2564 	}
2565 
2566 	if (is_heap_stack)
2567 		prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2568 
2569 	svm_range_add_to_svms(prange);
2570 	svm_range_add_notifier_locked(mm, prange);
2571 
2572 	return prange;
2573 }
2574 
2575 /* svm_range_skip_recover - decide if prange can be recovered
2576  * @prange: svm range structure
2577  *
2578  * The GPU vm retry fault handler skips recovering the range in these cases:
2579  * 1. prange is on the deferred list to be removed after unmap; this is a stale
2580  *    fault, and the deferred list work will drain it before freeing the prange.
2581  * 2. prange is on the deferred list to add an interval notifier after a split, or
2582  * 3. prange is a child range split from a parent prange; recover it later,
2583  *    after the interval notifier is added.
2584  *
2585  * Return: true to skip recover, false to recover
2586  */
2587 static bool svm_range_skip_recover(struct svm_range *prange)
2588 {
2589 	struct svm_range_list *svms = prange->svms;
2590 
2591 	spin_lock(&svms->deferred_list_lock);
2592 	if (list_empty(&prange->deferred_list) &&
2593 	    list_empty(&prange->child_list)) {
2594 		spin_unlock(&svms->deferred_list_lock);
2595 		return false;
2596 	}
2597 	spin_unlock(&svms->deferred_list_lock);
2598 
2599 	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2600 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2601 			 svms, prange, prange->start, prange->last);
2602 		return true;
2603 	}
2604 	if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2605 	    prange->work_item.op == SVM_OP_ADD_RANGE) {
2606 		pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2607 			 svms, prange, prange->start, prange->last);
2608 		return true;
2609 	}
2610 	return false;
2611 }
2612 
2613 static void
2614 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
2615 		      int32_t gpuidx)
2616 {
2617 	struct kfd_process_device *pdd;
2618 
2619 	/* fault is on different page of same range
2620 	 * or fault is skipped to recover later
2621 	 * or fault is on invalid virtual address
2622 	 */
2623 	if (gpuidx == MAX_GPU_INSTANCE) {
2624 		uint32_t gpuid;
2625 		int r;
2626 
2627 		r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
2628 		if (r < 0)
2629 			return;
2630 	}
2631 
2632 	/* fault is recovered
2633 	 * or fault cannot be recovered because the GPU has no access to the range
2634 	 */
2635 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2636 	if (pdd)
2637 		WRITE_ONCE(pdd->faults, pdd->faults + 1);
2638 }
2639 
2640 static bool
2641 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2642 {
2643 	unsigned long requested = VM_READ;
2644 
2645 	if (write_fault)
2646 		requested |= VM_WRITE;
2647 
2648 	pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2649 		vma->vm_flags);
2650 	return (vma->vm_flags & requested) == requested;
2651 }
2652 
2653 int
2654 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2655 			uint64_t addr, bool write_fault)
2656 {
2657 	struct mm_struct *mm = NULL;
2658 	struct svm_range_list *svms;
2659 	struct svm_range *prange;
2660 	struct kfd_process *p;
2661 	uint64_t timestamp;
2662 	int32_t best_loc;
2663 	int32_t gpuidx = MAX_GPU_INSTANCE;
2664 	bool write_locked = false;
2665 	struct vm_area_struct *vma;
2666 	int r = 0;
2667 
2668 	if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
2669 		pr_debug("device does not support SVM\n");
2670 		return -EFAULT;
2671 	}
2672 
2673 	p = kfd_lookup_process_by_pasid(pasid);
2674 	if (!p) {
2675 		pr_debug("kfd process not found, pasid 0x%x\n", pasid);
2676 		return 0;
2677 	}
2678 	if (!p->xnack_enabled) {
2679 		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2680 		r = -EFAULT;
2681 		goto out;
2682 	}
2683 	svms = &p->svms;
2684 
2685 	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2686 
2687 	if (atomic_read(&svms->drain_pagefaults)) {
2688 		pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2689 		r = 0;
2690 		goto out;
2691 	}
2692 
2693 	/* p->lead_thread is available as kfd_process_wq_release flushes the work
2694 	 * before releasing task ref.
2695 	 */
2696 	mm = get_task_mm(p->lead_thread);
2697 	if (!mm) {
2698 		pr_debug("svms 0x%p failed to get mm\n", svms);
2699 		r = 0;
2700 		goto out;
2701 	}
2702 
2703 	mmap_read_lock(mm);
2704 retry_write_locked:
2705 	mutex_lock(&svms->lock);
2706 	prange = svm_range_from_addr(svms, addr, NULL);
2707 	if (!prange) {
2708 		pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2709 			 svms, addr);
2710 		if (!write_locked) {
2711 			/* Need the write lock to create new range with MMU notifier.
2712 			 * Also flush pending deferred work to make sure the interval
2713 			 * tree is up to date before we add a new range
2714 			 */
2715 			mutex_unlock(&svms->lock);
2716 			mmap_read_unlock(mm);
2717 			mmap_write_lock(mm);
2718 			write_locked = true;
2719 			goto retry_write_locked;
2720 		}
2721 		prange = svm_range_create_unregistered_range(adev, p, mm, addr);
2722 		if (!prange) {
2723 			pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2724 				 svms, addr);
2725 			mmap_write_downgrade(mm);
2726 			r = -EFAULT;
2727 			goto out_unlock_svms;
2728 		}
2729 	}
2730 	if (write_locked)
2731 		mmap_write_downgrade(mm);
2732 
2733 	mutex_lock(&prange->migrate_mutex);
2734 
2735 	if (svm_range_skip_recover(prange)) {
2736 		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2737 		r = 0;
2738 		goto out_unlock_range;
2739 	}
2740 
2741 	timestamp = ktime_to_us(ktime_get()) - prange->validate_timestamp;
2742 	/* skip duplicate vm fault on different pages of same range */
2743 	if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
2744 		pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2745 			 svms, prange->start, prange->last);
2746 		r = 0;
2747 		goto out_unlock_range;
2748 	}
2749 
2750 	/* __do_munmap removed VMA, return success as we are handling stale
2751 	 * retry fault.
2752 	 */
2753 	vma = find_vma(mm, addr << PAGE_SHIFT);
2754 	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2755 		pr_debug("address 0x%llx VMA is removed\n", addr);
2756 		r = 0;
2757 		goto out_unlock_range;
2758 	}
2759 
2760 	if (!svm_fault_allowed(vma, write_fault)) {
2761 		pr_debug("fault addr 0x%llx no %s permission\n", addr,
2762 			write_fault ? "write" : "read");
2763 		r = -EPERM;
2764 		goto out_unlock_range;
2765 	}
2766 
2767 	best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
2768 	if (best_loc == -1) {
2769 		pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
2770 			 svms, prange->start, prange->last);
2771 		r = -EACCES;
2772 		goto out_unlock_range;
2773 	}
2774 
2775 	pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2776 		 svms, prange->start, prange->last, best_loc,
2777 		 prange->actual_loc);
2778 
2779 	if (prange->actual_loc != best_loc) {
2780 		if (best_loc) {
2781 			r = svm_migrate_to_vram(prange, best_loc, mm);
2782 			if (r) {
2783 				pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
2784 					 r, addr);
2785 				/* Fallback to system memory if migration to
2786 				 * VRAM failed
2787 				 */
2788 				if (prange->actual_loc)
2789 					r = svm_migrate_vram_to_ram(prange, mm);
2790 				else
2791 					r = 0;
2792 			}
2793 		} else {
2794 			r = svm_migrate_vram_to_ram(prange, mm);
2795 		}
2796 		if (r) {
2797 			pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
2798 				 r, svms, prange->start, prange->last);
2799 			goto out_unlock_range;
2800 		}
2801 	}
2802 
2803 	r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
2804 	if (r)
2805 		pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
2806 			 r, svms, prange->start, prange->last);
2807 
2808 out_unlock_range:
2809 	mutex_unlock(&prange->migrate_mutex);
2810 out_unlock_svms:
2811 	mutex_unlock(&svms->lock);
2812 	mmap_read_unlock(mm);
2813 
2814 	svm_range_count_fault(adev, p, gpuidx);
2815 
2816 	mmput(mm);
2817 out:
2818 	kfd_unref_process(p);
2819 
2820 	if (r == -EAGAIN) {
2821 		pr_debug("recover vm fault later\n");
2822 		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2823 		r = 0;
2824 	}
2825 	return r;
2826 }
2827 
2828 void svm_range_list_fini(struct kfd_process *p)
2829 {
2830 	struct svm_range *prange;
2831 	struct svm_range *next;
2832 
2833 	pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
2834 
2835 	/* Ensure list work is finished before process is destroyed */
2836 	flush_work(&p->svms.deferred_list_work);
2837 
2838 	/*
2839 	 * Ensure no retry fault comes in afterwards, as the page fault handler will
2840 	 * no longer find the kfd process and take the mm lock to recover the fault.
2841 	 */
2842 	atomic_inc(&p->svms.drain_pagefaults);
2843 	svm_range_drain_retry_fault(&p->svms);
2844 
2845 
2847 		svm_range_unlink(prange);
2848 		svm_range_remove_notifier(prange);
2849 		svm_range_free(prange);
2850 	}
2851 
2852 	mutex_destroy(&p->svms.lock);
2853 
2854 	pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
2855 }
2856 
2857 int svm_range_list_init(struct kfd_process *p)
2858 {
2859 	struct svm_range_list *svms = &p->svms;
2860 	int i;
2861 
2862 	svms->objects = RB_ROOT_CACHED;
2863 	mutex_init(&svms->lock);
2864 	INIT_LIST_HEAD(&svms->list);
2865 	atomic_set(&svms->evicted_ranges, 0);
2866 	atomic_set(&svms->drain_pagefaults, 0);
2867 	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
2868 	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
2869 	INIT_LIST_HEAD(&svms->deferred_range_list);
2870 	spin_lock_init(&svms->deferred_list_lock);
2871 
2872 	for (i = 0; i < p->n_pdds; i++)
2873 		if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
2874 			bitmap_set(svms->bitmap_supported, i, 1);
2875 
2876 	return 0;
2877 }
2878 
2879 /**
2880  * svm_range_check_vm - check if virtual address range mapped already
2881  * @p: current kfd_process
2882  * @start: range start address, in pages
2883  * @last: range last address, in pages
2884  * @bo_s: mapping start address in pages if address range already mapped
2885  * @bo_l: mapping last address in pages if address range already mapped
2886  *
2887  * The purpose is to avoid virtual address ranges already allocated by the
2888  * kfd_ioctl_alloc_memory_of_gpu ioctl.
2889  * It checks the vm of each pdd in the kfd_process.
2890  *
2891  * Context: Process context
2892  *
2893  * Return 0 - OK, if the range is not mapped.
2894  * Otherwise error code:
2895  * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
2896  * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
2897  * a signal. Release all buffer reservations and return to user-space.
2898  */
2899 static int
2900 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
2901 		   uint64_t *bo_s, uint64_t *bo_l)
2902 {
2903 	struct amdgpu_bo_va_mapping *mapping;
2904 	struct interval_tree_node *node;
2905 	uint32_t i;
2906 	int r;
2907 
2908 	for (i = 0; i < p->n_pdds; i++) {
2909 		struct amdgpu_vm *vm;
2910 
2911 		if (!p->pdds[i]->drm_priv)
2912 			continue;
2913 
2914 		vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2915 		r = amdgpu_bo_reserve(vm->root.bo, false);
2916 		if (r)
2917 			return r;
2918 
2919 		node = interval_tree_iter_first(&vm->va, start, last);
2920 		if (node) {
2921 			pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
2922 				 start, last);
2923 			mapping = container_of((struct rb_node *)node,
2924 					       struct amdgpu_bo_va_mapping, rb);
2925 			if (bo_s && bo_l) {
2926 				*bo_s = mapping->start;
2927 				*bo_l = mapping->last;
2928 			}
2929 			amdgpu_bo_unreserve(vm->root.bo);
2930 			return -EADDRINUSE;
2931 		}
2932 		amdgpu_bo_unreserve(vm->root.bo);
2933 	}
2934 
2935 	return 0;
2936 }
2937 
2938 /**
2939  * svm_range_is_valid - check if virtual address range is valid
2940  * @p: current kfd_process
2941  * @start: range start address, in pages
2942  * @size: range size, in pages
2943  *
2944  * A virtual address range is valid if it is entirely covered by one or more VMAs
2945  *
2946  * Context: Process context
2947  *
2948  * Return:
2949  *  0 - OK, otherwise error code
2950  */
2951 static int
2952 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
2953 {
2954 	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
2955 	struct vm_area_struct *vma;
2956 	unsigned long end;
2957 	unsigned long start_unchg = start;
2958 
2959 	start <<= PAGE_SHIFT;
2960 	end = start + (size << PAGE_SHIFT);
2961 	do {
2962 		vma = find_vma(p->mm, start);
2963 		if (!vma || start < vma->vm_start ||
2964 		    (vma->vm_flags & device_vma))
2965 			return -EFAULT;
2966 		start = min(end, vma->vm_end);
2967 	} while (start < end);
2968 
2969 	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
2970 				  NULL);
2971 }
2972 
2973 /**
2974  * svm_range_best_prefetch_location - decide the best prefetch location
2975  * @prange: svm range structure
2976  *
2977  * For xnack off:
2978  * If the range maps to a single GPU, the best prefetch location is prefetch_loc,
2979  * which can be CPU or GPU.
2980  *
2981  * If the range is ACCESS or ACCESS_IN_PLACE by multiple GPUs, the best prefetch
2982  * location is the prefetch_loc GPU only if those GPUs are connected within the
2983  * same XGMI hive; otherwise the best prefetch location is always CPU, because a
2984  * GPU cannot coherently map the VRAM of other GPUs even with a large-BAR PCIe connection.
2985  *
2986  * For xnack on:
2987  * If the range is not ACCESS_IN_PLACE by multiple GPUs, the best prefetch location
2988  * is prefetch_loc; access by another GPU will generate a vm fault and trigger migration.
2989  *
2990  * If the range is ACCESS_IN_PLACE by multiple GPUs, the best prefetch location is
2991  * the prefetch_loc GPU only if those GPUs are in the same XGMI hive; otherwise the
2992  * best prefetch location is always CPU.
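 *
 * For example (illustrative): if prefetch_loc is GPU A and the range must also
 * be mapped on GPU B per the bitmaps above, the range is prefetched to GPU A
 * only when A and B are in the same XGMI hive; otherwise the best prefetch
 * location falls back to CPU (0).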
2993  *
2994  * Context: Process context
2995  *
2996  * Return:
2997  * 0 for CPU or GPU id
2998  */
2999 static uint32_t
3000 svm_range_best_prefetch_location(struct svm_range *prange)
3001 {
3002 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3003 	uint32_t best_loc = prange->prefetch_loc;
3004 	struct kfd_process_device *pdd;
3005 	struct amdgpu_device *bo_adev;
3006 	struct kfd_process *p;
3007 	uint32_t gpuidx;
3008 
3009 	p = container_of(prange->svms, struct kfd_process, svms);
3010 
3011 	if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3012 		goto out;
3013 
3014 	bo_adev = svm_range_get_adev_by_id(prange, best_loc);
3015 	if (!bo_adev) {
3016 		WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
3017 		best_loc = 0;
3018 		goto out;
3019 	}
3020 
3021 	if (p->xnack_enabled)
3022 		bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3023 	else
3024 		bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3025 			  MAX_GPU_INSTANCE);
3026 
3027 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3028 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3029 		if (!pdd) {
3030 			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3031 			continue;
3032 		}
3033 
3034 		if (pdd->dev->adev == bo_adev)
3035 			continue;
3036 
3037 		if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
3038 			best_loc = 0;
3039 			break;
3040 		}
3041 	}
3042 
3043 out:
3044 	pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3045 		 p->xnack_enabled, &p->svms, prange->start, prange->last,
3046 		 best_loc);
3047 
3048 	return best_loc;
3049 }
3050 
3051 /* FIXME: This is a workaround for page locking bug when some pages are
3052  * invalid during migration to VRAM
3053  */
3054 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
3055 			void *owner)
3056 {
3057 	struct hmm_range *hmm_range;
3058 	int r;
3059 
3060 	if (prange->validated_once)
3061 		return;
3062 
3063 	r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
3064 				       prange->start << PAGE_SHIFT,
3065 				       prange->npages, &hmm_range,
3066 				       false, true, owner);
3067 	if (!r) {
3068 		amdgpu_hmm_range_get_pages_done(hmm_range);
3069 		prange->validated_once = true;
3070 	}
3071 }
3072 
3073 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3074  * @mm: current process mm_struct
3075  * @prange: svm range structure
3076  * @migrated: output, true if migration is triggered
3077  *
3078  * If the range prefetch_loc is a GPU and the actual loc is cpu (0), migrate the
3079  * range from ram to vram.
3080  * If the range prefetch_loc is cpu (0) and the actual loc is a GPU, migrate the
3081  * range from vram to ram.
3082  *
3083  * If GPU vm fault retry is not enabled, migration interacts with the MMU notifier
3084  * and the restore work:
3085  * 1. migrate_vma_setup invalidates pages; the MMU notifier callback svm_range_evict
3086  *    stops all queues and schedules the restore work
3087  * 2. svm_range_restore_work waits for the migration to finish because
3088  *    a. svm_range_validate_vram takes prange->migrate_mutex
3089  *    b. svm_range_validate_ram HMM get pages waits for the CPU fault handler to return
3090  * 3. the restore work updates the GPU mappings and resumes all queues.
3091  *
3092  * Context: Process context
3093  *
3094  * Return:
3095  * 0 - OK, otherwise - error code of migration
3096  */
3097 static int
3098 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3099 			    bool *migrated)
3100 {
3101 	uint32_t best_loc;
3102 	int r = 0;
3103 
3104 	*migrated = false;
3105 	best_loc = svm_range_best_prefetch_location(prange);
3106 
3107 	if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3108 	    best_loc == prange->actual_loc)
3109 		return 0;
3110 
3111 	if (!best_loc) {
3112 		r = svm_migrate_vram_to_ram(prange, mm);
3113 		*migrated = !r;
3114 		return r;
3115 	}
3116 
3117 	r = svm_migrate_to_vram(prange, best_loc, mm);
3118 	*migrated = !r;
3119 
3120 	return r;
3121 }
3122 
3123 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3124 {
3125 	if (!fence)
3126 		return -EINVAL;
3127 
3128 	if (dma_fence_is_signaled(&fence->base))
3129 		return 0;
3130 
3131 	if (fence->svm_bo) {
3132 		WRITE_ONCE(fence->svm_bo->evicting, 1);
3133 		schedule_work(&fence->svm_bo->eviction_work);
3134 	}
3135 
3136 	return 0;
3137 }
3138 
3139 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3140 {
3141 	struct svm_range_bo *svm_bo;
3142 	struct kfd_process *p;
3143 	struct mm_struct *mm;
3144 
3145 	svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3146 	if (!svm_bo_ref_unless_zero(svm_bo))
3147 		return; /* svm_bo was freed while eviction was pending */
3148 
3149 	/* svm_range_bo_release destroys this worker thread. So during
3150 	 * the lifetime of this thread, kfd_process and mm will be valid.
3151 	 */
3152 	p = container_of(svm_bo->svms, struct kfd_process, svms);
3153 	mm = p->mm;
3154 	if (!mm)
3155 		return;
3156 
3157 	mmap_read_lock(mm);
3158 	spin_lock(&svm_bo->list_lock);
3159 	while (!list_empty(&svm_bo->range_list)) {
3160 		struct svm_range *prange =
3161 				list_first_entry(&svm_bo->range_list,
3162 						struct svm_range, svm_bo_list);
3163 		int retries = 3;
3164 
3165 		list_del_init(&prange->svm_bo_list);
3166 		spin_unlock(&svm_bo->list_lock);
3167 
3168 		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3169 			 prange->start, prange->last);
3170 
3171 		mutex_lock(&prange->migrate_mutex);
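		/* svm_migrate_vram_to_ram may only partially succeed; retry a
		 * few times, and WARN below if the range still resides in VRAM
		 * after the retries.
		 */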
3172 		do {
3173 			svm_migrate_vram_to_ram(prange,
3174 						svm_bo->eviction_fence->mm);
3175 		} while (prange->actual_loc && --retries);
3176 		WARN(prange->actual_loc, "Migration failed during eviction");
3177 
3178 		mutex_lock(&prange->lock);
3179 		prange->svm_bo = NULL;
3180 		mutex_unlock(&prange->lock);
3181 
3182 		mutex_unlock(&prange->migrate_mutex);
3183 
3184 		spin_lock(&svm_bo->list_lock);
3185 	}
3186 	spin_unlock(&svm_bo->list_lock);
3187 	mmap_read_unlock(mm);
3188 
3189 	dma_fence_signal(&svm_bo->eviction_fence->base);
3190 	/* This is the last reference to svm_bo, after svm_range_vram_node_free
3191 	 * has been called in svm_migrate_vram_to_ram
3192 	 */
3193 	WARN_ONCE(kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3194 	svm_range_bo_unref(svm_bo);
3195 }
3196 
3197 static int
3198 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
3199 		   uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
3200 {
3201 	struct mm_struct *mm = current->mm;
3202 	struct list_head update_list;
3203 	struct list_head insert_list;
3204 	struct list_head remove_list;
3205 	struct svm_range_list *svms;
3206 	struct svm_range *prange;
3207 	struct svm_range *next;
3208 	int r = 0;
3209 
3210 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3211 		 p->pasid, &p->svms, start, start + size - 1, size);
3212 
3213 	r = svm_range_check_attr(p, nattr, attrs);
3214 	if (r)
3215 		return r;
3216 
3217 	svms = &p->svms;
3218 
3219 	svm_range_list_lock_and_flush_work(svms, mm);
3220 
3221 	r = svm_range_is_valid(p, start, size);
3222 	if (r) {
3223 		pr_debug("invalid range r=%d\n", r);
3224 		mmap_write_unlock(mm);
3225 		goto out;
3226 	}
3227 
3228 	mutex_lock(&svms->lock);
3229 
3230 	/* Add new range and split existing ranges as needed */
3231 	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3232 			  &insert_list, &remove_list);
3233 	if (r) {
3234 		mutex_unlock(&svms->lock);
3235 		mmap_write_unlock(mm);
3236 		goto out;
3237 	}
3238 	/* Apply changes as a transaction */
3239 	list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
3240 		svm_range_add_to_svms(prange);
3241 		svm_range_add_notifier_locked(mm, prange);
3242 	}
3243 	list_for_each_entry(prange, &update_list, update_list) {
3244 		svm_range_apply_attrs(p, prange, nattr, attrs);
3245 		/* TODO: unmap ranges from GPU that lost access */
3246 	}
3247 	list_for_each_entry_safe(prange, next, &remove_list,
3248 				remove_list) {
3249 		pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3250 			 prange->svms, prange, prange->start,
3251 			 prange->last);
3252 		svm_range_unlink(prange);
3253 		svm_range_remove_notifier(prange);
3254 		svm_range_free(prange);
3255 	}
3256 
3257 	mmap_write_downgrade(mm);
3258 	/* Trigger migrations and revalidate and map to GPUs as needed. If
3259 	 * this fails we may be left with partially completed actions. There
3260 	 * is no clean way of rolling back to the previous state in such a
3261 	 * case because the rollback wouldn't be guaranteed to work either.
3262 	 */
3263 	list_for_each_entry(prange, &update_list, update_list) {
3264 		bool migrated;
3265 
3266 		mutex_lock(&prange->migrate_mutex);
3267 
3268 		r = svm_range_trigger_migration(mm, prange, &migrated);
3269 		if (r)
3270 			goto out_unlock_range;
3271 
3272 		if (migrated && !p->xnack_enabled) {
3273 			pr_debug("restore_work will update mappings of GPUs\n");
3274 			mutex_unlock(&prange->migrate_mutex);
3275 			continue;
3276 		}
3277 
3278 		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3279 					       true, true);
3280 		if (r)
3281 			pr_debug("failed %d to map svm range\n", r);
3282 
3283 out_unlock_range:
3284 		mutex_unlock(&prange->migrate_mutex);
3285 		if (r)
3286 			break;
3287 	}
3288 
3289 	svm_range_debug_dump(svms);
3290 
3291 	mutex_unlock(&svms->lock);
3292 	mmap_read_unlock(mm);
3293 out:
3294 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3295 		 &p->svms, start, start + size - 1, r);
3296 
3297 	return r;
3298 }
3299 
3300 static int
3301 svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
3302 		   uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
3303 {
3304 	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3305 	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3306 	bool get_preferred_loc = false;
3307 	bool get_prefetch_loc = false;
3308 	bool get_granularity = false;
3309 	bool get_accessible = false;
3310 	bool get_flags = false;
3311 	uint64_t last = start + size - 1UL;
3312 	struct mm_struct *mm = current->mm;
3313 	uint8_t granularity = 0xff;
3314 	struct interval_tree_node *node;
3315 	struct svm_range_list *svms;
3316 	struct svm_range *prange;
3317 	uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3318 	uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3319 	uint32_t flags_and = 0xffffffff;
3320 	uint32_t flags_or = 0;
3321 	int gpuidx;
3322 	uint32_t i;
3323 	int r = 0;
3324 
3325 	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3326 		 start + size - 1, nattr);
3327 
3328 	/* Flush pending deferred work to avoid racing with deferred actions from
3329 	 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3330 	 * can still race with get_attr because we don't hold the mmap lock. But that
3331 	 * would be a race condition in the application anyway, and undefined
3332 	 * behaviour is acceptable in that case.
3333 	 */
3334 	flush_work(&p->svms.deferred_list_work);
3335 
3336 	mmap_read_lock(mm);
3337 	r = svm_range_is_valid(p, start, size);
3338 	mmap_read_unlock(mm);
3339 	if (r) {
3340 		pr_debug("invalid range r=%d\n", r);
3341 		return r;
3342 	}
3343 
3344 	for (i = 0; i < nattr; i++) {
3345 		switch (attrs[i].type) {
3346 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3347 			get_preferred_loc = true;
3348 			break;
3349 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3350 			get_prefetch_loc = true;
3351 			break;
3352 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3353 			get_accessible = true;
3354 			break;
3355 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3356 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3357 			get_flags = true;
3358 			break;
3359 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3360 			get_granularity = true;
3361 			break;
3362 		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3363 		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3364 			fallthrough;
3365 		default:
3366 			pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3367 			return -EINVAL;
3368 		}
3369 	}
3370 
3371 	svms = &p->svms;
3372 
3373 	mutex_lock(&svms->lock);
3374 
3375 	node = interval_tree_iter_first(&svms->objects, start, last);
3376 	if (!node) {
3377 		pr_debug("range attrs not found return default values\n");
3378 		svm_range_set_default_attributes(&location, &prefetch_loc,
3379 						 &granularity, &flags_and);
3380 		flags_or = flags_and;
3381 		if (p->xnack_enabled)
3382 			bitmap_copy(bitmap_access, svms->bitmap_supported,
3383 				    MAX_GPU_INSTANCE);
3384 		else
3385 			bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3386 		bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3387 		goto fill_values;
3388 	}
3389 	bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3390 	bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3391 
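	/* Walk every range overlapping [start, last] and accumulate the
	 * attributes: locations stay defined only if all ranges agree, flags
	 * are AND-ed and OR-ed, accessibility bitmaps are intersected, and the
	 * smallest granularity wins.
	 */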
3392 	while (node) {
3393 		struct interval_tree_node *next;
3394 
3395 		prange = container_of(node, struct svm_range, it_node);
3396 		next = interval_tree_iter_next(node, start, last);
3397 
3398 		if (get_preferred_loc) {
3399 			if (prange->preferred_loc ==
3400 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3401 			    (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3402 			     location != prange->preferred_loc)) {
3403 				location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3404 				get_preferred_loc = false;
3405 			} else {
3406 				location = prange->preferred_loc;
3407 			}
3408 		}
3409 		if (get_prefetch_loc) {
3410 			if (prange->prefetch_loc ==
3411 					KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3412 			    (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3413 			     prefetch_loc != prange->prefetch_loc)) {
3414 				prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3415 				get_prefetch_loc = false;
3416 			} else {
3417 				prefetch_loc = prange->prefetch_loc;
3418 			}
3419 		}
3420 		if (get_accessible) {
3421 			bitmap_and(bitmap_access, bitmap_access,
3422 				   prange->bitmap_access, MAX_GPU_INSTANCE);
3423 			bitmap_and(bitmap_aip, bitmap_aip,
3424 				   prange->bitmap_aip, MAX_GPU_INSTANCE);
3425 		}
3426 		if (get_flags) {
3427 			flags_and &= prange->flags;
3428 			flags_or |= prange->flags;
3429 		}
3430 
3431 		if (get_granularity && prange->granularity < granularity)
3432 			granularity = prange->granularity;
3433 
3434 		node = next;
3435 	}
3436 fill_values:
3437 	mutex_unlock(&svms->lock);
3438 
3439 	for (i = 0; i < nattr; i++) {
3440 		switch (attrs[i].type) {
3441 		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3442 			attrs[i].value = location;
3443 			break;
3444 		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3445 			attrs[i].value = prefetch_loc;
3446 			break;
3447 		case KFD_IOCTL_SVM_ATTR_ACCESS:
3448 			gpuidx = kfd_process_gpuidx_from_gpuid(p,
3449 							       attrs[i].value);
3450 			if (gpuidx < 0) {
3451 				pr_debug("invalid gpuid %x\n", attrs[i].value);
3452 				return -EINVAL;
3453 			}
3454 			if (test_bit(gpuidx, bitmap_access))
3455 				attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3456 			else if (test_bit(gpuidx, bitmap_aip))
3457 				attrs[i].type =
3458 					KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3459 			else
3460 				attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3461 			break;
3462 		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3463 			attrs[i].value = flags_and;
3464 			break;
3465 		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3466 			attrs[i].value = ~flags_or;
3467 			break;
3468 		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3469 			attrs[i].value = (uint32_t)granularity;
3470 			break;
3471 		}
3472 	}
3473 
3474 	return 0;
3475 }
3476 
3477 int
3478 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
3479 	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
3480 {
3481 	int r;
3482 
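	/* The ioctl passes start and size in bytes; the svm_range_* helpers
	 * below work in units of pages.
	 */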
3483 	start >>= PAGE_SHIFT;
3484 	size >>= PAGE_SHIFT;
3485 
3486 	switch (op) {
3487 	case KFD_IOCTL_SVM_OP_SET_ATTR:
3488 		r = svm_range_set_attr(p, start, size, nattrs, attrs);
3489 		break;
3490 	case KFD_IOCTL_SVM_OP_GET_ATTR:
3491 		r = svm_range_get_attr(p, start, size, nattrs, attrs);
3492 		break;
3493 	default:
3494 		r = -EINVAL;
3495 		break;
3496 	}
3497 
3498 	return r;
3499 }
3500