1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Copyright 2020-2021 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include <linux/dynamic_debug.h>
27 #include <drm/ttm/ttm_tt.h>
28 #include <drm/drm_exec.h>
29
30 #include "amdgpu_sync.h"
31 #include "amdgpu_object.h"
32 #include "amdgpu_vm.h"
33 #include "amdgpu_hmm.h"
34 #include "amdgpu.h"
35 #include "amdgpu_xgmi.h"
36 #include "kfd_priv.h"
37 #include "kfd_svm.h"
38 #include "kfd_migrate.h"
39 #include "kfd_smi_events.h"
40
41 #ifdef dev_fmt
42 #undef dev_fmt
43 #endif
44 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
45
46 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
47
48 /* Long enough to ensure no retry fault comes after svm range is restored and
49 * page table is updated.
50 */
51 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC)
52 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
53 #define dynamic_svm_range_dump(svms) \
54 _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
55 #else
56 #define dynamic_svm_range_dump(svms) \
57 do { if (0) svm_range_debug_dump(svms); } while (0)
58 #endif
59
60 /* Giant svm range split into smaller ranges based on this, it is decided using
61 * minimum of all dGPU/APU 1/32 VRAM size, between 2MB to 1GB and alignment to
62 * power of 2MB.
63 */
64 static uint64_t max_svm_range_pages;
65
66 struct criu_svm_metadata {
67 struct list_head list;
68 struct kfd_criu_svm_range_priv_data data;
69 };
70
71 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
72 static bool
73 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
74 const struct mmu_notifier_range *range,
75 unsigned long cur_seq);
76 static int
77 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
78 uint64_t *bo_s, uint64_t *bo_l);
79 static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
80 .invalidate = svm_range_cpu_invalidate_pagetables,
81 };
82
83 /**
84 * svm_range_unlink - unlink svm_range from lists and interval tree
85 * @prange: svm range structure to be removed
86 *
87 * Remove the svm_range from the svms and svm_bo lists and the svms
88 * interval tree.
89 *
90 * Context: The caller must hold svms->lock
91 */
svm_range_unlink(struct svm_range * prange)92 static void svm_range_unlink(struct svm_range *prange)
93 {
94 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
95 prange, prange->start, prange->last);
96
97 if (prange->svm_bo) {
98 spin_lock(&prange->svm_bo->list_lock);
99 list_del(&prange->svm_bo_list);
100 spin_unlock(&prange->svm_bo->list_lock);
101 }
102
103 list_del(&prange->list);
104 if (prange->it_node.start != 0 && prange->it_node.last != 0)
105 interval_tree_remove(&prange->it_node, &prange->svms->objects);
106 }
107
108 static void
svm_range_add_notifier_locked(struct mm_struct * mm,struct svm_range * prange)109 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
110 {
111 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
112 prange, prange->start, prange->last);
113
114 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
115 prange->start << PAGE_SHIFT,
116 prange->npages << PAGE_SHIFT,
117 &svm_range_mn_ops);
118 }
119
120 /**
121 * svm_range_add_to_svms - add svm range to svms
122 * @prange: svm range structure to be added
123 *
124 * Add the svm range to svms interval tree and link list
125 *
126 * Context: The caller must hold svms->lock
127 */
svm_range_add_to_svms(struct svm_range * prange)128 static void svm_range_add_to_svms(struct svm_range *prange)
129 {
130 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
131 prange, prange->start, prange->last);
132
133 list_move_tail(&prange->list, &prange->svms->list);
134 prange->it_node.start = prange->start;
135 prange->it_node.last = prange->last;
136 interval_tree_insert(&prange->it_node, &prange->svms->objects);
137 }
138
svm_range_remove_notifier(struct svm_range * prange)139 static void svm_range_remove_notifier(struct svm_range *prange)
140 {
141 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
142 prange->svms, prange,
143 prange->notifier.interval_tree.start >> PAGE_SHIFT,
144 prange->notifier.interval_tree.last >> PAGE_SHIFT);
145
146 if (prange->notifier.interval_tree.start != 0 &&
147 prange->notifier.interval_tree.last != 0)
148 mmu_interval_notifier_remove(&prange->notifier);
149 }
150
151 static bool
svm_is_valid_dma_mapping_addr(struct device * dev,dma_addr_t dma_addr)152 svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
153 {
154 return dma_addr && !dma_mapping_error(dev, dma_addr) &&
155 !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
156 }
157
158 static int
svm_range_dma_map_dev(struct amdgpu_device * adev,struct svm_range * prange,unsigned long offset,unsigned long npages,unsigned long * hmm_pfns,uint32_t gpuidx)159 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
160 unsigned long offset, unsigned long npages,
161 unsigned long *hmm_pfns, uint32_t gpuidx)
162 {
163 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
164 dma_addr_t *addr = prange->dma_addr[gpuidx];
165 struct device *dev = adev->dev;
166 struct page *page;
167 int i, r;
168
169 if (!addr) {
170 addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
171 if (!addr)
172 return -ENOMEM;
173 prange->dma_addr[gpuidx] = addr;
174 }
175
176 addr += offset;
177 for (i = 0; i < npages; i++) {
178 if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
179 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
180
181 page = hmm_pfn_to_page(hmm_pfns[i]);
182 if (is_zone_device_page(page)) {
183 struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
184
185 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
186 bo_adev->vm_manager.vram_base_offset -
187 bo_adev->kfd.pgmap.range.start;
188 addr[i] |= SVM_RANGE_VRAM_DOMAIN;
189 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
190 continue;
191 }
192 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
193 r = dma_mapping_error(dev, addr[i]);
194 if (r) {
195 dev_err(dev, "failed %d dma_map_page\n", r);
196 return r;
197 }
198 pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
199 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
200 }
201 return 0;
202 }
203
204 static int
svm_range_dma_map(struct svm_range * prange,unsigned long * bitmap,unsigned long offset,unsigned long npages,unsigned long * hmm_pfns)205 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
206 unsigned long offset, unsigned long npages,
207 unsigned long *hmm_pfns)
208 {
209 struct kfd_process *p;
210 uint32_t gpuidx;
211 int r;
212
213 p = container_of(prange->svms, struct kfd_process, svms);
214
215 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
216 struct kfd_process_device *pdd;
217
218 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
219 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
220 if (!pdd) {
221 pr_debug("failed to find device idx %d\n", gpuidx);
222 return -EINVAL;
223 }
224
225 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
226 hmm_pfns, gpuidx);
227 if (r)
228 break;
229 }
230
231 return r;
232 }
233
svm_range_dma_unmap(struct device * dev,dma_addr_t * dma_addr,unsigned long offset,unsigned long npages)234 void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
235 unsigned long offset, unsigned long npages)
236 {
237 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
238 int i;
239
240 if (!dma_addr)
241 return;
242
243 for (i = offset; i < offset + npages; i++) {
244 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
245 continue;
246 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
247 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
248 dma_addr[i] = 0;
249 }
250 }
251
svm_range_free_dma_mappings(struct svm_range * prange,bool unmap_dma)252 void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
253 {
254 struct kfd_process_device *pdd;
255 dma_addr_t *dma_addr;
256 struct device *dev;
257 struct kfd_process *p;
258 uint32_t gpuidx;
259
260 p = container_of(prange->svms, struct kfd_process, svms);
261
262 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
263 dma_addr = prange->dma_addr[gpuidx];
264 if (!dma_addr)
265 continue;
266
267 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
268 if (!pdd) {
269 pr_debug("failed to find device idx %d\n", gpuidx);
270 continue;
271 }
272 dev = &pdd->dev->adev->pdev->dev;
273 if (unmap_dma)
274 svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
275 kvfree(dma_addr);
276 prange->dma_addr[gpuidx] = NULL;
277 }
278 }
279
svm_range_free(struct svm_range * prange,bool do_unmap)280 static void svm_range_free(struct svm_range *prange, bool do_unmap)
281 {
282 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
283 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
284
285 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
286 prange->start, prange->last);
287
288 svm_range_vram_node_free(prange);
289 svm_range_free_dma_mappings(prange, do_unmap);
290
291 if (do_unmap && !p->xnack_enabled) {
292 pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
293 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
294 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
295 }
296 mutex_destroy(&prange->lock);
297 mutex_destroy(&prange->migrate_mutex);
298 kfree(prange);
299 }
300
301 static void
svm_range_set_default_attributes(int32_t * location,int32_t * prefetch_loc,uint8_t * granularity,uint32_t * flags)302 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
303 uint8_t *granularity, uint32_t *flags)
304 {
305 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
306 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
307 *granularity = 9;
308 *flags =
309 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
310 }
311
312 static struct
svm_range_new(struct svm_range_list * svms,uint64_t start,uint64_t last,bool update_mem_usage)313 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
314 uint64_t last, bool update_mem_usage)
315 {
316 uint64_t size = last - start + 1;
317 struct svm_range *prange;
318 struct kfd_process *p;
319
320 prange = kzalloc(sizeof(*prange), GFP_KERNEL);
321 if (!prange)
322 return NULL;
323
324 p = container_of(svms, struct kfd_process, svms);
325 if (!p->xnack_enabled && update_mem_usage &&
326 amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
327 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
328 pr_info("SVM mapping failed, exceeds resident system memory limit\n");
329 kfree(prange);
330 return NULL;
331 }
332 prange->npages = size;
333 prange->svms = svms;
334 prange->start = start;
335 prange->last = last;
336 INIT_LIST_HEAD(&prange->list);
337 INIT_LIST_HEAD(&prange->update_list);
338 INIT_LIST_HEAD(&prange->svm_bo_list);
339 INIT_LIST_HEAD(&prange->deferred_list);
340 INIT_LIST_HEAD(&prange->child_list);
341 atomic_set(&prange->invalid, 0);
342 prange->validate_timestamp = 0;
343 mutex_init(&prange->migrate_mutex);
344 mutex_init(&prange->lock);
345
346 if (p->xnack_enabled)
347 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
348 MAX_GPU_INSTANCE);
349
350 svm_range_set_default_attributes(&prange->preferred_loc,
351 &prange->prefetch_loc,
352 &prange->granularity, &prange->flags);
353
354 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
355
356 return prange;
357 }
358
svm_bo_ref_unless_zero(struct svm_range_bo * svm_bo)359 static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
360 {
361 if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
362 return false;
363
364 return true;
365 }
366
svm_range_bo_release(struct kref * kref)367 static void svm_range_bo_release(struct kref *kref)
368 {
369 struct svm_range_bo *svm_bo;
370
371 svm_bo = container_of(kref, struct svm_range_bo, kref);
372 pr_debug("svm_bo 0x%p\n", svm_bo);
373
374 spin_lock(&svm_bo->list_lock);
375 while (!list_empty(&svm_bo->range_list)) {
376 struct svm_range *prange =
377 list_first_entry(&svm_bo->range_list,
378 struct svm_range, svm_bo_list);
379 /* list_del_init tells a concurrent svm_range_vram_node_new when
380 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
381 */
382 list_del_init(&prange->svm_bo_list);
383 spin_unlock(&svm_bo->list_lock);
384
385 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
386 prange->start, prange->last);
387 mutex_lock(&prange->lock);
388 prange->svm_bo = NULL;
389 mutex_unlock(&prange->lock);
390
391 spin_lock(&svm_bo->list_lock);
392 }
393 spin_unlock(&svm_bo->list_lock);
394
395 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
396 struct kfd_process_device *pdd;
397 struct kfd_process *p;
398 struct mm_struct *mm;
399
400 mm = svm_bo->eviction_fence->mm;
401 /*
402 * The forked child process takes svm_bo device pages ref, svm_bo could be
403 * released after parent process is gone.
404 */
405 p = kfd_lookup_process_by_mm(mm);
406 if (p) {
407 pdd = kfd_get_process_device_data(svm_bo->node, p);
408 if (pdd)
409 atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
410 kfd_unref_process(p);
411 }
412 mmput(mm);
413 }
414
415 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
416 /* We're not in the eviction worker. Signal the fence. */
417 dma_fence_signal(&svm_bo->eviction_fence->base);
418 dma_fence_put(&svm_bo->eviction_fence->base);
419 amdgpu_bo_unref(&svm_bo->bo);
420 kfree(svm_bo);
421 }
422
svm_range_bo_wq_release(struct work_struct * work)423 static void svm_range_bo_wq_release(struct work_struct *work)
424 {
425 struct svm_range_bo *svm_bo;
426
427 svm_bo = container_of(work, struct svm_range_bo, release_work);
428 svm_range_bo_release(&svm_bo->kref);
429 }
430
svm_range_bo_release_async(struct kref * kref)431 static void svm_range_bo_release_async(struct kref *kref)
432 {
433 struct svm_range_bo *svm_bo;
434
435 svm_bo = container_of(kref, struct svm_range_bo, kref);
436 pr_debug("svm_bo 0x%p\n", svm_bo);
437 INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
438 schedule_work(&svm_bo->release_work);
439 }
440
svm_range_bo_unref_async(struct svm_range_bo * svm_bo)441 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
442 {
443 kref_put(&svm_bo->kref, svm_range_bo_release_async);
444 }
445
svm_range_bo_unref(struct svm_range_bo * svm_bo)446 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
447 {
448 if (svm_bo)
449 kref_put(&svm_bo->kref, svm_range_bo_release);
450 }
451
452 static bool
svm_range_validate_svm_bo(struct kfd_node * node,struct svm_range * prange)453 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
454 {
455 mutex_lock(&prange->lock);
456 if (!prange->svm_bo) {
457 mutex_unlock(&prange->lock);
458 return false;
459 }
460 if (prange->ttm_res) {
461 /* We still have a reference, all is well */
462 mutex_unlock(&prange->lock);
463 return true;
464 }
465 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
466 /*
467 * Migrate from GPU to GPU, remove range from source svm_bo->node
468 * range list, and return false to allocate svm_bo from destination
469 * node.
470 */
471 if (prange->svm_bo->node != node) {
472 mutex_unlock(&prange->lock);
473
474 spin_lock(&prange->svm_bo->list_lock);
475 list_del_init(&prange->svm_bo_list);
476 spin_unlock(&prange->svm_bo->list_lock);
477
478 svm_range_bo_unref(prange->svm_bo);
479 return false;
480 }
481 if (READ_ONCE(prange->svm_bo->evicting)) {
482 struct dma_fence *f;
483 struct svm_range_bo *svm_bo;
484 /* The BO is getting evicted,
485 * we need to get a new one
486 */
487 mutex_unlock(&prange->lock);
488 svm_bo = prange->svm_bo;
489 f = dma_fence_get(&svm_bo->eviction_fence->base);
490 svm_range_bo_unref(prange->svm_bo);
491 /* wait for the fence to avoid long spin-loop
492 * at list_empty_careful
493 */
494 dma_fence_wait(f, false);
495 dma_fence_put(f);
496 } else {
497 /* The BO was still around and we got
498 * a new reference to it
499 */
500 mutex_unlock(&prange->lock);
501 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
502 prange->svms, prange->start, prange->last);
503
504 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
505 return true;
506 }
507
508 } else {
509 mutex_unlock(&prange->lock);
510 }
511
512 /* We need a new svm_bo. Spin-loop to wait for concurrent
513 * svm_range_bo_release to finish removing this range from
514 * its range list and set prange->svm_bo to null. After this,
515 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
516 */
517 while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
518 cond_resched();
519
520 return false;
521 }
522
svm_range_bo_new(void)523 static struct svm_range_bo *svm_range_bo_new(void)
524 {
525 struct svm_range_bo *svm_bo;
526
527 svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
528 if (!svm_bo)
529 return NULL;
530
531 kref_init(&svm_bo->kref);
532 INIT_LIST_HEAD(&svm_bo->range_list);
533 spin_lock_init(&svm_bo->list_lock);
534
535 return svm_bo;
536 }
537
538 int
svm_range_vram_node_new(struct kfd_node * node,struct svm_range * prange,bool clear)539 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
540 bool clear)
541 {
542 struct kfd_process_device *pdd;
543 struct amdgpu_bo_param bp;
544 struct svm_range_bo *svm_bo;
545 struct amdgpu_bo_user *ubo;
546 struct amdgpu_bo *bo;
547 struct kfd_process *p;
548 struct mm_struct *mm;
549 int r;
550
551 p = container_of(prange->svms, struct kfd_process, svms);
552 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
553 prange->start, prange->last);
554
555 if (svm_range_validate_svm_bo(node, prange))
556 return 0;
557
558 svm_bo = svm_range_bo_new();
559 if (!svm_bo) {
560 pr_debug("failed to alloc svm bo\n");
561 return -ENOMEM;
562 }
563 mm = get_task_mm(p->lead_thread);
564 if (!mm) {
565 pr_debug("failed to get mm\n");
566 kfree(svm_bo);
567 return -ESRCH;
568 }
569 svm_bo->node = node;
570 svm_bo->eviction_fence =
571 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
572 mm,
573 svm_bo);
574 mmput(mm);
575 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
576 svm_bo->evicting = 0;
577 memset(&bp, 0, sizeof(bp));
578 bp.size = prange->npages * PAGE_SIZE;
579 bp.byte_align = PAGE_SIZE;
580 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
581 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
582 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
583 bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
584 bp.type = ttm_bo_type_device;
585 bp.resv = NULL;
586 if (node->xcp)
587 bp.xcp_id_plus1 = node->xcp->id + 1;
588
589 r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
590 if (r) {
591 pr_debug("failed %d to create bo\n", r);
592 goto create_bo_failed;
593 }
594 bo = &ubo->bo;
595
596 pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
597 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
598 bp.xcp_id_plus1 - 1);
599
600 r = amdgpu_bo_reserve(bo, true);
601 if (r) {
602 pr_debug("failed %d to reserve bo\n", r);
603 goto reserve_bo_failed;
604 }
605
606 if (clear) {
607 r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
608 if (r) {
609 pr_debug("failed %d to sync bo\n", r);
610 amdgpu_bo_unreserve(bo);
611 goto reserve_bo_failed;
612 }
613 }
614
615 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
616 if (r) {
617 pr_debug("failed %d to reserve bo\n", r);
618 amdgpu_bo_unreserve(bo);
619 goto reserve_bo_failed;
620 }
621 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
622
623 amdgpu_bo_unreserve(bo);
624
625 svm_bo->bo = bo;
626 prange->svm_bo = svm_bo;
627 prange->ttm_res = bo->tbo.resource;
628 prange->offset = 0;
629
630 spin_lock(&svm_bo->list_lock);
631 list_add(&prange->svm_bo_list, &svm_bo->range_list);
632 spin_unlock(&svm_bo->list_lock);
633
634 pdd = svm_range_get_pdd_by_node(prange, node);
635 if (pdd)
636 atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
637
638 return 0;
639
640 reserve_bo_failed:
641 amdgpu_bo_unref(&bo);
642 create_bo_failed:
643 dma_fence_put(&svm_bo->eviction_fence->base);
644 kfree(svm_bo);
645 prange->ttm_res = NULL;
646
647 return r;
648 }
649
svm_range_vram_node_free(struct svm_range * prange)650 void svm_range_vram_node_free(struct svm_range *prange)
651 {
652 /* serialize prange->svm_bo unref */
653 mutex_lock(&prange->lock);
654 /* prange->svm_bo has not been unref */
655 if (prange->ttm_res) {
656 prange->ttm_res = NULL;
657 mutex_unlock(&prange->lock);
658 svm_range_bo_unref(prange->svm_bo);
659 } else
660 mutex_unlock(&prange->lock);
661 }
662
663 struct kfd_node *
svm_range_get_node_by_id(struct svm_range * prange,uint32_t gpu_id)664 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
665 {
666 struct kfd_process *p;
667 struct kfd_process_device *pdd;
668
669 p = container_of(prange->svms, struct kfd_process, svms);
670 pdd = kfd_process_device_data_by_id(p, gpu_id);
671 if (!pdd) {
672 pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
673 return NULL;
674 }
675
676 return pdd->dev;
677 }
678
679 struct kfd_process_device *
svm_range_get_pdd_by_node(struct svm_range * prange,struct kfd_node * node)680 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
681 {
682 struct kfd_process *p;
683
684 p = container_of(prange->svms, struct kfd_process, svms);
685
686 return kfd_get_process_device_data(node, p);
687 }
688
svm_range_bo_validate(void * param,struct amdgpu_bo * bo)689 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
690 {
691 struct ttm_operation_ctx ctx = { false, false };
692
693 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
694
695 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
696 }
697
698 static int
svm_range_check_attr(struct kfd_process * p,uint32_t nattr,struct kfd_ioctl_svm_attribute * attrs)699 svm_range_check_attr(struct kfd_process *p,
700 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
701 {
702 uint32_t i;
703
704 for (i = 0; i < nattr; i++) {
705 uint32_t val = attrs[i].value;
706 int gpuidx = MAX_GPU_INSTANCE;
707
708 switch (attrs[i].type) {
709 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
710 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
711 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
712 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
713 break;
714 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
715 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
716 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
717 break;
718 case KFD_IOCTL_SVM_ATTR_ACCESS:
719 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
720 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
721 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
722 break;
723 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
724 break;
725 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
726 break;
727 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
728 break;
729 default:
730 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
731 return -EINVAL;
732 }
733
734 if (gpuidx < 0) {
735 pr_debug("no GPU 0x%x found\n", val);
736 return -EINVAL;
737 } else if (gpuidx < MAX_GPU_INSTANCE &&
738 !test_bit(gpuidx, p->svms.bitmap_supported)) {
739 pr_debug("GPU 0x%x not supported\n", val);
740 return -EINVAL;
741 }
742 }
743
744 return 0;
745 }
746
747 static void
svm_range_apply_attrs(struct kfd_process * p,struct svm_range * prange,uint32_t nattr,struct kfd_ioctl_svm_attribute * attrs,bool * update_mapping)748 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
749 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
750 bool *update_mapping)
751 {
752 uint32_t i;
753 int gpuidx;
754
755 for (i = 0; i < nattr; i++) {
756 switch (attrs[i].type) {
757 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
758 prange->preferred_loc = attrs[i].value;
759 break;
760 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
761 prange->prefetch_loc = attrs[i].value;
762 break;
763 case KFD_IOCTL_SVM_ATTR_ACCESS:
764 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
765 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
766 if (!p->xnack_enabled)
767 *update_mapping = true;
768
769 gpuidx = kfd_process_gpuidx_from_gpuid(p,
770 attrs[i].value);
771 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
772 bitmap_clear(prange->bitmap_access, gpuidx, 1);
773 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
774 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
775 bitmap_set(prange->bitmap_access, gpuidx, 1);
776 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
777 } else {
778 bitmap_clear(prange->bitmap_access, gpuidx, 1);
779 bitmap_set(prange->bitmap_aip, gpuidx, 1);
780 }
781 break;
782 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
783 *update_mapping = true;
784 prange->flags |= attrs[i].value;
785 break;
786 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
787 *update_mapping = true;
788 prange->flags &= ~attrs[i].value;
789 break;
790 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
791 prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
792 break;
793 default:
794 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
795 }
796 }
797 }
798
799 static bool
svm_range_is_same_attrs(struct kfd_process * p,struct svm_range * prange,uint32_t nattr,struct kfd_ioctl_svm_attribute * attrs)800 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
801 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
802 {
803 uint32_t i;
804 int gpuidx;
805
806 for (i = 0; i < nattr; i++) {
807 switch (attrs[i].type) {
808 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
809 if (prange->preferred_loc != attrs[i].value)
810 return false;
811 break;
812 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
813 /* Prefetch should always trigger a migration even
814 * if the value of the attribute didn't change.
815 */
816 return false;
817 case KFD_IOCTL_SVM_ATTR_ACCESS:
818 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
819 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
820 gpuidx = kfd_process_gpuidx_from_gpuid(p,
821 attrs[i].value);
822 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
823 if (test_bit(gpuidx, prange->bitmap_access) ||
824 test_bit(gpuidx, prange->bitmap_aip))
825 return false;
826 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
827 if (!test_bit(gpuidx, prange->bitmap_access))
828 return false;
829 } else {
830 if (!test_bit(gpuidx, prange->bitmap_aip))
831 return false;
832 }
833 break;
834 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
835 if ((prange->flags & attrs[i].value) != attrs[i].value)
836 return false;
837 break;
838 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
839 if ((prange->flags & attrs[i].value) != 0)
840 return false;
841 break;
842 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
843 if (prange->granularity != attrs[i].value)
844 return false;
845 break;
846 default:
847 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
848 }
849 }
850
851 return true;
852 }
853
854 /**
855 * svm_range_debug_dump - print all range information from svms
856 * @svms: svm range list header
857 *
858 * debug output svm range start, end, prefetch location from svms
859 * interval tree and link list
860 *
861 * Context: The caller must hold svms->lock
862 */
svm_range_debug_dump(struct svm_range_list * svms)863 static void svm_range_debug_dump(struct svm_range_list *svms)
864 {
865 struct interval_tree_node *node;
866 struct svm_range *prange;
867
868 pr_debug("dump svms 0x%p list\n", svms);
869 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
870
871 list_for_each_entry(prange, &svms->list, list) {
872 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
873 prange, prange->start, prange->npages,
874 prange->start + prange->npages - 1,
875 prange->actual_loc);
876 }
877
878 pr_debug("dump svms 0x%p interval tree\n", svms);
879 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
880 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
881 while (node) {
882 prange = container_of(node, struct svm_range, it_node);
883 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
884 prange, prange->start, prange->npages,
885 prange->start + prange->npages - 1,
886 prange->actual_loc);
887 node = interval_tree_iter_next(node, 0, ~0ULL);
888 }
889 }
890
891 static void *
svm_range_copy_array(void * psrc,size_t size,uint64_t num_elements,uint64_t offset)892 svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
893 uint64_t offset)
894 {
895 unsigned char *dst;
896
897 dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
898 if (!dst)
899 return NULL;
900 memcpy(dst, (unsigned char *)psrc + offset, num_elements * size);
901
902 return (void *)dst;
903 }
904
905 static int
svm_range_copy_dma_addrs(struct svm_range * dst,struct svm_range * src)906 svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
907 {
908 int i;
909
910 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
911 if (!src->dma_addr[i])
912 continue;
913 dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
914 sizeof(*src->dma_addr[i]), src->npages, 0);
915 if (!dst->dma_addr[i])
916 return -ENOMEM;
917 }
918
919 return 0;
920 }
921
922 static int
svm_range_split_array(void * ppnew,void * ppold,size_t size,uint64_t old_start,uint64_t old_n,uint64_t new_start,uint64_t new_n)923 svm_range_split_array(void *ppnew, void *ppold, size_t size,
924 uint64_t old_start, uint64_t old_n,
925 uint64_t new_start, uint64_t new_n)
926 {
927 unsigned char *new, *old, *pold;
928 uint64_t d;
929
930 if (!ppold)
931 return 0;
932 pold = *(unsigned char **)ppold;
933 if (!pold)
934 return 0;
935
936 d = (new_start - old_start) * size;
937 new = svm_range_copy_array(pold, size, new_n, d);
938 if (!new)
939 return -ENOMEM;
940 d = (new_start == old_start) ? new_n * size : 0;
941 old = svm_range_copy_array(pold, size, old_n, d);
942 if (!old) {
943 kvfree(new);
944 return -ENOMEM;
945 }
946 kvfree(pold);
947 *(void **)ppold = old;
948 *(void **)ppnew = new;
949
950 return 0;
951 }
952
953 static int
svm_range_split_pages(struct svm_range * new,struct svm_range * old,uint64_t start,uint64_t last)954 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
955 uint64_t start, uint64_t last)
956 {
957 uint64_t npages = last - start + 1;
958 int i, r;
959
960 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
961 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
962 sizeof(*old->dma_addr[i]), old->start,
963 npages, new->start, new->npages);
964 if (r)
965 return r;
966 }
967
968 return 0;
969 }
970
971 static int
svm_range_split_nodes(struct svm_range * new,struct svm_range * old,uint64_t start,uint64_t last)972 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
973 uint64_t start, uint64_t last)
974 {
975 uint64_t npages = last - start + 1;
976
977 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
978 new->svms, new, new->start, start, last);
979
980 if (new->start == old->start) {
981 new->offset = old->offset;
982 old->offset += new->npages;
983 } else {
984 new->offset = old->offset + npages;
985 }
986
987 new->svm_bo = svm_range_bo_ref(old->svm_bo);
988 new->ttm_res = old->ttm_res;
989
990 spin_lock(&new->svm_bo->list_lock);
991 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
992 spin_unlock(&new->svm_bo->list_lock);
993
994 return 0;
995 }
996
997 /**
998 * svm_range_split_adjust - split range and adjust
999 *
1000 * @new: new range
1001 * @old: the old range
1002 * @start: the old range adjust to start address in pages
1003 * @last: the old range adjust to last address in pages
1004 *
1005 * Copy system memory dma_addr or vram ttm_res in old range to new
1006 * range from new_start up to size new->npages, the remaining old range is from
1007 * start to last
1008 *
1009 * Return:
1010 * 0 - OK, -ENOMEM - out of memory
1011 */
1012 static int
svm_range_split_adjust(struct svm_range * new,struct svm_range * old,uint64_t start,uint64_t last)1013 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
1014 uint64_t start, uint64_t last)
1015 {
1016 int r;
1017
1018 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1019 new->svms, new->start, old->start, old->last, start, last);
1020
1021 if (new->start < old->start ||
1022 new->last > old->last) {
1023 WARN_ONCE(1, "invalid new range start or last\n");
1024 return -EINVAL;
1025 }
1026
1027 r = svm_range_split_pages(new, old, start, last);
1028 if (r)
1029 return r;
1030
1031 if (old->actual_loc && old->ttm_res) {
1032 r = svm_range_split_nodes(new, old, start, last);
1033 if (r)
1034 return r;
1035 }
1036
1037 old->npages = last - start + 1;
1038 old->start = start;
1039 old->last = last;
1040 new->flags = old->flags;
1041 new->preferred_loc = old->preferred_loc;
1042 new->prefetch_loc = old->prefetch_loc;
1043 new->actual_loc = old->actual_loc;
1044 new->granularity = old->granularity;
1045 new->mapped_to_gpu = old->mapped_to_gpu;
1046 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1047 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1048
1049 return 0;
1050 }
1051
1052 /**
1053 * svm_range_split - split a range in 2 ranges
1054 *
1055 * @prange: the svm range to split
1056 * @start: the remaining range start address in pages
1057 * @last: the remaining range last address in pages
1058 * @new: the result new range generated
1059 *
1060 * Two cases only:
1061 * case 1: if start == prange->start
1062 * prange ==> prange[start, last]
1063 * new range [last + 1, prange->last]
1064 *
1065 * case 2: if last == prange->last
1066 * prange ==> prange[start, last]
1067 * new range [prange->start, start - 1]
1068 *
1069 * Return:
1070 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
1071 */
1072 static int
svm_range_split(struct svm_range * prange,uint64_t start,uint64_t last,struct svm_range ** new)1073 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1074 struct svm_range **new)
1075 {
1076 uint64_t old_start = prange->start;
1077 uint64_t old_last = prange->last;
1078 struct svm_range_list *svms;
1079 int r = 0;
1080
1081 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1082 old_start, old_last, start, last);
1083
1084 if (old_start != start && old_last != last)
1085 return -EINVAL;
1086 if (start < old_start || last > old_last)
1087 return -EINVAL;
1088
1089 svms = prange->svms;
1090 if (old_start == start)
1091 *new = svm_range_new(svms, last + 1, old_last, false);
1092 else
1093 *new = svm_range_new(svms, old_start, start - 1, false);
1094 if (!*new)
1095 return -ENOMEM;
1096
1097 r = svm_range_split_adjust(*new, prange, start, last);
1098 if (r) {
1099 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1100 r, old_start, old_last, start, last);
1101 svm_range_free(*new, false);
1102 *new = NULL;
1103 }
1104
1105 return r;
1106 }
1107
1108 static int
svm_range_split_tail(struct svm_range * prange,uint64_t new_last,struct list_head * insert_list)1109 svm_range_split_tail(struct svm_range *prange,
1110 uint64_t new_last, struct list_head *insert_list)
1111 {
1112 struct svm_range *tail;
1113 int r = svm_range_split(prange, prange->start, new_last, &tail);
1114
1115 if (!r)
1116 list_add(&tail->list, insert_list);
1117 return r;
1118 }
1119
1120 static int
svm_range_split_head(struct svm_range * prange,uint64_t new_start,struct list_head * insert_list)1121 svm_range_split_head(struct svm_range *prange,
1122 uint64_t new_start, struct list_head *insert_list)
1123 {
1124 struct svm_range *head;
1125 int r = svm_range_split(prange, new_start, prange->last, &head);
1126
1127 if (!r)
1128 list_add(&head->list, insert_list);
1129 return r;
1130 }
1131
1132 static void
svm_range_add_child(struct svm_range * prange,struct mm_struct * mm,struct svm_range * pchild,enum svm_work_list_ops op)1133 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1134 struct svm_range *pchild, enum svm_work_list_ops op)
1135 {
1136 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1137 pchild, pchild->start, pchild->last, prange, op);
1138
1139 pchild->work_item.mm = mm;
1140 pchild->work_item.op = op;
1141 list_add_tail(&pchild->child_list, &prange->child_list);
1142 }
1143
1144 /**
1145 * svm_range_split_by_granularity - collect ranges within granularity boundary
1146 *
1147 * @p: the process with svms list
1148 * @mm: mm structure
1149 * @addr: the vm fault address in pages, to split the prange
1150 * @parent: parent range if prange is from child list
1151 * @prange: prange to split
1152 *
1153 * Trims @prange to be a single aligned block of prange->granularity if
1154 * possible. The head and tail are added to the child_list in @parent.
1155 *
1156 * Context: caller must hold mmap_read_lock and prange->lock
1157 *
1158 * Return:
1159 * 0 - OK, otherwise error code
1160 */
1161 int
svm_range_split_by_granularity(struct kfd_process * p,struct mm_struct * mm,unsigned long addr,struct svm_range * parent,struct svm_range * prange)1162 svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
1163 unsigned long addr, struct svm_range *parent,
1164 struct svm_range *prange)
1165 {
1166 struct svm_range *head, *tail;
1167 unsigned long start, last, size;
1168 int r;
1169
1170 /* Align splited range start and size to granularity size, then a single
1171 * PTE will be used for whole range, this reduces the number of PTE
1172 * updated and the L1 TLB space used for translation.
1173 */
1174 size = 1UL << prange->granularity;
1175 start = ALIGN_DOWN(addr, size);
1176 last = ALIGN(addr + 1, size) - 1;
1177
1178 pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
1179 prange->svms, prange->start, prange->last, start, last, size);
1180
1181 if (start > prange->start) {
1182 r = svm_range_split(prange, start, prange->last, &head);
1183 if (r)
1184 return r;
1185 svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
1186 }
1187
1188 if (last < prange->last) {
1189 r = svm_range_split(prange, prange->start, last, &tail);
1190 if (r)
1191 return r;
1192 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
1193 }
1194
1195 /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
1196 if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
1197 prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
1198 pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
1199 prange, prange->start, prange->last,
1200 SVM_OP_ADD_RANGE_AND_MAP);
1201 }
1202 return 0;
1203 }
1204 static bool
svm_nodes_in_same_hive(struct kfd_node * node_a,struct kfd_node * node_b)1205 svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
1206 {
1207 return (node_a->adev == node_b->adev ||
1208 amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
1209 }
1210
1211 static uint64_t
svm_range_get_pte_flags(struct kfd_node * node,struct svm_range * prange,int domain)1212 svm_range_get_pte_flags(struct kfd_node *node,
1213 struct svm_range *prange, int domain)
1214 {
1215 struct kfd_node *bo_node;
1216 uint32_t flags = prange->flags;
1217 uint32_t mapping_flags = 0;
1218 uint64_t pte_flags;
1219 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1220 bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
1221 bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/
1222 unsigned int mtype_local;
1223
1224 if (domain == SVM_RANGE_VRAM_DOMAIN)
1225 bo_node = prange->svm_bo->node;
1226
1227 switch (node->adev->ip_versions[GC_HWIP][0]) {
1228 case IP_VERSION(9, 4, 1):
1229 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1230 if (bo_node == node) {
1231 mapping_flags |= coherent ?
1232 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1233 } else {
1234 mapping_flags |= coherent ?
1235 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1236 if (svm_nodes_in_same_hive(node, bo_node))
1237 snoop = true;
1238 }
1239 } else {
1240 mapping_flags |= coherent ?
1241 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1242 }
1243 break;
1244 case IP_VERSION(9, 4, 2):
1245 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1246 if (bo_node == node) {
1247 mapping_flags |= coherent ?
1248 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1249 if (node->adev->gmc.xgmi.connected_to_cpu)
1250 snoop = true;
1251 } else {
1252 mapping_flags |= coherent ?
1253 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1254 if (svm_nodes_in_same_hive(node, bo_node))
1255 snoop = true;
1256 }
1257 } else {
1258 mapping_flags |= coherent ?
1259 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1260 }
1261 break;
1262 case IP_VERSION(9, 4, 3):
1263 mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
1264 (amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW);
1265 snoop = true;
1266 if (uncached) {
1267 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1268 } else if (domain == SVM_RANGE_VRAM_DOMAIN) {
1269 /* local HBM region close to partition */
1270 if (bo_node->adev == node->adev &&
1271 (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
1272 mapping_flags |= mtype_local;
1273 /* local HBM region far from partition or remote XGMI GPU */
1274 else if (svm_nodes_in_same_hive(bo_node, node))
1275 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1276 /* PCIe P2P */
1277 else
1278 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1279 /* system memory accessed by the APU */
1280 } else if (node->adev->flags & AMD_IS_APU) {
1281 /* On NUMA systems, locality is determined per-page
1282 * in amdgpu_gmc_override_vm_pte_flags
1283 */
1284 if (num_possible_nodes() <= 1)
1285 mapping_flags |= mtype_local;
1286 else
1287 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1288 /* system memory accessed by the dGPU */
1289 } else {
1290 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1291 }
1292 break;
1293 default:
1294 mapping_flags |= coherent ?
1295 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1296 }
1297
1298 mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1299
1300 if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1301 mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1302 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1303 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1304
1305 pte_flags = AMDGPU_PTE_VALID;
1306 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1307 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1308
1309 pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
1310 return pte_flags;
1311 }
1312
1313 static int
svm_range_unmap_from_gpu(struct amdgpu_device * adev,struct amdgpu_vm * vm,uint64_t start,uint64_t last,struct dma_fence ** fence)1314 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1315 uint64_t start, uint64_t last,
1316 struct dma_fence **fence)
1317 {
1318 uint64_t init_pte_value = 0;
1319
1320 pr_debug("[0x%llx 0x%llx]\n", start, last);
1321
1322 return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
1323 last, init_pte_value, 0, 0, NULL, NULL,
1324 fence);
1325 }
1326
1327 static int
svm_range_unmap_from_gpus(struct svm_range * prange,unsigned long start,unsigned long last,uint32_t trigger)1328 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1329 unsigned long last, uint32_t trigger)
1330 {
1331 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1332 struct kfd_process_device *pdd;
1333 struct dma_fence *fence = NULL;
1334 struct kfd_process *p;
1335 uint32_t gpuidx;
1336 int r = 0;
1337
1338 if (!prange->mapped_to_gpu) {
1339 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1340 prange, prange->start, prange->last);
1341 return 0;
1342 }
1343
1344 if (prange->start == start && prange->last == last) {
1345 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1346 prange->mapped_to_gpu = false;
1347 }
1348
1349 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1350 MAX_GPU_INSTANCE);
1351 p = container_of(prange->svms, struct kfd_process, svms);
1352
1353 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1354 pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1355 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1356 if (!pdd) {
1357 pr_debug("failed to find device idx %d\n", gpuidx);
1358 return -EINVAL;
1359 }
1360
1361 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1362 start, last, trigger);
1363
1364 r = svm_range_unmap_from_gpu(pdd->dev->adev,
1365 drm_priv_to_vm(pdd->drm_priv),
1366 start, last, &fence);
1367 if (r)
1368 break;
1369
1370 if (fence) {
1371 r = dma_fence_wait(fence, false);
1372 dma_fence_put(fence);
1373 fence = NULL;
1374 if (r)
1375 break;
1376 }
1377 kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
1378 }
1379
1380 return r;
1381 }
1382
1383 static int
svm_range_map_to_gpu(struct kfd_process_device * pdd,struct svm_range * prange,unsigned long offset,unsigned long npages,bool readonly,dma_addr_t * dma_addr,struct amdgpu_device * bo_adev,struct dma_fence ** fence,bool flush_tlb)1384 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1385 unsigned long offset, unsigned long npages, bool readonly,
1386 dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1387 struct dma_fence **fence, bool flush_tlb)
1388 {
1389 struct amdgpu_device *adev = pdd->dev->adev;
1390 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1391 uint64_t pte_flags;
1392 unsigned long last_start;
1393 int last_domain;
1394 int r = 0;
1395 int64_t i, j;
1396
1397 last_start = prange->start + offset;
1398
1399 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1400 last_start, last_start + npages - 1, readonly);
1401
1402 for (i = offset; i < offset + npages; i++) {
1403 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1404 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1405
1406 /* Collect all pages in the same address range and memory domain
1407 * that can be mapped with a single call to update mapping.
1408 */
1409 if (i < offset + npages - 1 &&
1410 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1411 continue;
1412
1413 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1414 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1415
1416 pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
1417 if (readonly)
1418 pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1419
1420 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1421 prange->svms, last_start, prange->start + i,
1422 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1423 pte_flags);
1424
1425 /* For dGPU mode, we use same vm_manager to allocate VRAM for
1426 * different memory partition based on fpfn/lpfn, we should use
1427 * same vm_manager.vram_base_offset regardless memory partition.
1428 */
1429 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
1430 last_start, prange->start + i,
1431 pte_flags,
1432 (last_start - prange->start) << PAGE_SHIFT,
1433 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1434 NULL, dma_addr, &vm->last_update);
1435
1436 for (j = last_start - prange->start; j <= i; j++)
1437 dma_addr[j] |= last_domain;
1438
1439 if (r) {
1440 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1441 goto out;
1442 }
1443 last_start = prange->start + i + 1;
1444 }
1445
1446 r = amdgpu_vm_update_pdes(adev, vm, false);
1447 if (r) {
1448 pr_debug("failed %d to update directories 0x%lx\n", r,
1449 prange->start);
1450 goto out;
1451 }
1452
1453 if (fence)
1454 *fence = dma_fence_get(vm->last_update);
1455
1456 out:
1457 return r;
1458 }
1459
1460 static int
svm_range_map_to_gpus(struct svm_range * prange,unsigned long offset,unsigned long npages,bool readonly,unsigned long * bitmap,bool wait,bool flush_tlb)1461 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1462 unsigned long npages, bool readonly,
1463 unsigned long *bitmap, bool wait, bool flush_tlb)
1464 {
1465 struct kfd_process_device *pdd;
1466 struct amdgpu_device *bo_adev = NULL;
1467 struct kfd_process *p;
1468 struct dma_fence *fence = NULL;
1469 uint32_t gpuidx;
1470 int r = 0;
1471
1472 if (prange->svm_bo && prange->ttm_res)
1473 bo_adev = prange->svm_bo->node->adev;
1474
1475 p = container_of(prange->svms, struct kfd_process, svms);
1476 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1477 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1478 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1479 if (!pdd) {
1480 pr_debug("failed to find device idx %d\n", gpuidx);
1481 return -EINVAL;
1482 }
1483
1484 pdd = kfd_bind_process_to_device(pdd->dev, p);
1485 if (IS_ERR(pdd))
1486 return -EINVAL;
1487
1488 if (bo_adev && pdd->dev->adev != bo_adev &&
1489 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1490 pr_debug("cannot map to device idx %d\n", gpuidx);
1491 continue;
1492 }
1493
1494 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1495 prange->dma_addr[gpuidx],
1496 bo_adev, wait ? &fence : NULL,
1497 flush_tlb);
1498 if (r)
1499 break;
1500
1501 if (fence) {
1502 r = dma_fence_wait(fence, false);
1503 dma_fence_put(fence);
1504 fence = NULL;
1505 if (r) {
1506 pr_debug("failed %d to dma fence wait\n", r);
1507 break;
1508 }
1509 }
1510
1511 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1512 }
1513
1514 return r;
1515 }
1516
1517 struct svm_validate_context {
1518 struct kfd_process *process;
1519 struct svm_range *prange;
1520 bool intr;
1521 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1522 struct drm_exec exec;
1523 };
1524
svm_range_reserve_bos(struct svm_validate_context * ctx,bool intr)1525 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
1526 {
1527 struct kfd_process_device *pdd;
1528 struct amdgpu_vm *vm;
1529 uint32_t gpuidx;
1530 int r;
1531
1532 drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0);
1533 drm_exec_until_all_locked(&ctx->exec) {
1534 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1535 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1536 if (!pdd) {
1537 pr_debug("failed to find device idx %d\n", gpuidx);
1538 r = -EINVAL;
1539 goto unreserve_out;
1540 }
1541 vm = drm_priv_to_vm(pdd->drm_priv);
1542
1543 r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1544 drm_exec_retry_on_contention(&ctx->exec);
1545 if (unlikely(r)) {
1546 pr_debug("failed %d to reserve bo\n", r);
1547 goto unreserve_out;
1548 }
1549 }
1550 }
1551
1552 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1553 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1554 if (!pdd) {
1555 pr_debug("failed to find device idx %d\n", gpuidx);
1556 r = -EINVAL;
1557 goto unreserve_out;
1558 }
1559
1560 r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
1561 drm_priv_to_vm(pdd->drm_priv),
1562 svm_range_bo_validate, NULL);
1563 if (r) {
1564 pr_debug("failed %d validate pt bos\n", r);
1565 goto unreserve_out;
1566 }
1567 }
1568
1569 return 0;
1570
1571 unreserve_out:
1572 drm_exec_fini(&ctx->exec);
1573 return r;
1574 }
1575
svm_range_unreserve_bos(struct svm_validate_context * ctx)1576 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1577 {
1578 drm_exec_fini(&ctx->exec);
1579 }
1580
kfd_svm_page_owner(struct kfd_process * p,int32_t gpuidx)1581 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1582 {
1583 struct kfd_process_device *pdd;
1584
1585 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1586 if (!pdd)
1587 return NULL;
1588
1589 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
1590 }
1591
1592 /*
1593 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
1594 *
1595 * To prevent concurrent destruction or change of range attributes, the
1596 * svm_read_lock must be held. The caller must not hold the svm_write_lock
1597 * because that would block concurrent evictions and lead to deadlocks. To
1598 * serialize concurrent migrations or validations of the same range, the
1599 * prange->migrate_mutex must be held.
1600 *
1601 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
1602 * eviction fence.
1603 *
1604 * The following sequence ensures race-free validation and GPU mapping:
1605 *
1606 * 1. Reserve page table (and SVM BO if range is in VRAM)
1607 * 2. hmm_range_fault to get page addresses (if system memory)
1608 * 3. DMA-map pages (if system memory)
1609 * 4-a. Take notifier lock
1610 * 4-b. Check that pages still valid (mmu_interval_read_retry)
1611 * 4-c. Check that the range was not split or otherwise invalidated
1612 * 4-d. Update GPU page table
1613 * 4.e. Release notifier lock
1614 * 5. Release page table (and SVM BO) reservation
1615 */
svm_range_validate_and_map(struct mm_struct * mm,struct svm_range * prange,int32_t gpuidx,bool intr,bool wait,bool flush_tlb)1616 static int svm_range_validate_and_map(struct mm_struct *mm,
1617 struct svm_range *prange, int32_t gpuidx,
1618 bool intr, bool wait, bool flush_tlb)
1619 {
1620 struct svm_validate_context *ctx;
1621 unsigned long start, end, addr;
1622 struct kfd_process *p;
1623 void *owner;
1624 int32_t idx;
1625 int r = 0;
1626
1627 ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
1628 if (!ctx)
1629 return -ENOMEM;
1630 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1631 ctx->prange = prange;
1632 ctx->intr = intr;
1633
1634 if (gpuidx < MAX_GPU_INSTANCE) {
1635 bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
1636 bitmap_set(ctx->bitmap, gpuidx, 1);
1637 } else if (ctx->process->xnack_enabled) {
1638 bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1639
1640 /* If prefetch range to GPU, or GPU retry fault migrate range to
1641 * GPU, which has ACCESS attribute to the range, create mapping
1642 * on that GPU.
1643 */
1644 if (prange->actual_loc) {
1645 gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
1646 prange->actual_loc);
1647 if (gpuidx < 0) {
1648 WARN_ONCE(1, "failed get device by id 0x%x\n",
1649 prange->actual_loc);
1650 r = -EINVAL;
1651 goto free_ctx;
1652 }
1653 if (test_bit(gpuidx, prange->bitmap_access))
1654 bitmap_set(ctx->bitmap, gpuidx, 1);
1655 }
1656
1657 /*
1658 * If prange is already mapped or with always mapped flag,
1659 * update mapping on GPUs with ACCESS attribute
1660 */
1661 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1662 if (prange->mapped_to_gpu ||
1663 prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1664 bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1665 }
1666 } else {
1667 bitmap_or(ctx->bitmap, prange->bitmap_access,
1668 prange->bitmap_aip, MAX_GPU_INSTANCE);
1669 }
1670
1671 if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
1672 r = 0;
1673 goto free_ctx;
1674 }
1675
1676 if (prange->actual_loc && !prange->ttm_res) {
1677 /* This should never happen. actual_loc gets set by
1678 * svm_migrate_ram_to_vram after allocating a BO.
1679 */
1680 WARN_ONCE(1, "VRAM BO missing during validation\n");
1681 r = -EINVAL;
1682 goto free_ctx;
1683 }
1684
1685 svm_range_reserve_bos(ctx, intr);
1686
1687 p = container_of(prange->svms, struct kfd_process, svms);
1688 owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
1689 MAX_GPU_INSTANCE));
1690 for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
1691 if (kfd_svm_page_owner(p, idx) != owner) {
1692 owner = NULL;
1693 break;
1694 }
1695 }
1696
1697 start = prange->start << PAGE_SHIFT;
1698 end = (prange->last + 1) << PAGE_SHIFT;
1699 for (addr = start; !r && addr < end; ) {
1700 struct hmm_range *hmm_range;
1701 struct vm_area_struct *vma;
1702 unsigned long next = 0;
1703 unsigned long offset;
1704 unsigned long npages;
1705 bool readonly;
1706
1707 vma = vma_lookup(mm, addr);
1708 if (vma) {
1709 readonly = !(vma->vm_flags & VM_WRITE);
1710
1711 next = min(vma->vm_end, end);
1712 npages = (next - addr) >> PAGE_SHIFT;
1713 WRITE_ONCE(p->svms.faulting_task, current);
1714 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1715 readonly, owner, NULL,
1716 &hmm_range);
1717 WRITE_ONCE(p->svms.faulting_task, NULL);
1718 if (r) {
1719 pr_debug("failed %d to get svm range pages\n", r);
1720 if (r == -EBUSY)
1721 r = -EAGAIN;
1722 }
1723 } else {
1724 r = -EFAULT;
1725 }
1726
1727 if (!r) {
1728 offset = (addr - start) >> PAGE_SHIFT;
1729 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1730 hmm_range->hmm_pfns);
1731 if (r)
1732 pr_debug("failed %d to dma map range\n", r);
1733 }
1734
1735 svm_range_lock(prange);
1736 if (!r && amdgpu_hmm_range_get_pages_done(hmm_range)) {
1737 pr_debug("hmm update the range, need validate again\n");
1738 r = -EAGAIN;
1739 }
1740
1741 if (!r && !list_empty(&prange->child_list)) {
1742 pr_debug("range split by unmap in parallel, validate again\n");
1743 r = -EAGAIN;
1744 }
1745
1746 if (!r)
1747 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1748 ctx->bitmap, wait, flush_tlb);
1749
1750 if (!r && next == end)
1751 prange->mapped_to_gpu = true;
1752
1753 svm_range_unlock(prange);
1754
1755 addr = next;
1756 }
1757
1758 svm_range_unreserve_bos(ctx);
1759 if (!r)
1760 prange->validate_timestamp = ktime_get_boottime();
1761
1762 free_ctx:
1763 kfree(ctx);
1764
1765 return r;
1766 }
1767
1768 /**
1769 * svm_range_list_lock_and_flush_work - flush pending deferred work
1770 *
1771 * @svms: the svm range list
1772 * @mm: the mm structure
1773 *
1774 * Context: Returns with mmap write lock held, pending deferred work flushed
1775 *
1776 */
1777 void
svm_range_list_lock_and_flush_work(struct svm_range_list * svms,struct mm_struct * mm)1778 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1779 struct mm_struct *mm)
1780 {
1781 retry_flush_work:
1782 flush_work(&svms->deferred_list_work);
1783 mmap_write_lock(mm);
1784
1785 if (list_empty(&svms->deferred_range_list))
1786 return;
1787 mmap_write_unlock(mm);
1788 pr_debug("retry flush\n");
1789 goto retry_flush_work;
1790 }
1791
svm_range_restore_work(struct work_struct * work)1792 static void svm_range_restore_work(struct work_struct *work)
1793 {
1794 struct delayed_work *dwork = to_delayed_work(work);
1795 struct amdkfd_process_info *process_info;
1796 struct svm_range_list *svms;
1797 struct svm_range *prange;
1798 struct kfd_process *p;
1799 struct mm_struct *mm;
1800 int evicted_ranges;
1801 int invalid;
1802 int r;
1803
1804 svms = container_of(dwork, struct svm_range_list, restore_work);
1805 evicted_ranges = atomic_read(&svms->evicted_ranges);
1806 if (!evicted_ranges)
1807 return;
1808
1809 pr_debug("restore svm ranges\n");
1810
1811 p = container_of(svms, struct kfd_process, svms);
1812 process_info = p->kgd_process_info;
1813
1814 /* Keep mm reference when svm_range_validate_and_map ranges */
1815 mm = get_task_mm(p->lead_thread);
1816 if (!mm) {
1817 pr_debug("svms 0x%p process mm gone\n", svms);
1818 return;
1819 }
1820
1821 mutex_lock(&process_info->lock);
1822 svm_range_list_lock_and_flush_work(svms, mm);
1823 mutex_lock(&svms->lock);
1824
1825 evicted_ranges = atomic_read(&svms->evicted_ranges);
1826
1827 list_for_each_entry(prange, &svms->list, list) {
1828 invalid = atomic_read(&prange->invalid);
1829 if (!invalid)
1830 continue;
1831
1832 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1833 prange->svms, prange, prange->start, prange->last,
1834 invalid);
1835
1836 /*
1837 * If range is migrating, wait for migration is done.
1838 */
1839 mutex_lock(&prange->migrate_mutex);
1840
1841 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1842 false, true, false);
1843 if (r)
1844 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1845 prange->start);
1846
1847 mutex_unlock(&prange->migrate_mutex);
1848 if (r)
1849 goto out_reschedule;
1850
1851 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1852 goto out_reschedule;
1853 }
1854
1855 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1856 evicted_ranges)
1857 goto out_reschedule;
1858
1859 evicted_ranges = 0;
1860
1861 r = kgd2kfd_resume_mm(mm);
1862 if (r) {
1863 /* No recovery from this failure. Probably the CP is
1864 * hanging. No point trying again.
1865 */
1866 pr_debug("failed %d to resume KFD\n", r);
1867 }
1868
1869 pr_debug("restore svm ranges successfully\n");
1870
1871 out_reschedule:
1872 mutex_unlock(&svms->lock);
1873 mmap_write_unlock(mm);
1874 mutex_unlock(&process_info->lock);
1875
1876 /* If validation failed, reschedule another attempt */
1877 if (evicted_ranges) {
1878 pr_debug("reschedule to restore svm range\n");
1879 schedule_delayed_work(&svms->restore_work,
1880 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1881
1882 kfd_smi_event_queue_restore_rescheduled(mm);
1883 }
1884 mmput(mm);
1885 }
1886
1887 /**
1888 * svm_range_evict - evict svm range
1889 * @prange: svm range structure
1890 * @mm: current process mm_struct
1891 * @start: starting process queue number
1892 * @last: last process queue number
1893 * @event: mmu notifier event when range is evicted or migrated
1894 *
1895 * Stop all queues of the process to ensure GPU doesn't access the memory, then
1896 * return to let CPU evict the buffer and proceed CPU pagetable update.
1897 *
1898 * Don't need use lock to sync cpu pagetable invalidation with GPU execution.
1899 * If invalidation happens while restore work is running, restore work will
1900 * restart to ensure to get the latest CPU pages mapping to GPU, then start
1901 * the queues.
1902 */
1903 static int
svm_range_evict(struct svm_range * prange,struct mm_struct * mm,unsigned long start,unsigned long last,enum mmu_notifier_event event)1904 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1905 unsigned long start, unsigned long last,
1906 enum mmu_notifier_event event)
1907 {
1908 struct svm_range_list *svms = prange->svms;
1909 struct svm_range *pchild;
1910 struct kfd_process *p;
1911 int r = 0;
1912
1913 p = container_of(svms, struct kfd_process, svms);
1914
1915 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1916 svms, prange->start, prange->last, start, last);
1917
1918 if (!p->xnack_enabled ||
1919 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1920 int evicted_ranges;
1921 bool mapped = prange->mapped_to_gpu;
1922
1923 list_for_each_entry(pchild, &prange->child_list, child_list) {
1924 if (!pchild->mapped_to_gpu)
1925 continue;
1926 mapped = true;
1927 mutex_lock_nested(&pchild->lock, 1);
1928 if (pchild->start <= last && pchild->last >= start) {
1929 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1930 pchild->start, pchild->last);
1931 atomic_inc(&pchild->invalid);
1932 }
1933 mutex_unlock(&pchild->lock);
1934 }
1935
1936 if (!mapped)
1937 return r;
1938
1939 if (prange->start <= last && prange->last >= start)
1940 atomic_inc(&prange->invalid);
1941
1942 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1943 if (evicted_ranges != 1)
1944 return r;
1945
1946 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1947 prange->svms, prange->start, prange->last);
1948
1949 /* First eviction, stop the queues */
1950 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
1951 if (r)
1952 pr_debug("failed to quiesce KFD\n");
1953
1954 pr_debug("schedule to restore svm %p ranges\n", svms);
1955 schedule_delayed_work(&svms->restore_work,
1956 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1957 } else {
1958 unsigned long s, l;
1959 uint32_t trigger;
1960
1961 if (event == MMU_NOTIFY_MIGRATE)
1962 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
1963 else
1964 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
1965
1966 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1967 prange->svms, start, last);
1968 list_for_each_entry(pchild, &prange->child_list, child_list) {
1969 mutex_lock_nested(&pchild->lock, 1);
1970 s = max(start, pchild->start);
1971 l = min(last, pchild->last);
1972 if (l >= s)
1973 svm_range_unmap_from_gpus(pchild, s, l, trigger);
1974 mutex_unlock(&pchild->lock);
1975 }
1976 s = max(start, prange->start);
1977 l = min(last, prange->last);
1978 if (l >= s)
1979 svm_range_unmap_from_gpus(prange, s, l, trigger);
1980 }
1981
1982 return r;
1983 }
1984
svm_range_clone(struct svm_range * old)1985 static struct svm_range *svm_range_clone(struct svm_range *old)
1986 {
1987 struct svm_range *new;
1988
1989 new = svm_range_new(old->svms, old->start, old->last, false);
1990 if (!new)
1991 return NULL;
1992 if (svm_range_copy_dma_addrs(new, old)) {
1993 svm_range_free(new, false);
1994 return NULL;
1995 }
1996 if (old->svm_bo) {
1997 new->ttm_res = old->ttm_res;
1998 new->offset = old->offset;
1999 new->svm_bo = svm_range_bo_ref(old->svm_bo);
2000 spin_lock(&new->svm_bo->list_lock);
2001 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
2002 spin_unlock(&new->svm_bo->list_lock);
2003 }
2004 new->flags = old->flags;
2005 new->preferred_loc = old->preferred_loc;
2006 new->prefetch_loc = old->prefetch_loc;
2007 new->actual_loc = old->actual_loc;
2008 new->granularity = old->granularity;
2009 new->mapped_to_gpu = old->mapped_to_gpu;
2010 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
2011 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
2012
2013 return new;
2014 }
2015
svm_range_set_max_pages(struct amdgpu_device * adev)2016 void svm_range_set_max_pages(struct amdgpu_device *adev)
2017 {
2018 uint64_t max_pages;
2019 uint64_t pages, _pages;
2020 uint64_t min_pages = 0;
2021 int i, id;
2022
2023 for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
2024 if (adev->kfd.dev->nodes[i]->xcp)
2025 id = adev->kfd.dev->nodes[i]->xcp->id;
2026 else
2027 id = -1;
2028 pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
2029 pages = clamp(pages, 1ULL << 9, 1ULL << 18);
2030 pages = rounddown_pow_of_two(pages);
2031 min_pages = min_not_zero(min_pages, pages);
2032 }
2033
2034 do {
2035 max_pages = READ_ONCE(max_svm_range_pages);
2036 _pages = min_not_zero(max_pages, min_pages);
2037 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
2038 }
2039
2040 static int
svm_range_split_new(struct svm_range_list * svms,uint64_t start,uint64_t last,uint64_t max_pages,struct list_head * insert_list,struct list_head * update_list)2041 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2042 uint64_t max_pages, struct list_head *insert_list,
2043 struct list_head *update_list)
2044 {
2045 struct svm_range *prange;
2046 uint64_t l;
2047
2048 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
2049 max_pages, start, last);
2050
2051 while (last >= start) {
2052 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
2053
2054 prange = svm_range_new(svms, start, l, true);
2055 if (!prange)
2056 return -ENOMEM;
2057 list_add(&prange->list, insert_list);
2058 list_add(&prange->update_list, update_list);
2059
2060 start = l + 1;
2061 }
2062 return 0;
2063 }
2064
2065 /**
2066 * svm_range_add - add svm range and handle overlap
2067 * @p: the range add to this process svms
2068 * @start: page size aligned
2069 * @size: page size aligned
2070 * @nattr: number of attributes
2071 * @attrs: array of attributes
2072 * @update_list: output, the ranges need validate and update GPU mapping
2073 * @insert_list: output, the ranges need insert to svms
2074 * @remove_list: output, the ranges are replaced and need remove from svms
2075 *
2076 * Check if the virtual address range has overlap with any existing ranges,
2077 * split partly overlapping ranges and add new ranges in the gaps. All changes
2078 * should be applied to the range_list and interval tree transactionally. If
2079 * any range split or allocation fails, the entire update fails. Therefore any
2080 * existing overlapping svm_ranges are cloned and the original svm_ranges left
2081 * unchanged.
2082 *
2083 * If the transaction succeeds, the caller can update and insert clones and
2084 * new ranges, then free the originals.
2085 *
2086 * Otherwise the caller can free the clones and new ranges, while the old
2087 * svm_ranges remain unchanged.
2088 *
2089 * Context: Process context, caller must hold svms->lock
2090 *
2091 * Return:
2092 * 0 - OK, otherwise error code
2093 */
2094 static int
svm_range_add(struct kfd_process * p,uint64_t start,uint64_t size,uint32_t nattr,struct kfd_ioctl_svm_attribute * attrs,struct list_head * update_list,struct list_head * insert_list,struct list_head * remove_list)2095 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
2096 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
2097 struct list_head *update_list, struct list_head *insert_list,
2098 struct list_head *remove_list)
2099 {
2100 unsigned long last = start + size - 1UL;
2101 struct svm_range_list *svms = &p->svms;
2102 struct interval_tree_node *node;
2103 struct svm_range *prange;
2104 struct svm_range *tmp;
2105 struct list_head new_list;
2106 int r = 0;
2107
2108 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2109
2110 INIT_LIST_HEAD(update_list);
2111 INIT_LIST_HEAD(insert_list);
2112 INIT_LIST_HEAD(remove_list);
2113 INIT_LIST_HEAD(&new_list);
2114
2115 node = interval_tree_iter_first(&svms->objects, start, last);
2116 while (node) {
2117 struct interval_tree_node *next;
2118 unsigned long next_start;
2119
2120 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
2121 node->last);
2122
2123 prange = container_of(node, struct svm_range, it_node);
2124 next = interval_tree_iter_next(node, start, last);
2125 next_start = min(node->last, last) + 1;
2126
2127 if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2128 prange->mapped_to_gpu) {
2129 /* nothing to do */
2130 } else if (node->start < start || node->last > last) {
2131 /* node intersects the update range and its attributes
2132 * will change. Clone and split it, apply updates only
2133 * to the overlapping part
2134 */
2135 struct svm_range *old = prange;
2136
2137 prange = svm_range_clone(old);
2138 if (!prange) {
2139 r = -ENOMEM;
2140 goto out;
2141 }
2142
2143 list_add(&old->update_list, remove_list);
2144 list_add(&prange->list, insert_list);
2145 list_add(&prange->update_list, update_list);
2146
2147 if (node->start < start) {
2148 pr_debug("change old range start\n");
2149 r = svm_range_split_head(prange, start,
2150 insert_list);
2151 if (r)
2152 goto out;
2153 }
2154 if (node->last > last) {
2155 pr_debug("change old range last\n");
2156 r = svm_range_split_tail(prange, last,
2157 insert_list);
2158 if (r)
2159 goto out;
2160 }
2161 } else {
2162 /* The node is contained within start..last,
2163 * just update it
2164 */
2165 list_add(&prange->update_list, update_list);
2166 }
2167
2168 /* insert a new node if needed */
2169 if (node->start > start) {
2170 r = svm_range_split_new(svms, start, node->start - 1,
2171 READ_ONCE(max_svm_range_pages),
2172 &new_list, update_list);
2173 if (r)
2174 goto out;
2175 }
2176
2177 node = next;
2178 start = next_start;
2179 }
2180
2181 /* add a final range at the end if needed */
2182 if (start <= last)
2183 r = svm_range_split_new(svms, start, last,
2184 READ_ONCE(max_svm_range_pages),
2185 &new_list, update_list);
2186
2187 out:
2188 if (r) {
2189 list_for_each_entry_safe(prange, tmp, insert_list, list)
2190 svm_range_free(prange, false);
2191 list_for_each_entry_safe(prange, tmp, &new_list, list)
2192 svm_range_free(prange, true);
2193 } else {
2194 list_splice(&new_list, insert_list);
2195 }
2196
2197 return r;
2198 }
2199
2200 static void
svm_range_update_notifier_and_interval_tree(struct mm_struct * mm,struct svm_range * prange)2201 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2202 struct svm_range *prange)
2203 {
2204 unsigned long start;
2205 unsigned long last;
2206
2207 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2208 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2209
2210 if (prange->start == start && prange->last == last)
2211 return;
2212
2213 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2214 prange->svms, prange, start, last, prange->start,
2215 prange->last);
2216
2217 if (start != 0 && last != 0) {
2218 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2219 svm_range_remove_notifier(prange);
2220 }
2221 prange->it_node.start = prange->start;
2222 prange->it_node.last = prange->last;
2223
2224 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2225 svm_range_add_notifier_locked(mm, prange);
2226 }
2227
2228 static void
svm_range_handle_list_op(struct svm_range_list * svms,struct svm_range * prange,struct mm_struct * mm)2229 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2230 struct mm_struct *mm)
2231 {
2232 switch (prange->work_item.op) {
2233 case SVM_OP_NULL:
2234 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2235 svms, prange, prange->start, prange->last);
2236 break;
2237 case SVM_OP_UNMAP_RANGE:
2238 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2239 svms, prange, prange->start, prange->last);
2240 svm_range_unlink(prange);
2241 svm_range_remove_notifier(prange);
2242 svm_range_free(prange, true);
2243 break;
2244 case SVM_OP_UPDATE_RANGE_NOTIFIER:
2245 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2246 svms, prange, prange->start, prange->last);
2247 svm_range_update_notifier_and_interval_tree(mm, prange);
2248 break;
2249 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2250 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2251 svms, prange, prange->start, prange->last);
2252 svm_range_update_notifier_and_interval_tree(mm, prange);
2253 /* TODO: implement deferred validation and mapping */
2254 break;
2255 case SVM_OP_ADD_RANGE:
2256 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2257 prange->start, prange->last);
2258 svm_range_add_to_svms(prange);
2259 svm_range_add_notifier_locked(mm, prange);
2260 break;
2261 case SVM_OP_ADD_RANGE_AND_MAP:
2262 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2263 prange, prange->start, prange->last);
2264 svm_range_add_to_svms(prange);
2265 svm_range_add_notifier_locked(mm, prange);
2266 /* TODO: implement deferred validation and mapping */
2267 break;
2268 default:
2269 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2270 prange->work_item.op);
2271 }
2272 }
2273
svm_range_drain_retry_fault(struct svm_range_list * svms)2274 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2275 {
2276 struct kfd_process_device *pdd;
2277 struct kfd_process *p;
2278 int drain;
2279 uint32_t i;
2280
2281 p = container_of(svms, struct kfd_process, svms);
2282
2283 restart:
2284 drain = atomic_read(&svms->drain_pagefaults);
2285 if (!drain)
2286 return;
2287
2288 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2289 pdd = p->pdds[i];
2290 if (!pdd)
2291 continue;
2292
2293 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2294
2295 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2296 pdd->dev->adev->irq.retry_cam_enabled ?
2297 &pdd->dev->adev->irq.ih :
2298 &pdd->dev->adev->irq.ih1);
2299
2300 if (pdd->dev->adev->irq.retry_cam_enabled)
2301 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2302 &pdd->dev->adev->irq.ih_soft);
2303
2304
2305 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2306 }
2307 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
2308 goto restart;
2309 }
2310
svm_range_deferred_list_work(struct work_struct * work)2311 static void svm_range_deferred_list_work(struct work_struct *work)
2312 {
2313 struct svm_range_list *svms;
2314 struct svm_range *prange;
2315 struct mm_struct *mm;
2316
2317 svms = container_of(work, struct svm_range_list, deferred_list_work);
2318 pr_debug("enter svms 0x%p\n", svms);
2319
2320 spin_lock(&svms->deferred_list_lock);
2321 while (!list_empty(&svms->deferred_range_list)) {
2322 prange = list_first_entry(&svms->deferred_range_list,
2323 struct svm_range, deferred_list);
2324 spin_unlock(&svms->deferred_list_lock);
2325
2326 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2327 prange->start, prange->last, prange->work_item.op);
2328
2329 mm = prange->work_item.mm;
2330 retry:
2331 mmap_write_lock(mm);
2332
2333 /* Checking for the need to drain retry faults must be inside
2334 * mmap write lock to serialize with munmap notifiers.
2335 */
2336 if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2337 mmap_write_unlock(mm);
2338 svm_range_drain_retry_fault(svms);
2339 goto retry;
2340 }
2341
2342 /* Remove from deferred_list must be inside mmap write lock, for
2343 * two race cases:
2344 * 1. unmap_from_cpu may change work_item.op and add the range
2345 * to deferred_list again, cause use after free bug.
2346 * 2. svm_range_list_lock_and_flush_work may hold mmap write
2347 * lock and continue because deferred_list is empty, but
2348 * deferred_list work is actually waiting for mmap lock.
2349 */
2350 spin_lock(&svms->deferred_list_lock);
2351 list_del_init(&prange->deferred_list);
2352 spin_unlock(&svms->deferred_list_lock);
2353
2354 mutex_lock(&svms->lock);
2355 mutex_lock(&prange->migrate_mutex);
2356 while (!list_empty(&prange->child_list)) {
2357 struct svm_range *pchild;
2358
2359 pchild = list_first_entry(&prange->child_list,
2360 struct svm_range, child_list);
2361 pr_debug("child prange 0x%p op %d\n", pchild,
2362 pchild->work_item.op);
2363 list_del_init(&pchild->child_list);
2364 svm_range_handle_list_op(svms, pchild, mm);
2365 }
2366 mutex_unlock(&prange->migrate_mutex);
2367
2368 svm_range_handle_list_op(svms, prange, mm);
2369 mutex_unlock(&svms->lock);
2370 mmap_write_unlock(mm);
2371
2372 /* Pairs with mmget in svm_range_add_list_work. If dropping the
2373 * last mm refcount, schedule release work to avoid circular locking
2374 */
2375 mmput_async(mm);
2376
2377 spin_lock(&svms->deferred_list_lock);
2378 }
2379 spin_unlock(&svms->deferred_list_lock);
2380 pr_debug("exit svms 0x%p\n", svms);
2381 }
2382
2383 void
svm_range_add_list_work(struct svm_range_list * svms,struct svm_range * prange,struct mm_struct * mm,enum svm_work_list_ops op)2384 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2385 struct mm_struct *mm, enum svm_work_list_ops op)
2386 {
2387 spin_lock(&svms->deferred_list_lock);
2388 /* if prange is on the deferred list */
2389 if (!list_empty(&prange->deferred_list)) {
2390 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2391 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
2392 if (op != SVM_OP_NULL &&
2393 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2394 prange->work_item.op = op;
2395 } else {
2396 prange->work_item.op = op;
2397
2398 /* Pairs with mmput in deferred_list_work */
2399 mmget(mm);
2400 prange->work_item.mm = mm;
2401 list_add_tail(&prange->deferred_list,
2402 &prange->svms->deferred_range_list);
2403 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2404 prange, prange->start, prange->last, op);
2405 }
2406 spin_unlock(&svms->deferred_list_lock);
2407 }
2408
schedule_deferred_list_work(struct svm_range_list * svms)2409 void schedule_deferred_list_work(struct svm_range_list *svms)
2410 {
2411 spin_lock(&svms->deferred_list_lock);
2412 if (!list_empty(&svms->deferred_range_list))
2413 schedule_work(&svms->deferred_list_work);
2414 spin_unlock(&svms->deferred_list_lock);
2415 }
2416
2417 static void
svm_range_unmap_split(struct mm_struct * mm,struct svm_range * parent,struct svm_range * prange,unsigned long start,unsigned long last)2418 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2419 struct svm_range *prange, unsigned long start,
2420 unsigned long last)
2421 {
2422 struct svm_range *head;
2423 struct svm_range *tail;
2424
2425 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2426 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2427 prange->start, prange->last);
2428 return;
2429 }
2430 if (start > prange->last || last < prange->start)
2431 return;
2432
2433 head = tail = prange;
2434 if (start > prange->start)
2435 svm_range_split(prange, prange->start, start - 1, &tail);
2436 if (last < tail->last)
2437 svm_range_split(tail, last + 1, tail->last, &head);
2438
2439 if (head != prange && tail != prange) {
2440 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2441 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2442 } else if (tail != prange) {
2443 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2444 } else if (head != prange) {
2445 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2446 } else if (parent != prange) {
2447 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2448 }
2449 }
2450
2451 static void
svm_range_unmap_from_cpu(struct mm_struct * mm,struct svm_range * prange,unsigned long start,unsigned long last)2452 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2453 unsigned long start, unsigned long last)
2454 {
2455 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2456 struct svm_range_list *svms;
2457 struct svm_range *pchild;
2458 struct kfd_process *p;
2459 unsigned long s, l;
2460 bool unmap_parent;
2461
2462 p = kfd_lookup_process_by_mm(mm);
2463 if (!p)
2464 return;
2465 svms = &p->svms;
2466
2467 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2468 prange, prange->start, prange->last, start, last);
2469
2470 /* Make sure pending page faults are drained in the deferred worker
2471 * before the range is freed to avoid straggler interrupts on
2472 * unmapped memory causing "phantom faults".
2473 */
2474 atomic_inc(&svms->drain_pagefaults);
2475
2476 unmap_parent = start <= prange->start && last >= prange->last;
2477
2478 list_for_each_entry(pchild, &prange->child_list, child_list) {
2479 mutex_lock_nested(&pchild->lock, 1);
2480 s = max(start, pchild->start);
2481 l = min(last, pchild->last);
2482 if (l >= s)
2483 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2484 svm_range_unmap_split(mm, prange, pchild, start, last);
2485 mutex_unlock(&pchild->lock);
2486 }
2487 s = max(start, prange->start);
2488 l = min(last, prange->last);
2489 if (l >= s)
2490 svm_range_unmap_from_gpus(prange, s, l, trigger);
2491 svm_range_unmap_split(mm, prange, prange, start, last);
2492
2493 if (unmap_parent)
2494 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2495 else
2496 svm_range_add_list_work(svms, prange, mm,
2497 SVM_OP_UPDATE_RANGE_NOTIFIER);
2498 schedule_deferred_list_work(svms);
2499
2500 kfd_unref_process(p);
2501 }
2502
2503 /**
2504 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2505 * @mni: mmu_interval_notifier struct
2506 * @range: mmu_notifier_range struct
2507 * @cur_seq: value to pass to mmu_interval_set_seq()
2508 *
2509 * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it
2510 * is from migration, or CPU page invalidation callback.
2511 *
2512 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2513 * work thread, and split prange if only part of prange is unmapped.
2514 *
2515 * For invalidation event, if GPU retry fault is not enabled, evict the queues,
2516 * then schedule svm_range_restore_work to update GPU mapping and resume queues.
2517 * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will
2518 * update GPU mapping to recover.
2519 *
2520 * Context: mmap lock, notifier_invalidate_start lock are held
2521 * for invalidate event, prange lock is held if this is from migration
2522 */
2523 static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier * mni,const struct mmu_notifier_range * range,unsigned long cur_seq)2524 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2525 const struct mmu_notifier_range *range,
2526 unsigned long cur_seq)
2527 {
2528 struct svm_range *prange;
2529 unsigned long start;
2530 unsigned long last;
2531
2532 if (range->event == MMU_NOTIFY_RELEASE)
2533 return true;
2534 if (!mmget_not_zero(mni->mm))
2535 return true;
2536
2537 start = mni->interval_tree.start;
2538 last = mni->interval_tree.last;
2539 start = max(start, range->start) >> PAGE_SHIFT;
2540 last = min(last, range->end - 1) >> PAGE_SHIFT;
2541 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2542 start, last, range->start >> PAGE_SHIFT,
2543 (range->end - 1) >> PAGE_SHIFT,
2544 mni->interval_tree.start >> PAGE_SHIFT,
2545 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2546
2547 prange = container_of(mni, struct svm_range, notifier);
2548
2549 svm_range_lock(prange);
2550 mmu_interval_set_seq(mni, cur_seq);
2551
2552 switch (range->event) {
2553 case MMU_NOTIFY_UNMAP:
2554 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2555 break;
2556 default:
2557 svm_range_evict(prange, mni->mm, start, last, range->event);
2558 break;
2559 }
2560
2561 svm_range_unlock(prange);
2562 mmput(mni->mm);
2563
2564 return true;
2565 }
2566
2567 /**
2568 * svm_range_from_addr - find svm range from fault address
2569 * @svms: svm range list header
2570 * @addr: address to search range interval tree, in pages
2571 * @parent: parent range if range is on child list
2572 *
2573 * Context: The caller must hold svms->lock
2574 *
2575 * Return: the svm_range found or NULL
2576 */
2577 struct svm_range *
svm_range_from_addr(struct svm_range_list * svms,unsigned long addr,struct svm_range ** parent)2578 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2579 struct svm_range **parent)
2580 {
2581 struct interval_tree_node *node;
2582 struct svm_range *prange;
2583 struct svm_range *pchild;
2584
2585 node = interval_tree_iter_first(&svms->objects, addr, addr);
2586 if (!node)
2587 return NULL;
2588
2589 prange = container_of(node, struct svm_range, it_node);
2590 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2591 addr, prange->start, prange->last, node->start, node->last);
2592
2593 if (addr >= prange->start && addr <= prange->last) {
2594 if (parent)
2595 *parent = prange;
2596 return prange;
2597 }
2598 list_for_each_entry(pchild, &prange->child_list, child_list)
2599 if (addr >= pchild->start && addr <= pchild->last) {
2600 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2601 addr, pchild->start, pchild->last);
2602 if (parent)
2603 *parent = prange;
2604 return pchild;
2605 }
2606
2607 return NULL;
2608 }
2609
2610 /* svm_range_best_restore_location - decide the best fault restore location
2611 * @prange: svm range structure
2612 * @adev: the GPU on which vm fault happened
2613 *
2614 * This is only called when xnack is on, to decide the best location to restore
2615 * the range mapping after GPU vm fault. Caller uses the best location to do
2616 * migration if actual loc is not best location, then update GPU page table
2617 * mapping to the best location.
2618 *
2619 * If the preferred loc is accessible by faulting GPU, use preferred loc.
2620 * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu
2621 * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then
2622 * if range actual loc is cpu, best_loc is cpu
2623 * if vm fault gpu is on xgmi same hive of range actual loc gpu, best_loc is
2624 * range actual loc.
2625 * Otherwise, GPU no access, best_loc is -1.
2626 *
2627 * Return:
2628 * -1 means vm fault GPU no access
2629 * 0 for CPU or GPU id
2630 */
2631 static int32_t
svm_range_best_restore_location(struct svm_range * prange,struct kfd_node * node,int32_t * gpuidx)2632 svm_range_best_restore_location(struct svm_range *prange,
2633 struct kfd_node *node,
2634 int32_t *gpuidx)
2635 {
2636 struct kfd_node *bo_node, *preferred_node;
2637 struct kfd_process *p;
2638 uint32_t gpuid;
2639 int r;
2640
2641 p = container_of(prange->svms, struct kfd_process, svms);
2642
2643 r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
2644 if (r < 0) {
2645 pr_debug("failed to get gpuid from kgd\n");
2646 return -1;
2647 }
2648
2649 if (node->adev->gmc.is_app_apu)
2650 return 0;
2651
2652 if (prange->preferred_loc == gpuid ||
2653 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2654 return prange->preferred_loc;
2655 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2656 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2657 if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
2658 return prange->preferred_loc;
2659 /* fall through */
2660 }
2661
2662 if (test_bit(*gpuidx, prange->bitmap_access))
2663 return gpuid;
2664
2665 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2666 if (!prange->actual_loc)
2667 return 0;
2668
2669 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2670 if (bo_node && svm_nodes_in_same_hive(node, bo_node))
2671 return prange->actual_loc;
2672 else
2673 return 0;
2674 }
2675
2676 return -1;
2677 }
2678
2679 static int
svm_range_get_range_boundaries(struct kfd_process * p,int64_t addr,unsigned long * start,unsigned long * last,bool * is_heap_stack)2680 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2681 unsigned long *start, unsigned long *last,
2682 bool *is_heap_stack)
2683 {
2684 struct vm_area_struct *vma;
2685 struct interval_tree_node *node;
2686 struct rb_node *rb_node;
2687 unsigned long start_limit, end_limit;
2688
2689 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2690 if (!vma) {
2691 pr_debug("VMA does not exist in address [0x%llx]\n", addr);
2692 return -EFAULT;
2693 }
2694
2695 *is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
2696
2697 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2698 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2699 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2700 (unsigned long)ALIGN(addr + 1, 2UL << 8));
2701 /* First range that starts after the fault address */
2702 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2703 if (node) {
2704 end_limit = min(end_limit, node->start);
2705 /* Last range that ends before the fault address */
2706 rb_node = rb_prev(&node->rb);
2707 } else {
2708 /* Last range must end before addr because
2709 * there was no range after addr
2710 */
2711 rb_node = rb_last(&p->svms.objects.rb_root);
2712 }
2713 if (rb_node) {
2714 node = container_of(rb_node, struct interval_tree_node, rb);
2715 if (node->last >= addr) {
2716 WARN(1, "Overlap with prev node and page fault addr\n");
2717 return -EFAULT;
2718 }
2719 start_limit = max(start_limit, node->last + 1);
2720 }
2721
2722 *start = start_limit;
2723 *last = end_limit - 1;
2724
2725 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2726 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2727 *start, *last, *is_heap_stack);
2728
2729 return 0;
2730 }
2731
2732 static int
svm_range_check_vm_userptr(struct kfd_process * p,uint64_t start,uint64_t last,uint64_t * bo_s,uint64_t * bo_l)2733 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2734 uint64_t *bo_s, uint64_t *bo_l)
2735 {
2736 struct amdgpu_bo_va_mapping *mapping;
2737 struct interval_tree_node *node;
2738 struct amdgpu_bo *bo = NULL;
2739 unsigned long userptr;
2740 uint32_t i;
2741 int r;
2742
2743 for (i = 0; i < p->n_pdds; i++) {
2744 struct amdgpu_vm *vm;
2745
2746 if (!p->pdds[i]->drm_priv)
2747 continue;
2748
2749 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2750 r = amdgpu_bo_reserve(vm->root.bo, false);
2751 if (r)
2752 return r;
2753
2754 /* Check userptr by searching entire vm->va interval tree */
2755 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2756 while (node) {
2757 mapping = container_of((struct rb_node *)node,
2758 struct amdgpu_bo_va_mapping, rb);
2759 bo = mapping->bo_va->base.bo;
2760
2761 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2762 start << PAGE_SHIFT,
2763 last << PAGE_SHIFT,
2764 &userptr)) {
2765 node = interval_tree_iter_next(node, 0, ~0ULL);
2766 continue;
2767 }
2768
2769 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2770 start, last);
2771 if (bo_s && bo_l) {
2772 *bo_s = userptr >> PAGE_SHIFT;
2773 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2774 }
2775 amdgpu_bo_unreserve(vm->root.bo);
2776 return -EADDRINUSE;
2777 }
2778 amdgpu_bo_unreserve(vm->root.bo);
2779 }
2780 return 0;
2781 }
2782
2783 static struct
svm_range_create_unregistered_range(struct kfd_node * node,struct kfd_process * p,struct mm_struct * mm,int64_t addr)2784 svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
2785 struct kfd_process *p,
2786 struct mm_struct *mm,
2787 int64_t addr)
2788 {
2789 struct svm_range *prange = NULL;
2790 unsigned long start, last;
2791 uint32_t gpuid, gpuidx;
2792 bool is_heap_stack;
2793 uint64_t bo_s = 0;
2794 uint64_t bo_l = 0;
2795 int r;
2796
2797 if (svm_range_get_range_boundaries(p, addr, &start, &last,
2798 &is_heap_stack))
2799 return NULL;
2800
2801 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2802 if (r != -EADDRINUSE)
2803 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2804
2805 if (r == -EADDRINUSE) {
2806 if (addr >= bo_s && addr <= bo_l)
2807 return NULL;
2808
2809 /* Create one page svm range if 2MB range overlapping */
2810 start = addr;
2811 last = addr;
2812 }
2813
2814 prange = svm_range_new(&p->svms, start, last, true);
2815 if (!prange) {
2816 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2817 return NULL;
2818 }
2819 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
2820 pr_debug("failed to get gpuid from kgd\n");
2821 svm_range_free(prange, true);
2822 return NULL;
2823 }
2824
2825 if (is_heap_stack)
2826 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2827
2828 svm_range_add_to_svms(prange);
2829 svm_range_add_notifier_locked(mm, prange);
2830
2831 return prange;
2832 }
2833
2834 /* svm_range_skip_recover - decide if prange can be recovered
2835 * @prange: svm range structure
2836 *
2837 * GPU vm retry fault handle skip recover the range for cases:
2838 * 1. prange is on deferred list to be removed after unmap, it is stale fault,
2839 * deferred list work will drain the stale fault before free the prange.
2840 * 2. prange is on deferred list to add interval notifier after split, or
2841 * 3. prange is child range, it is split from parent prange, recover later
2842 * after interval notifier is added.
2843 *
2844 * Return: true to skip recover, false to recover
2845 */
svm_range_skip_recover(struct svm_range * prange)2846 static bool svm_range_skip_recover(struct svm_range *prange)
2847 {
2848 struct svm_range_list *svms = prange->svms;
2849
2850 spin_lock(&svms->deferred_list_lock);
2851 if (list_empty(&prange->deferred_list) &&
2852 list_empty(&prange->child_list)) {
2853 spin_unlock(&svms->deferred_list_lock);
2854 return false;
2855 }
2856 spin_unlock(&svms->deferred_list_lock);
2857
2858 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2859 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2860 svms, prange, prange->start, prange->last);
2861 return true;
2862 }
2863 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2864 prange->work_item.op == SVM_OP_ADD_RANGE) {
2865 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2866 svms, prange, prange->start, prange->last);
2867 return true;
2868 }
2869 return false;
2870 }
2871
2872 static void
svm_range_count_fault(struct kfd_node * node,struct kfd_process * p,int32_t gpuidx)2873 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
2874 int32_t gpuidx)
2875 {
2876 struct kfd_process_device *pdd;
2877
2878 /* fault is on different page of same range
2879 * or fault is skipped to recover later
2880 * or fault is on invalid virtual address
2881 */
2882 if (gpuidx == MAX_GPU_INSTANCE) {
2883 uint32_t gpuid;
2884 int r;
2885
2886 r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
2887 if (r < 0)
2888 return;
2889 }
2890
2891 /* fault is recovered
2892 * or fault cannot recover because GPU no access on the range
2893 */
2894 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
2895 if (pdd)
2896 WRITE_ONCE(pdd->faults, pdd->faults + 1);
2897 }
2898
2899 static bool
svm_fault_allowed(struct vm_area_struct * vma,bool write_fault)2900 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2901 {
2902 unsigned long requested = VM_READ;
2903
2904 if (write_fault)
2905 requested |= VM_WRITE;
2906
2907 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2908 vma->vm_flags);
2909 return (vma->vm_flags & requested) == requested;
2910 }
2911
2912 int
svm_range_restore_pages(struct amdgpu_device * adev,unsigned int pasid,uint32_t vmid,uint32_t node_id,uint64_t addr,bool write_fault)2913 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2914 uint32_t vmid, uint32_t node_id,
2915 uint64_t addr, bool write_fault)
2916 {
2917 struct mm_struct *mm = NULL;
2918 struct svm_range_list *svms;
2919 struct svm_range *prange;
2920 struct kfd_process *p;
2921 ktime_t timestamp = ktime_get_boottime();
2922 struct kfd_node *node;
2923 int32_t best_loc;
2924 int32_t gpuidx = MAX_GPU_INSTANCE;
2925 bool write_locked = false;
2926 struct vm_area_struct *vma;
2927 bool migration = false;
2928 int r = 0;
2929
2930 if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
2931 pr_debug("device does not support SVM\n");
2932 return -EFAULT;
2933 }
2934
2935 p = kfd_lookup_process_by_pasid(pasid);
2936 if (!p) {
2937 pr_debug("kfd process not founded pasid 0x%x\n", pasid);
2938 return 0;
2939 }
2940 svms = &p->svms;
2941
2942 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2943
2944 if (atomic_read(&svms->drain_pagefaults)) {
2945 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2946 r = 0;
2947 goto out;
2948 }
2949
2950 if (!p->xnack_enabled) {
2951 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
2952 r = -EFAULT;
2953 goto out;
2954 }
2955
2956 /* p->lead_thread is available as kfd_process_wq_release flush the work
2957 * before releasing task ref.
2958 */
2959 mm = get_task_mm(p->lead_thread);
2960 if (!mm) {
2961 pr_debug("svms 0x%p failed to get mm\n", svms);
2962 r = 0;
2963 goto out;
2964 }
2965
2966 node = kfd_node_by_irq_ids(adev, node_id, vmid);
2967 if (!node) {
2968 pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
2969 vmid);
2970 r = -EFAULT;
2971 goto out;
2972 }
2973 mmap_read_lock(mm);
2974 retry_write_locked:
2975 mutex_lock(&svms->lock);
2976 prange = svm_range_from_addr(svms, addr, NULL);
2977 if (!prange) {
2978 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2979 svms, addr);
2980 if (!write_locked) {
2981 /* Need the write lock to create new range with MMU notifier.
2982 * Also flush pending deferred work to make sure the interval
2983 * tree is up to date before we add a new range
2984 */
2985 mutex_unlock(&svms->lock);
2986 mmap_read_unlock(mm);
2987 mmap_write_lock(mm);
2988 write_locked = true;
2989 goto retry_write_locked;
2990 }
2991 prange = svm_range_create_unregistered_range(node, p, mm, addr);
2992 if (!prange) {
2993 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2994 svms, addr);
2995 mmap_write_downgrade(mm);
2996 r = -EFAULT;
2997 goto out_unlock_svms;
2998 }
2999 }
3000 if (write_locked)
3001 mmap_write_downgrade(mm);
3002
3003 mutex_lock(&prange->migrate_mutex);
3004
3005 if (svm_range_skip_recover(prange)) {
3006 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3007 r = 0;
3008 goto out_unlock_range;
3009 }
3010
3011 /* skip duplicate vm fault on different pages of same range */
3012 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
3013 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
3014 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
3015 svms, prange->start, prange->last);
3016 r = 0;
3017 goto out_unlock_range;
3018 }
3019
3020 /* __do_munmap removed VMA, return success as we are handling stale
3021 * retry fault.
3022 */
3023 vma = vma_lookup(mm, addr << PAGE_SHIFT);
3024 if (!vma) {
3025 pr_debug("address 0x%llx VMA is removed\n", addr);
3026 r = 0;
3027 goto out_unlock_range;
3028 }
3029
3030 if (!svm_fault_allowed(vma, write_fault)) {
3031 pr_debug("fault addr 0x%llx no %s permission\n", addr,
3032 write_fault ? "write" : "read");
3033 r = -EPERM;
3034 goto out_unlock_range;
3035 }
3036
3037 best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3038 if (best_loc == -1) {
3039 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3040 svms, prange->start, prange->last);
3041 r = -EACCES;
3042 goto out_unlock_range;
3043 }
3044
3045 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3046 svms, prange->start, prange->last, best_loc,
3047 prange->actual_loc);
3048
3049 kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
3050 write_fault, timestamp);
3051
3052 if (prange->actual_loc != best_loc) {
3053 migration = true;
3054 if (best_loc) {
3055 r = svm_migrate_to_vram(prange, best_loc, mm,
3056 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
3057 if (r) {
3058 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
3059 r, addr);
3060 /* Fallback to system memory if migration to
3061 * VRAM failed
3062 */
3063 if (prange->actual_loc)
3064 r = svm_migrate_vram_to_ram(prange, mm,
3065 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
3066 NULL);
3067 else
3068 r = 0;
3069 }
3070 } else {
3071 r = svm_migrate_vram_to_ram(prange, mm,
3072 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
3073 NULL);
3074 }
3075 if (r) {
3076 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3077 r, svms, prange->start, prange->last);
3078 goto out_unlock_range;
3079 }
3080 }
3081
3082 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
3083 if (r)
3084 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3085 r, svms, prange->start, prange->last);
3086
3087 kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
3088 migration);
3089
3090 out_unlock_range:
3091 mutex_unlock(&prange->migrate_mutex);
3092 out_unlock_svms:
3093 mutex_unlock(&svms->lock);
3094 mmap_read_unlock(mm);
3095
3096 svm_range_count_fault(node, p, gpuidx);
3097
3098 mmput(mm);
3099 out:
3100 kfd_unref_process(p);
3101
3102 if (r == -EAGAIN) {
3103 pr_debug("recover vm fault later\n");
3104 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
3105 r = 0;
3106 }
3107 return r;
3108 }
3109
3110 int
svm_range_switch_xnack_reserve_mem(struct kfd_process * p,bool xnack_enabled)3111 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
3112 {
3113 struct svm_range *prange, *pchild;
3114 uint64_t reserved_size = 0;
3115 uint64_t size;
3116 int r = 0;
3117
3118 pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
3119
3120 mutex_lock(&p->svms.lock);
3121
3122 list_for_each_entry(prange, &p->svms.list, list) {
3123 svm_range_lock(prange);
3124 list_for_each_entry(pchild, &prange->child_list, child_list) {
3125 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
3126 if (xnack_enabled) {
3127 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3128 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3129 } else {
3130 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3131 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3132 if (r)
3133 goto out_unlock;
3134 reserved_size += size;
3135 }
3136 }
3137
3138 size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3139 if (xnack_enabled) {
3140 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
3141 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3142 } else {
3143 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
3144 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3145 if (r)
3146 goto out_unlock;
3147 reserved_size += size;
3148 }
3149 out_unlock:
3150 svm_range_unlock(prange);
3151 if (r)
3152 break;
3153 }
3154
3155 if (r)
3156 amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
3157 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
3158 else
3159 /* Change xnack mode must be inside svms lock, to avoid race with
3160 * svm_range_deferred_list_work unreserve memory in parallel.
3161 */
3162 p->xnack_enabled = xnack_enabled;
3163
3164 mutex_unlock(&p->svms.lock);
3165 return r;
3166 }
3167
svm_range_list_fini(struct kfd_process * p)3168 void svm_range_list_fini(struct kfd_process *p)
3169 {
3170 struct svm_range *prange;
3171 struct svm_range *next;
3172
3173 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3174
3175 cancel_delayed_work_sync(&p->svms.restore_work);
3176
3177 /* Ensure list work is finished before process is destroyed */
3178 flush_work(&p->svms.deferred_list_work);
3179
3180 /*
3181 * Ensure no retry fault comes in afterwards, as page fault handler will
3182 * not find kfd process and take mm lock to recover fault.
3183 */
3184 atomic_inc(&p->svms.drain_pagefaults);
3185 svm_range_drain_retry_fault(&p->svms);
3186
3187 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3188 svm_range_unlink(prange);
3189 svm_range_remove_notifier(prange);
3190 svm_range_free(prange, true);
3191 }
3192
3193 mutex_destroy(&p->svms.lock);
3194
3195 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3196 }
3197
svm_range_list_init(struct kfd_process * p)3198 int svm_range_list_init(struct kfd_process *p)
3199 {
3200 struct svm_range_list *svms = &p->svms;
3201 int i;
3202
3203 svms->objects = RB_ROOT_CACHED;
3204 mutex_init(&svms->lock);
3205 INIT_LIST_HEAD(&svms->list);
3206 atomic_set(&svms->evicted_ranges, 0);
3207 atomic_set(&svms->drain_pagefaults, 0);
3208 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3209 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3210 INIT_LIST_HEAD(&svms->deferred_range_list);
3211 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3212 spin_lock_init(&svms->deferred_list_lock);
3213
3214 for (i = 0; i < p->n_pdds; i++)
3215 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
3216 bitmap_set(svms->bitmap_supported, i, 1);
3217
3218 return 0;
3219 }
3220
3221 /**
3222 * svm_range_check_vm - check if virtual address range mapped already
3223 * @p: current kfd_process
3224 * @start: range start address, in pages
3225 * @last: range last address, in pages
3226 * @bo_s: mapping start address in pages if address range already mapped
3227 * @bo_l: mapping last address in pages if address range already mapped
3228 *
3229 * The purpose is to avoid virtual address ranges already allocated by
3230 * kfd_ioctl_alloc_memory_of_gpu ioctl.
3231 * It looks for each pdd in the kfd_process.
3232 *
3233 * Context: Process context
3234 *
3235 * Return 0 - OK, if the range is not mapped.
3236 * Otherwise error code:
3237 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3238 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3239 * a signal. Release all buffer reservations and return to user-space.
3240 */
3241 static int
svm_range_check_vm(struct kfd_process * p,uint64_t start,uint64_t last,uint64_t * bo_s,uint64_t * bo_l)3242 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3243 uint64_t *bo_s, uint64_t *bo_l)
3244 {
3245 struct amdgpu_bo_va_mapping *mapping;
3246 struct interval_tree_node *node;
3247 uint32_t i;
3248 int r;
3249
3250 for (i = 0; i < p->n_pdds; i++) {
3251 struct amdgpu_vm *vm;
3252
3253 if (!p->pdds[i]->drm_priv)
3254 continue;
3255
3256 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3257 r = amdgpu_bo_reserve(vm->root.bo, false);
3258 if (r)
3259 return r;
3260
3261 node = interval_tree_iter_first(&vm->va, start, last);
3262 if (node) {
3263 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3264 start, last);
3265 mapping = container_of((struct rb_node *)node,
3266 struct amdgpu_bo_va_mapping, rb);
3267 if (bo_s && bo_l) {
3268 *bo_s = mapping->start;
3269 *bo_l = mapping->last;
3270 }
3271 amdgpu_bo_unreserve(vm->root.bo);
3272 return -EADDRINUSE;
3273 }
3274 amdgpu_bo_unreserve(vm->root.bo);
3275 }
3276
3277 return 0;
3278 }
3279
3280 /**
3281 * svm_range_is_valid - check if virtual address range is valid
3282 * @p: current kfd_process
3283 * @start: range start address, in pages
3284 * @size: range size, in pages
3285 *
3286 * Valid virtual address range means it belongs to one or more VMAs
3287 *
3288 * Context: Process context
3289 *
3290 * Return:
3291 * 0 - OK, otherwise error code
3292 */
3293 static int
svm_range_is_valid(struct kfd_process * p,uint64_t start,uint64_t size)3294 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3295 {
3296 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3297 struct vm_area_struct *vma;
3298 unsigned long end;
3299 unsigned long start_unchg = start;
3300
3301 start <<= PAGE_SHIFT;
3302 end = start + (size << PAGE_SHIFT);
3303 do {
3304 vma = vma_lookup(p->mm, start);
3305 if (!vma || (vma->vm_flags & device_vma))
3306 return -EFAULT;
3307 start = min(end, vma->vm_end);
3308 } while (start < end);
3309
3310 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
3311 NULL);
3312 }
3313
3314 /**
3315 * svm_range_best_prefetch_location - decide the best prefetch location
3316 * @prange: svm range structure
3317 *
3318 * For xnack off:
3319 * If range map to single GPU, the best prefetch location is prefetch_loc, which
3320 * can be CPU or GPU.
3321 *
3322 * If range is ACCESS or ACCESS_IN_PLACE by mGPUs, only if mGPU connection on
3323 * XGMI same hive, the best prefetch location is prefetch_loc GPU, othervise
3324 * the best prefetch location is always CPU, because GPU can not have coherent
3325 * mapping VRAM of other GPUs even with large-BAR PCIe connection.
3326 *
3327 * For xnack on:
3328 * If range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
3329 * prefetch_loc, other GPU access will generate vm fault and trigger migration.
3330 *
3331 * If range is ACCESS_IN_PLACE by mGPUs, only if mGPU connection on XGMI same
3332 * hive, the best prefetch location is prefetch_loc GPU, otherwise the best
3333 * prefetch location is always CPU.
3334 *
3335 * Context: Process context
3336 *
3337 * Return:
3338 * 0 for CPU or GPU id
3339 */
3340 static uint32_t
svm_range_best_prefetch_location(struct svm_range * prange)3341 svm_range_best_prefetch_location(struct svm_range *prange)
3342 {
3343 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3344 uint32_t best_loc = prange->prefetch_loc;
3345 struct kfd_process_device *pdd;
3346 struct kfd_node *bo_node;
3347 struct kfd_process *p;
3348 uint32_t gpuidx;
3349
3350 p = container_of(prange->svms, struct kfd_process, svms);
3351
3352 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3353 goto out;
3354
3355 bo_node = svm_range_get_node_by_id(prange, best_loc);
3356 if (!bo_node) {
3357 WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3358 best_loc = 0;
3359 goto out;
3360 }
3361
3362 if (bo_node->adev->gmc.is_app_apu) {
3363 best_loc = 0;
3364 goto out;
3365 }
3366
3367 if (p->xnack_enabled)
3368 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3369 else
3370 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3371 MAX_GPU_INSTANCE);
3372
3373 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3374 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3375 if (!pdd) {
3376 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3377 continue;
3378 }
3379
3380 if (pdd->dev->adev == bo_node->adev)
3381 continue;
3382
3383 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3384 best_loc = 0;
3385 break;
3386 }
3387 }
3388
3389 out:
3390 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3391 p->xnack_enabled, &p->svms, prange->start, prange->last,
3392 best_loc);
3393
3394 return best_loc;
3395 }
3396
3397 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3398 * @mm: current process mm_struct
3399 * @prange: svm range structure
3400 * @migrated: output, true if migration is triggered
3401 *
3402 * If range perfetch_loc is GPU, actual loc is cpu 0, then migrate the range
3403 * from ram to vram.
3404 * If range prefetch_loc is cpu 0, actual loc is GPU, then migrate the range
3405 * from vram to ram.
3406 *
3407 * If GPU vm fault retry is not enabled, migration interact with MMU notifier
3408 * and restore work:
3409 * 1. migrate_vma_setup invalidate pages, MMU notifier callback svm_range_evict
3410 * stops all queues, schedule restore work
3411 * 2. svm_range_restore_work wait for migration is done by
3412 * a. svm_range_validate_vram takes prange->migrate_mutex
3413 * b. svm_range_validate_ram HMM get pages wait for CPU fault handle returns
3414 * 3. restore work update mappings of GPU, resume all queues.
3415 *
3416 * Context: Process context
3417 *
3418 * Return:
3419 * 0 - OK, otherwise - error code of migration
3420 */
3421 static int
svm_range_trigger_migration(struct mm_struct * mm,struct svm_range * prange,bool * migrated)3422 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3423 bool *migrated)
3424 {
3425 uint32_t best_loc;
3426 int r = 0;
3427
3428 *migrated = false;
3429 best_loc = svm_range_best_prefetch_location(prange);
3430
3431 if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3432 best_loc == prange->actual_loc)
3433 return 0;
3434
3435 if (!best_loc) {
3436 r = svm_migrate_vram_to_ram(prange, mm,
3437 KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
3438 *migrated = !r;
3439 return r;
3440 }
3441
3442 r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3443 *migrated = !r;
3444
3445 return 0;
3446 }
3447
svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence * fence)3448 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
3449 {
3450 /* Dereferencing fence->svm_bo is safe here because the fence hasn't
3451 * signaled yet and we're under the protection of the fence->lock.
3452 * After the fence is signaled in svm_range_bo_release, we cannot get
3453 * here any more.
3454 *
3455 * Reference is dropped in svm_range_evict_svm_bo_worker.
3456 */
3457 if (svm_bo_ref_unless_zero(fence->svm_bo)) {
3458 WRITE_ONCE(fence->svm_bo->evicting, 1);
3459 schedule_work(&fence->svm_bo->eviction_work);
3460 }
3461
3462 return 0;
3463 }
3464
svm_range_evict_svm_bo_worker(struct work_struct * work)3465 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3466 {
3467 struct svm_range_bo *svm_bo;
3468 struct mm_struct *mm;
3469 int r = 0;
3470
3471 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3472
3473 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3474 mm = svm_bo->eviction_fence->mm;
3475 } else {
3476 svm_range_bo_unref(svm_bo);
3477 return;
3478 }
3479
3480 mmap_read_lock(mm);
3481 spin_lock(&svm_bo->list_lock);
3482 while (!list_empty(&svm_bo->range_list) && !r) {
3483 struct svm_range *prange =
3484 list_first_entry(&svm_bo->range_list,
3485 struct svm_range, svm_bo_list);
3486 int retries = 3;
3487
3488 list_del_init(&prange->svm_bo_list);
3489 spin_unlock(&svm_bo->list_lock);
3490
3491 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3492 prange->start, prange->last);
3493
3494 mutex_lock(&prange->migrate_mutex);
3495 do {
3496 r = svm_migrate_vram_to_ram(prange, mm,
3497 KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
3498 } while (!r && prange->actual_loc && --retries);
3499
3500 if (!r && prange->actual_loc)
3501 pr_info_once("Migration failed during eviction");
3502
3503 if (!prange->actual_loc) {
3504 mutex_lock(&prange->lock);
3505 prange->svm_bo = NULL;
3506 mutex_unlock(&prange->lock);
3507 }
3508 mutex_unlock(&prange->migrate_mutex);
3509
3510 spin_lock(&svm_bo->list_lock);
3511 }
3512 spin_unlock(&svm_bo->list_lock);
3513 mmap_read_unlock(mm);
3514 mmput(mm);
3515
3516 dma_fence_signal(&svm_bo->eviction_fence->base);
3517
3518 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3519 * has been called in svm_migrate_vram_to_ram
3520 */
3521 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3522 svm_range_bo_unref(svm_bo);
3523 }
3524
3525 static int
svm_range_set_attr(struct kfd_process * p,struct mm_struct * mm,uint64_t start,uint64_t size,uint32_t nattr,struct kfd_ioctl_svm_attribute * attrs)3526 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3527 uint64_t start, uint64_t size, uint32_t nattr,
3528 struct kfd_ioctl_svm_attribute *attrs)
3529 {
3530 struct amdkfd_process_info *process_info = p->kgd_process_info;
3531 struct list_head update_list;
3532 struct list_head insert_list;
3533 struct list_head remove_list;
3534 struct svm_range_list *svms;
3535 struct svm_range *prange;
3536 struct svm_range *next;
3537 bool update_mapping = false;
3538 bool flush_tlb;
3539 int r, ret = 0;
3540
3541 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3542 p->pasid, &p->svms, start, start + size - 1, size);
3543
3544 r = svm_range_check_attr(p, nattr, attrs);
3545 if (r)
3546 return r;
3547
3548 svms = &p->svms;
3549
3550 mutex_lock(&process_info->lock);
3551
3552 svm_range_list_lock_and_flush_work(svms, mm);
3553
3554 r = svm_range_is_valid(p, start, size);
3555 if (r) {
3556 pr_debug("invalid range r=%d\n", r);
3557 mmap_write_unlock(mm);
3558 goto out;
3559 }
3560
3561 mutex_lock(&svms->lock);
3562
3563 /* Add new range and split existing ranges as needed */
3564 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3565 &insert_list, &remove_list);
3566 if (r) {
3567 mutex_unlock(&svms->lock);
3568 mmap_write_unlock(mm);
3569 goto out;
3570 }
3571 /* Apply changes as a transaction */
3572 list_for_each_entry_safe(prange, next, &insert_list, list) {
3573 svm_range_add_to_svms(prange);
3574 svm_range_add_notifier_locked(mm, prange);
3575 }
3576 list_for_each_entry(prange, &update_list, update_list) {
3577 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3578 /* TODO: unmap ranges from GPU that lost access */
3579 }
3580 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3581 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3582 prange->svms, prange, prange->start,
3583 prange->last);
3584 svm_range_unlink(prange);
3585 svm_range_remove_notifier(prange);
3586 svm_range_free(prange, false);
3587 }
3588
3589 mmap_write_downgrade(mm);
3590 /* Trigger migrations and revalidate and map to GPUs as needed. If
3591 * this fails we may be left with partially completed actions. There
3592 * is no clean way of rolling back to the previous state in such a
3593 * case because the rollback wouldn't be guaranteed to work either.
3594 */
3595 list_for_each_entry(prange, &update_list, update_list) {
3596 bool migrated;
3597
3598 mutex_lock(&prange->migrate_mutex);
3599
3600 r = svm_range_trigger_migration(mm, prange, &migrated);
3601 if (r)
3602 goto out_unlock_range;
3603
3604 if (migrated && (!p->xnack_enabled ||
3605 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3606 prange->mapped_to_gpu) {
3607 pr_debug("restore_work will update mappings of GPUs\n");
3608 mutex_unlock(&prange->migrate_mutex);
3609 continue;
3610 }
3611
3612 if (!migrated && !update_mapping) {
3613 mutex_unlock(&prange->migrate_mutex);
3614 continue;
3615 }
3616
3617 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3618
3619 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3620 true, true, flush_tlb);
3621 if (r)
3622 pr_debug("failed %d to map svm range\n", r);
3623
3624 out_unlock_range:
3625 mutex_unlock(&prange->migrate_mutex);
3626 if (r)
3627 ret = r;
3628 }
3629
3630 dynamic_svm_range_dump(svms);
3631
3632 mutex_unlock(&svms->lock);
3633 mmap_read_unlock(mm);
3634 out:
3635 mutex_unlock(&process_info->lock);
3636
3637 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3638 &p->svms, start, start + size - 1, r);
3639
3640 return ret ? ret : r;
3641 }
3642
3643 static int
svm_range_get_attr(struct kfd_process * p,struct mm_struct * mm,uint64_t start,uint64_t size,uint32_t nattr,struct kfd_ioctl_svm_attribute * attrs)3644 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3645 uint64_t start, uint64_t size, uint32_t nattr,
3646 struct kfd_ioctl_svm_attribute *attrs)
3647 {
3648 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3649 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3650 bool get_preferred_loc = false;
3651 bool get_prefetch_loc = false;
3652 bool get_granularity = false;
3653 bool get_accessible = false;
3654 bool get_flags = false;
3655 uint64_t last = start + size - 1UL;
3656 uint8_t granularity = 0xff;
3657 struct interval_tree_node *node;
3658 struct svm_range_list *svms;
3659 struct svm_range *prange;
3660 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3661 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3662 uint32_t flags_and = 0xffffffff;
3663 uint32_t flags_or = 0;
3664 int gpuidx;
3665 uint32_t i;
3666 int r = 0;
3667
3668 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3669 start + size - 1, nattr);
3670
3671 /* Flush pending deferred work to avoid racing with deferred actions from
3672 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3673 * can still race with get_attr because we don't hold the mmap lock. But that
3674 * would be a race condition in the application anyway, and undefined
3675 * behaviour is acceptable in that case.
3676 */
3677 flush_work(&p->svms.deferred_list_work);
3678
3679 mmap_read_lock(mm);
3680 r = svm_range_is_valid(p, start, size);
3681 mmap_read_unlock(mm);
3682 if (r) {
3683 pr_debug("invalid range r=%d\n", r);
3684 return r;
3685 }
3686
3687 for (i = 0; i < nattr; i++) {
3688 switch (attrs[i].type) {
3689 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3690 get_preferred_loc = true;
3691 break;
3692 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3693 get_prefetch_loc = true;
3694 break;
3695 case KFD_IOCTL_SVM_ATTR_ACCESS:
3696 get_accessible = true;
3697 break;
3698 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3699 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3700 get_flags = true;
3701 break;
3702 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3703 get_granularity = true;
3704 break;
3705 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3706 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3707 fallthrough;
3708 default:
3709 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3710 return -EINVAL;
3711 }
3712 }
3713
3714 svms = &p->svms;
3715
3716 mutex_lock(&svms->lock);
3717
3718 node = interval_tree_iter_first(&svms->objects, start, last);
3719 if (!node) {
3720 pr_debug("range attrs not found return default values\n");
3721 svm_range_set_default_attributes(&location, &prefetch_loc,
3722 &granularity, &flags_and);
3723 flags_or = flags_and;
3724 if (p->xnack_enabled)
3725 bitmap_copy(bitmap_access, svms->bitmap_supported,
3726 MAX_GPU_INSTANCE);
3727 else
3728 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3729 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3730 goto fill_values;
3731 }
3732 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3733 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3734
3735 while (node) {
3736 struct interval_tree_node *next;
3737
3738 prange = container_of(node, struct svm_range, it_node);
3739 next = interval_tree_iter_next(node, start, last);
3740
3741 if (get_preferred_loc) {
3742 if (prange->preferred_loc ==
3743 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3744 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3745 location != prange->preferred_loc)) {
3746 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3747 get_preferred_loc = false;
3748 } else {
3749 location = prange->preferred_loc;
3750 }
3751 }
3752 if (get_prefetch_loc) {
3753 if (prange->prefetch_loc ==
3754 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3755 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3756 prefetch_loc != prange->prefetch_loc)) {
3757 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3758 get_prefetch_loc = false;
3759 } else {
3760 prefetch_loc = prange->prefetch_loc;
3761 }
3762 }
3763 if (get_accessible) {
3764 bitmap_and(bitmap_access, bitmap_access,
3765 prange->bitmap_access, MAX_GPU_INSTANCE);
3766 bitmap_and(bitmap_aip, bitmap_aip,
3767 prange->bitmap_aip, MAX_GPU_INSTANCE);
3768 }
3769 if (get_flags) {
3770 flags_and &= prange->flags;
3771 flags_or |= prange->flags;
3772 }
3773
3774 if (get_granularity && prange->granularity < granularity)
3775 granularity = prange->granularity;
3776
3777 node = next;
3778 }
3779 fill_values:
3780 mutex_unlock(&svms->lock);
3781
3782 for (i = 0; i < nattr; i++) {
3783 switch (attrs[i].type) {
3784 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3785 attrs[i].value = location;
3786 break;
3787 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3788 attrs[i].value = prefetch_loc;
3789 break;
3790 case KFD_IOCTL_SVM_ATTR_ACCESS:
3791 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3792 attrs[i].value);
3793 if (gpuidx < 0) {
3794 pr_debug("invalid gpuid %x\n", attrs[i].value);
3795 return -EINVAL;
3796 }
3797 if (test_bit(gpuidx, bitmap_access))
3798 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3799 else if (test_bit(gpuidx, bitmap_aip))
3800 attrs[i].type =
3801 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3802 else
3803 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3804 break;
3805 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3806 attrs[i].value = flags_and;
3807 break;
3808 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3809 attrs[i].value = ~flags_or;
3810 break;
3811 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3812 attrs[i].value = (uint32_t)granularity;
3813 break;
3814 }
3815 }
3816
3817 return 0;
3818 }
3819
kfd_criu_resume_svm(struct kfd_process * p)3820 int kfd_criu_resume_svm(struct kfd_process *p)
3821 {
3822 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
3823 int nattr_common = 4, nattr_accessibility = 1;
3824 struct criu_svm_metadata *criu_svm_md = NULL;
3825 struct svm_range_list *svms = &p->svms;
3826 struct criu_svm_metadata *next = NULL;
3827 uint32_t set_flags = 0xffffffff;
3828 int i, j, num_attrs, ret = 0;
3829 uint64_t set_attr_size;
3830 struct mm_struct *mm;
3831
3832 if (list_empty(&svms->criu_svm_metadata_list)) {
3833 pr_debug("No SVM data from CRIU restore stage 2\n");
3834 return ret;
3835 }
3836
3837 mm = get_task_mm(p->lead_thread);
3838 if (!mm) {
3839 pr_err("failed to get mm for the target process\n");
3840 return -ESRCH;
3841 }
3842
3843 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
3844
3845 i = j = 0;
3846 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3847 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
3848 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
3849
3850 for (j = 0; j < num_attrs; j++) {
3851 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
3852 i, j, criu_svm_md->data.attrs[j].type,
3853 i, j, criu_svm_md->data.attrs[j].value);
3854 switch (criu_svm_md->data.attrs[j].type) {
3855 /* During Checkpoint operation, the query for
3856 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
3857 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if they were
3858 * not used by the range which was checkpointed. Care
3859 * must be taken to not restore with an invalid value
3860 * otherwise the gpuidx value will be invalid and
3861 * set_attr would eventually fail so just replace those
3862 * with another dummy attribute such as
3863 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
3864 */
3865 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3866 if (criu_svm_md->data.attrs[j].value ==
3867 KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
3868 criu_svm_md->data.attrs[j].type =
3869 KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3870 criu_svm_md->data.attrs[j].value = 0;
3871 }
3872 break;
3873 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3874 set_flags = criu_svm_md->data.attrs[j].value;
3875 break;
3876 default:
3877 break;
3878 }
3879 }
3880
3881 /* CLR_FLAGS is not available via get_attr during checkpoint but
3882 * it needs to be inserted before restoring the ranges so
3883 * allocate extra space for it before calling set_attr
3884 */
3885 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3886 (num_attrs + 1);
3887 set_attr_new = krealloc(set_attr, set_attr_size,
3888 GFP_KERNEL);
3889 if (!set_attr_new) {
3890 ret = -ENOMEM;
3891 goto exit;
3892 }
3893 set_attr = set_attr_new;
3894
3895 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
3896 sizeof(struct kfd_ioctl_svm_attribute));
3897 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
3898 set_attr[num_attrs].value = ~set_flags;
3899
3900 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3901 criu_svm_md->data.size, num_attrs + 1,
3902 set_attr);
3903 if (ret) {
3904 pr_err("CRIU: failed to set range attributes\n");
3905 goto exit;
3906 }
3907
3908 i++;
3909 }
3910 exit:
3911 kfree(set_attr);
3912 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
3913 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
3914 criu_svm_md->data.start_addr);
3915 kfree(criu_svm_md);
3916 }
3917
3918 mmput(mm);
3919 return ret;
3920
3921 }
3922
kfd_criu_restore_svm(struct kfd_process * p,uint8_t __user * user_priv_ptr,uint64_t * priv_data_offset,uint64_t max_priv_data_size)3923 int kfd_criu_restore_svm(struct kfd_process *p,
3924 uint8_t __user *user_priv_ptr,
3925 uint64_t *priv_data_offset,
3926 uint64_t max_priv_data_size)
3927 {
3928 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
3929 int nattr_common = 4, nattr_accessibility = 1;
3930 struct criu_svm_metadata *criu_svm_md = NULL;
3931 struct svm_range_list *svms = &p->svms;
3932 uint32_t num_devices;
3933 int ret = 0;
3934
3935 num_devices = p->n_pdds;
3936 /* Handle one SVM range object at a time, also the number of gpus are
3937 * assumed to be same on the restore node, checking must be done while
3938 * evaluating the topology earlier
3939 */
3940
3941 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
3942 (nattr_common + nattr_accessibility * num_devices);
3943 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
3944
3945 svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3946 svm_attrs_size;
3947
3948 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
3949 if (!criu_svm_md) {
3950 pr_err("failed to allocate memory to store svm metadata\n");
3951 return -ENOMEM;
3952 }
3953 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
3954 ret = -EINVAL;
3955 goto exit;
3956 }
3957
3958 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
3959 svm_priv_data_size);
3960 if (ret) {
3961 ret = -EFAULT;
3962 goto exit;
3963 }
3964 *priv_data_offset += svm_priv_data_size;
3965
3966 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
3967
3968 return 0;
3969
3970
3971 exit:
3972 kfree(criu_svm_md);
3973 return ret;
3974 }
3975
svm_range_get_info(struct kfd_process * p,uint32_t * num_svm_ranges,uint64_t * svm_priv_data_size)3976 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
3977 uint64_t *svm_priv_data_size)
3978 {
3979 uint64_t total_size, accessibility_size, common_attr_size;
3980 int nattr_common = 4, nattr_accessibility = 1;
3981 int num_devices = p->n_pdds;
3982 struct svm_range_list *svms;
3983 struct svm_range *prange;
3984 uint32_t count = 0;
3985
3986 *svm_priv_data_size = 0;
3987
3988 svms = &p->svms;
3989 if (!svms)
3990 return -EINVAL;
3991
3992 mutex_lock(&svms->lock);
3993 list_for_each_entry(prange, &svms->list, list) {
3994 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
3995 prange, prange->start, prange->npages,
3996 prange->start + prange->npages - 1);
3997 count++;
3998 }
3999 mutex_unlock(&svms->lock);
4000
4001 *num_svm_ranges = count;
4002 /* Only the accessbility attributes need to be queried for all the gpus
4003 * individually, remaining ones are spanned across the entire process
4004 * regardless of the various gpu nodes. Of the remaining attributes,
4005 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
4006 *
4007 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
4008 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
4009 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
4010 * KFD_IOCTL_SVM_ATTR_GRANULARITY
4011 *
4012 * ** ACCESSBILITY ATTRIBUTES **
4013 * (Considered as one, type is altered during query, value is gpuid)
4014 * KFD_IOCTL_SVM_ATTR_ACCESS
4015 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
4016 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
4017 */
4018 if (*num_svm_ranges > 0) {
4019 common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4020 nattr_common;
4021 accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
4022 nattr_accessibility * num_devices;
4023
4024 total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
4025 common_attr_size + accessibility_size;
4026
4027 *svm_priv_data_size = *num_svm_ranges * total_size;
4028 }
4029
4030 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
4031 *svm_priv_data_size);
4032 return 0;
4033 }
4034
kfd_criu_checkpoint_svm(struct kfd_process * p,uint8_t __user * user_priv_data,uint64_t * priv_data_offset)4035 int kfd_criu_checkpoint_svm(struct kfd_process *p,
4036 uint8_t __user *user_priv_data,
4037 uint64_t *priv_data_offset)
4038 {
4039 struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
4040 struct kfd_ioctl_svm_attribute *query_attr = NULL;
4041 uint64_t svm_priv_data_size, query_attr_size = 0;
4042 int index, nattr_common = 4, ret = 0;
4043 struct svm_range_list *svms;
4044 int num_devices = p->n_pdds;
4045 struct svm_range *prange;
4046 struct mm_struct *mm;
4047
4048 svms = &p->svms;
4049 if (!svms)
4050 return -EINVAL;
4051
4052 mm = get_task_mm(p->lead_thread);
4053 if (!mm) {
4054 pr_err("failed to get mm for the target process\n");
4055 return -ESRCH;
4056 }
4057
4058 query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
4059 (nattr_common + num_devices);
4060
4061 query_attr = kzalloc(query_attr_size, GFP_KERNEL);
4062 if (!query_attr) {
4063 ret = -ENOMEM;
4064 goto exit;
4065 }
4066
4067 query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
4068 query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
4069 query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
4070 query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
4071
4072 for (index = 0; index < num_devices; index++) {
4073 struct kfd_process_device *pdd = p->pdds[index];
4074
4075 query_attr[index + nattr_common].type =
4076 KFD_IOCTL_SVM_ATTR_ACCESS;
4077 query_attr[index + nattr_common].value = pdd->user_gpu_id;
4078 }
4079
4080 svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
4081
4082 svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
4083 if (!svm_priv) {
4084 ret = -ENOMEM;
4085 goto exit_query;
4086 }
4087
4088 index = 0;
4089 list_for_each_entry(prange, &svms->list, list) {
4090
4091 svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
4092 svm_priv->start_addr = prange->start;
4093 svm_priv->size = prange->npages;
4094 memcpy(&svm_priv->attrs, query_attr, query_attr_size);
4095 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4096 prange, prange->start, prange->npages,
4097 prange->start + prange->npages - 1,
4098 prange->npages * PAGE_SIZE);
4099
4100 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4101 svm_priv->size,
4102 (nattr_common + num_devices),
4103 svm_priv->attrs);
4104 if (ret) {
4105 pr_err("CRIU: failed to obtain range attributes\n");
4106 goto exit_priv;
4107 }
4108
4109 if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
4110 svm_priv_data_size)) {
4111 pr_err("Failed to copy svm priv to user\n");
4112 ret = -EFAULT;
4113 goto exit_priv;
4114 }
4115
4116 *priv_data_offset += svm_priv_data_size;
4117
4118 }
4119
4120
4121 exit_priv:
4122 kfree(svm_priv);
4123 exit_query:
4124 kfree(query_attr);
4125 exit:
4126 mmput(mm);
4127 return ret;
4128 }
4129
4130 int
svm_ioctl(struct kfd_process * p,enum kfd_ioctl_svm_op op,uint64_t start,uint64_t size,uint32_t nattrs,struct kfd_ioctl_svm_attribute * attrs)4131 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
4132 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
4133 {
4134 struct mm_struct *mm = current->mm;
4135 int r;
4136
4137 start >>= PAGE_SHIFT;
4138 size >>= PAGE_SHIFT;
4139
4140 switch (op) {
4141 case KFD_IOCTL_SVM_OP_SET_ATTR:
4142 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4143 break;
4144 case KFD_IOCTL_SVM_OP_GET_ATTR:
4145 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
4146 break;
4147 default:
4148 r = EINVAL;
4149 break;
4150 }
4151
4152 return r;
4153 }
4154