/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

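/* kfd_procfs_show - Show handler shared by all per-process sysfs
 * attributes. Dispatches on the attribute name: "pasid" belongs to the
 * kfd_process itself, "vram_<gpuid>" to the per-device data.
 */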
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		if (pdd)
			return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else {
		pr_err("Invalid attribute\n");
		return -EINVAL;
	}

	return 0;
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

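/* kfd_procfs_init - Create the "proc" folder under the KFD device
 * kobject that holds the per-process directories. Failure is not
 * fatal to the driver, so we only warn and clean up.
 */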
void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder\n");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}

static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d\n", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u\n", q->device->id);
	else
		pr_err("Invalid attribute\n");

	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_attrs = procfs_queue_attrs,
};

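/* kfd_procfs_add_queue - Expose a queue under proc/<pid>/queues/<queue id>.
 * The reference taken on the queue kobject here is dropped again by
 * kfd_procfs_del_queue when the queue is destroyed.
 */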
int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
			proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed\n",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}

int kfd_procfs_add_vram_usage(struct kfd_process *p)
{
	int ret = 0;
	struct kfd_process_device *pdd;

	if (!p)
		return -EINVAL;

	if (!p->kobj)
		return -EFAULT;

	/* Create proc/<pid>/vram_<gpuid> file for each GPU */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		snprintf(pdd->vram_filename, MAX_VRAM_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		pdd->attr_vram.name = pdd->vram_filename;
		pdd->attr_vram.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&pdd->attr_vram);
		ret = sysfs_create_file(p->kobj, &pdd->attr_vram);
		if (ret)
			pr_warn("Creating vram usage for gpu id %d failed\n",
				(int)pdd->dev->id);
	}

	return ret;
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

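/* kfd_process_create_wq - Allocate the process and restore workqueues.
 * On failure both workqueues are torn down again so that a later call
 * can retry cleanly.
 */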
int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

static void kfd_process_free_gpuvm(struct kgd_mem *mem,
			struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *	This function should only be called right after the process
 *	is created and while kfd_processes_mutex is still being held,
 *	to avoid concurrency. Because of that exclusiveness, we do
 *	not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem = NULL;
	int handle;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						 pdd->vm, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Create an obj handle so kfd_process_device_remove_obj_handle
	 * will take care of the bo removal when the process finishes.
	 * We do not need to take p->mutex, because the process is just
	 * created and the ioctls have not had the chance to run.
	 */
	handle = kfd_process_device_create_obj_handle(pdd, mem);

	if (handle < 0) {
		err = handle;
		goto free_gpuvm;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
				mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}

	return err;

free_obj_handle:
	kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
	kfd_process_free_gpuvm(mem, pdd);
	return err;

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
err_alloc_mem:
	if (kptr)
		*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The memory reserved is for KFD to submit
 *	IBs to AMDGPU from kernel. If the memory is reserved
 *	successfully, ib_kaddr will have the CPU/kernel
 *	address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &kaddr);
	if (ret)
		return ret;

	qpd->ib_kaddr = kaddr;

	return 0;
}

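/* kfd_create_process - Find or create the kfd_process for the current
 * task and take a reference on it. On first creation this also sets up
 * the CWSR trap handler and the per-process procfs entries (pasid,
 * queues folder, vram usage); procfs failures are only logged and do
 * not fail process creation.
 */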
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd_processes_mutex before starting process creation, so
	 * two threads of the same process can't race to create two
	 * kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret) {
			process = ERR_PTR(ret);
			goto out;
		}

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed\n");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed\n");
			kobject_put(process->kobj);
			process->kobj = NULL;
			goto out;
		}

		process->attr_pasid.name = "pasid";
		process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&process->attr_pasid);
		ret = sysfs_create_file(process->kobj, &process->attr_pasid);
		if (ret)
			pr_warn("Creating pasid for pid %d failed\n",
					(int)process->lead_thread->pid);

		process->kobj_queues = kobject_create_and_add("queues",
							process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed\n");

		ret = kfd_procfs_add_vram_usage(process);
		if (ret)
			pr_warn("Creating vram usage file for pid %d failed\n",
				(int)process->lead_thread->pid);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

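/* find_process_by_mm - Look up a process in the hash table by its
 * mm_struct. The caller must hold the kfd_processes_srcu read lock.
 */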
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

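/* kfd_process_device_free_bos - Free all buffer objects belonging to
 * one process-device. Each BO is unmapped from every GPU VM it may be
 * mapped into before it is released.
 */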
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;

	/*
	 * Remove all handles from the idr and release the corresponding
	 * local memory objects
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		struct kfd_process_device *peer_pdd;

		list_for_each_entry(peer_pdd, &p->per_device_data,
				    per_device_list) {
			if (!peer_pdd->vm)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->kgd, mem, peer_pdd->vm);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		kfd_process_device_free_bos(pdd);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
				pdd->dev->id, p->pasid);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->kgd, pdd->vm);
			fput(pdd->drm_file);
		} else if (pdd->vm)
			amdgpu_amdkfd_gpuvm_destroy_process_vm(
				pdd->dev->kgd, pdd->vm);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		/*
		 * Before destroying the pdd, make sure to report availability
		 * for auto suspend
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
			pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
	}
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);
	struct kfd_process_device *pdd;

	/* Remove the procfs files */
	if (p->kobj) {
		sysfs_remove_file(p->kobj, &p->attr_pasid);
		kobject_del(p->kobj_queues);
		kobject_put(p->kobj_queues);
		p->kobj_queues = NULL;

		list_for_each_entry(pdd, &p->per_device_data, per_device_list)
			sysfs_remove_file(p->kobj, &pdd->attr_vram);

		kobject_del(p->kobj);
		kobject_put(p->kobj);
		p->kobj = NULL;
	}

	kfd_iommu_unbind_process(p);

	kfd_process_free_outstanding_kfd_bos(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

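/* kfd_process_notifier_release - Called when the process address space
 * is torn down. Removes the process from the hash table, cancels any
 * pending eviction/restore work, detaches the debugger, destroys all
 * user mode queues and signals the eviction fence. The final process
 * reference is dropped via kfd_process_free_notifier.
 */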
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures and if the
	 * pdd is in debug mode, we should first force unregistration,
	 * then we will be able to destroy the queues
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;
	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	mutex_unlock(&p->mutex);

	mmu_notifier_put(&p->mmu_notifier);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.free_notifier = kfd_process_free_notifier,
};

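/* kfd_process_init_cwsr_apu - Set up the CWSR trap handler for APUs.
 * The TBA/TMA pages are mapped into the user address space through the
 * /dev/kfd file, and the trap handler ISA is copied in through the
 * kernel mapping created by kfd_reserved_mem_mmap.
 */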
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;
		struct qcm_process_device *qpd = &pdd->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address, error %d\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba: 0x%llx, tma: 0x%llx, cwsr_kaddr: %p for pqm\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba: 0x%llx, tma: 0x%llx, cwsr_kaddr: %p for pqm\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

/*
 * On return the kfd_process is fully operational and will be freed when
 * the mm is released.
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	INIT_LIST_HEAD(&process->per_device_data);
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	kfd_event_init_process(process);
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* Init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Must be last, have to use release destruction after this */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_register_notifier;

	get_task_struct(process->lead_thread);
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	return process;

err_register_notifier:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

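/* init_doorbell_bitmap - Mark doorbells that user mode must not use.
 * Only needed on SOC15 ASICs, where a range of doorbells (and its
 * 64-bit mirror range) is reserved for SDMA, IH and VCN.
 */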
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
			struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;

	if (!KFD_IS_SOC15(dev->device_info->asic_family))
		return 0;

	qpd->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
			range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
			range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= range_start && i <= range_end) {
			set_bit(i, qpd->doorbell_bitmap);
			set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				qpd->doorbell_bitmap);
		}
	}

	return 0;
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

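/* kfd_create_process_device_data - Allocate and initialize the
 * per-device data for a process and link it into the process's
 * per_device_data list. Returns NULL on allocation failure.
 */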
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		kfree(pdd);
		return NULL;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	list_add(&pdd->per_device_list, &p->per_device_data);

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (pdd->vm)
		return drm_file ? -EBUSY : 0;

	p = pdd->process;
	dev = pdd->dev;

	if (drm_file)
		ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
			dev->kgd, drm_file, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	else
		ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}

	amdgpu_vm_set_task_info(pdd->vm);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	if (!drm_file)
		amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
	pdd->vm = NULL;

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Signal the runtime-pm system to auto resume and prevent
	 * further runtime suspend once the device pdd is created,
	 * until the pdd is destroyed.
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(dev->ddev->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(dev->ddev->dev);
			return ERR_PTR(err);
		}
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		goto out;

	err = kfd_process_device_init_vm(pdd, NULL);
	if (err)
		goto out;

	/*
	 * Make sure that the runtime usage counter is incremented just
	 * once per pdd.
	 */
	pdd->runtime_inuse = true;

	return pdd;

out:
	/* balance runpm reference count and exit with error */
	if (!pdd->runtime_inuse) {
		pm_runtime_mark_last_busy(dev->ddev->dev);
		pm_runtime_put_autosuspend(dev->ddev->dev);
	}

	return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !list_empty(&p->per_device_data);
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r = 0;
	unsigned int n_evicted = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		if (r) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (n_evicted == 0)
			break;
		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r, ret = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves KFD BOs, the process can be evicted again, but the
	 * restore work still has a few more steps to finish. So let's
	 * wait for any previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else {
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
	}
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before successful restoration.
	 * Otherwise this would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process can be evicted
	 * again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quanta would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions)
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						     &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

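/* kfd_reserved_mem_mmap - mmap handler for the per-process CWSR buffer
 * on APUs. Allocates zeroed pages for the trap handler and maps them
 * into the user address space; the kernel address is kept in
 * cwsr_kaddr so the trap handler ISA can be copied in later.
 */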
int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

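/* kfd_flush_tlb - Flush the GPU TLB for one process-device. Without
 * HWS the flush is done by VMID (nothing to flush until the first
 * queue assigns one); with HWS it is done by PASID.
 */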
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
							pdd->qpd.vmid);
	} else {
		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
						pdd->process->pasid);
	}
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif