1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #include <linux/mutex.h>
25 #include <linux/log2.h>
26 #include <linux/sched.h>
27 #include <linux/sched/mm.h>
28 #include <linux/sched/task.h>
29 #include <linux/mmu_context.h>
30 #include <linux/slab.h>
31 #include <linux/amd-iommu.h>
32 #include <linux/notifier.h>
33 #include <linux/compat.h>
34 #include <linux/mman.h>
35 #include <linux/file.h>
36 #include <linux/pm_runtime.h>
37 #include "amdgpu_amdkfd.h"
38 #include "amdgpu.h"
39 
40 struct mm_struct;
41 
42 #include "kfd_priv.h"
43 #include "kfd_device_queue_manager.h"
44 #include "kfd_iommu.h"
45 #include "kfd_svm.h"
46 #include "kfd_smi_events.h"
47 #include "kfd_debug.h"
48 
/*
 * Hash table of struct kfd_process entries (linked via the kfd_processes
 * field). Uniquely indexed by mm_struct *.
 */
53 DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
54 DEFINE_MUTEX(kfd_processes_mutex);
55 
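/* SRCU protects lock-free lookups in kfd_processes_table; removal
 * synchronizes with readers via synchronize_srcu().
 */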
56 DEFINE_SRCU(kfd_processes_srcu);
57 
58 /* For process termination handling */
59 static struct workqueue_struct *kfd_process_wq;
60 
61 /* Ordered, single-threaded workqueue for restoring evicted
62  * processes. Restoring multiple processes concurrently under memory
63  * pressure can lead to processes blocking each other from validating
64  * their BOs and result in a live-lock situation where processes
65  * remain evicted indefinitely.
66  */
67 static struct workqueue_struct *kfd_restore_wq;
68 
69 static struct kfd_process *find_process(const struct task_struct *thread,
70 					bool ref);
71 static void kfd_process_ref_release(struct kref *ref);
72 static struct kfd_process *create_process(const struct task_struct *thread);
73 
74 static void evict_process_worker(struct work_struct *work);
75 static void restore_process_worker(struct work_struct *work);
76 
77 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
78 
79 struct kfd_procfs_tree {
80 	struct kobject *kobj;
81 };
82 
83 static struct kfd_procfs_tree procfs;
84 
85 /*
86  * Structure for SDMA activity tracking
87  */
88 struct kfd_sdma_activity_handler_workarea {
89 	struct work_struct sdma_activity_work;
90 	struct kfd_process_device *pdd;
91 	uint64_t sdma_activity_counter;
92 };
93 
94 struct temp_sdma_queue_list {
95 	uint64_t __user *rptr;
96 	uint64_t sdma_val;
97 	unsigned int queue_id;
98 	struct list_head list;
99 };
100 
101 static void kfd_sdma_activity_worker(struct work_struct *work)
102 {
103 	struct kfd_sdma_activity_handler_workarea *workarea;
104 	struct kfd_process_device *pdd;
105 	uint64_t val;
106 	struct mm_struct *mm;
107 	struct queue *q;
108 	struct qcm_process_device *qpd;
109 	struct device_queue_manager *dqm;
110 	int ret = 0;
111 	struct temp_sdma_queue_list sdma_q_list;
112 	struct temp_sdma_queue_list *sdma_q, *next;
113 
114 	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
115 				sdma_activity_work);
116 
117 	pdd = workarea->pdd;
118 	if (!pdd)
119 		return;
120 	dqm = pdd->dev->dqm;
121 	qpd = &pdd->qpd;
122 	if (!dqm || !qpd)
123 		return;
	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * The past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and read their counts from user space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency. To read the SDMA stats, we need to
	 * do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from qpd->queues_list,
	 *    under dqm_lock()/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without
	 *    dqm_lock. Save the SDMA count for each node and also add the
	 *    count to the total SDMA count.
	 *    It's possible that, during this step, a few SDMA queue nodes are
	 *    deleted from qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes were
	 *    deleted. A deleted node's SDMA count has already been captured in
	 *    the past activity counter, so subtract the SDMA count saved in
	 *    step 2 for that node from the total SDMA count.
	 */
146 	INIT_LIST_HEAD(&sdma_q_list.list);
147 
148 	/*
149 	 * Create the temp list of all SDMA queues
150 	 */
151 	dqm_lock(dqm);
152 
153 	list_for_each_entry(q, &qpd->queues_list, list) {
154 		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
155 		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
156 			continue;
157 
158 		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
159 		if (!sdma_q) {
160 			dqm_unlock(dqm);
161 			goto cleanup;
162 		}
163 
164 		INIT_LIST_HEAD(&sdma_q->list);
165 		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
166 		sdma_q->queue_id = q->properties.queue_id;
167 		list_add_tail(&sdma_q->list, &sdma_q_list.list);
168 	}
169 
	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
	 */
175 	if (list_empty(&sdma_q_list.list)) {
176 		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
177 		dqm_unlock(dqm);
178 		return;
179 	}
180 
181 	dqm_unlock(dqm);
182 
183 	/*
184 	 * Get the usage count for each SDMA queue in temp_list.
185 	 */
186 	mm = get_task_mm(pdd->process->lead_thread);
187 	if (!mm)
188 		goto cleanup;
189 
190 	kthread_use_mm(mm);
191 
192 	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
193 		val = 0;
194 		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
195 		if (ret) {
196 			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
197 				 sdma_q->queue_id);
198 		} else {
199 			sdma_q->sdma_val = val;
200 			workarea->sdma_activity_counter += val;
201 		}
202 	}
203 
204 	kthread_unuse_mm(mm);
205 	mmput(mm);
206 
	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes were deleted while fetching the SDMA counters.
	 */
211 	dqm_lock(dqm);
212 
213 	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;
214 
215 	list_for_each_entry(q, &qpd->queues_list, list) {
216 		if (list_empty(&sdma_q_list.list))
217 			break;
218 
219 		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
220 		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
221 			continue;
222 
223 		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
224 			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
225 			     (sdma_q->queue_id == q->properties.queue_id)) {
226 				list_del(&sdma_q->list);
227 				kfree(sdma_q);
228 				break;
229 			}
230 		}
231 	}
232 
233 	dqm_unlock(dqm);
234 
	/*
	 * If the temp list is not empty, some queues were deleted from
	 * qpd->queues_list while the SDMA usage was being read. Subtract each
	 * remaining node's SDMA count from the total SDMA count.
	 */
240 	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
241 		workarea->sdma_activity_counter -= sdma_q->sdma_val;
242 		list_del(&sdma_q->list);
243 		kfree(sdma_q);
244 	}
245 
246 	return;
247 
248 cleanup:
249 	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
250 		list_del(&sdma_q->list);
251 		kfree(sdma_q);
252 	}
253 }
254 
/**
 * kfd_get_cu_occupancy - Collect the number of waves the current process has
 * in flight on this device and translate that wave count into the number of
 * occupied compute units.
 *
 * @attr: Handle of the attribute that allows reporting of the wave count. The
 * attribute handle encapsulates the GPU device it is associated with, thereby
 * allowing collection of the waves in flight, etc.
 * @buffer: Handle of the user-provided buffer updated with the wave count
 *
 * Return: Number of bytes written to the user buffer or an error value
 */
267 static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
268 {
269 	int cu_cnt;
270 	int wave_cnt;
271 	int max_waves_per_cu;
272 	struct kfd_node *dev = NULL;
273 	struct kfd_process *proc = NULL;
274 	struct kfd_process_device *pdd = NULL;
275 
276 	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
277 	dev = pdd->dev;
278 	if (dev->kfd2kgd->get_cu_occupancy == NULL)
279 		return -EINVAL;
280 
281 	cu_cnt = 0;
282 	proc = pdd->process;
283 	if (pdd->qpd.queue_count == 0) {
284 		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
285 			 dev->id, proc->pasid);
286 		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
287 	}
288 
	/* Collect wave count from the device if it supports it */
290 	wave_cnt = 0;
291 	max_waves_per_cu = 0;
292 	dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
293 			&max_waves_per_cu, 0);
294 
295 	/* Translate wave count to number of compute units */
296 	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
297 	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
298 }
299 
300 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
301 			       char *buffer)
302 {
303 	if (strcmp(attr->name, "pasid") == 0) {
304 		struct kfd_process *p = container_of(attr, struct kfd_process,
305 						     attr_pasid);
306 
307 		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
308 	} else if (strncmp(attr->name, "vram_", 5) == 0) {
309 		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
310 							      attr_vram);
311 		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
312 	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
313 		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
314 							      attr_sdma);
315 		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
316 
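		/*
		 * Reading the SDMA counters requires borrowing the target
		 * process's mm with kthread_use_mm(), so the read is done in
		 * a workqueue worker. The work item lives on this stack
		 * frame, so flush it before returning.
		 */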
317 		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
318 					kfd_sdma_activity_worker);
319 
320 		sdma_activity_work_handler.pdd = pdd;
321 		sdma_activity_work_handler.sdma_activity_counter = 0;
322 
323 		schedule_work(&sdma_activity_work_handler.sdma_activity_work);
324 
325 		flush_work(&sdma_activity_work_handler.sdma_activity_work);
326 
327 		return snprintf(buffer, PAGE_SIZE, "%llu\n",
328 				(sdma_activity_work_handler.sdma_activity_counter)/
329 				 SDMA_ACTIVITY_DIVISOR);
330 	} else {
331 		pr_err("Invalid attribute");
332 		return -EINVAL;
333 	}
334 
335 	return 0;
336 }
337 
338 static void kfd_procfs_kobj_release(struct kobject *kobj)
339 {
340 	kfree(kobj);
341 }
342 
343 static const struct sysfs_ops kfd_procfs_ops = {
344 	.show = kfd_procfs_show,
345 };
346 
347 static const struct kobj_type procfs_type = {
348 	.release = kfd_procfs_kobj_release,
349 	.sysfs_ops = &kfd_procfs_ops,
350 };
351 
352 void kfd_procfs_init(void)
353 {
354 	int ret = 0;
355 
356 	procfs.kobj = kfd_alloc_struct(procfs.kobj);
357 	if (!procfs.kobj)
358 		return;
359 
360 	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
361 				   &kfd_device->kobj, "proc");
362 	if (ret) {
363 		pr_warn("Could not create procfs proc folder");
364 		/* If we fail to create the procfs, clean up */
365 		kfd_procfs_shutdown();
366 	}
367 }
368 
369 void kfd_procfs_shutdown(void)
370 {
371 	if (procfs.kobj) {
372 		kobject_del(procfs.kobj);
373 		kobject_put(procfs.kobj);
374 		procfs.kobj = NULL;
375 	}
376 }
377 
378 static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
379 				     struct attribute *attr, char *buffer)
380 {
381 	struct queue *q = container_of(kobj, struct queue, kobj);
382 
383 	if (!strcmp(attr->name, "size"))
384 		return snprintf(buffer, PAGE_SIZE, "%llu",
385 				q->properties.queue_size);
386 	else if (!strcmp(attr->name, "type"))
387 		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
388 	else if (!strcmp(attr->name, "gpuid"))
389 		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
390 	else
391 		pr_err("Invalid attribute");
392 
393 	return 0;
394 }
395 
396 static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
397 				     struct attribute *attr, char *buffer)
398 {
399 	if (strcmp(attr->name, "evicted_ms") == 0) {
400 		struct kfd_process_device *pdd = container_of(attr,
401 				struct kfd_process_device,
402 				attr_evict);
403 		uint64_t evict_jiffies;
404 
405 		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
406 
407 		return snprintf(buffer,
408 				PAGE_SIZE,
409 				"%llu\n",
410 				jiffies64_to_msecs(evict_jiffies));
411 
412 	/* Sysfs handle that gets CU occupancy is per device */
413 	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
414 		return kfd_get_cu_occupancy(attr, buffer);
415 	} else {
416 		pr_err("Invalid attribute");
417 	}
418 
419 	return 0;
420 }
421 
422 static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
423 				       struct attribute *attr, char *buf)
424 {
425 	struct kfd_process_device *pdd;
426 
427 	if (!strcmp(attr->name, "faults")) {
428 		pdd = container_of(attr, struct kfd_process_device,
429 				   attr_faults);
430 		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
431 	}
432 	if (!strcmp(attr->name, "page_in")) {
433 		pdd = container_of(attr, struct kfd_process_device,
434 				   attr_page_in);
435 		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
436 	}
437 	if (!strcmp(attr->name, "page_out")) {
438 		pdd = container_of(attr, struct kfd_process_device,
439 				   attr_page_out);
440 		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
441 	}
442 	return 0;
443 }
444 
445 static struct attribute attr_queue_size = {
446 	.name = "size",
447 	.mode = KFD_SYSFS_FILE_MODE
448 };
449 
450 static struct attribute attr_queue_type = {
451 	.name = "type",
452 	.mode = KFD_SYSFS_FILE_MODE
453 };
454 
455 static struct attribute attr_queue_gpuid = {
456 	.name = "gpuid",
457 	.mode = KFD_SYSFS_FILE_MODE
458 };
459 
460 static struct attribute *procfs_queue_attrs[] = {
461 	&attr_queue_size,
462 	&attr_queue_type,
463 	&attr_queue_gpuid,
464 	NULL
465 };
466 ATTRIBUTE_GROUPS(procfs_queue);
467 
468 static const struct sysfs_ops procfs_queue_ops = {
469 	.show = kfd_procfs_queue_show,
470 };
471 
472 static const struct kobj_type procfs_queue_type = {
473 	.sysfs_ops = &procfs_queue_ops,
474 	.default_groups = procfs_queue_groups,
475 };
476 
477 static const struct sysfs_ops procfs_stats_ops = {
478 	.show = kfd_procfs_stats_show,
479 };
480 
481 static const struct kobj_type procfs_stats_type = {
482 	.sysfs_ops = &procfs_stats_ops,
483 	.release = kfd_procfs_kobj_release,
484 };
485 
486 static const struct sysfs_ops sysfs_counters_ops = {
487 	.show = kfd_sysfs_counters_show,
488 };
489 
490 static const struct kobj_type sysfs_counters_type = {
491 	.sysfs_ops = &sysfs_counters_ops,
492 	.release = kfd_procfs_kobj_release,
493 };
494 
495 int kfd_procfs_add_queue(struct queue *q)
496 {
497 	struct kfd_process *proc;
498 	int ret;
499 
500 	if (!q || !q->process)
501 		return -EINVAL;
502 	proc = q->process;
503 
504 	/* Create proc/<pid>/queues/<queue id> folder */
505 	if (!proc->kobj_queues)
506 		return -EFAULT;
507 	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
508 			proc->kobj_queues, "%u", q->properties.queue_id);
509 	if (ret < 0) {
510 		pr_warn("Creating proc/<pid>/queues/%u failed",
511 			q->properties.queue_id);
512 		kobject_put(&q->kobj);
513 		return ret;
514 	}
515 
516 	return 0;
517 }
518 
519 static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
520 				 char *name)
521 {
522 	int ret;
523 
524 	if (!kobj || !attr || !name)
525 		return;
526 
527 	attr->name = name;
528 	attr->mode = KFD_SYSFS_FILE_MODE;
529 	sysfs_attr_init(attr);
530 
531 	ret = sysfs_create_file(kobj, attr);
532 	if (ret)
533 		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
534 }
535 
536 static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
537 {
538 	int ret;
539 	int i;
540 	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
541 
542 	if (!p || !p->kobj)
543 		return;
544 
545 	/*
546 	 * Create sysfs files for each GPU:
547 	 * - proc/<pid>/stats_<gpuid>/
548 	 * - proc/<pid>/stats_<gpuid>/evicted_ms
549 	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
550 	 */
551 	for (i = 0; i < p->n_pdds; i++) {
552 		struct kfd_process_device *pdd = p->pdds[i];
553 
554 		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
555 				"stats_%u", pdd->dev->id);
556 		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
557 		if (!pdd->kobj_stats)
558 			return;
559 
560 		ret = kobject_init_and_add(pdd->kobj_stats,
561 					   &procfs_stats_type,
562 					   p->kobj,
563 					   stats_dir_filename);
564 
565 		if (ret) {
566 			pr_warn("Creating KFD proc/stats_%s folder failed",
567 				stats_dir_filename);
568 			kobject_put(pdd->kobj_stats);
569 			pdd->kobj_stats = NULL;
570 			return;
571 		}
572 
573 		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
574 				      "evicted_ms");
575 		/* Add sysfs file to report compute unit occupancy */
576 		if (pdd->dev->kfd2kgd->get_cu_occupancy)
577 			kfd_sysfs_create_file(pdd->kobj_stats,
578 					      &pdd->attr_cu_occupancy,
579 					      "cu_occupancy");
580 	}
581 }
582 
583 static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
584 {
585 	int ret = 0;
586 	int i;
587 	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
588 
589 	if (!p || !p->kobj)
590 		return;
591 
	/*
	 * Create sysfs files for each GPU that supports SVM:
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
599 	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
600 		struct kfd_process_device *pdd = p->pdds[i];
601 		struct kobject *kobj_counters;
602 
603 		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
604 			"counters_%u", pdd->dev->id);
605 		kobj_counters = kfd_alloc_struct(kobj_counters);
606 		if (!kobj_counters)
607 			return;
608 
609 		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
610 					   p->kobj, counters_dir_filename);
611 		if (ret) {
612 			pr_warn("Creating KFD proc/%s folder failed",
613 				counters_dir_filename);
614 			kobject_put(kobj_counters);
615 			return;
616 		}
617 
618 		pdd->kobj_counters = kobj_counters;
619 		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
620 				      "faults");
621 		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
622 				      "page_in");
623 		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
624 				      "page_out");
625 	}
626 }
627 
628 static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
629 {
630 	int i;
631 
632 	if (!p || !p->kobj)
633 		return;
634 
635 	/*
636 	 * Create sysfs files for each GPU:
637 	 * - proc/<pid>/vram_<gpuid>
638 	 * - proc/<pid>/sdma_<gpuid>
639 	 */
640 	for (i = 0; i < p->n_pdds; i++) {
641 		struct kfd_process_device *pdd = p->pdds[i];
642 
643 		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
644 			 pdd->dev->id);
645 		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
646 				      pdd->vram_filename);
647 
648 		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
649 			 pdd->dev->id);
650 		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
651 					    pdd->sdma_filename);
652 	}
653 }
654 
655 void kfd_procfs_del_queue(struct queue *q)
656 {
657 	if (!q)
658 		return;
659 
660 	kobject_del(&q->kobj);
661 	kobject_put(&q->kobj);
662 }
663 
664 int kfd_process_create_wq(void)
665 {
666 	if (!kfd_process_wq)
667 		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
668 	if (!kfd_restore_wq)
669 		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
670 
671 	if (!kfd_process_wq || !kfd_restore_wq) {
672 		kfd_process_destroy_wq();
673 		return -ENOMEM;
674 	}
675 
676 	return 0;
677 }
678 
679 void kfd_process_destroy_wq(void)
680 {
681 	if (kfd_process_wq) {
682 		destroy_workqueue(kfd_process_wq);
683 		kfd_process_wq = NULL;
684 	}
685 	if (kfd_restore_wq) {
686 		destroy_workqueue(kfd_restore_wq);
687 		kfd_restore_wq = NULL;
688 	}
689 }
690 
691 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
692 			struct kfd_process_device *pdd, void **kptr)
693 {
694 	struct kfd_node *dev = pdd->dev;
695 
696 	if (kptr && *kptr) {
697 		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
698 		*kptr = NULL;
699 	}
700 
701 	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
702 	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
703 					       NULL);
704 }
705 
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *	This function should only be called right after the process
 *	is created and while kfd_processes_mutex is still held, to
 *	avoid concurrency. Because of that exclusiveness, we do
 *	not need to take p->mutex.
 */
712 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
713 				   uint64_t gpu_va, uint32_t size,
714 				   uint32_t flags, struct kgd_mem **mem, void **kptr)
715 {
716 	struct kfd_node *kdev = pdd->dev;
717 	int err;
718 
719 	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
720 						 pdd->drm_priv, mem, NULL,
721 						 flags, false);
722 	if (err)
723 		goto err_alloc_mem;
724 
725 	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
726 			pdd->drm_priv);
727 	if (err)
728 		goto err_map_mem;
729 
730 	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
731 	if (err) {
732 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
733 		goto sync_memory_failed;
734 	}
735 
736 	if (kptr) {
737 		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
738 				(struct kgd_mem *)*mem, kptr, NULL);
739 		if (err) {
740 			pr_debug("Map GTT BO to kernel failed\n");
741 			goto sync_memory_failed;
742 		}
743 	}
744 
745 	return err;
746 
747 sync_memory_failed:
748 	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
749 
750 err_map_mem:
751 	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
752 					       NULL);
753 err_alloc_mem:
754 	*mem = NULL;
755 	*kptr = NULL;
756 	return err;
757 }
758 
/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The reserved memory is used by KFD to
 *	submit IBs to AMDGPU from the kernel. If the memory is reserved
 *	successfully, ib_kaddr will hold the CPU/kernel address.
 *	Check ib_kaddr before accessing the memory.
 */
765 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
766 {
767 	struct qcm_process_device *qpd = &pdd->qpd;
768 	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
769 			KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
770 			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
771 			KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
772 	struct kgd_mem *mem;
773 	void *kaddr;
774 	int ret;
775 
776 	if (qpd->ib_kaddr || !qpd->ib_base)
777 		return 0;
778 
779 	/* ib_base is only set for dGPU */
780 	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
781 				      &mem, &kaddr);
782 	if (ret)
783 		return ret;
784 
785 	qpd->ib_mem = mem;
786 	qpd->ib_kaddr = kaddr;
787 
788 	return 0;
789 }
790 
791 static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
792 {
793 	struct qcm_process_device *qpd = &pdd->qpd;
794 
795 	if (!qpd->ib_kaddr || !qpd->ib_base)
796 		return;
797 
798 	kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
799 }
800 
801 struct kfd_process *kfd_create_process(struct task_struct *thread)
802 {
803 	struct kfd_process *process;
804 	int ret;
805 
806 	if (!(thread->mm && mmget_not_zero(thread->mm)))
807 		return ERR_PTR(-EINVAL);
808 
809 	/* Only the pthreads threading model is supported. */
810 	if (thread->group_leader->mm != thread->mm) {
811 		mmput(thread->mm);
812 		return ERR_PTR(-EINVAL);
813 	}
814 
	/*
	 * Take the kfd_processes_mutex before starting process creation so
	 * that two threads of the same process cannot create two kfd_process
	 * structures.
	 */
820 	mutex_lock(&kfd_processes_mutex);
821 
822 	if (kfd_is_locked()) {
823 		mutex_unlock(&kfd_processes_mutex);
824 		pr_debug("KFD is locked! Cannot create process");
825 		return ERR_PTR(-EINVAL);
826 	}
827 
828 	/* A prior open of /dev/kfd could have already created the process. */
829 	process = find_process(thread, false);
830 	if (process) {
831 		pr_debug("Process already found\n");
832 	} else {
833 		process = create_process(thread);
834 		if (IS_ERR(process))
835 			goto out;
836 
837 		if (!procfs.kobj)
838 			goto out;
839 
840 		process->kobj = kfd_alloc_struct(process->kobj);
841 		if (!process->kobj) {
842 			pr_warn("Creating procfs kobject failed");
843 			goto out;
844 		}
845 		ret = kobject_init_and_add(process->kobj, &procfs_type,
846 					   procfs.kobj, "%d",
847 					   (int)process->lead_thread->pid);
848 		if (ret) {
849 			pr_warn("Creating procfs pid directory failed");
850 			kobject_put(process->kobj);
851 			goto out;
852 		}
853 
854 		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
855 				      "pasid");
856 
857 		process->kobj_queues = kobject_create_and_add("queues",
858 							process->kobj);
859 		if (!process->kobj_queues)
860 			pr_warn("Creating KFD proc/queues folder failed");
861 
862 		kfd_procfs_add_sysfs_stats(process);
863 		kfd_procfs_add_sysfs_files(process);
864 		kfd_procfs_add_sysfs_counters(process);
865 
866 		init_waitqueue_head(&process->wait_irq_drain);
867 	}
868 out:
869 	if (!IS_ERR(process))
870 		kref_get(&process->ref);
871 	mutex_unlock(&kfd_processes_mutex);
872 	mmput(thread->mm);
873 
874 	return process;
875 }
876 
877 struct kfd_process *kfd_get_process(const struct task_struct *thread)
878 {
879 	struct kfd_process *process;
880 
881 	if (!thread->mm)
882 		return ERR_PTR(-EINVAL);
883 
884 	/* Only the pthreads threading model is supported. */
885 	if (thread->group_leader->mm != thread->mm)
886 		return ERR_PTR(-EINVAL);
887 
888 	process = find_process(thread, false);
889 	if (!process)
890 		return ERR_PTR(-EINVAL);
891 
892 	return process;
893 }
894 
895 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
896 {
897 	struct kfd_process *process;
898 
899 	hash_for_each_possible_rcu(kfd_processes_table, process,
900 					kfd_processes, (uintptr_t)mm)
901 		if (process->mm == mm)
902 			return process;
903 
904 	return NULL;
905 }
906 
907 static struct kfd_process *find_process(const struct task_struct *thread,
908 					bool ref)
909 {
910 	struct kfd_process *p;
911 	int idx;
912 
913 	idx = srcu_read_lock(&kfd_processes_srcu);
914 	p = find_process_by_mm(thread->mm);
915 	if (p && ref)
916 		kref_get(&p->ref);
917 	srcu_read_unlock(&kfd_processes_srcu, idx);
918 
919 	return p;
920 }
921 
922 void kfd_unref_process(struct kfd_process *p)
923 {
924 	kref_put(&p->ref, kfd_process_ref_release);
925 }
926 
927 /* This increments the process->ref counter. */
928 struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
929 {
930 	struct task_struct *task = NULL;
931 	struct kfd_process *p    = NULL;
932 
933 	if (!pid) {
934 		task = current;
935 		get_task_struct(task);
936 	} else {
937 		task = get_pid_task(pid, PIDTYPE_PID);
938 	}
939 
940 	if (task) {
941 		p = find_process(task, true);
942 		put_task_struct(task);
943 	}
944 
945 	return p;
946 }
947 
948 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
949 {
950 	struct kfd_process *p = pdd->process;
951 	void *mem;
952 	int id;
953 	int i;
954 
	/*
	 * Remove all handles from the idr and release the corresponding
	 * local memory objects.
	 */
959 	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
960 
961 		for (i = 0; i < p->n_pdds; i++) {
962 			struct kfd_process_device *peer_pdd = p->pdds[i];
963 
964 			if (!peer_pdd->drm_priv)
965 				continue;
966 			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
967 				peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
968 		}
969 
970 		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
971 						       pdd->drm_priv, NULL);
972 		kfd_process_device_remove_obj_handle(pdd, id);
973 	}
974 }
975 
976 /*
977  * Just kunmap and unpin signal BO here. It will be freed in
978  * kfd_process_free_outstanding_kfd_bos()
979  */
980 static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
981 {
982 	struct kfd_process_device *pdd;
983 	struct kfd_node *kdev;
984 	void *mem;
985 
986 	kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
987 	if (!kdev)
988 		return;
989 
990 	mutex_lock(&p->mutex);
991 
992 	pdd = kfd_get_process_device_data(kdev, p);
993 	if (!pdd)
994 		goto out;
995 
996 	mem = kfd_process_device_translate_handle(
997 		pdd, GET_IDR_HANDLE(p->signal_handle));
998 	if (!mem)
999 		goto out;
1000 
1001 	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
1002 
1003 out:
1004 	mutex_unlock(&p->mutex);
1005 }
1006 
1007 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
1008 {
1009 	int i;
1010 
1011 	for (i = 0; i < p->n_pdds; i++)
1012 		kfd_process_device_free_bos(p->pdds[i]);
1013 }
1014 
1015 static void kfd_process_destroy_pdds(struct kfd_process *p)
1016 {
1017 	int i;
1018 
1019 	for (i = 0; i < p->n_pdds; i++) {
1020 		struct kfd_process_device *pdd = p->pdds[i];
1021 
1022 		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
1023 				pdd->dev->id, p->pasid);
1024 
1025 		kfd_process_device_destroy_cwsr_dgpu(pdd);
1026 		kfd_process_device_destroy_ib_mem(pdd);
1027 
1028 		if (pdd->drm_file) {
1029 			amdgpu_amdkfd_gpuvm_release_process_vm(
1030 					pdd->dev->adev, pdd->drm_priv);
1031 			fput(pdd->drm_file);
1032 		}
1033 
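		/*
		 * Without a cwsr_base this is the APU case, where the CWSR
		 * trap handler area was allocated from kernel pages rather
		 * than GPU memory; free those pages here.
		 */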
1034 		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
1035 			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
1036 				get_order(KFD_CWSR_TBA_TMA_SIZE));
1037 
1038 		idr_destroy(&pdd->alloc_idr);
1039 
1040 		kfd_free_process_doorbells(pdd->dev->kfd, pdd);
1041 
1042 		if (pdd->dev->kfd->shared_resources.enable_mes)
1043 			amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
1044 						   pdd->proc_ctx_bo);
1045 		/*
1046 		 * before destroying pdd, make sure to report availability
1047 		 * for auto suspend
1048 		 */
1049 		if (pdd->runtime_inuse) {
1050 			pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
1051 			pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
1052 			pdd->runtime_inuse = false;
1053 		}
1054 
1055 		kfree(pdd);
1056 		p->pdds[i] = NULL;
1057 	}
1058 	p->n_pdds = 0;
1059 }
1060 
1061 static void kfd_process_remove_sysfs(struct kfd_process *p)
1062 {
1063 	struct kfd_process_device *pdd;
1064 	int i;
1065 
1066 	if (!p->kobj)
1067 		return;
1068 
1069 	sysfs_remove_file(p->kobj, &p->attr_pasid);
1070 	kobject_del(p->kobj_queues);
1071 	kobject_put(p->kobj_queues);
1072 	p->kobj_queues = NULL;
1073 
1074 	for (i = 0; i < p->n_pdds; i++) {
1075 		pdd = p->pdds[i];
1076 
1077 		sysfs_remove_file(p->kobj, &pdd->attr_vram);
1078 		sysfs_remove_file(p->kobj, &pdd->attr_sdma);
1079 
1080 		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
1081 		if (pdd->dev->kfd2kgd->get_cu_occupancy)
1082 			sysfs_remove_file(pdd->kobj_stats,
1083 					  &pdd->attr_cu_occupancy);
1084 		kobject_del(pdd->kobj_stats);
1085 		kobject_put(pdd->kobj_stats);
1086 		pdd->kobj_stats = NULL;
1087 	}
1088 
1089 	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
1090 		pdd = p->pdds[i];
1091 
1092 		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
1093 		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
1094 		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
1095 		kobject_del(pdd->kobj_counters);
1096 		kobject_put(pdd->kobj_counters);
1097 		pdd->kobj_counters = NULL;
1098 	}
1099 
1100 	kobject_del(p->kobj);
1101 	kobject_put(p->kobj);
1102 	p->kobj = NULL;
1103 }
1104 
1105 /* No process locking is needed in this function, because the process
1106  * is not findable any more. We must assume that no other thread is
1107  * using it any more, otherwise we couldn't safely free the process
1108  * structure in the end.
1109  */
1110 static void kfd_process_wq_release(struct work_struct *work)
1111 {
1112 	struct kfd_process *p = container_of(work, struct kfd_process,
1113 					     release_work);
1114 
1115 	kfd_process_dequeue_from_all_devices(p);
1116 	pqm_uninit(&p->pqm);
1117 
1118 	/* Signal the eviction fence after user mode queues are
1119 	 * destroyed. This allows any BOs to be freed without
1120 	 * triggering pointless evictions or waiting for fences.
1121 	 */
1122 	dma_fence_signal(p->ef);
1123 
1124 	kfd_process_remove_sysfs(p);
1125 	kfd_iommu_unbind_process(p);
1126 
1127 	kfd_process_kunmap_signal_bo(p);
1128 	kfd_process_free_outstanding_kfd_bos(p);
1129 	svm_range_list_fini(p);
1130 
1131 	kfd_process_destroy_pdds(p);
1132 	dma_fence_put(p->ef);
1133 
1134 	kfd_event_free_process(p);
1135 
1136 	kfd_pasid_free(p->pasid);
1137 	mutex_destroy(&p->mutex);
1138 
1139 	put_task_struct(p->lead_thread);
1140 
1141 	kfree(p);
1142 }
1143 
1144 static void kfd_process_ref_release(struct kref *ref)
1145 {
1146 	struct kfd_process *p = container_of(ref, struct kfd_process, ref);
1147 
1148 	INIT_WORK(&p->release_work, kfd_process_wq_release);
1149 	queue_work(kfd_process_wq, &p->release_work);
1150 }
1151 
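/*
 * create_process() adds the process to kfd_processes_table before calling
 * mmu_notifier_get(), so alloc_notifier can look up the process by mm and
 * return the notifier embedded in the kfd_process instead of allocating one.
 */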
1152 static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
1153 {
1154 	int idx = srcu_read_lock(&kfd_processes_srcu);
1155 	struct kfd_process *p = find_process_by_mm(mm);
1156 
1157 	srcu_read_unlock(&kfd_processes_srcu, idx);
1158 
1159 	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
1160 }
1161 
1162 static void kfd_process_free_notifier(struct mmu_notifier *mn)
1163 {
1164 	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
1165 }
1166 
1167 static void kfd_process_notifier_release_internal(struct kfd_process *p)
1168 {
1169 	int i;
1170 
1171 	cancel_delayed_work_sync(&p->eviction_work);
1172 	cancel_delayed_work_sync(&p->restore_work);
1173 
1174 	for (i = 0; i < p->n_pdds; i++) {
1175 		struct kfd_process_device *pdd = p->pdds[i];
1176 
1177 		/* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */
1178 		if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
1179 			amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
1180 	}
1181 
1182 	/* Indicate to other users that MM is no longer valid */
1183 	p->mm = NULL;
1184 	kfd_dbg_trap_disable(p);
1185 
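	/* If this process was acting as a debugger, disable the debug trap on
	 * every process it was still debugging.
	 */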
1186 	if (atomic_read(&p->debugged_process_count) > 0) {
1187 		struct kfd_process *target;
1188 		unsigned int temp;
1189 		int idx = srcu_read_lock(&kfd_processes_srcu);
1190 
1191 		hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
1192 			if (target->debugger_process && target->debugger_process == p) {
1193 				mutex_lock_nested(&target->mutex, 1);
1194 				kfd_dbg_trap_disable(target);
1195 				mutex_unlock(&target->mutex);
1196 				if (atomic_read(&p->debugged_process_count) == 0)
1197 					break;
1198 			}
1199 		}
1200 
1201 		srcu_read_unlock(&kfd_processes_srcu, idx);
1202 	}
1203 
1204 	mmu_notifier_put(&p->mmu_notifier);
1205 }
1206 
1207 static void kfd_process_notifier_release(struct mmu_notifier *mn,
1208 					struct mm_struct *mm)
1209 {
1210 	struct kfd_process *p;
1211 
	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier SRCU is read-locked.
	 */
1216 	p = container_of(mn, struct kfd_process, mmu_notifier);
1217 	if (WARN_ON(p->mm != mm))
1218 		return;
1219 
1220 	mutex_lock(&kfd_processes_mutex);
	/*
	 * Return early if the table is empty.
	 *
	 * This can happen if this function is called concurrently by the
	 * mmu_notifier and by kfd_cleanup_processes().
	 */
1228 	if (hash_empty(kfd_processes_table)) {
1229 		mutex_unlock(&kfd_processes_mutex);
1230 		return;
1231 	}
1232 	hash_del_rcu(&p->kfd_processes);
1233 	mutex_unlock(&kfd_processes_mutex);
1234 	synchronize_srcu(&kfd_processes_srcu);
1235 
1236 	kfd_process_notifier_release_internal(p);
1237 }
1238 
1239 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
1240 	.release = kfd_process_notifier_release,
1241 	.alloc_notifier = kfd_process_alloc_notifier,
1242 	.free_notifier = kfd_process_free_notifier,
1243 };
1244 
/*
 * This code handles the case where the driver is being unloaded before all
 * mm_structs are released. We need to safely free the kfd_process structures
 * and avoid races with mmu_notifier callbacks that might try to free them.
 */
1251 void kfd_cleanup_processes(void)
1252 {
1253 	struct kfd_process *p;
1254 	struct hlist_node *p_temp;
1255 	unsigned int temp;
1256 	HLIST_HEAD(cleanup_list);
1257 
	/*
	 * Move all remaining kfd_process entries from the process table to a
	 * temp list for processing. Once done, the mmu_notifier release
	 * callback will not find the kfd_process in the table and will return
	 * early, avoiding double-free issues.
	 */
1264 	mutex_lock(&kfd_processes_mutex);
1265 	hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
1266 		hash_del_rcu(&p->kfd_processes);
1267 		synchronize_srcu(&kfd_processes_srcu);
1268 		hlist_add_head(&p->kfd_processes, &cleanup_list);
1269 	}
1270 	mutex_unlock(&kfd_processes_mutex);
1271 
1272 	hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
1273 		kfd_process_notifier_release_internal(p);
1274 
1275 	/*
1276 	 * Ensures that all outstanding free_notifier get called, triggering
1277 	 * the release of the kfd_process struct.
1278 	 */
1279 	mmu_notifier_synchronize();
1280 }
1281 
1282 int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
1283 {
1284 	unsigned long  offset;
1285 	int i;
1286 
1287 	if (p->has_cwsr)
1288 		return 0;
1289 
1290 	for (i = 0; i < p->n_pdds; i++) {
1291 		struct kfd_node *dev = p->pdds[i]->dev;
1292 		struct qcm_process_device *qpd = &p->pdds[i]->qpd;
1293 
1294 		if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
1295 			continue;
1296 
1297 		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
1298 		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
1299 			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
1300 			MAP_SHARED, offset);
1301 
1302 		if (IS_ERR_VALUE(qpd->tba_addr)) {
1303 			int err = qpd->tba_addr;
1304 
1305 			pr_err("Failure to set tba address. error %d.\n", err);
1306 			qpd->tba_addr = 0;
1307 			qpd->cwsr_kaddr = NULL;
1308 			return err;
1309 		}
1310 
1311 		memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1312 
1313 		kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
1314 
1315 		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1316 		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1317 			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1318 	}
1319 
1320 	p->has_cwsr = true;
1321 
1322 	return 0;
1323 }
1324 
1325 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
1326 {
1327 	struct kfd_node *dev = pdd->dev;
1328 	struct qcm_process_device *qpd = &pdd->qpd;
1329 	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
1330 			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
1331 			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1332 	struct kgd_mem *mem;
1333 	void *kaddr;
1334 	int ret;
1335 
1336 	if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
1337 		return 0;
1338 
1339 	/* cwsr_base is only set for dGPU */
1340 	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
1341 				      KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
1342 	if (ret)
1343 		return ret;
1344 
1345 	qpd->cwsr_mem = mem;
1346 	qpd->cwsr_kaddr = kaddr;
1347 	qpd->tba_addr = qpd->cwsr_base;
1348 
1349 	memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1350 
1351 	kfd_process_set_trap_debug_flag(&pdd->qpd,
1352 					pdd->process->debug_trap_enabled);
1353 
1354 	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1355 	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1356 		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1357 
1358 	return 0;
1359 }
1360 
1361 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
1362 {
1363 	struct kfd_node *dev = pdd->dev;
1364 	struct qcm_process_device *qpd = &pdd->qpd;
1365 
1366 	if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
1367 		return;
1368 
1369 	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
1370 }
1371 
1372 void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
1373 				  uint64_t tba_addr,
1374 				  uint64_t tma_addr)
1375 {
1376 	if (qpd->cwsr_kaddr) {
1377 		/* KFD trap handler is bound, record as second-level TBA/TMA
1378 		 * in first-level TMA. First-level trap will jump to second.
1379 		 */
1380 		uint64_t *tma =
1381 			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1382 		tma[0] = tba_addr;
1383 		tma[1] = tma_addr;
1384 	} else {
1385 		/* No trap handler bound, bind as first-level TBA/TMA. */
1386 		qpd->tba_addr = tba_addr;
1387 		qpd->tma_addr = tma_addr;
1388 	}
1389 }
1390 
1391 bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
1392 {
1393 	int i;
1394 
1395 	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
1396 	 * boot time retry setting. Mixing processes with different
1397 	 * XNACK/retry settings can hang the GPU.
1398 	 *
1399 	 * Different GPUs can have different noretry settings depending
1400 	 * on HW bugs or limitations. We need to find at least one
1401 	 * XNACK mode for this process that's compatible with all GPUs.
1402 	 * Fortunately GPUs with retry enabled (noretry=0) can run code
1403 	 * built for XNACK-off. On GFXv9 it may perform slower.
1404 	 *
1405 	 * Therefore applications built for XNACK-off can always be
1406 	 * supported and will be our fallback if any GPU does not
1407 	 * support retry.
1408 	 */
1409 	for (i = 0; i < p->n_pdds; i++) {
1410 		struct kfd_node *dev = p->pdds[i]->dev;
1411 
1412 		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
1413 		 * support the SVM APIs and don't need to be considered
1414 		 * for the XNACK mode selection.
1415 		 */
1416 		if (!KFD_IS_SOC15(dev))
1417 			continue;
1418 		/* Aldebaran can always support XNACK because it can support
1419 		 * per-process XNACK mode selection. But let the dev->noretry
1420 		 * setting still influence the default XNACK mode.
1421 		 */
1422 		if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
1423 			continue;
1424 
1425 		/* GFXv10 and later GPUs do not support shader preemption
1426 		 * during page faults. This can lead to poor QoS for queue
1427 		 * management and memory-manager-related preemptions or
1428 		 * even deadlocks.
1429 		 */
1430 		if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
1431 			return false;
1432 
1433 		if (dev->kfd->noretry)
1434 			return false;
1435 	}
1436 
1437 	return true;
1438 }
1439 
1440 void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
1441 				     bool enabled)
1442 {
1443 	if (qpd->cwsr_kaddr) {
1444 		uint64_t *tma =
1445 			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1446 		tma[2] = enabled;
1447 	}
1448 }
1449 
1450 /*
1451  * On return the kfd_process is fully operational and will be freed when the
1452  * mm is released
1453  */
1454 static struct kfd_process *create_process(const struct task_struct *thread)
1455 {
1456 	struct kfd_process *process;
1457 	struct mmu_notifier *mn;
1458 	int err = -ENOMEM;
1459 
1460 	process = kzalloc(sizeof(*process), GFP_KERNEL);
1461 	if (!process)
1462 		goto err_alloc_process;
1463 
1464 	kref_init(&process->ref);
1465 	mutex_init(&process->mutex);
1466 	process->mm = thread->mm;
1467 	process->lead_thread = thread->group_leader;
1468 	process->n_pdds = 0;
1469 	process->queues_paused = false;
1470 	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
1471 	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
1472 	process->last_restore_timestamp = get_jiffies_64();
1473 	err = kfd_event_init_process(process);
1474 	if (err)
1475 		goto err_event_init;
1476 	process->is_32bit_user_mode = in_compat_syscall();
1477 	process->debug_trap_enabled = false;
1478 	process->debugger_process = NULL;
1479 	process->exception_enable_mask = 0;
1480 	atomic_set(&process->debugged_process_count, 0);
1481 	sema_init(&process->runtime_enable_sema, 0);
1482 
1483 	process->pasid = kfd_pasid_alloc();
1484 	if (process->pasid == 0) {
1485 		err = -ENOSPC;
1486 		goto err_alloc_pasid;
1487 	}
1488 
1489 	err = pqm_init(&process->pqm, process);
1490 	if (err != 0)
1491 		goto err_process_pqm_init;
1492 
	/* Init process apertures */
1494 	err = kfd_init_apertures(process);
1495 	if (err != 0)
1496 		goto err_init_apertures;
1497 
1498 	/* Check XNACK support after PDDs are created in kfd_init_apertures */
1499 	process->xnack_enabled = kfd_process_xnack_mode(process, false);
1500 
1501 	err = svm_range_list_init(process);
1502 	if (err)
1503 		goto err_init_svm_range_list;
1504 
1505 	/* alloc_notifier needs to find the process in the hash table */
1506 	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
1507 			(uintptr_t)process->mm);
1508 
	/* Take an extra reference so that free_notifier does not start
	 * kfd_process_wq_release if mmu_notifier_get fails because of a
	 * pending signal.
	 */
1512 	kref_get(&process->ref);
1513 
1514 	/* MMU notifier registration must be the last call that can fail
1515 	 * because after this point we cannot unwind the process creation.
1516 	 * After this point, mmu_notifier_put will trigger the cleanup by
1517 	 * dropping the last process reference in the free_notifier.
1518 	 */
1519 	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
1520 	if (IS_ERR(mn)) {
1521 		err = PTR_ERR(mn);
1522 		goto err_register_notifier;
1523 	}
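	/* alloc_notifier must have returned the notifier embedded in this process */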
1524 	BUG_ON(mn != &process->mmu_notifier);
1525 
1526 	kfd_unref_process(process);
1527 	get_task_struct(process->lead_thread);
1528 
1529 	INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
1530 
1531 	return process;
1532 
1533 err_register_notifier:
1534 	hash_del_rcu(&process->kfd_processes);
1535 	svm_range_list_fini(process);
1536 err_init_svm_range_list:
1537 	kfd_process_free_outstanding_kfd_bos(process);
1538 	kfd_process_destroy_pdds(process);
1539 err_init_apertures:
1540 	pqm_uninit(&process->pqm);
1541 err_process_pqm_init:
1542 	kfd_pasid_free(process->pasid);
1543 err_alloc_pasid:
1544 	kfd_event_free_process(process);
1545 err_event_init:
1546 	mutex_destroy(&process->mutex);
1547 	kfree(process);
1548 err_alloc_process:
1549 	return ERR_PTR(err);
1550 }
1551 
1552 struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
1553 							struct kfd_process *p)
1554 {
1555 	int i;
1556 
1557 	for (i = 0; i < p->n_pdds; i++)
1558 		if (p->pdds[i]->dev == dev)
1559 			return p->pdds[i];
1560 
1561 	return NULL;
1562 }
1563 
1564 struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
1565 							struct kfd_process *p)
1566 {
1567 	struct kfd_process_device *pdd = NULL;
1568 	int retval = 0;
1569 
1570 	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
1571 		return NULL;
1572 	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
1573 	if (!pdd)
1574 		return NULL;
1575 
1576 	pdd->dev = dev;
1577 	INIT_LIST_HEAD(&pdd->qpd.queues_list);
1578 	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
1579 	pdd->qpd.dqm = dev->dqm;
1580 	pdd->qpd.pqm = &p->pqm;
1581 	pdd->qpd.evicted = 0;
1582 	pdd->qpd.mapped_gws_queue = false;
1583 	pdd->process = p;
1584 	pdd->bound = PDD_UNBOUND;
1585 	pdd->already_dequeued = false;
1586 	pdd->runtime_inuse = false;
1587 	pdd->vram_usage = 0;
1588 	pdd->sdma_past_activity_counter = 0;
1589 	pdd->user_gpu_id = dev->id;
1590 	atomic64_set(&pdd->evict_duration_counter, 0);
1591 
1592 	if (dev->kfd->shared_resources.enable_mes) {
1593 		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
1594 						AMDGPU_MES_PROC_CTX_SIZE,
1595 						&pdd->proc_ctx_bo,
1596 						&pdd->proc_ctx_gpu_addr,
1597 						&pdd->proc_ctx_cpu_ptr,
1598 						false);
1599 		if (retval) {
1600 			pr_err("failed to allocate process context bo\n");
1601 			goto err_free_pdd;
1602 		}
1603 		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
1604 	}
1605 
1606 	p->pdds[p->n_pdds++] = pdd;
1607 	if (kfd_dbg_is_per_vmid_supported(pdd->dev))
1608 		pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
1609 							pdd->dev->adev,
1610 							false,
1611 							0);
1612 
1613 	/* Init idr used for memory handle translation */
1614 	idr_init(&pdd->alloc_idr);
1615 
1616 	return pdd;
1617 
1618 err_free_pdd:
1619 	kfree(pdd);
1620 	return NULL;
1621 }
1622 
1623 /**
1624  * kfd_process_device_init_vm - Initialize a VM for a process-device
1625  *
1626  * @pdd: The process-device
1627  * @drm_file: Optional pointer to a DRM file descriptor
1628  *
1629  * If @drm_file is specified, it will be used to acquire the VM from
1630  * that file descriptor. If successful, the @pdd takes ownership of
1631  * the file descriptor.
1632  *
1633  * If @drm_file is NULL, a new VM is created.
1634  *
1635  * Returns 0 on success, -errno on failure.
1636  */
1637 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
1638 			       struct file *drm_file)
1639 {
1640 	struct amdgpu_fpriv *drv_priv;
1641 	struct amdgpu_vm *avm;
1642 	struct kfd_process *p;
1643 	struct kfd_node *dev;
1644 	int ret;
1645 
1646 	if (!drm_file)
1647 		return -EINVAL;
1648 
1649 	if (pdd->drm_priv)
1650 		return -EBUSY;
1651 
1652 	ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
1653 	if (ret)
1654 		return ret;
1655 	avm = &drv_priv->vm;
1656 
1657 	p = pdd->process;
1658 	dev = pdd->dev;
1659 
1660 	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
1661 						     &p->kgd_process_info,
1662 						     &p->ef);
1663 	if (ret) {
1664 		pr_err("Failed to create process VM object\n");
1665 		return ret;
1666 	}
1667 	pdd->drm_priv = drm_file->private_data;
1668 	atomic64_set(&pdd->tlb_seq, 0);
1669 
1670 	ret = kfd_process_device_reserve_ib_mem(pdd);
1671 	if (ret)
1672 		goto err_reserve_ib_mem;
1673 	ret = kfd_process_device_init_cwsr_dgpu(pdd);
1674 	if (ret)
1675 		goto err_init_cwsr;
1676 
1677 	ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
1678 	if (ret)
1679 		goto err_set_pasid;
1680 
1681 	pdd->drm_file = drm_file;
1682 
1683 	return 0;
1684 
1685 err_set_pasid:
1686 	kfd_process_device_destroy_cwsr_dgpu(pdd);
1687 err_init_cwsr:
1688 	kfd_process_device_destroy_ib_mem(pdd);
1689 err_reserve_ib_mem:
1690 	pdd->drm_priv = NULL;
1691 	amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
1692 
1693 	return ret;
1694 }
1695 
1696 /*
1697  * Direct the IOMMU to bind the process (specifically the pasid->mm)
1698  * to the device.
1699  * Unbinding occurs when the process dies or the device is removed.
1700  *
1701  * Assumes that the process lock is held.
1702  */
1703 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
1704 							struct kfd_process *p)
1705 {
1706 	struct kfd_process_device *pdd;
1707 	int err;
1708 
1709 	pdd = kfd_get_process_device_data(dev, p);
1710 	if (!pdd) {
1711 		pr_err("Process device data doesn't exist\n");
1712 		return ERR_PTR(-ENOMEM);
1713 	}
1714 
1715 	if (!pdd->drm_priv)
1716 		return ERR_PTR(-ENODEV);
1717 
	/*
	 * Signal the runtime-pm system to auto-resume and to prevent further
	 * runtime suspends once the device pdd is created, until the pdd is
	 * destroyed.
	 */
1723 	if (!pdd->runtime_inuse) {
1724 		err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
1725 		if (err < 0) {
1726 			pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
1727 			return ERR_PTR(err);
1728 		}
1729 	}
1730 
1731 	err = kfd_iommu_bind_process_to_device(pdd);
1732 	if (err)
1733 		goto out;
1734 
	/*
	 * Make sure that the runtime usage counter is incremented only once
	 * per pdd.
	 */
1739 	pdd->runtime_inuse = true;
1740 
1741 	return pdd;
1742 
1743 out:
1744 	/* balance runpm reference count and exit with error */
1745 	if (!pdd->runtime_inuse) {
1746 		pm_runtime_mark_last_busy(adev_to_drm(dev->adev)->dev);
1747 		pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
1748 	}
1749 
1750 	return ERR_PTR(err);
1751 }
1752 
1753 /* Create specific handle mapped to mem from process local memory idr
1754  * Assumes that the process lock is held.
1755  */
1756 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
1757 					void *mem)
1758 {
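	/* end == 0 lets the idr allocate any available handle >= 0 */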
1759 	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
1760 }
1761 
1762 /* Translate specific handle from process local memory idr
1763  * Assumes that the process lock is held.
1764  */
1765 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
1766 					int handle)
1767 {
1768 	if (handle < 0)
1769 		return NULL;
1770 
1771 	return idr_find(&pdd->alloc_idr, handle);
1772 }
1773 
1774 /* Remove specific handle from process local memory idr
1775  * Assumes that the process lock is held.
1776  */
1777 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
1778 					int handle)
1779 {
1780 	if (handle >= 0)
1781 		idr_remove(&pdd->alloc_idr, handle);
1782 }
1783 
1784 /* This increments the process->ref counter. */
1785 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
1786 {
1787 	struct kfd_process *p, *ret_p = NULL;
1788 	unsigned int temp;
1789 
1790 	int idx = srcu_read_lock(&kfd_processes_srcu);
1791 
1792 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1793 		if (p->pasid == pasid) {
1794 			kref_get(&p->ref);
1795 			ret_p = p;
1796 			break;
1797 		}
1798 	}
1799 
1800 	srcu_read_unlock(&kfd_processes_srcu, idx);
1801 
1802 	return ret_p;
1803 }
1804 
1805 /* This increments the process->ref counter. */
1806 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
1807 {
1808 	struct kfd_process *p;
1809 
1810 	int idx = srcu_read_lock(&kfd_processes_srcu);
1811 
1812 	p = find_process_by_mm(mm);
1813 	if (p)
1814 		kref_get(&p->ref);
1815 
1816 	srcu_read_unlock(&kfd_processes_srcu, idx);
1817 
1818 	return p;
1819 }
1820 
1821 /* kfd_process_evict_queues - Evict all user queues of a process
1822  *
1823  * Eviction is reference-counted per process-device. This means multiple
1824  * evictions from different sources can be nested safely.
1825  */
1826 int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
1827 {
1828 	int r = 0;
1829 	int i;
1830 	unsigned int n_evicted = 0;
1831 
1832 	for (i = 0; i < p->n_pdds; i++) {
1833 		struct kfd_process_device *pdd = p->pdds[i];
1834 
1835 		kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
1836 					     trigger);
1837 
1838 		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
1839 							    &pdd->qpd);
		/* Eviction returns -EIO if HWS is hung or the ASIC is
		 * resetting. In this case we still want to mark all queues as
		 * evicted, to prevent them from being added back, since they
		 * have not actually been saved right now.
		 */
1844 		if (r && r != -EIO) {
1845 			pr_err("Failed to evict process queues\n");
1846 			goto fail;
1847 		}
1848 		n_evicted++;
1849 	}
1850 
1851 	return r;
1852 
1853 fail:
1854 	/* To keep state consistent, roll back partial eviction by
1855 	 * restoring queues
1856 	 */
1857 	for (i = 0; i < p->n_pdds; i++) {
1858 		struct kfd_process_device *pdd = p->pdds[i];
1859 
1860 		if (n_evicted == 0)
1861 			break;
1862 
1863 		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1864 
1865 		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1866 							      &pdd->qpd))
1867 			pr_err("Failed to restore queues\n");
1868 
1869 		n_evicted--;
1870 	}
1871 
1872 	return r;
1873 }
1874 
1875 /* kfd_process_restore_queues - Restore all user queues of a process */
1876 int kfd_process_restore_queues(struct kfd_process *p)
1877 {
1878 	int r, ret = 0;
1879 	int i;
1880 
1881 	for (i = 0; i < p->n_pdds; i++) {
1882 		struct kfd_process_device *pdd = p->pdds[i];
1883 
1884 		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1885 
1886 		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1887 							      &pdd->qpd);
1888 		if (r) {
1889 			pr_err("Failed to restore process queues\n");
1890 			if (!ret)
1891 				ret = r;
1892 		}
1893 	}
1894 
1895 	return ret;
1896 }
1897 
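/* Return the index into p->pdds[] of the device whose user-visible GPU ID
 * matches gpu_id, or -EINVAL if this process has no such device.
 */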
1898 int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
1899 {
1900 	int i;
1901 
1902 	for (i = 0; i < p->n_pdds; i++)
1903 		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
1904 			return i;
1905 	return -EINVAL;
1906 }
1907 
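/* Look up the user-visible GPU ID and pdds[] index for a given KFD node.
 * Returns 0 on success, or -EINVAL if this process has no pdd for that node.
 */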
1908 int
1909 kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
1910 			    uint32_t *gpuid, uint32_t *gpuidx)
1911 {
1912 	int i;
1913 
1914 	for (i = 0; i < p->n_pdds; i++)
1915 		if (p->pdds[i] && p->pdds[i]->dev == node) {
1916 			*gpuid = p->pdds[i]->user_gpu_id;
1917 			*gpuidx = i;
1918 			return 0;
1919 		}
1920 	return -EINVAL;
1921 }
1922 
1923 static void evict_process_worker(struct work_struct *work)
1924 {
1925 	int ret;
1926 	struct kfd_process *p;
1927 	struct delayed_work *dwork;
1928 
1929 	dwork = to_delayed_work(work);
1930 
1931 	/* Process termination destroys this worker thread. So during the
1932 	 * lifetime of this thread, kfd_process p will be valid
1933 	 */
1934 	p = container_of(dwork, struct kfd_process, eviction_work);
1935 	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
1936 		  "Eviction fence mismatch\n");
1937 
1938 	/* A narrow window of overlap between the restore and evict work
1939 	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
1940 	 * unreserves the KFD BOs, the process can be evicted again before
1941 	 * the restore has fully finished. So wait for any previous restore
1942 	 * work to complete first.
1943 	 */
1944 	flush_delayed_work(&p->restore_work);
1945 
1946 	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
1947 	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
1948 	if (!ret) {
1949 		dma_fence_signal(p->ef);
1950 		dma_fence_put(p->ef);
1951 		p->ef = NULL;
1952 		queue_delayed_work(kfd_restore_wq, &p->restore_work,
1953 				msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
1954 
1955 		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
1956 	} else
1957 		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
1958 }
1959 
1960 static void restore_process_worker(struct work_struct *work)
1961 {
1962 	struct delayed_work *dwork;
1963 	struct kfd_process *p;
1964 	int ret = 0;
1965 
1966 	dwork = to_delayed_work(work);
1967 
1968 	/* Process termination destroys this worker thread. So during the
1969 	 * lifetime of this thread, kfd_process p will be valid
1970 	 */
1971 	p = container_of(dwork, struct kfd_process, restore_work);
1972 	pr_debug("Started restoring pasid 0x%x\n", p->pasid);
1973 
1974 	/* Set last_restore_timestamp before the restore actually succeeds.
1975 	 * Otherwise it would have to be set by KGD (restore_process_bos)
1976 	 * before the KFD BOs are unreserved; if not, the process could be
1977 	 * evicted again before the timestamp is set.
1978 	 * If the restore fails, the timestamp is set again on the next
1979 	 * attempt, so the minimum GPU quantum becomes
1980 	 * PROCESS_ACTIVE_TIME_MS minus the time it takes to execute the
1981 	 * following two functions.
1982 	 */
1983 
1984 	p->last_restore_timestamp = get_jiffies_64();
1985 	/* VMs may not have been acquired yet during debugging. */
1986 	if (p->kgd_process_info)
1987 		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
1988 							     &p->ef);
1989 	if (ret) {
1990 		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
1991 			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
1992 		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
1993 				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
1994 		WARN(!ret, "reschedule restore work failed\n");
1995 		return;
1996 	}
1997 
1998 	ret = kfd_process_restore_queues(p);
1999 	if (!ret)
2000 		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
2001 	else
2002 		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
2003 }
2004 
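/* Evict the user queues of every known process and signal and drop each
 * process's eviction fence. Any pending eviction/restore work is cancelled
 * or flushed first. Used when suspending the devices.
 */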
2005 void kfd_suspend_all_processes(void)
2006 {
2007 	struct kfd_process *p;
2008 	unsigned int temp;
2009 	int idx = srcu_read_lock(&kfd_processes_srcu);
2010 
2011 	WARN(debug_evictions, "Evicting all processes");
2012 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2013 		cancel_delayed_work_sync(&p->eviction_work);
2014 		flush_delayed_work(&p->restore_work);
2015 
2016 		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
2017 			pr_err("Failed to suspend process 0x%x\n", p->pasid);
2018 		dma_fence_signal(p->ef);
2019 		dma_fence_put(p->ef);
2020 		p->ef = NULL;
2021 	}
2022 	srcu_read_unlock(&kfd_processes_srcu, idx);
2023 }
2024 
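/* Schedule restore work for every known process. Returns -EFAULT if the
 * restore work of any process could not be queued.
 */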
2025 int kfd_resume_all_processes(void)
2026 {
2027 	struct kfd_process *p;
2028 	unsigned int temp;
2029 	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
2030 
2031 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2032 		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
2033 			pr_err("Restore of pasid 0x%x failed during resume\n",
2034 			       p->pasid);
2035 			ret = -EFAULT;
2036 		}
2037 	}
2038 	srcu_read_unlock(&kfd_processes_srcu, idx);
2039 	return ret;
2040 }
2041 
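/* Allocate a zeroed per-process CWSR (context save/restore) buffer and map
 * it into the given user VMA, which must be exactly KFD_CWSR_TBA_TMA_SIZE
 * bytes long.
 */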
2042 int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
2043 			  struct vm_area_struct *vma)
2044 {
2045 	struct kfd_process_device *pdd;
2046 	struct qcm_process_device *qpd;
2047 
2048 	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
2049 		pr_err("Incorrect CWSR mapping size.\n");
2050 		return -EINVAL;
2051 	}
2052 
2053 	pdd = kfd_get_process_device_data(dev, process);
2054 	if (!pdd)
2055 		return -EINVAL;
2056 	qpd = &pdd->qpd;
2057 
2058 	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2059 					get_order(KFD_CWSR_TBA_TMA_SIZE));
2060 	if (!qpd->cwsr_kaddr) {
2061 		pr_err("Error allocating per process CWSR buffer.\n");
2062 		return -ENOMEM;
2063 	}
2064 
2065 	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
2066 		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
2067 	/* Map the CWSR pages into the user process */
2068 	return remap_pfn_range(vma, vma->vm_start,
2069 			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
2070 			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
2071 }
2072 
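/* Flush the GPU TLBs for this process's VM on the given device, but only if
 * the VM's TLB sequence number has advanced since the last flush recorded in
 * the pdd. Without HWS the flush is per VMID; otherwise it is per PASID on
 * each XCC instance.
 */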
2073 void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
2074 {
2075 	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
2076 	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
2077 	struct kfd_node *dev = pdd->dev;
2078 	uint32_t xcc_mask = dev->xcc_mask;
2079 	int xcc = 0;
2080 
2081 	/*
2082 	 * We may race and lose here, but that is extremely unlikely, and the
2083 	 * worst that can happen is that we flush the changes into the TLB
2084 	 * once more, which is harmless.
2085 	 */
2086 	if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
2087 		return;
2088 
2089 	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
2090 		/* Nothing to flush until a VMID is assigned, which
2091 		 * only happens when the first queue is created.
2092 		 */
2093 		if (pdd->qpd.vmid)
2094 			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
2095 							pdd->qpd.vmid);
2096 	} else {
2097 		for_each_inst(xcc, xcc_mask)
2098 			amdgpu_amdkfd_flush_gpu_tlb_pasid(
2099 				dev->adev, pdd->process->pasid, type, xcc);
2100 	}
2101 }
2102 
2103 /* Assumes the caller holds the process lock. */
2104 int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
2105 {
2106 	uint32_t irq_drain_fence[8];
2107 	uint8_t node_id = 0;
2108 	int r = 0;
2109 
2110 	if (!KFD_IS_SOC15(pdd->dev))
2111 		return 0;
2112 
2113 	pdd->process->irq_drain_is_open = true;
2114 
2115 	memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
2116 	irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
2117 							KFD_IRQ_FENCE_CLIENTID;
2118 	irq_drain_fence[3] = pdd->process->pasid;
2119 
2120 	/*
2121 	 * For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
2122 	 */
2123 	if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3)) {
2124 		node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
2125 		irq_drain_fence[3] |= node_id << 16;
2126 	}
2127 
2128 	/* Send a drain fence so stale KFD interrupts are processed before continuing. */
2129 	if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
2130 						     irq_drain_fence)) {
2131 		pdd->process->irq_drain_is_open = false;
2132 		return 0;
2133 	}
2134 
2135 	r = wait_event_interruptible(pdd->process->wait_irq_drain,
2136 				     !READ_ONCE(pdd->process->irq_drain_is_open));
2137 	if (r)
2138 		pdd->process->irq_drain_is_open = false;
2139 
2140 	return r;
2141 }
2142 
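/* Mark the interrupt drain of the process with this PASID as complete and
 * wake up any waiter in kfd_process_drain_interrupts().
 */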
2143 void kfd_process_close_interrupt_drain(unsigned int pasid)
2144 {
2145 	struct kfd_process *p;
2146 
2147 	p = kfd_lookup_process_by_pasid(pasid);
2148 
2149 	if (!p)
2150 		return;
2151 
2152 	WRITE_ONCE(p->irq_drain_is_open, false);
2153 	wake_up_all(&p->wait_irq_drain);
2154 	kfd_unref_process(p);
2155 }
2156 
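/* Work item and parameters used to deliver an exception to the runtime in
 * the context of the target process.
 */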
2157 struct send_exception_work_handler_workarea {
2158 	struct work_struct work;
2159 	struct kfd_process *p;
2160 	unsigned int queue_id;
2161 	uint64_t error_reason;
2162 };
2163 
2164 static void send_exception_work_handler(struct work_struct *work)
2165 {
2166 	struct send_exception_work_handler_workarea *workarea;
2167 	struct kfd_process *p;
2168 	struct queue *q;
2169 	struct mm_struct *mm;
2170 	struct kfd_context_save_area_header __user *csa_header;
2171 	uint64_t __user *err_payload_ptr;
2172 	uint64_t cur_err;
2173 	uint32_t ev_id;
2174 
2175 	workarea = container_of(work,
2176 				struct send_exception_work_handler_workarea,
2177 				work);
2178 	p = workarea->p;
2179 
2180 	mm = get_task_mm(p->lead_thread);
2181 
2182 	if (!mm)
2183 		return;
2184 
2185 	kthread_use_mm(mm);
2186 
2187 	q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
2188 
2189 	if (!q)
2190 		goto out;
2191 
2192 	csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
2193 
2194 	get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
2195 	get_user(cur_err, err_payload_ptr);
2196 	cur_err |= workarea->error_reason;
2197 	put_user(cur_err, err_payload_ptr);
2198 	get_user(ev_id, &csa_header->err_event_id);
2199 
2200 	kfd_set_event(p, ev_id);
2201 
2202 out:
2203 	kthread_unuse_mm(mm);
2204 	mmput(mm);
2205 }
2206 
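/* Queue send_exception_work_handler() and wait for it to complete, so that
 * the error reason is OR'ed into the queue's context save area and the
 * corresponding event is signalled in the target process's mm context.
 */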
2207 int kfd_send_exception_to_runtime(struct kfd_process *p,
2208 			unsigned int queue_id,
2209 			uint64_t error_reason)
2210 {
2211 	struct send_exception_work_handler_workarea worker;
2212 
2213 	INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
2214 
2215 	worker.p = p;
2216 	worker.queue_id = queue_id;
2217 	worker.error_reason = error_reason;
2218 
2219 	schedule_work(&worker.work);
2220 	flush_work(&worker.work);
2221 	destroy_work_on_stack(&worker.work);
2222 
2223 	return 0;
2224 }
2225 
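/* Return the per-device data matching the user-visible GPU ID, or NULL if
 * there is none. A gpu_id of 0 is treated as invalid.
 */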
2226 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
2227 {
2228 	int i;
2229 
2230 	if (gpu_id) {
2231 		for (i = 0; i < p->n_pdds; i++) {
2232 			struct kfd_process_device *pdd = p->pdds[i];
2233 
2234 			if (pdd->user_gpu_id == gpu_id)
2235 				return pdd;
2236 		}
2237 	}
2238 	return NULL;
2239 }
2240 
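/* Translate an actual (driver-assigned) GPU ID into this process's
 * user-visible GPU ID. Returns -EINVAL if no device matches; an
 * actual_gpu_id of 0 translates to 0.
 */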
2241 int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
2242 {
2243 	int i;
2244 
2245 	if (!actual_gpu_id)
2246 		return 0;
2247 
2248 	for (i = 0; i < p->n_pdds; i++) {
2249 		struct kfd_process_device *pdd = p->pdds[i];
2250 
2251 		if (pdd->dev->id == actual_gpu_id)
2252 			return pdd->user_gpu_id;
2253 	}
2254 	return -EINVAL;
2255 }
2256 
2257 #if defined(CONFIG_DEBUG_FS)
2258 
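/* Dump the MQDs of every process's queues into the debugfs seq_file. */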
2259 int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
2260 {
2261 	struct kfd_process *p;
2262 	unsigned int temp;
2263 	int r = 0;
2264 
2265 	int idx = srcu_read_lock(&kfd_processes_srcu);
2266 
2267 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2268 		seq_printf(m, "Process %d PASID 0x%x:\n",
2269 			   p->lead_thread->tgid, p->pasid);
2270 
2271 		mutex_lock(&p->mutex);
2272 		r = pqm_debugfs_mqds(m, &p->pqm);
2273 		mutex_unlock(&p->mutex);
2274 
2275 		if (r)
2276 			break;
2277 	}
2278 
2279 	srcu_read_unlock(&kfd_processes_srcu, idx);
2280 
2281 	return r;
2282 }
2283 
2284 #endif
2285