1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/mutex.h>
25 #include <linux/log2.h>
26 #include <linux/sched.h>
27 #include <linux/sched/mm.h>
28 #include <linux/sched/task.h>
29 #include <linux/mmu_context.h>
30 #include <linux/slab.h>
31 #include <linux/notifier.h>
32 #include <linux/compat.h>
33 #include <linux/mman.h>
34 #include <linux/file.h>
35 #include <linux/pm_runtime.h>
36 #include "amdgpu_amdkfd.h"
37 #include "amdgpu.h"
38
39 struct mm_struct;
40
41 #include "kfd_priv.h"
42 #include "kfd_device_queue_manager.h"
43 #include "kfd_svm.h"
44 #include "kfd_smi_events.h"
45 #include "kfd_debug.h"
46
47 /*
48 * List of struct kfd_process (field kfd_processes).
49 * Unique/indexed by mm_struct*
50 */
51 DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
52 DEFINE_MUTEX(kfd_processes_mutex);
53
54 DEFINE_SRCU(kfd_processes_srcu);
55
56 /* For process termination handling */
57 static struct workqueue_struct *kfd_process_wq;
58
59 /* Ordered, single-threaded workqueue for restoring evicted
60 * processes. Restoring multiple processes concurrently under memory
61 * pressure can lead to processes blocking each other from validating
62 * their BOs and result in a live-lock situation where processes
63 * remain evicted indefinitely.
64 */
65 static struct workqueue_struct *kfd_restore_wq;
66
67 static struct kfd_process *find_process(const struct task_struct *thread,
68 bool ref);
69 static void kfd_process_ref_release(struct kref *ref);
70 static struct kfd_process *create_process(const struct task_struct *thread);
71
72 static void evict_process_worker(struct work_struct *work);
73 static void restore_process_worker(struct work_struct *work);
74
75 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
76
77 struct kfd_procfs_tree {
78 struct kobject *kobj;
79 };
80
81 static struct kfd_procfs_tree procfs;
82
83 /*
84 * Structure for SDMA activity tracking
85 */
86 struct kfd_sdma_activity_handler_workarea {
87 struct work_struct sdma_activity_work;
88 struct kfd_process_device *pdd;
89 uint64_t sdma_activity_counter;
90 };
91
92 struct temp_sdma_queue_list {
93 uint64_t __user *rptr;
94 uint64_t sdma_val;
95 unsigned int queue_id;
96 struct list_head list;
97 };
98
99 static void kfd_sdma_activity_worker(struct work_struct *work)
100 {
101 struct kfd_sdma_activity_handler_workarea *workarea;
102 struct kfd_process_device *pdd;
103 uint64_t val;
104 struct mm_struct *mm;
105 struct queue *q;
106 struct qcm_process_device *qpd;
107 struct device_queue_manager *dqm;
108 int ret = 0;
109 struct temp_sdma_queue_list sdma_q_list;
110 struct temp_sdma_queue_list *sdma_q, *next;
111
112 workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
113 sdma_activity_work);
114
115 pdd = workarea->pdd;
116 if (!pdd)
117 return;
118 dqm = pdd->dev->dqm;
119 qpd = &pdd->qpd;
120 if (!dqm || !qpd)
121 return;
122 /*
123 * Total SDMA activity is current SDMA activity + past SDMA activity
124 * Past SDMA count is stored in pdd.
125 * To get the current activity counters for all active SDMA queues,
126 * we loop over all SDMA queues and get their counts from user-space.
127 *
128 * We cannot call get_user() with dqm_lock held as it can cause
129 * a circular lock dependency situation. To read the SDMA stats,
130 * we need to do the following:
131 *
132 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
133 * with dqm_lock/dqm_unlock().
134 * 2. Call get_user() for each node in temporary list without dqm_lock.
135 * Save the SDMA count for each node and also add it to the total
136 * SDMA count.
137 * It's possible that, during this step, a few SDMA queue nodes were
138 * deleted from the qpd->queues_list.
139 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
140 * If any node got deleted, its SDMA count would be captured in the sdma
141 * past activity counter. So subtract the SDMA counter stored in step 2
142 * for this node from the total SDMA count.
143 */
144 INIT_LIST_HEAD(&sdma_q_list.list);
145
146 /*
147 * Create the temp list of all SDMA queues
148 */
149 dqm_lock(dqm);
150
151 list_for_each_entry(q, &qpd->queues_list, list) {
152 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
153 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
154 continue;
155
156 sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
157 if (!sdma_q) {
158 dqm_unlock(dqm);
159 goto cleanup;
160 }
161
162 INIT_LIST_HEAD(&sdma_q->list);
163 sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
164 sdma_q->queue_id = q->properties.queue_id;
165 list_add_tail(&sdma_q->list, &sdma_q_list.list);
166 }
167
168 /*
169 * If the temp list is empty, then no SDMA queue nodes were found in
170 * qpd->queues_list. Return the past activity count as the total SDMA
171 * count.
172 */
173 if (list_empty(&sdma_q_list.list)) {
174 workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
175 dqm_unlock(dqm);
176 return;
177 }
178
179 dqm_unlock(dqm);
180
181 /*
182 * Get the usage count for each SDMA queue in temp_list.
183 */
184 mm = get_task_mm(pdd->process->lead_thread);
185 if (!mm)
186 goto cleanup;
187
188 kthread_use_mm(mm);
189
190 list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
191 val = 0;
192 ret = read_sdma_queue_counter(sdma_q->rptr, &val);
193 if (ret) {
194 pr_debug("Failed to read SDMA queue active counter for queue id: %d",
195 sdma_q->queue_id);
196 } else {
197 sdma_q->sdma_val = val;
198 workarea->sdma_activity_counter += val;
199 }
200 }
201
202 kthread_unuse_mm(mm);
203 mmput(mm);
204
205 /*
206 * Do a second iteration over qpd->queues_list to check if any SDMA
207 * nodes were deleted while fetching the SDMA counters.
208 */
209 dqm_lock(dqm);
210
211 workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;
212
213 list_for_each_entry(q, &qpd->queues_list, list) {
214 if (list_empty(&sdma_q_list.list))
215 break;
216
217 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
218 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
219 continue;
220
221 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
222 if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
223 (sdma_q->queue_id == q->properties.queue_id)) {
224 list_del(&sdma_q->list);
225 kfree(sdma_q);
226 break;
227 }
228 }
229 }
230
231 dqm_unlock(dqm);
232
233 /*
234 * If temp list is not empty, it implies some queues got deleted
235 * from qpd->queues_list during SDMA usage read. Subtract the SDMA
236 * count for each node from the total SDMA count.
237 */
238 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
239 workarea->sdma_activity_counter -= sdma_q->sdma_val;
240 list_del(&sdma_q->list);
241 kfree(sdma_q);
242 }
243
244 return;
245
246 cleanup:
247 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
248 list_del(&sdma_q->list);
249 kfree(sdma_q);
250 }
251 }
252
253 /**
254 * kfd_get_cu_occupancy - Collect the number of waves in flight on this device
255 * by the current process and translate the wave count into the number of
256 * compute units that are occupied.
257 *
258 * @attr: Handle of the attribute that allows reporting of the wave count. The
259 * attribute handle encapsulates the GPU device it is associated with, thereby
260 * allowing collection of waves in flight, etc.
261 * @buffer: Handle of user provided buffer updated with wave count
262 *
263 * Return: Number of bytes written to user buffer or an error value
264 */
265 static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
266 {
267 int cu_cnt;
268 int wave_cnt;
269 int max_waves_per_cu;
270 struct kfd_node *dev = NULL;
271 struct kfd_process *proc = NULL;
272 struct kfd_process_device *pdd = NULL;
273
274 pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
275 dev = pdd->dev;
276 if (dev->kfd2kgd->get_cu_occupancy == NULL)
277 return -EINVAL;
278
279 cu_cnt = 0;
280 proc = pdd->process;
281 if (pdd->qpd.queue_count == 0) {
282 pr_debug("Gpu-Id: %d has no active queues for process %d\n",
283 dev->id, proc->pasid);
284 return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
285 }
286
287 /* Collect wave count from the device if it supports it */
288 wave_cnt = 0;
289 max_waves_per_cu = 0;
290 dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
291 &max_waves_per_cu, 0);
292
293 /* Translate wave count to number of compute units */
294 cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
295 return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
296 }
297
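/* Show handler for the per-process sysfs attributes: "pasid" reports the
 * process PASID, "vram_<gpuid>" the per-device VRAM usage, and "sdma_<gpuid>"
 * the accumulated SDMA activity, which is collected in a worker so that the
 * user-space read pointers can be accessed without holding the dqm lock.
 */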
298 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
299 char *buffer)
300 {
301 if (strcmp(attr->name, "pasid") == 0) {
302 struct kfd_process *p = container_of(attr, struct kfd_process,
303 attr_pasid);
304
305 return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
306 } else if (strncmp(attr->name, "vram_", 5) == 0) {
307 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
308 attr_vram);
309 return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
310 } else if (strncmp(attr->name, "sdma_", 5) == 0) {
311 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
312 attr_sdma);
313 struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
314
315 INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work,
316 kfd_sdma_activity_worker);
317
318 sdma_activity_work_handler.pdd = pdd;
319 sdma_activity_work_handler.sdma_activity_counter = 0;
320
321 schedule_work(&sdma_activity_work_handler.sdma_activity_work);
322
323 flush_work(&sdma_activity_work_handler.sdma_activity_work);
324 destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work);
325
326 return snprintf(buffer, PAGE_SIZE, "%llu\n",
327 (sdma_activity_work_handler.sdma_activity_counter)/
328 SDMA_ACTIVITY_DIVISOR);
329 } else {
330 pr_err("Invalid attribute");
331 return -EINVAL;
332 }
333
334 return 0;
335 }
336
337 static void kfd_procfs_kobj_release(struct kobject *kobj)
338 {
339 kfree(kobj);
340 }
341
342 static const struct sysfs_ops kfd_procfs_ops = {
343 .show = kfd_procfs_show,
344 };
345
346 static const struct kobj_type procfs_type = {
347 .release = kfd_procfs_kobj_release,
348 .sysfs_ops = &kfd_procfs_ops,
349 };
350
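/* Create the "proc" kobject under the KFD device; per-process directories
 * are added beneath it when processes are created.
 */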
351 void kfd_procfs_init(void)
352 {
353 int ret = 0;
354
355 procfs.kobj = kfd_alloc_struct(procfs.kobj);
356 if (!procfs.kobj)
357 return;
358
359 ret = kobject_init_and_add(procfs.kobj, &procfs_type,
360 &kfd_device->kobj, "proc");
361 if (ret) {
362 pr_warn("Could not create procfs proc folder");
363 /* If we fail to create the procfs, clean up */
364 kfd_procfs_shutdown();
365 }
366 }
367
368 void kfd_procfs_shutdown(void)
369 {
370 if (procfs.kobj) {
371 kobject_del(procfs.kobj);
372 kobject_put(procfs.kobj);
373 procfs.kobj = NULL;
374 }
375 }
376
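/* Show handler for per-queue attributes: "size", "type" and "gpuid". */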
377 static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
378 struct attribute *attr, char *buffer)
379 {
380 struct queue *q = container_of(kobj, struct queue, kobj);
381
382 if (!strcmp(attr->name, "size"))
383 return snprintf(buffer, PAGE_SIZE, "%llu",
384 q->properties.queue_size);
385 else if (!strcmp(attr->name, "type"))
386 return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
387 else if (!strcmp(attr->name, "gpuid"))
388 return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
389 else
390 pr_err("Invalid attribute");
391
392 return 0;
393 }
394
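/* Show handler for per-device stats: eviction time ("evicted_ms") and
 * compute unit occupancy ("cu_occupancy").
 */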
395 static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
396 struct attribute *attr, char *buffer)
397 {
398 if (strcmp(attr->name, "evicted_ms") == 0) {
399 struct kfd_process_device *pdd = container_of(attr,
400 struct kfd_process_device,
401 attr_evict);
402 uint64_t evict_jiffies;
403
404 evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
405
406 return snprintf(buffer,
407 PAGE_SIZE,
408 "%llu\n",
409 jiffies64_to_msecs(evict_jiffies));
410
411 /* Sysfs handle that gets CU occupancy is per device */
412 } else if (strcmp(attr->name, "cu_occupancy") == 0) {
413 return kfd_get_cu_occupancy(attr, buffer);
414 } else {
415 pr_err("Invalid attribute");
416 }
417
418 return 0;
419 }
420
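/* Show handler for per-device SVM counters: "faults", "page_in" and
 * "page_out".
 */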
421 static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
422 struct attribute *attr, char *buf)
423 {
424 struct kfd_process_device *pdd;
425
426 if (!strcmp(attr->name, "faults")) {
427 pdd = container_of(attr, struct kfd_process_device,
428 attr_faults);
429 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
430 }
431 if (!strcmp(attr->name, "page_in")) {
432 pdd = container_of(attr, struct kfd_process_device,
433 attr_page_in);
434 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
435 }
436 if (!strcmp(attr->name, "page_out")) {
437 pdd = container_of(attr, struct kfd_process_device,
438 attr_page_out);
439 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
440 }
441 return 0;
442 }
443
444 static struct attribute attr_queue_size = {
445 .name = "size",
446 .mode = KFD_SYSFS_FILE_MODE
447 };
448
449 static struct attribute attr_queue_type = {
450 .name = "type",
451 .mode = KFD_SYSFS_FILE_MODE
452 };
453
454 static struct attribute attr_queue_gpuid = {
455 .name = "gpuid",
456 .mode = KFD_SYSFS_FILE_MODE
457 };
458
459 static struct attribute *procfs_queue_attrs[] = {
460 &attr_queue_size,
461 &attr_queue_type,
462 &attr_queue_gpuid,
463 NULL
464 };
465 ATTRIBUTE_GROUPS(procfs_queue);
466
467 static const struct sysfs_ops procfs_queue_ops = {
468 .show = kfd_procfs_queue_show,
469 };
470
471 static const struct kobj_type procfs_queue_type = {
472 .sysfs_ops = &procfs_queue_ops,
473 .default_groups = procfs_queue_groups,
474 };
475
476 static const struct sysfs_ops procfs_stats_ops = {
477 .show = kfd_procfs_stats_show,
478 };
479
480 static const struct kobj_type procfs_stats_type = {
481 .sysfs_ops = &procfs_stats_ops,
482 .release = kfd_procfs_kobj_release,
483 };
484
485 static const struct sysfs_ops sysfs_counters_ops = {
486 .show = kfd_sysfs_counters_show,
487 };
488
489 static const struct kobj_type sysfs_counters_type = {
490 .sysfs_ops = &sysfs_counters_ops,
491 .release = kfd_procfs_kobj_release,
492 };
493
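/* Create the proc/<pid>/queues/<queue id> directory for a queue. */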
494 int kfd_procfs_add_queue(struct queue *q)
495 {
496 struct kfd_process *proc;
497 int ret;
498
499 if (!q || !q->process)
500 return -EINVAL;
501 proc = q->process;
502
503 /* Create proc/<pid>/queues/<queue id> folder */
504 if (!proc->kobj_queues)
505 return -EFAULT;
506 ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
507 proc->kobj_queues, "%u", q->properties.queue_id);
508 if (ret < 0) {
509 pr_warn("Creating proc/<pid>/queues/%u failed",
510 q->properties.queue_id);
511 kobject_put(&q->kobj);
512 return ret;
513 }
514
515 return 0;
516 }
517
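/* Initialize a sysfs attribute and create the corresponding file under kobj. */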
518 static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
519 char *name)
520 {
521 int ret;
522
523 if (!kobj || !attr || !name)
524 return;
525
526 attr->name = name;
527 attr->mode = KFD_SYSFS_FILE_MODE;
528 sysfs_attr_init(attr);
529
530 ret = sysfs_create_file(kobj, attr);
531 if (ret)
532 pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
533 }
534
535 static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
536 {
537 int ret;
538 int i;
539 char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
540
541 if (!p || !p->kobj)
542 return;
543
544 /*
545 * Create sysfs files for each GPU:
546 * - proc/<pid>/stats_<gpuid>/
547 * - proc/<pid>/stats_<gpuid>/evicted_ms
548 * - proc/<pid>/stats_<gpuid>/cu_occupancy
549 */
550 for (i = 0; i < p->n_pdds; i++) {
551 struct kfd_process_device *pdd = p->pdds[i];
552
553 snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
554 "stats_%u", pdd->dev->id);
555 pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
556 if (!pdd->kobj_stats)
557 return;
558
559 ret = kobject_init_and_add(pdd->kobj_stats,
560 &procfs_stats_type,
561 p->kobj,
562 stats_dir_filename);
563
564 if (ret) {
565 pr_warn("Creating KFD proc/stats_%s folder failed",
566 stats_dir_filename);
567 kobject_put(pdd->kobj_stats);
568 pdd->kobj_stats = NULL;
569 return;
570 }
571
572 kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
573 "evicted_ms");
574 /* Add sysfs file to report compute unit occupancy */
575 if (pdd->dev->kfd2kgd->get_cu_occupancy)
576 kfd_sysfs_create_file(pdd->kobj_stats,
577 &pdd->attr_cu_occupancy,
578 "cu_occupancy");
579 }
580 }
581
582 static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
583 {
584 int ret = 0;
585 int i;
586 char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
587
588 if (!p || !p->kobj)
589 return;
590
591 /*
592 * Create sysfs files for each GPU which supports SVM
593 * - proc/<pid>/counters_<gpuid>/
594 * - proc/<pid>/counters_<gpuid>/faults
595 * - proc/<pid>/counters_<gpuid>/page_in
596 * - proc/<pid>/counters_<gpuid>/page_out
597 */
598 for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
599 struct kfd_process_device *pdd = p->pdds[i];
600 struct kobject *kobj_counters;
601
602 snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
603 "counters_%u", pdd->dev->id);
604 kobj_counters = kfd_alloc_struct(kobj_counters);
605 if (!kobj_counters)
606 return;
607
608 ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
609 p->kobj, counters_dir_filename);
610 if (ret) {
611 pr_warn("Creating KFD proc/%s folder failed",
612 counters_dir_filename);
613 kobject_put(kobj_counters);
614 return;
615 }
616
617 pdd->kobj_counters = kobj_counters;
618 kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
619 "faults");
620 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
621 "page_in");
622 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
623 "page_out");
624 }
625 }
626
627 static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
628 {
629 int i;
630
631 if (!p || !p->kobj)
632 return;
633
634 /*
635 * Create sysfs files for each GPU:
636 * - proc/<pid>/vram_<gpuid>
637 * - proc/<pid>/sdma_<gpuid>
638 */
639 for (i = 0; i < p->n_pdds; i++) {
640 struct kfd_process_device *pdd = p->pdds[i];
641
642 snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
643 pdd->dev->id);
644 kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
645 pdd->vram_filename);
646
647 snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
648 pdd->dev->id);
649 kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
650 pdd->sdma_filename);
651 }
652 }
653
654 void kfd_procfs_del_queue(struct queue *q)
655 {
656 if (!q)
657 return;
658
659 kobject_del(&q->kobj);
660 kobject_put(&q->kobj);
661 }
662
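/* Allocate the process-release workqueue and the ordered restore workqueue.
 * Calling this more than once is safe; existing workqueues are reused.
 */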
663 int kfd_process_create_wq(void)
664 {
665 if (!kfd_process_wq)
666 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
667 if (!kfd_restore_wq)
668 kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
669
670 if (!kfd_process_wq || !kfd_restore_wq) {
671 kfd_process_destroy_wq();
672 return -ENOMEM;
673 }
674
675 return 0;
676 }
677
678 void kfd_process_destroy_wq(void)
679 {
680 if (kfd_process_wq) {
681 destroy_workqueue(kfd_process_wq);
682 kfd_process_wq = NULL;
683 }
684 if (kfd_restore_wq) {
685 destroy_workqueue(kfd_restore_wq);
686 kfd_restore_wq = NULL;
687 }
688 }
689
690 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
691 struct kfd_process_device *pdd, void **kptr)
692 {
693 struct kfd_node *dev = pdd->dev;
694
695 if (kptr && *kptr) {
696 amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
697 *kptr = NULL;
698 }
699
700 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
701 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
702 NULL);
703 }
704
705 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
706 * This function should only be called right after the process
707 * is created and when kfd_processes_mutex is still being held
708 * to avoid concurrency. Because of that exclusiveness, we do
709 * not need to take p->mutex.
710 */
711 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
712 uint64_t gpu_va, uint32_t size,
713 uint32_t flags, struct kgd_mem **mem, void **kptr)
714 {
715 struct kfd_node *kdev = pdd->dev;
716 int err;
717
718 err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
719 pdd->drm_priv, mem, NULL,
720 flags, false);
721 if (err)
722 goto err_alloc_mem;
723
724 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
725 pdd->drm_priv);
726 if (err)
727 goto err_map_mem;
728
729 err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
730 if (err) {
731 pr_debug("Sync memory failed, wait interrupted by user signal\n");
732 goto sync_memory_failed;
733 }
734
735 if (kptr) {
736 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
737 (struct kgd_mem *)*mem, kptr, NULL);
738 if (err) {
739 pr_debug("Map GTT BO to kernel failed\n");
740 goto sync_memory_failed;
741 }
742 }
743
744 return err;
745
746 sync_memory_failed:
747 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
748
749 err_map_mem:
750 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
751 NULL);
752 err_alloc_mem:
753 *mem = NULL;
754 *kptr = NULL;
755 return err;
756 }
757
758 /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
759 * process for IB usage. The memory reserved is for KFD to submit
760 * IB to AMDGPU from kernel. If the memory is reserved
761 * successfully, ib_kaddr will have the CPU/kernel
762 * address. Check ib_kaddr before accessing the memory.
763 */
764 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
765 {
766 struct qcm_process_device *qpd = &pdd->qpd;
767 uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
768 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
769 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
770 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
771 struct kgd_mem *mem;
772 void *kaddr;
773 int ret;
774
775 if (qpd->ib_kaddr || !qpd->ib_base)
776 return 0;
777
778 /* ib_base is only set for dGPU */
779 ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
780 &mem, &kaddr);
781 if (ret)
782 return ret;
783
784 qpd->ib_mem = mem;
785 qpd->ib_kaddr = kaddr;
786
787 return 0;
788 }
789
790 static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
791 {
792 struct qcm_process_device *qpd = &pdd->qpd;
793
794 if (!qpd->ib_kaddr || !qpd->ib_base)
795 return;
796
797 kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
798 }
799
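/* Find or create the kfd_process for @thread's mm and take a reference on it.
 * On first creation this also sets up the per-process procfs/sysfs entries.
 * Returns an ERR_PTR on failure.
 */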
800 struct kfd_process *kfd_create_process(struct task_struct *thread)
801 {
802 struct kfd_process *process;
803 int ret;
804
805 if (!(thread->mm && mmget_not_zero(thread->mm)))
806 return ERR_PTR(-EINVAL);
807
808 /* Only the pthreads threading model is supported. */
809 if (thread->group_leader->mm != thread->mm) {
810 mmput(thread->mm);
811 return ERR_PTR(-EINVAL);
812 }
813
814 /*
815 * Take the kfd_processes_mutex before starting process creation so
816 * that two threads of the same process cannot create two kfd_process
817 * structures.
818 */
819 mutex_lock(&kfd_processes_mutex);
820
821 if (kfd_is_locked()) {
822 pr_debug("KFD is locked! Cannot create process");
823 process = ERR_PTR(-EINVAL);
824 goto out;
825 }
826
827 /* A prior open of /dev/kfd could have already created the process. */
828 process = find_process(thread, false);
829 if (process) {
830 pr_debug("Process already found\n");
831 } else {
832 /* If the process just called exec(3), it is possible that the
833 * cleanup of the kfd_process (following the release of the mm
834 * of the old process image) is still in the cleanup work queue.
835 * Make sure to drain any job before trying to recreate any
836 * resource for this process.
837 */
838 flush_workqueue(kfd_process_wq);
839
840 process = create_process(thread);
841 if (IS_ERR(process))
842 goto out;
843
844 if (!procfs.kobj)
845 goto out;
846
847 process->kobj = kfd_alloc_struct(process->kobj);
848 if (!process->kobj) {
849 pr_warn("Creating procfs kobject failed");
850 goto out;
851 }
852 ret = kobject_init_and_add(process->kobj, &procfs_type,
853 procfs.kobj, "%d",
854 (int)process->lead_thread->pid);
855 if (ret) {
856 pr_warn("Creating procfs pid directory failed");
857 kobject_put(process->kobj);
858 goto out;
859 }
860
861 kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
862 "pasid");
863
864 process->kobj_queues = kobject_create_and_add("queues",
865 process->kobj);
866 if (!process->kobj_queues)
867 pr_warn("Creating KFD proc/queues folder failed");
868
869 kfd_procfs_add_sysfs_stats(process);
870 kfd_procfs_add_sysfs_files(process);
871 kfd_procfs_add_sysfs_counters(process);
872
873 init_waitqueue_head(&process->wait_irq_drain);
874 }
875 out:
876 if (!IS_ERR(process))
877 kref_get(&process->ref);
878 mutex_unlock(&kfd_processes_mutex);
879 mmput(thread->mm);
880
881 return process;
882 }
883
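/* Look up the kfd_process for @thread without taking a reference. */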
884 struct kfd_process *kfd_get_process(const struct task_struct *thread)
885 {
886 struct kfd_process *process;
887
888 if (!thread->mm)
889 return ERR_PTR(-EINVAL);
890
891 /* Only the pthreads threading model is supported. */
892 if (thread->group_leader->mm != thread->mm)
893 return ERR_PTR(-EINVAL);
894
895 process = find_process(thread, false);
896 if (!process)
897 return ERR_PTR(-EINVAL);
898
899 return process;
900 }
901
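/* Look up a kfd_process by mm_struct in the process hash table. Callers hold
 * the kfd_processes_srcu read lock.
 */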
902 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
903 {
904 struct kfd_process *process;
905
906 hash_for_each_possible_rcu(kfd_processes_table, process,
907 kfd_processes, (uintptr_t)mm)
908 if (process->mm == mm)
909 return process;
910
911 return NULL;
912 }
913
914 static struct kfd_process *find_process(const struct task_struct *thread,
915 bool ref)
916 {
917 struct kfd_process *p;
918 int idx;
919
920 idx = srcu_read_lock(&kfd_processes_srcu);
921 p = find_process_by_mm(thread->mm);
922 if (p && ref)
923 kref_get(&p->ref);
924 srcu_read_unlock(&kfd_processes_srcu, idx);
925
926 return p;
927 }
928
929 void kfd_unref_process(struct kfd_process *p)
930 {
931 kref_put(&p->ref, kfd_process_ref_release);
932 }
933
934 /* This increments the process->ref counter. */
935 struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
936 {
937 struct task_struct *task = NULL;
938 struct kfd_process *p = NULL;
939
940 if (!pid) {
941 task = current;
942 get_task_struct(task);
943 } else {
944 task = get_pid_task(pid, PIDTYPE_PID);
945 }
946
947 if (task) {
948 p = find_process(task, true);
949 put_task_struct(task);
950 }
951
952 return p;
953 }
954
955 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
956 {
957 struct kfd_process *p = pdd->process;
958 void *mem;
959 int id;
960 int i;
961
962 /*
963 * Remove all handles from idr and release appropriate
964 * local memory object
965 */
966 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
967
968 for (i = 0; i < p->n_pdds; i++) {
969 struct kfd_process_device *peer_pdd = p->pdds[i];
970
971 if (!peer_pdd->drm_priv)
972 continue;
973 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
974 peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
975 }
976
977 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
978 pdd->drm_priv, NULL);
979 kfd_process_device_remove_obj_handle(pdd, id);
980 }
981 }
982
983 /*
984 * Just kunmap and unpin signal BO here. It will be freed in
985 * kfd_process_free_outstanding_kfd_bos()
986 */
987 static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
988 {
989 struct kfd_process_device *pdd;
990 struct kfd_node *kdev;
991 void *mem;
992
993 kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
994 if (!kdev)
995 return;
996
997 mutex_lock(&p->mutex);
998
999 pdd = kfd_get_process_device_data(kdev, p);
1000 if (!pdd)
1001 goto out;
1002
1003 mem = kfd_process_device_translate_handle(
1004 pdd, GET_IDR_HANDLE(p->signal_handle));
1005 if (!mem)
1006 goto out;
1007
1008 amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
1009
1010 out:
1011 mutex_unlock(&p->mutex);
1012 }
1013
1014 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
1015 {
1016 int i;
1017
1018 for (i = 0; i < p->n_pdds; i++)
1019 kfd_process_device_free_bos(p->pdds[i]);
1020 }
1021
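/* Tear down all per-device data of a process: CWSR and IB memory, the DRM
 * file/VM, CWSR pages, doorbells, the allocation idr and the runtime-pm
 * reference taken in kfd_bind_process_to_device().
 */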
1022 static void kfd_process_destroy_pdds(struct kfd_process *p)
1023 {
1024 int i;
1025
1026 for (i = 0; i < p->n_pdds; i++) {
1027 struct kfd_process_device *pdd = p->pdds[i];
1028
1029 pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
1030 pdd->dev->id, p->pasid);
1031
1032 kfd_process_device_destroy_cwsr_dgpu(pdd);
1033 kfd_process_device_destroy_ib_mem(pdd);
1034
1035 if (pdd->drm_file) {
1036 amdgpu_amdkfd_gpuvm_release_process_vm(
1037 pdd->dev->adev, pdd->drm_priv);
1038 fput(pdd->drm_file);
1039 }
1040
1041 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
1042 free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
1043 get_order(KFD_CWSR_TBA_TMA_SIZE));
1044
1045 idr_destroy(&pdd->alloc_idr);
1046
1047 kfd_free_process_doorbells(pdd->dev->kfd, pdd);
1048
1049 if (pdd->dev->kfd->shared_resources.enable_mes &&
1050 pdd->proc_ctx_cpu_ptr)
1051 amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
1052 &pdd->proc_ctx_bo);
1053 /*
1054 * before destroying pdd, make sure to report availability
1055 * for auto suspend
1056 */
1057 if (pdd->runtime_inuse) {
1058 pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
1059 pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
1060 pdd->runtime_inuse = false;
1061 }
1062
1063 kfree(pdd);
1064 p->pdds[i] = NULL;
1065 }
1066 p->n_pdds = 0;
1067 }
1068
1069 static void kfd_process_remove_sysfs(struct kfd_process *p)
1070 {
1071 struct kfd_process_device *pdd;
1072 int i;
1073
1074 if (!p->kobj)
1075 return;
1076
1077 sysfs_remove_file(p->kobj, &p->attr_pasid);
1078 kobject_del(p->kobj_queues);
1079 kobject_put(p->kobj_queues);
1080 p->kobj_queues = NULL;
1081
1082 for (i = 0; i < p->n_pdds; i++) {
1083 pdd = p->pdds[i];
1084
1085 sysfs_remove_file(p->kobj, &pdd->attr_vram);
1086 sysfs_remove_file(p->kobj, &pdd->attr_sdma);
1087
1088 sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
1089 if (pdd->dev->kfd2kgd->get_cu_occupancy)
1090 sysfs_remove_file(pdd->kobj_stats,
1091 &pdd->attr_cu_occupancy);
1092 kobject_del(pdd->kobj_stats);
1093 kobject_put(pdd->kobj_stats);
1094 pdd->kobj_stats = NULL;
1095 }
1096
1097 for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
1098 pdd = p->pdds[i];
1099
1100 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
1101 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
1102 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
1103 kobject_del(pdd->kobj_counters);
1104 kobject_put(pdd->kobj_counters);
1105 pdd->kobj_counters = NULL;
1106 }
1107
1108 kobject_del(p->kobj);
1109 kobject_put(p->kobj);
1110 p->kobj = NULL;
1111 }
1112
1113 /* No process locking is needed in this function, because the process
1114 * is not findable any more. We must assume that no other thread is
1115 * using it any more, otherwise we couldn't safely free the process
1116 * structure in the end.
1117 */
1118 static void kfd_process_wq_release(struct work_struct *work)
1119 {
1120 struct kfd_process *p = container_of(work, struct kfd_process,
1121 release_work);
1122
1123 kfd_process_dequeue_from_all_devices(p);
1124 pqm_uninit(&p->pqm);
1125
1126 /* Signal the eviction fence after user mode queues are
1127 * destroyed. This allows any BOs to be freed without
1128 * triggering pointless evictions or waiting for fences.
1129 */
1130 dma_fence_signal(p->ef);
1131
1132 kfd_process_remove_sysfs(p);
1133
1134 kfd_process_kunmap_signal_bo(p);
1135 kfd_process_free_outstanding_kfd_bos(p);
1136 svm_range_list_fini(p);
1137
1138 kfd_process_destroy_pdds(p);
1139 dma_fence_put(p->ef);
1140
1141 kfd_event_free_process(p);
1142
1143 kfd_pasid_free(p->pasid);
1144 mutex_destroy(&p->mutex);
1145
1146 put_task_struct(p->lead_thread);
1147
1148 kfree(p);
1149 }
1150
1151 static void kfd_process_ref_release(struct kref *ref)
1152 {
1153 struct kfd_process *p = container_of(ref, struct kfd_process, ref);
1154
1155 INIT_WORK(&p->release_work, kfd_process_wq_release);
1156 queue_work(kfd_process_wq, &p->release_work);
1157 }
1158
1159 static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
1160 {
1161 int idx = srcu_read_lock(&kfd_processes_srcu);
1162 struct kfd_process *p = find_process_by_mm(mm);
1163
1164 srcu_read_unlock(&kfd_processes_srcu, idx);
1165
1166 return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
1167 }
1168
1169 static void kfd_process_free_notifier(struct mmu_notifier *mn)
1170 {
1171 kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
1172 }
1173
1174 static void kfd_process_notifier_release_internal(struct kfd_process *p)
1175 {
1176 int i;
1177
1178 cancel_delayed_work_sync(&p->eviction_work);
1179 cancel_delayed_work_sync(&p->restore_work);
1180
1181 for (i = 0; i < p->n_pdds; i++) {
1182 struct kfd_process_device *pdd = p->pdds[i];
1183
1184 /* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */
1185 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
1186 amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
1187 }
1188
1189 /* Indicate to other users that MM is no longer valid */
1190 p->mm = NULL;
1191 kfd_dbg_trap_disable(p);
1192
1193 if (atomic_read(&p->debugged_process_count) > 0) {
1194 struct kfd_process *target;
1195 unsigned int temp;
1196 int idx = srcu_read_lock(&kfd_processes_srcu);
1197
1198 hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
1199 if (target->debugger_process && target->debugger_process == p) {
1200 mutex_lock_nested(&target->mutex, 1);
1201 kfd_dbg_trap_disable(target);
1202 mutex_unlock(&target->mutex);
1203 if (atomic_read(&p->debugged_process_count) == 0)
1204 break;
1205 }
1206 }
1207
1208 srcu_read_unlock(&kfd_processes_srcu, idx);
1209 }
1210
1211 mmu_notifier_put(&p->mmu_notifier);
1212 }
1213
1214 static void kfd_process_notifier_release(struct mmu_notifier *mn,
1215 struct mm_struct *mm)
1216 {
1217 struct kfd_process *p;
1218
1219 /*
1220 * The kfd_process structure cannot be freed because the
1221 * mmu_notifier SRCU is read-locked.
1222 */
1223 p = container_of(mn, struct kfd_process, mmu_notifier);
1224 if (WARN_ON(p->mm != mm))
1225 return;
1226
1227 mutex_lock(&kfd_processes_mutex);
1228 /*
1229 * Do early return if table is empty.
1230 *
1231 * This could potentially happen if this function is called concurrently
1232 * by the mmu_notifier and by kfd_cleanup_processes.
1233 *
1234 */
1235 if (hash_empty(kfd_processes_table)) {
1236 mutex_unlock(&kfd_processes_mutex);
1237 return;
1238 }
1239 hash_del_rcu(&p->kfd_processes);
1240 mutex_unlock(&kfd_processes_mutex);
1241 synchronize_srcu(&kfd_processes_srcu);
1242
1243 kfd_process_notifier_release_internal(p);
1244 }
1245
1246 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
1247 .release = kfd_process_notifier_release,
1248 .alloc_notifier = kfd_process_alloc_notifier,
1249 .free_notifier = kfd_process_free_notifier,
1250 };
1251
1252 /*
1253 * This code handles the case when the driver is being unloaded before all
1254 * mm_structs are released. We need to safely free the kfd_process and
1255 * avoid race conditions with mmu_notifier that might try to free them.
1256 *
1257 */
1258 void kfd_cleanup_processes(void)
1259 {
1260 struct kfd_process *p;
1261 struct hlist_node *p_temp;
1262 unsigned int temp;
1263 HLIST_HEAD(cleanup_list);
1264
1265 /*
1266 * Move all remaining kfd_process from the process table to a
1267 * temp list for processing. Once that is done, the mmu_notifier release
1268 * callback will not see the kfd_process in the table and will return early,
1269 * avoiding double-free issues.
1270 */
1271 mutex_lock(&kfd_processes_mutex);
1272 hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
1273 hash_del_rcu(&p->kfd_processes);
1274 synchronize_srcu(&kfd_processes_srcu);
1275 hlist_add_head(&p->kfd_processes, &cleanup_list);
1276 }
1277 mutex_unlock(&kfd_processes_mutex);
1278
1279 hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
1280 kfd_process_notifier_release_internal(p);
1281
1282 /*
1283 * Ensures that all outstanding free_notifier get called, triggering
1284 * the release of the kfd_process struct.
1285 */
1286 mmu_notifier_synchronize();
1287 }
1288
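/* Set up the CWSR trap handler for devices without a dedicated CWSR base
 * (i.e. APUs): map the reserved memory into the process and copy the trap
 * handler ISA into it.
 */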
1289 int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
1290 {
1291 unsigned long offset;
1292 int i;
1293
1294 if (p->has_cwsr)
1295 return 0;
1296
1297 for (i = 0; i < p->n_pdds; i++) {
1298 struct kfd_node *dev = p->pdds[i]->dev;
1299 struct qcm_process_device *qpd = &p->pdds[i]->qpd;
1300
1301 if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
1302 continue;
1303
1304 offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
1305 qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
1306 KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
1307 MAP_SHARED, offset);
1308
1309 if (IS_ERR_VALUE(qpd->tba_addr)) {
1310 int err = qpd->tba_addr;
1311
1312 dev_err(dev->adev->dev,
1313 "Failure to set tba address. error %d.\n", err);
1314 qpd->tba_addr = 0;
1315 qpd->cwsr_kaddr = NULL;
1316 return err;
1317 }
1318
1319 memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1320
1321 kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
1322
1323 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1324 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1325 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1326 }
1327
1328 p->has_cwsr = true;
1329
1330 return 0;
1331 }
1332
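/* Allocate and map the CWSR trap handler buffer in the dGPU's GPU VM
 * (cwsr_base is only set for dGPUs) and copy the trap handler ISA into it.
 */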
1333 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
1334 {
1335 struct kfd_node *dev = pdd->dev;
1336 struct qcm_process_device *qpd = &pdd->qpd;
1337 uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
1338 | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
1339 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1340 struct kgd_mem *mem;
1341 void *kaddr;
1342 int ret;
1343
1344 if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
1345 return 0;
1346
1347 /* cwsr_base is only set for dGPU */
1348 ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
1349 KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
1350 if (ret)
1351 return ret;
1352
1353 qpd->cwsr_mem = mem;
1354 qpd->cwsr_kaddr = kaddr;
1355 qpd->tba_addr = qpd->cwsr_base;
1356
1357 memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1358
1359 kfd_process_set_trap_debug_flag(&pdd->qpd,
1360 pdd->process->debug_trap_enabled);
1361
1362 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1363 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1364 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1365
1366 return 0;
1367 }
1368
1369 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
1370 {
1371 struct kfd_node *dev = pdd->dev;
1372 struct qcm_process_device *qpd = &pdd->qpd;
1373
1374 if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
1375 return;
1376
1377 kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
1378 }
1379
1380 void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
1381 uint64_t tba_addr,
1382 uint64_t tma_addr)
1383 {
1384 if (qpd->cwsr_kaddr) {
1385 /* KFD trap handler is bound, record as second-level TBA/TMA
1386 * in first-level TMA. First-level trap will jump to second.
1387 */
1388 uint64_t *tma =
1389 (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1390 tma[0] = tba_addr;
1391 tma[1] = tma_addr;
1392 } else {
1393 /* No trap handler bound, bind as first-level TBA/TMA. */
1394 qpd->tba_addr = tba_addr;
1395 qpd->tma_addr = tma_addr;
1396 }
1397 }
1398
1399 bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
1400 {
1401 int i;
1402
1403 /* On most GFXv9 GPUs, the retry mode in the SQ must match the
1404 * boot time retry setting. Mixing processes with different
1405 * XNACK/retry settings can hang the GPU.
1406 *
1407 * Different GPUs can have different noretry settings depending
1408 * on HW bugs or limitations. We need to find at least one
1409 * XNACK mode for this process that's compatible with all GPUs.
1410 * Fortunately GPUs with retry enabled (noretry=0) can run code
1411 * built for XNACK-off. On GFXv9 it may perform slower.
1412 *
1413 * Therefore applications built for XNACK-off can always be
1414 * supported and will be our fallback if any GPU does not
1415 * support retry.
1416 */
1417 for (i = 0; i < p->n_pdds; i++) {
1418 struct kfd_node *dev = p->pdds[i]->dev;
1419
1420 /* Only consider GFXv9 and higher GPUs. Older GPUs don't
1421 * support the SVM APIs and don't need to be considered
1422 * for the XNACK mode selection.
1423 */
1424 if (!KFD_IS_SOC15(dev))
1425 continue;
1426 /* Aldebaran can always support XNACK because it can support
1427 * per-process XNACK mode selection. But let the dev->noretry
1428 * setting still influence the default XNACK mode.
1429 */
1430 if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
1431 continue;
1432
1433 /* GFXv10 and later GPUs do not support shader preemption
1434 * during page faults. This can lead to poor QoS for queue
1435 * management and memory-manager-related preemptions or
1436 * even deadlocks.
1437 */
1438 if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
1439 return false;
1440
1441 if (dev->kfd->noretry)
1442 return false;
1443 }
1444
1445 return true;
1446 }
1447
1448 void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
1449 bool enabled)
1450 {
1451 if (qpd->cwsr_kaddr) {
1452 uint64_t *tma =
1453 (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1454 tma[2] = enabled;
1455 }
1456 }
1457
1458 /*
1459 * On return the kfd_process is fully operational and will be freed when the
1460 * mm is released
1461 */
1462 static struct kfd_process *create_process(const struct task_struct *thread)
1463 {
1464 struct kfd_process *process;
1465 struct mmu_notifier *mn;
1466 int err = -ENOMEM;
1467
1468 process = kzalloc(sizeof(*process), GFP_KERNEL);
1469 if (!process)
1470 goto err_alloc_process;
1471
1472 kref_init(&process->ref);
1473 mutex_init(&process->mutex);
1474 process->mm = thread->mm;
1475 process->lead_thread = thread->group_leader;
1476 process->n_pdds = 0;
1477 process->queues_paused = false;
1478 INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
1479 INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
1480 process->last_restore_timestamp = get_jiffies_64();
1481 err = kfd_event_init_process(process);
1482 if (err)
1483 goto err_event_init;
1484 process->is_32bit_user_mode = in_compat_syscall();
1485 process->debug_trap_enabled = false;
1486 process->debugger_process = NULL;
1487 process->exception_enable_mask = 0;
1488 atomic_set(&process->debugged_process_count, 0);
1489 sema_init(&process->runtime_enable_sema, 0);
1490
1491 process->pasid = kfd_pasid_alloc();
1492 if (process->pasid == 0) {
1493 err = -ENOSPC;
1494 goto err_alloc_pasid;
1495 }
1496
1497 err = pqm_init(&process->pqm, process);
1498 if (err != 0)
1499 goto err_process_pqm_init;
1500
1501 /* init process apertures*/
1502 err = kfd_init_apertures(process);
1503 if (err != 0)
1504 goto err_init_apertures;
1505
1506 /* Check XNACK support after PDDs are created in kfd_init_apertures */
1507 process->xnack_enabled = kfd_process_xnack_mode(process, false);
1508
1509 err = svm_range_list_init(process);
1510 if (err)
1511 goto err_init_svm_range_list;
1512
1513 /* alloc_notifier needs to find the process in the hash table */
1514 hash_add_rcu(kfd_processes_table, &process->kfd_processes,
1515 (uintptr_t)process->mm);
1516
1517 /* Prevent free_notifier from starting kfd_process_wq_release if
1518 * mmu_notifier_get fails because of a pending signal.
1519 */
1520 kref_get(&process->ref);
1521
1522 /* MMU notifier registration must be the last call that can fail
1523 * because after this point we cannot unwind the process creation.
1524 * After this point, mmu_notifier_put will trigger the cleanup by
1525 * dropping the last process reference in the free_notifier.
1526 */
1527 mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
1528 if (IS_ERR(mn)) {
1529 err = PTR_ERR(mn);
1530 goto err_register_notifier;
1531 }
1532 BUG_ON(mn != &process->mmu_notifier);
1533
1534 kfd_unref_process(process);
1535 get_task_struct(process->lead_thread);
1536
1537 INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
1538
1539 return process;
1540
1541 err_register_notifier:
1542 hash_del_rcu(&process->kfd_processes);
1543 svm_range_list_fini(process);
1544 err_init_svm_range_list:
1545 kfd_process_free_outstanding_kfd_bos(process);
1546 kfd_process_destroy_pdds(process);
1547 err_init_apertures:
1548 pqm_uninit(&process->pqm);
1549 err_process_pqm_init:
1550 kfd_pasid_free(process->pasid);
1551 err_alloc_pasid:
1552 kfd_event_free_process(process);
1553 err_event_init:
1554 mutex_destroy(&process->mutex);
1555 kfree(process);
1556 err_alloc_process:
1557 return ERR_PTR(err);
1558 }
1559
1560 struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
1561 struct kfd_process *p)
1562 {
1563 int i;
1564
1565 for (i = 0; i < p->n_pdds; i++)
1566 if (p->pdds[i]->dev == dev)
1567 return p->pdds[i];
1568
1569 return NULL;
1570 }
1571
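/* Allocate and initialize a kfd_process_device (pdd) binding @p to @dev and
 * append it to the process's pdd array.
 */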
1572 struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
1573 struct kfd_process *p)
1574 {
1575 struct kfd_process_device *pdd = NULL;
1576
1577 if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
1578 return NULL;
1579 pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
1580 if (!pdd)
1581 return NULL;
1582
1583 pdd->dev = dev;
1584 INIT_LIST_HEAD(&pdd->qpd.queues_list);
1585 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
1586 pdd->qpd.dqm = dev->dqm;
1587 pdd->qpd.pqm = &p->pqm;
1588 pdd->qpd.evicted = 0;
1589 pdd->qpd.mapped_gws_queue = false;
1590 pdd->process = p;
1591 pdd->bound = PDD_UNBOUND;
1592 pdd->already_dequeued = false;
1593 pdd->runtime_inuse = false;
1594 atomic64_set(&pdd->vram_usage, 0);
1595 pdd->sdma_past_activity_counter = 0;
1596 pdd->user_gpu_id = dev->id;
1597 atomic64_set(&pdd->evict_duration_counter, 0);
1598
1599 p->pdds[p->n_pdds++] = pdd;
1600 if (kfd_dbg_is_per_vmid_supported(pdd->dev))
1601 pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
1602 pdd->dev->adev,
1603 false,
1604 0);
1605
1606 /* Init idr used for memory handle translation */
1607 idr_init(&pdd->alloc_idr);
1608
1609 return pdd;
1610 }
1611
1612 /**
1613 * kfd_process_device_init_vm - Initialize a VM for a process-device
1614 *
1615 * @pdd: The process-device
1616 * @drm_file: Optional pointer to a DRM file descriptor
1617 *
1618 * If @drm_file is specified, it will be used to acquire the VM from
1619 * that file descriptor. If successful, the @pdd takes ownership of
1620 * the file descriptor.
1621 *
1622 * If @drm_file is NULL, a new VM is created.
1623 *
1624 * Returns 0 on success, -errno on failure.
1625 */
1626 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
1627 struct file *drm_file)
1628 {
1629 struct amdgpu_fpriv *drv_priv;
1630 struct amdgpu_vm *avm;
1631 struct kfd_process *p;
1632 struct kfd_node *dev;
1633 int ret;
1634
1635 if (!drm_file)
1636 return -EINVAL;
1637
1638 if (pdd->drm_priv)
1639 return -EBUSY;
1640
1641 ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
1642 if (ret)
1643 return ret;
1644 avm = &drv_priv->vm;
1645
1646 p = pdd->process;
1647 dev = pdd->dev;
1648
1649 ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
1650 &p->kgd_process_info,
1651 &p->ef);
1652 if (ret) {
1653 dev_err(dev->adev->dev, "Failed to create process VM object\n");
1654 return ret;
1655 }
1656 pdd->drm_priv = drm_file->private_data;
1657 atomic64_set(&pdd->tlb_seq, 0);
1658
1659 ret = kfd_process_device_reserve_ib_mem(pdd);
1660 if (ret)
1661 goto err_reserve_ib_mem;
1662 ret = kfd_process_device_init_cwsr_dgpu(pdd);
1663 if (ret)
1664 goto err_init_cwsr;
1665
1666 ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
1667 if (ret)
1668 goto err_set_pasid;
1669
1670 pdd->drm_file = drm_file;
1671
1672 return 0;
1673
1674 err_set_pasid:
1675 kfd_process_device_destroy_cwsr_dgpu(pdd);
1676 err_init_cwsr:
1677 kfd_process_device_destroy_ib_mem(pdd);
1678 err_reserve_ib_mem:
1679 pdd->drm_priv = NULL;
1680 amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
1681
1682 return ret;
1683 }
1684
1685 /*
1686 * Direct the IOMMU to bind the process (specifically the pasid->mm)
1687 * to the device.
1688 * Unbinding occurs when the process dies or the device is removed.
1689 *
1690 * Assumes that the process lock is held.
1691 */
1692 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
1693 struct kfd_process *p)
1694 {
1695 struct kfd_process_device *pdd;
1696 int err;
1697
1698 pdd = kfd_get_process_device_data(dev, p);
1699 if (!pdd) {
1700 dev_err(dev->adev->dev, "Process device data doesn't exist\n");
1701 return ERR_PTR(-ENOMEM);
1702 }
1703
1704 if (!pdd->drm_priv)
1705 return ERR_PTR(-ENODEV);
1706
1707 /*
1708 * Signal the runtime-pm system to auto-resume and to prevent
1709 * further runtime suspend once the device pdd is created, until
1710 * the pdd is destroyed.
1711 */
1712 if (!pdd->runtime_inuse) {
1713 err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
1714 if (err < 0) {
1715 pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
1716 return ERR_PTR(err);
1717 }
1718 }
1719
1720 /*
1721 * Make sure that the runtime usage counter is incremented just once
1722 * per pdd.
1723 */
1724 pdd->runtime_inuse = true;
1725
1726 return pdd;
1727 }
1728
1729 /* Create specific handle mapped to mem from process local memory idr
1730 * Assumes that the process lock is held.
1731 */
1732 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
1733 void *mem)
1734 {
1735 return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
1736 }
1737
1738 /* Translate specific handle from process local memory idr
1739 * Assumes that the process lock is held.
1740 */
1741 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
1742 int handle)
1743 {
1744 if (handle < 0)
1745 return NULL;
1746
1747 return idr_find(&pdd->alloc_idr, handle);
1748 }
1749
1750 /* Remove specific handle from process local memory idr
1751 * Assumes that the process lock is held.
1752 */
1753 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
1754 int handle)
1755 {
1756 if (handle >= 0)
1757 idr_remove(&pdd->alloc_idr, handle);
1758 }
1759
1760 /* This increments the process->ref counter. */
1761 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
1762 {
1763 struct kfd_process *p, *ret_p = NULL;
1764 unsigned int temp;
1765
1766 int idx = srcu_read_lock(&kfd_processes_srcu);
1767
1768 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1769 if (p->pasid == pasid) {
1770 kref_get(&p->ref);
1771 ret_p = p;
1772 break;
1773 }
1774 }
1775
1776 srcu_read_unlock(&kfd_processes_srcu, idx);
1777
1778 return ret_p;
1779 }
1780
1781 /* This increments the process->ref counter. */
1782 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
1783 {
1784 struct kfd_process *p;
1785
1786 int idx = srcu_read_lock(&kfd_processes_srcu);
1787
1788 p = find_process_by_mm(mm);
1789 if (p)
1790 kref_get(&p->ref);
1791
1792 srcu_read_unlock(&kfd_processes_srcu, idx);
1793
1794 return p;
1795 }
1796
1797 /* kfd_process_evict_queues - Evict all user queues of a process
1798 *
1799 * Eviction is reference-counted per process-device. This means multiple
1800 * evictions from different sources can be nested safely.
1801 */
1802 int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
1803 {
1804 int r = 0;
1805 int i;
1806 unsigned int n_evicted = 0;
1807
1808 for (i = 0; i < p->n_pdds; i++) {
1809 struct kfd_process_device *pdd = p->pdds[i];
1810 struct device *dev = pdd->dev->adev->dev;
1811
1812 kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
1813 trigger);
1814
1815 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
1816 &pdd->qpd);
1817 /* Eviction returns -EIO if HWS is hung or the ASIC is resetting. In this
1818 * case we still want to mark all queues as evicted to prevent them from
1819 * being added back, since they are not actually saved right now.
1820 */
		if (r && r != -EIO) {
			dev_err(dev, "Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (n_evicted == 0)
			break;

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			dev_err(pdd->dev->adev->dev,
				"Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	int r, ret = 0;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct device *dev = pdd->dev->adev->dev;

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			dev_err(dev, "Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

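/* kfd_process_gpuidx_from_gpuid - Find the pdd array index for a user GPU ID
 *
 * Returns the index into p->pdds whose user_gpu_id matches gpu_id, or
 * -EINVAL if this process has no device with that ID.
 */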
int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
			return i;
	return -EINVAL;
}

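/* kfd_process_gpuid_from_node - Look up the user GPU ID and pdd index for a node
 *
 * On success, *gpuid and *gpuidx are filled in and 0 is returned. Returns
 * -EINVAL if this process has no device data for @node.
 */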
int
kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
			    uint32_t *gpuid, uint32_t *gpuidx)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && p->pdds[i]->dev == node) {
			*gpuid = p->pdds[i]->user_gpu_id;
			*gpuidx = i;
			return 0;
		}
	return -EINVAL;
}

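/* evict_process_worker - Evict all user queues of a process (delayed work)
 *
 * Waits for any in-flight restore work to finish, evicts the process queues
 * with the TTM eviction trigger and, on success, signals the eviction fence
 * and schedules the restore work.
 */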
static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between the restore and evict work
	 * items is possible: once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves the KFD BOs, the process can be evicted again before
	 * the restore has fully finished. So wait for any previous restore
	 * work to complete first.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

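/* restore_process_worker - Restore the BOs and queues of a process (delayed work)
 *
 * Revalidates the process BOs through amdgpu_amdkfd_gpuvm_restore_process_bos
 * and then restores the user queues. If BO restoration fails, the work is
 * rescheduled after PROCESS_BACK_OFF_TIME_MS.
 */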
static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before the restore actually succeeds.
	 * Otherwise it would have to be set by KGD (restore_process_bos)
	 * before the KFD BOs are unreserved; if not, the process could be
	 * evicted again before the timestamp is set.
	 * If the restore fails, the timestamp is set again on the next
	 * attempt. This means the minimum GPU time quantum is
	 * PROCESS_ACTIVE_TIME_MS minus the time it takes to execute the
	 * following two functions.
	 */

	p->last_restore_timestamp = get_jiffies_64();
	/* VMs may not have been acquired yet during debugging. */
	if (p->kgd_process_info)
		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
							      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
					 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

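/* kfd_suspend_all_processes - Evict the user queues of every known process
 *
 * Cancels pending eviction work and waits for restore work, then evicts each
 * process' queues with the SUSPEND eviction trigger and signals its eviction
 * fence.
 */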
void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	WARN(debug_evictions, "Evicting all processes");
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		flush_delayed_work(&p->restore_work);

		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

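/* kfd_resume_all_processes - Schedule restore work for every known process
 *
 * Returns -EFAULT if the restore work of any process could not be queued
 * (e.g. because it was already pending), 0 otherwise.
 */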
int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

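/* kfd_reserved_mem_mmap - Map the per-process CWSR (wave save/restore) buffer
 *
 * Allocates the kernel pages backing the CWSR TBA/TMA area for this
 * process-device and maps them into the user VMA. The VMA must be exactly
 * KFD_CWSR_TBA_TMA_SIZE bytes.
 */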
int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		dev_err(dev->adev->dev, "Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						   get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		dev_err(dev->adev->dev,
			"Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		     | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

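/* kfd_flush_tlb - Flush stale GPU TLB entries for a process-device
 *
 * Skips the flush if the VM's TLB sequence number has not changed since the
 * last flush. Flushes by VMID when running without HWS, otherwise by PASID
 * on every XCC instance.
 */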
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
	struct kfd_node *dev = pdd->dev;
	uint32_t xcc_mask = dev->xcc_mask;
	int xcc = 0;

	/*
	 * It can be that we race and lose here, but that is extremely unlikely
	 * and the worst thing which could happen is that we flush the changes
	 * into the TLB once more which is harmless.
	 */
	if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
		return;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
							 pdd->qpd.vmid);
	} else {
		for_each_inst(xcc, xcc_mask)
			amdgpu_amdkfd_flush_gpu_tlb_pasid(
				dev->adev, pdd->process->pasid, type, xcc);
	}
}

/* assumes caller holds process lock. */
int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
{
	uint32_t irq_drain_fence[8];
	uint8_t node_id = 0;
	int r = 0;

	if (!KFD_IS_SOC15(pdd->dev))
		return 0;

	pdd->process->irq_drain_is_open = true;

	memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
	irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
			     KFD_IRQ_FENCE_CLIENTID;
	irq_drain_fence[3] = pdd->process->pasid;

	/*
	 * For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
	 */
	if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3)) {
		node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
		irq_drain_fence[3] |= node_id << 16;
	}

	/* Ensure stale IRQs are scheduled to KFD interrupt handling and send the drain fence. */
	if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
						     irq_drain_fence)) {
		pdd->process->irq_drain_is_open = false;
		return 0;
	}

	r = wait_event_interruptible(pdd->process->wait_irq_drain,
				     !READ_ONCE(pdd->process->irq_drain_is_open));
	if (r)
		pdd->process->irq_drain_is_open = false;

	return r;
}

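/* kfd_process_close_interrupt_drain - Complete an interrupt drain for a PASID
 *
 * Counterpart of kfd_process_drain_interrupts: clears irq_drain_is_open for
 * the process with the given PASID and wakes up any waiter.
 */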
void kfd_process_close_interrupt_drain(unsigned int pasid)
{
	struct kfd_process *p;

	p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return;

	WRITE_ONCE(p->irq_drain_is_open, false);
	wake_up_all(&p->wait_irq_drain);
	kfd_unref_process(p);
}

struct send_exception_work_handler_workarea {
	struct work_struct work;
	struct kfd_process *p;
	unsigned int queue_id;
	uint64_t error_reason;
};

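/* send_exception_work_handler - Deliver a runtime exception to user space
 *
 * Runs in the context of the target process' mm: ORs the error reason into
 * the error payload in the queue's context save area and signals the
 * associated error event.
 */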
static void send_exception_work_handler(struct work_struct *work)
{
	struct send_exception_work_handler_workarea *workarea;
	struct kfd_process *p;
	struct queue *q;
	struct mm_struct *mm;
	struct kfd_context_save_area_header __user *csa_header;
	uint64_t __user *err_payload_ptr;
	uint64_t cur_err;
	uint32_t ev_id;

	workarea = container_of(work,
				struct send_exception_work_handler_workarea,
				work);
	p = workarea->p;

	mm = get_task_mm(p->lead_thread);

	if (!mm)
		return;

	kthread_use_mm(mm);

	q = pqm_get_user_queue(&p->pqm, workarea->queue_id);

	if (!q)
		goto out;

	csa_header = (void __user *)q->properties.ctx_save_restore_area_address;

	get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
	get_user(cur_err, err_payload_ptr);
	cur_err |= workarea->error_reason;
	put_user(cur_err, err_payload_ptr);
	get_user(ev_id, &csa_header->err_event_id);

	kfd_set_event(p, ev_id);

out:
	kthread_unuse_mm(mm);
	mmput(mm);
}

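/* kfd_send_exception_to_runtime - Report a queue error to the runtime
 *
 * Schedules send_exception_work_handler on an on-stack work item and waits
 * for it to complete.
 */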
int kfd_send_exception_to_runtime(struct kfd_process *p,
				  unsigned int queue_id,
				  uint64_t error_reason)
{
	struct send_exception_work_handler_workarea worker;

	INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);

	worker.p = p;
	worker.queue_id = queue_id;
	worker.error_reason = error_reason;

	schedule_work(&worker.work);
	flush_work(&worker.work);
	destroy_work_on_stack(&worker.work);

	return 0;
}

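/* kfd_process_device_data_by_id - Find the pdd with a given user GPU ID
 *
 * Returns NULL if gpu_id is 0 or if this process has no device with that ID.
 */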
struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	if (gpu_id) {
		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *pdd = p->pdds[i];

			if (pdd->user_gpu_id == gpu_id)
				return pdd;
		}
	}
	return NULL;
}

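/* kfd_process_get_user_gpu_id - Translate an actual GPU ID to the user GPU ID
 *
 * Returns 0 if actual_gpu_id is 0, or -EINVAL if no process device matches.
 */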
int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
{
	int i;

	if (!actual_gpu_id)
		return 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (pdd->dev->id == actual_gpu_id)
			return pdd->user_gpu_id;
	}
	return -EINVAL;
}

#if defined(CONFIG_DEBUG_FS)

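/* kfd_debugfs_mqds_by_process - Dump the MQDs of every process to debugfs */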
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif