// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_svm.h"
#include "kfd_smi_events.h"
#include "kfd_debug.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

struct temp_sdma_queue_list {
	uint64_t __user *rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};

static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;
	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * The past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from
	 *    qpd->queues_list, under dqm_lock/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without
	 *    dqm_lock held. Save the SDMA count for each node and also add
	 *    the count to the total SDMA count.
	 *    It is possible that, during this step, a few SDMA queue nodes
	 *    were deleted from qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes
	 *    got deleted. If a node got deleted, its SDMA count has already
	 *    been captured in the SDMA past activity counter, so subtract
	 *    the SDMA counter saved for that node in step 2 from the total
	 *    SDMA count.
	 */
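	/*
	 * Worked example (editor's illustration, not from the original
	 * source): suppose the past counter is P and step 2 reads queue
	 * A = 100 and queue B = 50, but B is destroyed before step 3 runs.
	 * The queue-destroy path folds B's final count into
	 * pdd->sdma_past_activity_counter, so step 3 subtracts the 50
	 * recorded for B to avoid counting it twice:
	 *
	 *	total = (P + 50) + (100 + 50) - 50 = P + 150
	 */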
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in the temp list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			    (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If the temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during the SDMA usage read. Subtract the SDMA
	 * count for each such node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}

/**
 * kfd_get_cu_occupancy - Collect number of waves in flight on this device
 * by current process. Translates acquired wave count into number of compute
 * units that are occupied.
 *
 * @attr: Handle of attribute that allows reporting of wave count.
 *	  The attribute handle encapsulates the GPU device it is associated
 *	  with, thereby allowing collection of waves in flight, etc.
 * @buffer: Handle of user provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct kfd_node *dev = NULL;
	struct kfd_process *proc = NULL;
	struct kfd_process_device *pdd = NULL;

	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	if (dev->kfd2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from the device if it supports it */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
				       &max_waves_per_cu, 0);

	/* Translate wave count to number of compute units, rounding up */
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}

static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
			  kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter) /
				SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static const struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}
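/*
 * Editor's summary (derived from the show functions and helpers below; the
 * exact mount point depends on where the kfd device kobject lives, typically
 * /sys/class/kfd/kfd). The per-process "procfs" tree looks like:
 *
 *	proc/<pid>/pasid
 *	proc/<pid>/vram_<gpuid>
 *	proc/<pid>/sdma_<gpuid>
 *	proc/<pid>/queues/<queueid>/{size,type,gpuid}
 *	proc/<pid>/stats_<gpuid>/{evicted_ms,cu_occupancy}
 *	proc/<pid>/counters_<gpuid>/{faults,page_in,page_out}
 */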
static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}

static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct kfd_process_device *pdd = container_of(attr,
				struct kfd_process_device,
				attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return kfd_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute");
	}

	return 0;
}

static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct kfd_process_device *pdd;

	if (!strcmp(attr->name, "faults")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_faults);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
	}
	if (!strcmp(attr->name, "page_in")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_in);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
	}
	if (!strcmp(attr->name, "page_out")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_out);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
	}
	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};
ATTRIBUTE_GROUPS(procfs_queue);

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static const struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_groups = procfs_queue_groups,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = kfd_procfs_stats_show,
};

static const struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.release = kfd_procfs_kobj_release,
};

static const struct sysfs_ops sysfs_counters_ops = {
	.show = kfd_sysfs_counters_show,
};

static const struct kobj_type sysfs_counters_type = {
	.sysfs_ops = &sysfs_counters_ops,
	.release = kfd_procfs_kobj_release,
};

int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
				   proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}

static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
				  char *name)
{
	int ret;

	if (!kobj || !attr || !name)
		return;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(kobj, attr);
	if (ret)
		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}

static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
	int ret;
	int i;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "stats_%u", pdd->dev->id);
		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
		if (!pdd->kobj_stats)
			return;

		ret = kobject_init_and_add(pdd->kobj_stats,
					   &procfs_stats_type,
					   p->kobj,
					   stats_dir_filename);

		if (ret) {
			pr_warn("Creating KFD proc/stats_%s folder failed",
				stats_dir_filename);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
			return;
		}

		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
				      "evicted_ms");
		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			kfd_sysfs_create_file(pdd->kobj_stats,
					      &pdd->attr_cu_occupancy,
					      "cu_occupancy");
	}
}

static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
	int ret = 0;
	int i;
	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU which supports SVM:
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct kobject *kobj_counters;

		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "counters_%u", pdd->dev->id);
		kobj_counters = kfd_alloc_struct(kobj_counters);
		if (!kobj_counters)
			return;

		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
					   p->kobj, counters_dir_filename);
		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				counters_dir_filename);
			kobject_put(kobj_counters);
			return;
		}

		pdd->kobj_counters = kobj_counters;
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
				      "faults");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
				      "page_in");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
				      "page_out");
	}
}

static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int i;

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
				      pdd->vram_filename);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
				      pdd->sdma_filename);
	}
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

static void kfd_process_free_gpuvm(struct kgd_mem *mem,
				   struct kfd_process_device *pdd, void **kptr)
{
	struct kfd_node *dev = pdd->dev;

	if (kptr && *kptr) {
		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
		*kptr = NULL;
	}

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
					       NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *	This function should only be called right after the process
 *	is created and while kfd_processes_mutex is still being held,
 *	to avoid concurrency. Because of that exclusiveness, we do
 *	not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, struct kgd_mem **mem, void **kptr)
{
	struct kfd_node *kdev = pdd->dev;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
						      pdd->drm_priv, mem, NULL,
						      flags, false);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
						    pdd->drm_priv);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
				(struct kgd_mem *)*mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto sync_memory_failed;
		}
	}

	return err;

sync_memory_failed:
	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
					       NULL);
err_alloc_mem:
	*mem = NULL;
	*kptr = NULL;
	return err;
}
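/*
 * Editor's sketch (illustrative, not part of the original file): callers
 * pair kfd_process_alloc_gpuvm() with kfd_process_free_gpuvm() above, e.g.:
 *
 *	struct kgd_mem *mem;
 *	void *kaddr;
 *	int r;
 *
 *	r = kfd_process_alloc_gpuvm(pdd, gpu_va, PAGE_SIZE, flags,
 *				    &mem, &kaddr);
 *	if (!r) {
 *		... access the buffer through kaddr ...
 *		kfd_process_free_gpuvm(mem, pdd, &kaddr);
 *	}
 *
 * The IB and CWSR helpers below follow exactly this pattern.
 */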
/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The memory reserved is for KFD to submit
 *	IBs to AMDGPU from the kernel. If the memory is reserved
 *	successfully, ib_kaddr will have the CPU/kernel address.
 *	Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &mem, &kaddr);
	if (ret)
		return ret;

	qpd->ib_mem = mem;
	qpd->ib_kaddr = kaddr;

	return 0;
}

static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!qpd->ib_kaddr || !qpd->ib_base)
		return;

	kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
}

struct kfd_process *kfd_create_process(struct task_struct *thread)
{
	struct kfd_process *process;
	int ret;

	if (!(thread->mm && mmget_not_zero(thread->mm)))
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm) {
		mmput(thread->mm);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Take the kfd processes mutex before starting process creation,
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	if (kfd_is_locked()) {
		pr_debug("KFD is locked! Cannot create process");
		process = ERR_PTR(-EINVAL);
		goto out;
	}

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread, false);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			kobject_put(process->kobj);
			goto out;
		}

		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
				      "pasid");

		process->kobj_queues = kobject_create_and_add("queues",
							      process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed");

		kfd_procfs_add_sysfs_stats(process);
		kfd_procfs_add_sysfs_files(process);
		kfd_procfs_add_sysfs_counters(process);

		init_waitqueue_head(&process->wait_irq_drain);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);
	mmput(thread->mm);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread, false);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	if (p && ref)
		kref_get(&p->ref);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
{
	struct task_struct *task = NULL;
	struct kfd_process *p = NULL;

	if (!pid) {
		task = current;
		get_task_struct(task);
	} else {
		task = get_pid_task(pid, PIDTYPE_PID);
	}

	if (task) {
		p = find_process(task, true);
		put_task_struct(task);
	}

	return p;
}

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;
	int i;

	/*
	 * Remove all handles from the idr and release the appropriate
	 * local memory object.
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {

		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *peer_pdd = p->pdds[i];

			if (!peer_pdd->drm_priv)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
						       pdd->drm_priv, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

/*
 * Just kunmap and unpin the signal BO here.
 * It will be freed in kfd_process_free_outstanding_kfd_bos().
 */
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	struct kfd_node *kdev;
	void *mem;

	kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
	if (!kdev)
		return;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(kdev, p);
	if (!pdd)
		goto out;

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(p->signal_handle));
	if (!mem)
		goto out;

	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);

out:
	mutex_unlock(&p->mutex);
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_device_free_bos(p->pdds[i]);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
			 pdd->dev->id, p->pasid);

		kfd_process_device_destroy_cwsr_dgpu(pdd);
		kfd_process_device_destroy_ib_mem(pdd);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->adev, pdd->drm_priv);
			fput(pdd->drm_file);
		}

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		idr_destroy(&pdd->alloc_idr);

		kfd_free_process_doorbells(pdd->dev->kfd, pdd);

		if (pdd->dev->kfd->shared_resources.enable_mes)
			amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
						   pdd->proc_ctx_bo);
		/*
		 * Before destroying pdd, make sure to report availability
		 * for auto suspend.
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
		p->pdds[i] = NULL;
	}
	p->n_pdds = 0;
}

static void kfd_process_remove_sysfs(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int i;

	if (!p->kobj)
		return;

	sysfs_remove_file(p->kobj, &p->attr_pasid);
	kobject_del(p->kobj_queues);
	kobject_put(p->kobj_queues);
	p->kobj_queues = NULL;

	for (i = 0; i < p->n_pdds; i++) {
		pdd = p->pdds[i];

		sysfs_remove_file(p->kobj, &pdd->attr_vram);
		sysfs_remove_file(p->kobj, &pdd->attr_sdma);

		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			sysfs_remove_file(pdd->kobj_stats,
					  &pdd->attr_cu_occupancy);
		kobject_del(pdd->kobj_stats);
		kobject_put(pdd->kobj_stats);
		pdd->kobj_stats = NULL;
	}

	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];

		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
		kobject_del(pdd->kobj_counters);
		kobject_put(pdd->kobj_counters);
		pdd->kobj_counters = NULL;
	}

	kobject_del(p->kobj);
	kobject_put(p->kobj);
	p->kobj = NULL;
}

/* No process locking is needed in this function, because the process
 * is not findable any more.
 * We must assume that no other thread is using it any more; otherwise
 * we couldn't safely free the process structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	kfd_process_remove_sysfs(p);

	kfd_process_kunmap_signal_bo(p);
	kfd_process_free_outstanding_kfd_bos(p);
	svm_range_list_fini(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
	int idx = srcu_read_lock(&kfd_processes_srcu);
	struct kfd_process *p = find_process_by_mm(mm);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

static void kfd_process_notifier_release_internal(struct kfd_process *p)
{
	int i;

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		/* Re-enable GFX OFF, since runtime enable with ttmp setup disabled it. */
		if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
			amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
	}

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;
	kfd_dbg_trap_disable(p);

	if (atomic_read(&p->debugged_process_count) > 0) {
		struct kfd_process *target;
		unsigned int temp;
		int idx = srcu_read_lock(&kfd_processes_srcu);

		hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
			if (target->debugger_process && target->debugger_process == p) {
				mutex_lock_nested(&target->mutex, 1);
				kfd_dbg_trap_disable(target);
				mutex_unlock(&target->mutex);
				if (atomic_read(&p->debugged_process_count) == 0)
					break;
			}
		}

		srcu_read_unlock(&kfd_processes_srcu, idx);
	}

	mmu_notifier_put(&p->mmu_notifier);
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read-locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	/*
	 * Do early return if the table is empty.
	 *
	 * This could potentially happen if this function is called
	 * concurrently by mmu_notifier and by kfd_cleanup_processes.
	 */
	if (hash_empty(kfd_processes_table)) {
		mutex_unlock(&kfd_processes_mutex);
		return;
	}
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	kfd_process_notifier_release_internal(p);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.alloc_notifier = kfd_process_alloc_notifier,
	.free_notifier = kfd_process_free_notifier,
};

/*
 * This code handles the case when the driver is being unloaded before all
 * mm_structs are released. We need to safely free the kfd_process and
 * avoid race conditions with mmu_notifiers that might try to free them.
 */
void kfd_cleanup_processes(void)
{
	struct kfd_process *p;
	struct hlist_node *p_temp;
	unsigned int temp;
	HLIST_HEAD(cleanup_list);

	/*
	 * Move all remaining kfd_processes from the process table to a
	 * temp list for processing. Once done, the callback from the
	 * mmu_notifier release will not see the kfd_process in the table
	 * and will do an early return, avoiding double-free issues.
	 */
	mutex_lock(&kfd_processes_mutex);
	hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
		hash_del_rcu(&p->kfd_processes);
		synchronize_srcu(&kfd_processes_srcu);
		hlist_add_head(&p->kfd_processes, &cleanup_list);
	}
	mutex_unlock(&kfd_processes_mutex);

	hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
		kfd_process_notifier_release_internal(p);

	/*
	 * Ensures that all outstanding free_notifiers get called, triggering
	 * the release of the kfd_process struct.
	 */
	mmu_notifier_synchronize();
}

int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	int i;

	if (p->has_cwsr)
		return 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_node *dev = p->pdds[i]->dev;
		struct qcm_process_device *qpd = &p->pdds[i]->qpd;

		if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address, error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);

		kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	p->has_cwsr = true;

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_mem = mem;
	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);

	kfd_process_set_trap_debug_flag(&pdd->qpd,
					pdd->process->debug_trap_enabled);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_node *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
		return;

	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
}

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr)
{
	if (qpd->cwsr_kaddr) {
		/* KFD trap handler is bound, record as second-level TBA/TMA
		 * in first-level TMA. First-level trap will jump to second.
		 */
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		/* No trap handler bound, bind as first-level TBA/TMA. */
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}
}
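/*
 * Editor's note (layout inferred from kfd_process_set_trap_handler() above
 * and kfd_process_set_trap_debug_flag() below): the first-level TMA at
 * qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET is treated as an array of 64-bit
 * words:
 *
 *	tma[0]: second-level TBA (user trap handler entry address)
 *	tma[1]: second-level TMA (user trap handler memory address)
 *	tma[2]: trap-debug-enabled flag
 */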
bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
{
	int i;

	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
	 * boot time retry setting. Mixing processes with different
	 * XNACK/retry settings can hang the GPU.
	 *
	 * Different GPUs can have different noretry settings depending
	 * on HW bugs or limitations. We need to find at least one
	 * XNACK mode for this process that's compatible with all GPUs.
	 * Fortunately GPUs with retry enabled (noretry=0) can run code
	 * built for XNACK-off. On GFXv9 it may perform slower.
	 *
	 * Therefore applications built for XNACK-off can always be
	 * supported and will be our fallback if any GPU does not
	 * support retry.
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_node *dev = p->pdds[i]->dev;

		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
		 * support the SVM APIs and don't need to be considered
		 * for the XNACK mode selection.
		 */
		if (!KFD_IS_SOC15(dev))
			continue;
		/* Aldebaran can always support XNACK because it can support
		 * per-process XNACK mode selection. But let the dev->noretry
		 * setting still influence the default XNACK mode.
		 */
		if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
			continue;

		/* GFXv10 and later GPUs do not support shader preemption
		 * during page faults. This can lead to poor QoS for queue
		 * management and memory-manager-related preemptions or
		 * even deadlocks.
		 */
		if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
			return false;

		if (dev->kfd->noretry)
			return false;
	}

	return true;
}

void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
				     bool enabled)
{
	if (qpd->cwsr_kaddr) {
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[2] = enabled;
	}
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released.
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	struct mmu_notifier *mn;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	process->n_pdds = 0;
	process->queues_paused = false;
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	err = kfd_event_init_process(process);
	if (err)
		goto err_event_init;
	process->is_32bit_user_mode = in_compat_syscall();
	process->debug_trap_enabled = false;
	process->debugger_process = NULL;
	process->exception_enable_mask = 0;
	atomic_set(&process->debugged_process_count, 0);
	sema_init(&process->runtime_enable_sema, 0);

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0) {
		err = -ENOSPC;
		goto err_alloc_pasid;
	}

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* Init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Check XNACK support after PDDs are created in kfd_init_apertures */
	process->xnack_enabled = kfd_process_xnack_mode(process, false);

	err = svm_range_list_init(process);
	if (err)
		goto err_init_svm_range_list;

	/* alloc_notifier needs to find the process in the hash table */
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	/* Avoid free_notifier starting kfd_process_wq_release if
	 * mmu_notifier_get failed because of a pending signal.
	 */
	kref_get(&process->ref);

	/* MMU notifier registration must be the last call that can fail
	 * because after this point we cannot unwind the process creation.
	 * After this point, mmu_notifier_put will trigger the cleanup by
	 * dropping the last process reference in the free_notifier.
	 */
	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
	if (IS_ERR(mn)) {
		err = PTR_ERR(mn);
		goto err_register_notifier;
	}
	BUG_ON(mn != &process->mmu_notifier);

	kfd_unref_process(process);
	get_task_struct(process->lead_thread);

	INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);

	return process;

err_register_notifier:
	hash_del_rcu(&process->kfd_processes);
	svm_range_list_fini(process);
err_init_svm_range_list:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfd_event_free_process(process);
err_event_init:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}
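/*
 * Editor's summary of the kfd_process lifetime (derived from the functions
 * above): create_process() publishes the process in kfd_processes_table and
 * registers the MMU notifier. On process exit, the notifier's .release
 * callback removes it from the table and calls mmu_notifier_put(); the
 * free_notifier callback then drops the final reference, which queues
 * kfd_process_wq_release() to tear down queues, BOs, PDDs and the PASID.
 */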
struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
							struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i]->dev == dev)
			return p->pdds[i];

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;
	int retval = 0;

	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
		return NULL;
	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	pdd->sdma_past_activity_counter = 0;
	pdd->user_gpu_id = dev->id;
	atomic64_set(&pdd->evict_duration_counter, 0);

	if (dev->kfd->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						     AMDGPU_MES_PROC_CTX_SIZE,
						     &pdd->proc_ctx_bo,
						     &pdd->proc_ctx_gpu_addr,
						     &pdd->proc_ctx_cpu_ptr,
						     false);
		if (retval) {
			pr_err("failed to allocate process context bo\n");
			goto err_free_pdd;
		}
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	p->pdds[p->n_pdds++] = pdd;
	if (kfd_dbg_is_per_vmid_supported(pdd->dev))
		pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
					pdd->dev->adev,
					false,
					0);

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;

err_free_pdd:
	kfree(pdd);
	return NULL;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct amdgpu_fpriv *drv_priv;
	struct amdgpu_vm *avm;
	struct kfd_process *p;
	struct kfd_node *dev;
	int ret;

	if (!drm_file)
		return -EINVAL;

	if (pdd->drm_priv)
		return -EBUSY;

	ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
	if (ret)
		return ret;
	avm = &drv_priv->vm;

	p = pdd->process;
	dev = pdd->dev;

	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
						     &p->kgd_process_info,
						     &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}
	pdd->drm_priv = drm_file->private_data;
	atomic64_set(&pdd->tlb_seq, 0);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
	if (ret)
		goto err_set_pasid;

	pdd->drm_file = drm_file;

	return 0;

err_set_pasid:
	kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
	kfd_process_device_destroy_ib_mem(pdd);
err_reserve_ib_mem:
	pdd->drm_priv = NULL;
	amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!pdd->drm_priv)
		return ERR_PTR(-ENODEV);

	/*
	 * Signal the runtime-pm system to auto resume and prevent
	 * further runtime suspend once the device pdd is created,
	 * until the pdd is destroyed.
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
			return ERR_PTR(err);
		}
	}

	/*
	 * Make sure that the runtime_usage counter is incremented just
	 * once per pdd.
	 */
	pdd->runtime_inuse = true;

	return pdd;
}
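/*
 * Editor's sketch (illustrative): typical lifecycle of the per-device
 * allocation handles managed by the IDR helpers below, with the process
 * lock held by the caller:
 *
 *	int handle = kfd_process_device_create_obj_handle(pdd, mem);
 *
 *	if (handle < 0)
 *		return handle;
 *	...
 *	mem = kfd_process_device_translate_handle(pdd, handle);
 *	...
 *	kfd_process_device_remove_obj_handle(pdd, handle);
 */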
/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
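/*
 * Editor's sketch (illustrative): eviction is reference-counted per
 * process-device (see qpd->evicted, initialized in
 * kfd_create_process_device_data() above), so nested users compose safely:
 *
 *	kfd_process_evict_queues(p, trigger);	// count 0 -> 1, queues stop
 *	kfd_process_evict_queues(p, trigger);	// count 1 -> 2
 *	kfd_process_restore_queues(p);		// count 2 -> 1, still evicted
 *	kfd_process_restore_queues(p);		// count 1 -> 0, queues run again
 */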
/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
{
	int r = 0;
	int i;
	unsigned int n_evicted = 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
					     trigger);

		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		/* evict returns -EIO if HWS is hung or the ASIC is resetting.
		 * In that case we still want to set all the queues to the
		 * evicted state, to prevent them from being added back,
		 * since they have not actually been saved right now.
		 */
		if (r && r != -EIO) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues.
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (n_evicted == 0)
			break;

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	int r, ret = 0;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
			return i;
	return -EINVAL;
}

int
kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
			    uint32_t *gpuid, uint32_t *gpuidx)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && p->pdds[i]->dev == node) {
			*gpuid = p->pdds[i]->user_gpu_id;
			*gpuidx = i;
			return 0;
		}
	return -EINVAL;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid.
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves KFD BOs, it is possible to be evicted again. But
	 * restore has a few more steps to finish, so let's wait for any
	 * previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread.
	 * So during the lifetime of this thread, kfd_process p will be valid.
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before successful restoration.
	 * Otherwise this would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process can be evicted
	 * again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quanta would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions).
	 */

	p->last_restore_timestamp = get_jiffies_64();
	/* VMs may not have been acquired yet during debugging. */
	if (p->kgd_process_info)
		ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
							      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
					 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	WARN(debug_evictions, "Evicting all processes");
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		flush_delayed_work(&p->restore_work);

		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						   get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
	struct kfd_node *dev = pdd->dev;
	uint32_t xcc_mask = dev->xcc_mask;
	int xcc = 0;

	/*
	 * It can be that we race and lose here, but that is extremely unlikely
	 * and the worst thing which could happen is that we flush the changes
	 * into the TLB once more, which is harmless.
	 */
	if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
		return;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
							 pdd->qpd.vmid);
	} else {
		for_each_inst(xcc, xcc_mask)
			amdgpu_amdkfd_flush_gpu_tlb_pasid(
				dev->adev, pdd->process->pasid, type, xcc);
	}
}
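/*
 * Illustrative sketch (not compiled): the atomic64_xchg() test in
 * kfd_flush_tlb() above implements lock-free "flush only if the sequence
 * number changed" elision. demo_maybe_flush() and demo_flushed_seq are
 * hypothetical; the atomic64 API is real. Racing callers may both observe
 * a change and flush twice, which is harmless, but a needed flush is never
 * lost because the new sequence is published before the check.
 */
#if 0
static atomic64_t demo_flushed_seq = ATOMIC64_INIT(0);

static void demo_maybe_flush(u64 current_seq)
{
	/* Publish the new sequence and fetch the previously flushed one. */
	if (atomic64_xchg(&demo_flushed_seq, current_seq) == current_seq)
		return;	/* already flushed for this sequence */

	/* ... perform the expensive flush here ... */
}
#endif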
/* assumes caller holds process lock. */
int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
{
	uint32_t irq_drain_fence[8];
	uint8_t node_id = 0;
	int r = 0;

	if (!KFD_IS_SOC15(pdd->dev))
		return 0;

	pdd->process->irq_drain_is_open = true;

	memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
	irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
			     KFD_IRQ_FENCE_CLIENTID;
	irq_drain_fence[3] = pdd->process->pasid;

	/*
	 * For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
	 */
	if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3)) {
		node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
		irq_drain_fence[3] |= node_id << 16;
	}

	/* Ensure stale IRQs are processed: schedule KFD interrupt handling
	 * and send the drain fence.
	 */
	if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
						     irq_drain_fence)) {
		pdd->process->irq_drain_is_open = false;
		return 0;
	}

	r = wait_event_interruptible(pdd->process->wait_irq_drain,
				     !READ_ONCE(pdd->process->irq_drain_is_open));
	if (r)
		pdd->process->irq_drain_is_open = false;

	return r;
}

void kfd_process_close_interrupt_drain(unsigned int pasid)
{
	struct kfd_process *p;

	p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return;

	WRITE_ONCE(p->irq_drain_is_open, false);
	wake_up_all(&p->wait_irq_drain);
	kfd_unref_process(p);
}

struct send_exception_work_handler_workarea {
	struct work_struct work;
	struct kfd_process *p;
	unsigned int queue_id;
	uint64_t error_reason;
};

static void send_exception_work_handler(struct work_struct *work)
{
	struct send_exception_work_handler_workarea *workarea;
	struct kfd_process *p;
	struct queue *q;
	struct mm_struct *mm;
	struct kfd_context_save_area_header __user *csa_header;
	uint64_t __user *err_payload_ptr;
	uint64_t cur_err;
	uint32_t ev_id;

	workarea = container_of(work,
				struct send_exception_work_handler_workarea,
				work);
	p = workarea->p;

	mm = get_task_mm(p->lead_thread);

	if (!mm)
		return;

	kthread_use_mm(mm);

	q = pqm_get_user_queue(&p->pqm, workarea->queue_id);

	if (!q)
		goto out;

	csa_header = (void __user *)q->properties.ctx_save_restore_area_address;

	get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
	get_user(cur_err, err_payload_ptr);
	cur_err |= workarea->error_reason;
	put_user(cur_err, err_payload_ptr);
	get_user(ev_id, &csa_header->err_event_id);

	kfd_set_event(p, ev_id);

out:
	kthread_unuse_mm(mm);
	mmput(mm);
}

int kfd_send_exception_to_runtime(struct kfd_process *p,
				  unsigned int queue_id,
				  uint64_t error_reason)
{
	struct send_exception_work_handler_workarea worker;

	INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);

	worker.p = p;
	worker.queue_id = queue_id;
	worker.error_reason = error_reason;

	schedule_work(&worker.work);
	flush_work(&worker.work);
	destroy_work_on_stack(&worker.work);

	return 0;
}

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	if (gpu_id) {
		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *pdd = p->pdds[i];

			if (pdd->user_gpu_id == gpu_id)
				return pdd;
		}
	}
	return NULL;
}

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
{
	int i;

	if (!actual_gpu_id)
		return 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (pdd->dev->id == actual_gpu_id)
			return pdd->user_gpu_id;
	}
	return -EINVAL;
}
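/*
 * Illustrative sketch (not compiled): kfd_send_exception_to_runtime() above
 * uses the standard on-stack work item pattern to run a handler in workqueue
 * context and wait for it synchronously. The demo_* names are hypothetical;
 * INIT_WORK_ONSTACK()/schedule_work()/flush_work()/destroy_work_on_stack()
 * are the real API. The stack lifetime is safe because flush_work() returns
 * only after the handler has finished.
 */
#if 0
struct demo_work_ctx {
	struct work_struct work;
	int arg;
	int result;
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_work_ctx *ctx = container_of(work, struct demo_work_ctx,
						 work);

	ctx->result = ctx->arg * 2;	/* stand-in for the real handler */
}

static int demo_run_sync(int arg)
{
	struct demo_work_ctx ctx = { .arg = arg };

	INIT_WORK_ONSTACK(&ctx.work, demo_work_fn);
	schedule_work(&ctx.work);
	flush_work(&ctx.work);		/* wait before the stack frame dies */
	destroy_work_on_stack(&ctx.work);

	return ctx.result;
}
#endif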
#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif
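/*
 * Illustrative sketch (not compiled): kfd_debugfs_mqds_by_process() above
 * walks kfd_processes_table under an SRCU read lock, taking each process
 * mutex only for the per-entry work. The demo below shows the same
 * read-side pattern with hypothetical names; DEFINE_HASHTABLE(),
 * DEFINE_SRCU(), srcu_read_lock()/srcu_read_unlock() and
 * hash_for_each_rcu() are the real API.
 */
#if 0
DEFINE_HASHTABLE(demo_table, 4);
DEFINE_SRCU(demo_srcu);

struct demo_entry {
	struct hlist_node node;
	int value;
};

static void demo_walk(void)
{
	struct demo_entry *e;
	unsigned int bkt;
	int idx = srcu_read_lock(&demo_srcu);

	/* Entries may be freed only after an SRCU grace period elapses. */
	hash_for_each_rcu(demo_table, bkt, e, node)
		pr_info("entry value %d\n", e->value);

	srcu_read_unlock(&demo_srcu, idx);
}
#endif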