// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "kfd_svm.h"
#include "kfd_smi_events.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);

struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

/*
 * Structure for SDMA activity tracking
 */
struct kfd_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;
	struct kfd_process_device *pdd;
	uint64_t sdma_activity_counter;
};

struct temp_sdma_queue_list {
	uint64_t __user *rptr;
	uint64_t sdma_val;
	unsigned int queue_id;
	struct list_head list;
};

static void kfd_sdma_activity_worker(struct work_struct *work)
{
	struct kfd_sdma_activity_handler_workarea *workarea;
	struct kfd_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
				sdma_activity_work);

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;
	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity.
	 * The past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and read their counts from user space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from qpd->queues_list,
	 *    under dqm_lock()/dqm_unlock().
	 * 2. Call get_user() for each node in the temporary list without dqm_lock.
	 *    Save the SDMA count for each node and also add the count to the
	 *    total SDMA count. It's possible that during this step a few SDMA
	 *    queue nodes were deleted from qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes got
	 *    deleted. If a node was deleted, its SDMA count has already been
	 *    folded into the past activity counter, so subtract the count saved
	 *    in step 2 for that node from the total SDMA count.
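	 *
	 * Worked example (illustrative numbers only): if the past activity
	 * counter is 100, queue A reads 10 and queue B reads 20 in step 2,
	 * and queue B is then destroyed (folding its count of 20 into the
	 * past counter, now 120), step 3 reports 10 + 20 + 120 - 20 = 130,
	 * i.e. exactly the activity of A plus B plus the old history.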
	 */
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
	 * qpd->queues_list. Return the past activity count as the total SDMA
	 * count.
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
	 * nodes got deleted while fetching the SDMA counters.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
			continue;

		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			    (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If the temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during the SDMA usage read. Subtract the SDMA
	 * count for each such node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}

/**
 * kfd_get_cu_occupancy - Collect number of waves in flight on this device
 * by the current process. Translates the acquired wave count into the number
 * of compute units that are occupied.
 *
 * @attr: Handle of attribute that allows reporting of wave count. The attribute
 * handle encapsulates the GPU device it is associated with, thereby allowing
 * collection of waves in flight, etc.
 * @buffer: Handle of user provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct kfd_dev *dev = NULL;
	struct kfd_process *proc = NULL;
	struct kfd_process_device *pdd = NULL;

	pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	if (dev->kfd2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from the device if it supports this */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
				       &max_waves_per_cu);

	/* Translate wave count to number of compute units (rounding up) */
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}

static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_vram);
		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
							      attr_sdma);
		struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
			  kfd_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter) /
				SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};

void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}

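/*
 * Illustrative usage (not part of the driver; the mount point depends on
 * how the KFD class device is exposed, typically /sys/class/kfd/kfd): the
 * per-process nodes created by the functions above and below can be read
 * from user space, e.g.
 *
 *   $ cat /sys/class/kfd/kfd/proc/<pid>/pasid
 *   $ cat /sys/class/kfd/kfd/proc/<pid>/vram_<gpuid>
 *   $ cat /sys/class/kfd/kfd/proc/<pid>/queues/<queue id>/size
 *   $ cat /sys/class/kfd/kfd/proc/<pid>/stats_<gpuid>/cu_occupancy
 */
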
static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}

static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct kfd_process_device *pdd = container_of(attr,
							      struct kfd_process_device,
							      attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return kfd_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute");
	}

	return 0;
}

static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct kfd_process_device *pdd;

	if (!strcmp(attr->name, "faults")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_faults);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
	}
	if (!strcmp(attr->name, "page_in")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_in);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
	}
	if (!strcmp(attr->name, "page_out")) {
		pdd = container_of(attr, struct kfd_process_device,
				   attr_page_out);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
	}
	return 0;
}

static struct attribute attr_queue_size = {
	.name = "size",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = KFD_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};
ATTRIBUTE_GROUPS(procfs_queue);

static const struct sysfs_ops procfs_queue_ops = {
	.show = kfd_procfs_queue_show,
};

static struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_groups = procfs_queue_groups,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = kfd_procfs_stats_show,
};

static struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.release = kfd_procfs_kobj_release,
};

static const struct sysfs_ops sysfs_counters_ops = {
	.show = kfd_sysfs_counters_show,
};

static struct kobj_type sysfs_counters_type = {
	.sysfs_ops = &sysfs_counters_ops,
	.release = kfd_procfs_kobj_release,
};

int kfd_procfs_add_queue(struct queue *q)
{
	struct kfd_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
				   proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}

static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
				  char *name)
{
	int ret;

	if (!kobj || !attr || !name)
		return;

	attr->name = name;
	attr->mode = KFD_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);

	ret = sysfs_create_file(kobj, attr);
	if (ret)
		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}

static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
	int ret;
	int i;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "stats_%u", pdd->dev->id);
		pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
		if (!pdd->kobj_stats)
			return;

		ret = kobject_init_and_add(pdd->kobj_stats,
					   &procfs_stats_type,
					   p->kobj,
					   stats_dir_filename);

		if (ret) {
			pr_warn("Creating KFD proc/stats_%s folder failed",
				stats_dir_filename);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
			return;
		}

		kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
				      "evicted_ms");
		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			kfd_sysfs_create_file(pdd->kobj_stats,
					      &pdd->attr_cu_occupancy,
					      "cu_occupancy");
	}
}

static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
	int ret = 0;
	int i;
	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU which supports SVM
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		struct kfd_process_device *pdd = p->pdds[i];
		struct kobject *kobj_counters;

		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
			 "counters_%u", pdd->dev->id);
		kobj_counters = kfd_alloc_struct(kobj_counters);
		if (!kobj_counters)
			return;

		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
					   p->kobj, counters_dir_filename);
		if (ret) {
			pr_warn("Creating KFD proc/%s folder failed",
				counters_dir_filename);
			kobject_put(kobj_counters);
			return;
		}

		pdd->kobj_counters = kobj_counters;
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
				      "faults");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
				      "page_in");
		kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
				      "page_out");
	}
}

static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
	int i;

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
				      pdd->vram_filename);

		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
				      pdd->sdma_filename);
	}
}

void kfd_procfs_del_queue(struct queue *q)
{
	if (!q)
		return;

	kobject_del(&q->kobj);
	kobject_put(&q->kobj);
}

int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

static void kfd_process_free_gpuvm(struct kgd_mem *mem,
				   struct kfd_process_device *pdd, void **kptr)
{
	struct kfd_dev *dev = pdd->dev;

	if (kptr && *kptr) {
		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
		*kptr = NULL;
	}

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
					       NULL);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 * This function should only be called right after the process
 * is created and while kfd_processes_mutex is still being held,
 * to avoid concurrency. Because of that exclusiveness, we do
 * not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, struct kgd_mem **mem, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
						      pdd->drm_priv, mem, NULL,
						      flags, false);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
						    pdd->drm_priv);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
				(struct kgd_mem *)*mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto sync_memory_failed;
		}
	}

	return err;

sync_memory_failed:
	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
					       NULL);
err_alloc_mem:
	*mem = NULL;
	*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 * process for IB usage. The memory reserved is for KFD to submit
 * IBs to AMDGPU from kernel. If the memory is reserved
 * successfully, ib_kaddr will have the CPU/kernel
 * address. Check ib_kaddr before accessing the memory.
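 * The reservation is a single page of GTT memory mapped at qpd->ib_base
 * in the process GPU VM and kept mapped in the kernel via ib_kaddr.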
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
			 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
			 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &mem, &kaddr);
	if (ret)
		return ret;

	qpd->ib_mem = mem;
	qpd->ib_kaddr = kaddr;

	return 0;
}

static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!qpd->ib_kaddr || !qpd->ib_base)
		return;

	kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
}

struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd_processes_mutex before starting process creation so
	 * that two threads of the same process cannot create two kfd_process
	 * structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread, false);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret)
			goto out_destroy;

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			kobject_put(process->kobj);
			goto out;
		}

		kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
				      "pasid");

		process->kobj_queues = kobject_create_and_add("queues",
							      process->kobj);
		if (!process->kobj_queues)
			pr_warn("Creating KFD proc/queues folder failed");

		kfd_procfs_add_sysfs_stats(process);
		kfd_procfs_add_sysfs_files(process);
		kfd_procfs_add_sysfs_counters(process);
	}
out:
	if (!IS_ERR(process))
		kref_get(&process->ref);
	mutex_unlock(&kfd_processes_mutex);

	return process;

out_destroy:
	hash_del_rcu(&process->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);
	/* kfd_process_free_notifier will trigger the cleanup */
	mmu_notifier_put(&process->mmu_notifier);
	return ERR_PTR(ret);
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported.
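	 * All threads of a process share the group leader's mm, so a thread
	 * whose mm differs from its group leader's indicates an unsupported
	 * threading model.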
	 */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread, false);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread,
					bool ref)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	if (p && ref)
		kref_get(&p->ref);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
{
	struct task_struct *task = NULL;
	struct kfd_process *p = NULL;

	if (!pid) {
		task = current;
		get_task_struct(task);
	} else {
		task = get_pid_task(pid, PIDTYPE_PID);
	}

	if (task) {
		p = find_process(task, true);
		put_task_struct(task);
	}

	return p;
}

static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;
	int i;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {

		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *peer_pdd = p->pdds[i];

			if (!peer_pdd->drm_priv)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
						       pdd->drm_priv, NULL);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

/*
 * Just kunmap and unpin signal BO here. It will be freed in
 * kfd_process_free_outstanding_kfd_bos().
 */
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	struct kfd_dev *kdev;
	void *mem;

	kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
	if (!kdev)
		return;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(kdev, p);
	if (!pdd)
		goto out;

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(p->signal_handle));
	if (!mem)
		goto out;

	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);

out:
	mutex_unlock(&p->mutex);
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_device_free_bos(p->pdds[i]);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
			 pdd->dev->id, p->pasid);

		kfd_process_device_destroy_cwsr_dgpu(pdd);
		kfd_process_device_destroy_ib_mem(pdd);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
				pdd->dev->adev, pdd->drm_priv);
			fput(pdd->drm_file);
		}

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				   get_order(KFD_CWSR_TBA_TMA_SIZE));

		bitmap_free(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);

		if (pdd->dev->shared_resources.enable_mes)
			amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
						   pdd->proc_ctx_bo);
		/*
		 * before destroying pdd, make sure to report availability
		 * for auto suspend
		 */
		if (pdd->runtime_inuse) {
			pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
			pdd->runtime_inuse = false;
		}

		kfree(pdd);
		p->pdds[i] = NULL;
	}
	p->n_pdds = 0;
}

static void kfd_process_remove_sysfs(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int i;

	if (!p->kobj)
		return;

	sysfs_remove_file(p->kobj, &p->attr_pasid);
	kobject_del(p->kobj_queues);
	kobject_put(p->kobj_queues);
	p->kobj_queues = NULL;

	for (i = 0; i < p->n_pdds; i++) {
		pdd = p->pdds[i];

		sysfs_remove_file(p->kobj, &pdd->attr_vram);
		sysfs_remove_file(p->kobj, &pdd->attr_sdma);

		sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
		if (pdd->dev->kfd2kgd->get_cu_occupancy)
			sysfs_remove_file(pdd->kobj_stats,
					  &pdd->attr_cu_occupancy);
		kobject_del(pdd->kobj_stats);
		kobject_put(pdd->kobj_stats);
		pdd->kobj_stats = NULL;
	}

	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		pdd = p->pdds[i];

		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
		sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
		kobject_del(pdd->kobj_counters);
		kobject_put(pdd->kobj_counters);
		pdd->kobj_counters = NULL;
	}

	kobject_del(p->kobj);
	kobject_put(p->kobj);
	p->kobj = NULL;
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	kfd_process_remove_sysfs(p);
	kfd_iommu_unbind_process(p);

	kfd_process_kunmap_signal_bo(p);
	kfd_process_free_outstanding_kfd_bos(p);
	svm_range_list_fini(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
	int idx = srcu_read_lock(&kfd_processes_srcu);
	struct kfd_process *p = find_process_by_mm(mm);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mmu_notifier_put(&p->mmu_notifier);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.alloc_notifier = kfd_process_alloc_notifier,
	.free_notifier = kfd_process_free_notifier,
};

static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;
		struct qcm_process_device *qpd = &p->pdds[i]->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
			| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
			| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
	struct kgd_mem *mem;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_mem = mem;
	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;

	if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
		return;

	kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
}

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr)
{
	if (qpd->cwsr_kaddr) {
		/* KFD trap handler is bound, record as second-level TBA/TMA
		 * in first-level TMA. First-level trap will jump to second.
		 */
		uint64_t *tma =
			(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		/* No trap handler bound, bind as first-level TBA/TMA. */
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}
}

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
{
	int i;

	/* On most GFXv9 GPUs, the retry mode in the SQ must match the
	 * boot time retry setting. Mixing processes with different
	 * XNACK/retry settings can hang the GPU.
	 *
	 * Different GPUs can have different noretry settings depending
	 * on HW bugs or limitations. We need to find at least one
	 * XNACK mode for this process that's compatible with all GPUs.
	 * Fortunately GPUs with retry enabled (noretry=0) can run code
	 * built for XNACK-off. On GFXv9 it may perform slower.
	 *
	 * Therefore applications built for XNACK-off can always be
	 * supported and will be our fallback if any GPU does not
	 * support retry.
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_dev *dev = p->pdds[i]->dev;

		/* Only consider GFXv9 and higher GPUs. Older GPUs don't
		 * support the SVM APIs and don't need to be considered
		 * for the XNACK mode selection.
		 */
		if (!KFD_IS_SOC15(dev))
			continue;
		/* Aldebaran can always support XNACK because it can support
		 * per-process XNACK mode selection.
		 * But let the dev->noretry setting still influence the
		 * default XNACK mode.
		 */
		if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
			continue;

		/* GFXv10 and later GPUs do not support shader preemption
		 * during page faults. This can lead to poor QoS for queue
		 * management and memory-manager-related preemptions or
		 * even deadlocks.
		 */
		if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
			return false;

		if (dev->noretry)
			return false;
	}

	return true;
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	struct mmu_notifier *mn;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	process->n_pdds = 0;
	process->queues_paused = false;
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	err = kfd_event_init_process(process);
	if (err)
		goto err_event_init;
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0) {
		err = -ENOSPC;
		goto err_alloc_pasid;
	}

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Check XNACK support after PDDs are created in kfd_init_apertures */
	process->xnack_enabled = kfd_process_xnack_mode(process, false);

	err = svm_range_list_init(process);
	if (err)
		goto err_init_svm_range_list;

	/* alloc_notifier needs to find the process in the hash table */
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
		     (uintptr_t)process->mm);

	/* Prevent free_notifier from starting kfd_process_wq_release if
	 * mmu_notifier_get fails because of a pending signal.
	 */
	kref_get(&process->ref);

	/* MMU notifier registration must be the last call that can fail
	 * because after this point we cannot unwind the process creation.
	 * After this point, mmu_notifier_put will trigger the cleanup by
	 * dropping the last process reference in the free_notifier.
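	 * On the success path the temporary reference taken above is
	 * dropped again via kfd_unref_process() right after registration.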
	 */
	mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
	if (IS_ERR(mn)) {
		err = PTR_ERR(mn);
		goto err_register_notifier;
	}
	BUG_ON(mn != &process->mmu_notifier);

	kfd_unref_process(process);
	get_task_struct(process->lead_thread);

	return process;

err_register_notifier:
	hash_del_rcu(&process->kfd_processes);
	svm_range_list_fini(process);
err_init_svm_range_list:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfd_event_free_process(process);
err_event_init:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

static int init_doorbell_bitmap(struct qcm_process_device *qpd,
				struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;

	if (!KFD_IS_SOC15(dev))
		return 0;

	qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					     GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
		 range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
		 range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= range_start && i <= range_end) {
			__set_bit(i, qpd->doorbell_bitmap);
			__set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				  qpd->doorbell_bitmap);
		}
	}

	return 0;
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i]->dev == dev)
			return p->pdds[i];

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;
	int retval = 0;

	if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
		return NULL;
	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		goto err_free_pdd;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->qpd.mapped_gws_queue = false;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	pdd->runtime_inuse = false;
	pdd->vram_usage = 0;
	pdd->sdma_past_activity_counter = 0;
	pdd->user_gpu_id = dev->id;
	atomic64_set(&pdd->evict_duration_counter, 0);

	if (dev->shared_resources.enable_mes) {
		retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
						     AMDGPU_MES_PROC_CTX_SIZE,
						     &pdd->proc_ctx_bo,
						     &pdd->proc_ctx_gpu_addr,
						     &pdd->proc_ctx_cpu_ptr,
						     false);
		if (retval) {
			pr_err("failed to allocate process context bo\n");
			goto err_free_pdd;
		}

		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	p->pdds[p->n_pdds++] = pdd;

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;

err_free_pdd:
	kfree(pdd);
	return NULL;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Pointer to a DRM file descriptor
 *
 * The @drm_file is used to acquire the VM from that file descriptor.
 * If successful, the @pdd takes ownership of the file descriptor.
 *
 * A valid @drm_file is required; if it is NULL, -EINVAL is returned.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (!drm_file)
		return -EINVAL;

	if (pdd->drm_priv)
		return -EBUSY;

	p = pdd->process;
	dev = pdd->dev;

	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file,
						     &p->kgd_process_info,
						     &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}
	pdd->drm_priv = drm_file->private_data;
	atomic64_set(&pdd->tlb_seq, 0);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid);
	if (ret)
		goto err_set_pasid;

	pdd->drm_file = drm_file;

	return 0;

err_set_pasid:
	kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
	kfd_process_device_destroy_ib_mem(pdd);
err_reserve_ib_mem:
	pdd->drm_priv = NULL;

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!pdd->drm_priv)
		return ERR_PTR(-ENODEV);

	/*
	 * signal runtime-pm system to auto resume and prevent
	 * further runtime suspend once device pdd is created until
	 * pdd is destroyed.
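	 * The matching pm_runtime_mark_last_busy()/pm_runtime_put_autosuspend()
	 * pair is issued in kfd_process_destroy_pdds() when the pdd is torn
	 * down.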
	 */
	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
		if (err < 0) {
			pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
			return ERR_PTR(err);
		}
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		goto out;

	/*
	 * make sure that runtime_usage counter is incremented just once
	 * per pdd
	 */
	pdd->runtime_inuse = true;

	return pdd;

out:
	/* balance runpm reference count and exit with error */
	if (!pdd->runtime_inuse) {
		pm_runtime_mark_last_busy(adev_to_drm(dev->adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
	}

	return ERR_PTR(err);
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
{
	int r = 0;
	int i;
	unsigned int n_evicted = 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
					     trigger);

		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		/* Eviction returns -EIO if HWS is hung or the ASIC is
		 * resetting. In this case we want to leave all queues in the
		 * evicted state to prevent them from being added back, since
		 * their state was not actually saved.
		 */
		if (r && r != -EIO) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (n_evicted == 0)
			break;

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	int r, ret = 0;
	int i;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);

		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
			return i;
	return -EINVAL;
}

int
kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
			    uint32_t *gpuid, uint32_t *gpuidx)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
			*gpuid = p->pdds[i]->user_gpu_id;
			*gpuidx = i;
			return 0;
		}
	return -EINVAL;
}

static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between the restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves the KFD BOs, the process can be evicted again even
	 * though the restore still has a few steps left to finish. So wait
	 * for any previous restore work to complete first.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				   msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread.
	 * So during the lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Setting last_restore_timestamp before successful restoration.
	 * Otherwise this would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process can be evicted
	 * again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quanta would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions)
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
					 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	WARN(debug_evictions, "Evicting all processes");
	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						   get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
	struct kfd_dev *dev = pdd->dev;

	/*
	 * It can be that we race and lose here, but that is extremely unlikely
	 * and the worst thing which could happen is that we flush the changes
	 * into the TLB once more which is harmless.
	 */
	if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
		return;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
							 pdd->qpd.vmid);
	} else {
		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev,
						  pdd->process->pasid, type);
	}
}

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
{
	int i;

	if (gpu_id) {
		for (i = 0; i < p->n_pdds; i++) {
			struct kfd_process_device *pdd = p->pdds[i];

			if (pdd->user_gpu_id == gpu_id)
				return pdd;
		}
	}
	return NULL;
}

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
{
	int i;

	if (!actual_gpu_id)
		return 0;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		if (pdd->dev->id == actual_gpu_id)
			return pdd->user_gpu_id;
	}
	return -EINVAL;
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif