// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_svm.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "amdgpu_dma_buf.h"
#include "kfd_debug.h"

static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_release(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";

static const struct file_operations kfd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = kfd_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = kfd_open,
	.release = kfd_release,
	.mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
static struct class *kfd_class;
struct device *kfd_device;

static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
{
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, gpu_id);

	if (pdd)
		return pdd;

	mutex_unlock(&p->mutex);
	return NULL;
}

static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
{
	mutex_unlock(&pdd->process->mutex);
}

int kfd_chardev_init(void)
{
	int err = 0;

	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
	err = kfd_char_dev_major;
	if (err < 0)
		goto err_register_chrdev;

	kfd_class = class_create(kfd_dev_name);
	err = PTR_ERR(kfd_class);
	if (IS_ERR(kfd_class))
		goto err_class_create;

	kfd_device = device_create(kfd_class, NULL,
				   MKDEV(kfd_char_dev_major, 0),
				   NULL, kfd_dev_name);
	err = PTR_ERR(kfd_device);
	if (IS_ERR(kfd_device))
		goto err_device_create;

	return 0;

err_device_create:
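	/* Unwind in reverse order of the setup steps that already succeeded */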
	class_destroy(kfd_class);
err_class_create:
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
	return err;
}

void kfd_chardev_exit(void)
{
	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
	class_destroy(kfd_class);
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
	kfd_device = NULL;
}


static int kfd_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process;
	bool is_32bit_user_mode;

	if (iminor(inode) != 0)
		return -ENODEV;

	is_32bit_user_mode = in_compat_syscall();

	if (is_32bit_user_mode) {
		dev_warn(kfd_device,
			"Process %d (32-bit) failed to open /dev/kfd\n"
			"32-bit processes are not supported by amdkfd\n",
			current->pid);
		return -EPERM;
	}

	process = kfd_create_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	if (kfd_process_init_cwsr_apu(process, filep)) {
		kfd_unref_process(process);
		return -EFAULT;
	}

	/* filep now owns the reference returned by kfd_create_process */
	filep->private_data = process;

	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
		process->pasid, process->is_32bit_user_mode);

	return 0;
}

static int kfd_release(struct inode *inode, struct file *filep)
{
	struct kfd_process *process = filep->private_data;

	if (process)
		kfd_unref_process(process);

	return 0;
}

static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_get_version_args *args = data;

	args->major_version = KFD_IOCTL_MAJOR_VERSION;
	args->minor_version = KFD_IOCTL_MINOR_VERSION;

	return 0;
}

static int set_queue_properties_from_user(struct queue_properties *q_properties,
				struct kfd_ioctl_create_queue_args *args)
{
	/*
	 * Repurpose queue percentage to accommodate new features:
	 * bit 0-7: queue percentage
	 * bit 8-15: pm4_target_xcc
	 */
	if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok((const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	if (!access_ok((const void __user *) args->read_pointer_address,
			sizeof(uint32_t))) {
		pr_err("Can't access read pointer\n");
		return -EFAULT;
	}

	if (!access_ok((const void __user *) args->write_pointer_address,
			sizeof(uint32_t))) {
		pr_err("Can't access write pointer\n");
		return -EFAULT;
	}

	if (args->eop_buffer_address &&
		!access_ok((const void __user *) args->eop_buffer_address,
			sizeof(uint32_t))) {
		pr_debug("Can't access eop buffer");
		return -EFAULT;
	}

	if (args->ctx_save_restore_address &&
		!access_ok((const void __user *) args->ctx_save_restore_address,
			sizeof(uint32_t))) {
		pr_debug("Can't access ctx save restore buffer");
		return -EFAULT;
	}

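	/* All user-supplied addresses and sizes validated above; fill in the queue properties */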
	q_properties->is_interop = false;
	q_properties->is_gws = false;
	q_properties->queue_percent = args->queue_percentage & 0xFF;
	/* bit 8-15 are repurposed to be PM4 target XCC */
	q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
	q_properties->priority = args->queue_priority;
	q_properties->queue_address = args->ring_base_address;
	q_properties->queue_size = args->ring_size;
	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
	q_properties->ctx_save_restore_area_address =
			args->ctx_save_restore_address;
	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
	q_properties->ctl_stack_size = args->ctl_stack_size;
	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
		q_properties->type = KFD_QUEUE_TYPE_SDMA;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
		q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
	else
		return -ENOTSUPP;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->format = KFD_QUEUE_FORMAT_AQL;
	else
		q_properties->format = KFD_QUEUE_FORMAT_PM4;

	pr_debug("Queue Percentage: %d, %d\n",
			q_properties->queue_percent, args->queue_percentage);

	pr_debug("Queue Priority: %d, %d\n",
			q_properties->priority, args->queue_priority);

	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
			q_properties->queue_address, args->ring_base_address);

	pr_debug("Queue Size: 0x%llX, %u\n",
			q_properties->queue_size, args->ring_size);

	pr_debug("Queue r/w Pointers: %px, %px\n",
			q_properties->read_ptr,
			q_properties->write_ptr);

	pr_debug("Queue Format: %d\n", q_properties->format);

	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);

	pr_debug("Queue CTX save area: 0x%llX\n",
			q_properties->ctx_save_restore_area_address);

	return 0;
}

static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_queue_args *args = data;
	struct kfd_node *dev;
	int err = 0;
	unsigned int queue_id;
	struct kfd_process_device *pdd;
	struct queue_properties q_properties;
	uint32_t doorbell_offset_in_process = 0;
	struct amdgpu_bo *wptr_bo = NULL;

	memset(&q_properties, 0, sizeof(struct queue_properties));

	pr_debug("Creating queue ioctl\n");

	err = set_queue_properties_from_user(&q_properties, args);
	if (err)
		return err;

	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);

	mutex_lock(&p->mutex);

	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
		err = -EINVAL;
		goto err_pdd;
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto err_bind_process;
	}

	if (!pdd->qpd.proc_doorbells) {
		err = kfd_alloc_process_doorbells(dev->kfd, pdd);
		if (err) {
			pr_debug("failed to allocate process doorbells\n");
			goto err_bind_process;
		}
	}

	/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
	 * on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
	 */
	if (dev->kfd->shared_resources.enable_mes &&
			((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
			>> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
		struct amdgpu_bo_va_mapping *wptr_mapping;
		struct amdgpu_vm *wptr_vm;

		wptr_vm = drm_priv_to_vm(pdd->drm_priv);
		err = amdgpu_bo_reserve(wptr_vm->root.bo, false);
		if (err)
			goto err_wptr_map_gart;

		wptr_mapping = amdgpu_vm_bo_lookup_mapping(
				wptr_vm, args->write_pointer_address >> PAGE_SHIFT);
		amdgpu_bo_unreserve(wptr_vm->root.bo);
		if (!wptr_mapping) {
			pr_err("Failed to lookup wptr bo\n");
			err = -EINVAL;
			goto err_wptr_map_gart;
		}

		wptr_bo = wptr_mapping->bo_va->base.bo;
		if (wptr_bo->tbo.base.size > PAGE_SIZE) {
			pr_err("Requested GART mapping for wptr bo larger than one page\n");
			err = -EINVAL;
			goto err_wptr_map_gart;
		}

		err = amdgpu_amdkfd_map_gtt_bo_to_gart(dev->adev, wptr_bo);
		if (err) {
			pr_err("Failed to map wptr bo to GART\n");
			goto err_wptr_map_gart;
		}
	}

	pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
			p->pasid,
			dev->id);

	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo,
			NULL, NULL, NULL, &doorbell_offset_in_process);
	if (err != 0)
		goto err_create_queue;

	args->queue_id = queue_id;


	/* Return gpu_id as doorbell offset for mmap usage */
	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
	if (KFD_IS_SOC15(dev))
		/* On SOC15 ASICs, include the doorbell offset within the
		 * process doorbell frame, which is 2 pages.
		 */
		args->doorbell_offset |= doorbell_offset_in_process;

	mutex_unlock(&p->mutex);

	pr_debug("Queue id %d was created successfully\n", args->queue_id);

	pr_debug("Ring buffer address == 0x%016llX\n",
			args->ring_base_address);

	pr_debug("Read ptr address == 0x%016llX\n",
			args->read_pointer_address);

	pr_debug("Write ptr address == 0x%016llX\n",
			args->write_pointer_address);

	kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0);
	return 0;

err_create_queue:
	if (wptr_bo)
		amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&wptr_bo);
err_wptr_map_gart:
err_bind_process:
err_pdd:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_destroy_queue_args *args = data;

	pr_debug("Destroying queue id %d for pasid 0x%x\n",
				args->queue_id,
				p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_destroy_queue(&p->pqm, args->queue_id);

	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_update_queue_args *args = data;
	struct queue_properties properties;

	/*
	 * Repurpose queue percentage to accommodate new features:
	 * bit 0-7: queue percentage
	 * bit 8-15: pm4_target_xcc
	 */
	if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok((const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	properties.queue_address = args->ring_base_address;
	properties.queue_size = args->ring_size;
	properties.queue_percent = args->queue_percentage & 0xFF;
	/* bit 8-15 are repurposed to be PM4 target XCC */
	properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
	properties.priority = args->queue_priority;

	pr_debug("Updating queue id %d for pasid 0x%x\n",
			args->queue_id, p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	return retval;
}

static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	const int max_num_cus = 1024;
	struct kfd_ioctl_set_cu_mask_args *args = data;
	struct mqd_update_info minfo = {0};
	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);

	if ((args->num_cu_mask % 32) != 0) {
		pr_debug("num_cu_mask 0x%x must be a multiple of 32",
				args->num_cu_mask);
		return -EINVAL;
	}

	minfo.cu_mask.count = args->num_cu_mask;
	if (minfo.cu_mask.count == 0) {
		pr_debug("CU mask cannot be 0");
		return -EINVAL;
	}

	/* To prevent an unreasonably large CU mask size, set an arbitrary
	 * limit of max_num_cus bits. We can then just drop any CU mask bits
	 * past max_num_cus bits and just use the first max_num_cus bits.
	 */
	if (minfo.cu_mask.count > max_num_cus) {
		pr_debug("CU mask cannot be greater than 1024 bits");
		minfo.cu_mask.count = max_num_cus;
		cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
	}

	minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
	if (!minfo.cu_mask.ptr)
		return -ENOMEM;

	retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
	if (retval) {
		pr_debug("Could not copy CU mask from userspace");
		retval = -EFAULT;
		goto out;
	}

	mutex_lock(&p->mutex);

	retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);

	mutex_unlock(&p->mutex);

out:
	kfree(minfo.cu_mask.ptr);
	return retval;
}

static int kfd_ioctl_get_queue_wave_state(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_queue_wave_state_args *args = data;
	int r;

	mutex_lock(&p->mutex);

	r = pqm_get_wave_state(&p->pqm, args->queue_id,
			       (void __user *)args->ctl_stack_address,
			       &args->ctl_stack_used_size,
			       &args->save_area_used_size);

	mutex_unlock(&p->mutex);

	return r;
}

static int kfd_ioctl_set_memory_policy(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_memory_policy_args *args = data;
	int err = 0;
	struct kfd_process_device *pdd;
	enum cache_policy default_policy, alternate_policy;

	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
		err = -EINVAL;
		goto err_pdd;
	}

	pdd = kfd_bind_process_to_device(pdd->dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
			 ? cache_policy_coherent : cache_policy_noncoherent;

	alternate_policy =
		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		   ? cache_policy_coherent : cache_policy_noncoherent;

	if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
				&pdd->qpd,
				default_policy,
				alternate_policy,
				(void __user *)args->alternate_aperture_base,
				args->alternate_aperture_size))
		err = -EINVAL;

out:
err_pdd:
	mutex_unlock(&p->mutex);

	return err;
}

static int kfd_ioctl_set_trap_handler(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_trap_handler_args *args = data;
	int err = 0;
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);

	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;
		goto err_pdd;
	}

	pdd = kfd_bind_process_to_device(pdd->dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);

out:
err_pdd:
	mutex_unlock(&p->mutex);

	return err;
}

static int kfd_ioctl_dbg_register(struct file *filep,
				struct kfd_process *p, void *data)
{
	return -EPERM;
}

static int kfd_ioctl_dbg_unregister(struct file *filep,
				struct kfd_process *p, void *data)
{
	return -EPERM;
}

static int kfd_ioctl_dbg_address_watch(struct file *filep,
					struct kfd_process *p, void *data)
{
	return -EPERM;
}

/* Parse and generate fixed size data structure for wave control */
static int kfd_ioctl_dbg_wave_control(struct file *filep,
					struct kfd_process *p, void *data)
{
	return -EPERM;
}

static int kfd_ioctl_get_clock_counters(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_clock_counters_args *args = data;
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	mutex_unlock(&p->mutex);
	if (pdd)
		/* Reading GPU clock counter from KGD */
		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
	else
		/* Node without GPU resource */
		args->gpu_clock_counter = 0;

	/* No access to rdtsc. Using raw monotonic time */
	args->cpu_clock_counter = ktime_get_raw_ns();
	args->system_clock_counter = ktime_get_boottime_ns();

	/* Since the counter is in nano-seconds we use 1GHz frequency */
	args->system_clock_freq = 1000000000;

	return 0;
}


static int kfd_ioctl_get_process_apertures(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_args *args = data;
	struct kfd_process_device_apertures *pAperture;
	int i;

	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);

	args->num_of_nodes = 0;

	mutex_lock(&p->mutex);
	/* Run over all pdd of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pAperture =
			&args->process_apertures[args->num_of_nodes];
		pAperture->gpu_id = pdd->dev->id;
		pAperture->lds_base = pdd->lds_base;
		pAperture->lds_limit = pdd->lds_limit;
		pAperture->gpuvm_base = pdd->gpuvm_base;
		pAperture->gpuvm_limit = pdd->gpuvm_limit;
		pAperture->scratch_base = pdd->scratch_base;
		pAperture->scratch_limit = pdd->scratch_limit;

		dev_dbg(kfd_device,
			"node id %u\n", args->num_of_nodes);
		dev_dbg(kfd_device,
			"gpu id %u\n", pdd->dev->id);
		dev_dbg(kfd_device,
			"lds_base %llX\n", pdd->lds_base);
		dev_dbg(kfd_device,
			"lds_limit %llX\n", pdd->lds_limit);
		dev_dbg(kfd_device,
			"gpuvm_base %llX\n", pdd->gpuvm_base);
		dev_dbg(kfd_device,
			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
		dev_dbg(kfd_device,
			"scratch_base %llX\n", pdd->scratch_base);
		dev_dbg(kfd_device,
			"scratch_limit %llX\n", pdd->scratch_limit);

		if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
			break;
	}
	mutex_unlock(&p->mutex);

	return 0;
}

static int kfd_ioctl_get_process_apertures_new(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_new_args *args = data;
	struct kfd_process_device_apertures *pa;
	int ret;
	int i;

	dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);

	if (args->num_of_nodes == 0) {
		/* Return number of nodes, so that user space can allocate
		 * sufficient memory
		 */
		mutex_lock(&p->mutex);
		args->num_of_nodes = p->n_pdds;
		goto out_unlock;
	}

	/* Fill in process-aperture information for all available
	 * nodes, but not more than args->num_of_nodes as that is
	 * the amount of memory allocated by user
	 */
	pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
		     GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	mutex_lock(&p->mutex);

	if (!p->n_pdds) {
		args->num_of_nodes = 0;
		kfree(pa);
		goto out_unlock;
	}

	/* Run over all pdd of the process */
	for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pa[i].gpu_id = pdd->dev->id;
		pa[i].lds_base = pdd->lds_base;
		pa[i].lds_limit = pdd->lds_limit;
		pa[i].gpuvm_base = pdd->gpuvm_base;
		pa[i].gpuvm_limit = pdd->gpuvm_limit;
		pa[i].scratch_base = pdd->scratch_base;
		pa[i].scratch_limit = pdd->scratch_limit;

		dev_dbg(kfd_device,
			"gpu id %u\n", pdd->dev->id);
		dev_dbg(kfd_device,
			"lds_base %llX\n", pdd->lds_base);
		dev_dbg(kfd_device,
			"lds_limit %llX\n", pdd->lds_limit);
		dev_dbg(kfd_device,
			"gpuvm_base %llX\n", pdd->gpuvm_base);
		dev_dbg(kfd_device,
			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
		dev_dbg(kfd_device,
			"scratch_base %llX\n", pdd->scratch_base);
		dev_dbg(kfd_device,
			"scratch_limit %llX\n", pdd->scratch_limit);
	}
	mutex_unlock(&p->mutex);

	args->num_of_nodes = i;
	ret = copy_to_user(
			(void __user *)args->kfd_process_device_apertures_ptr,
			pa,
			(i * sizeof(struct kfd_process_device_apertures)));
	kfree(pa);
	return ret ? -EFAULT : 0;

out_unlock:
	mutex_unlock(&p->mutex);
	return 0;
}

static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_event_args *args = data;
	int err;

	/* For dGPUs the event page is allocated in user mode. The
	 * handle is passed to KFD with the first call to this IOCTL
	 * through the event_page_offset field.
	 */
	if (args->event_page_offset) {
		mutex_lock(&p->mutex);
		err = kfd_kmap_event_page(p, args->event_page_offset);
		mutex_unlock(&p->mutex);
		if (err)
			return err;
	}

	err = kfd_event_create(filp, p, args->event_type,
				args->auto_reset != 0, args->node_id,
				&args->event_id, &args->event_trigger_data,
				&args->event_page_offset,
				&args->event_slot_index);

	pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
	return err;
}

static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_destroy_event_args *args = data;

	return kfd_event_destroy(p, args->event_id);
}

static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_set_event_args *args = data;

	return kfd_set_event(p, args->event_id);
}

static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_reset_event_args *args = data;

	return kfd_reset_event(p, args->event_id);
}

static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_wait_events_args *args = data;

	return kfd_wait_on_events(p, args->num_events,
			(void __user *)args->events_ptr,
			(args->wait_for_all != 0),
			&args->timeout, &args->wait_result);
}

static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_scratch_backing_va_args *args = data;
	struct kfd_process_device *pdd;
	struct kfd_node *dev;
	long err;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;
		goto err_pdd;
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_fail;
	}

	pdd->qpd.sh_hidden_private_base = args->va_addr;

	mutex_unlock(&p->mutex);

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
	    pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
		dev->kfd2kgd->set_scratch_backing_va(
			dev->adev, args->va_addr, pdd->qpd.vmid);

	return 0;

bind_process_to_device_fail:
err_pdd:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_get_tile_config(struct file *filep,
		struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_tile_config_args *args = data;
	struct kfd_process_device *pdd;
	struct tile_config config;
	int err = 0;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	mutex_unlock(&p->mutex);
	if (!pdd)
		return -EINVAL;

	amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);

	args->gb_addr_config = config.gb_addr_config;
	args->num_banks = config.num_banks;
	args->num_ranks = config.num_ranks;

	if (args->num_tile_configs > config.num_tile_configs)
		args->num_tile_configs = config.num_tile_configs;
	err = copy_to_user((void __user *)args->tile_config_ptr,
			config.tile_config_ptr,
			args->num_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_tile_configs = 0;
		return -EFAULT;
	}

	if (args->num_macro_tile_configs > config.num_macro_tile_configs)
		args->num_macro_tile_configs =
				config.num_macro_tile_configs;
	err = copy_to_user((void __user *)args->macro_tile_config_ptr,
			config.macro_tile_config_ptr,
			args->num_macro_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_macro_tile_configs = 0;
		return -EFAULT;
	}

	return 0;
}

static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_acquire_vm_args *args = data;
	struct kfd_process_device *pdd;
	struct file *drm_file;
	int ret;

	drm_file = fget(args->drm_fd);
	if (!drm_file)
		return -EINVAL;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		ret = -EINVAL;
		goto err_pdd;
	}

	if (pdd->drm_file) {
		ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
		goto err_drm_file;
	}

	ret = kfd_process_device_init_vm(pdd, drm_file);
	if (ret)
		goto err_unlock;

	/* On success, the PDD keeps the drm_file reference */
	mutex_unlock(&p->mutex);

	return 0;

err_unlock:
err_pdd:
err_drm_file:
	mutex_unlock(&p->mutex);
	fput(drm_file);
	return ret;
}

bool kfd_dev_is_large_bar(struct kfd_node *dev)
{
	if (debug_largebar) {
		pr_debug("Simulate large-bar allocation on non large-bar machine\n");
		return true;
	}

	if (dev->local_mem_info.local_mem_size_private == 0 &&
	    dev->local_mem_info.local_mem_size_public > 0)
		return true;

	if (dev->local_mem_info.local_mem_size_public == 0 &&
	    dev->kfd->adev->gmc.is_app_apu) {
		pr_debug("APP APU, Consider like a large bar system\n");
		return true;
	}

	return false;
}

static int kfd_ioctl_get_available_memory(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_available_memory_args *args = data;
	struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);

	if (!pdd)
		return -EINVAL;
	args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
							pdd->dev->node_id);
	kfd_unlock_pdd(pdd);
	return 0;
}

static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
	struct kfd_process_device *pdd;
	void *mem;
	struct kfd_node *dev;
	int idr_handle;
	long err;
	uint64_t offset = args->mmap_offset;
	uint32_t flags = args->flags;

	if (args->size == 0)
		return -EINVAL;

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
	/* Flush pending deferred work to avoid racing with deferred actions
	 * from previous memory map changes (e.g. munmap).
	 */
	svm_range_list_lock_and_flush_work(&p->svms, current->mm);
	mutex_lock(&p->svms.lock);
	mmap_write_unlock(current->mm);
	if (interval_tree_iter_first(&p->svms.objects,
				     args->va_addr >> PAGE_SHIFT,
				     (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
		pr_err("Address: 0x%llx already allocated by SVM\n",
			args->va_addr);
		mutex_unlock(&p->svms.lock);
		return -EADDRINUSE;
	}

	/* When register user buffer check if it has been registered by svm by
	 * buffer cpu virtual address.
	 */
	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
	    interval_tree_iter_first(&p->svms.objects,
				     args->mmap_offset >> PAGE_SHIFT,
				     (args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) {
		pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
			args->mmap_offset);
		mutex_unlock(&p->svms.lock);
		return -EADDRINUSE;
	}

	mutex_unlock(&p->svms.lock);
#endif
	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;
		goto err_pdd;
	}

	dev = pdd->dev;

	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
		(flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
		!kfd_dev_is_large_bar(dev)) {
		pr_err("Alloc host visible vram on small bar is not allowed\n");
		err = -EINVAL;
		goto err_large_bar;
	}

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto err_unlock;
	}

	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
		if (args->size != kfd_doorbell_process_slice(dev->kfd)) {
			err = -EINVAL;
			goto err_unlock;
		}
		offset = kfd_get_process_doorbells(pdd);
		if (!offset) {
			err = -ENOMEM;
			goto err_unlock;
		}
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
		if (args->size != PAGE_SIZE) {
			err = -EINVAL;
			goto err_unlock;
		}
		offset = dev->adev->rmmio_remap.bus_addr;
		if (!offset || (PAGE_SIZE > 4096)) {
			err = -ENOMEM;
			goto err_unlock;
		}
	}

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		dev->adev, args->va_addr, args->size,
		pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
		flags, false);

	if (err)
		goto err_unlock;

	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (idr_handle < 0) {
		err = -EFAULT;
		goto err_free;
	}

	/* Update the VRAM usage count */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		uint64_t size = args->size;

		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
			size >>= 1;
		atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
	}

	mutex_unlock(&p->mutex);

	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
	args->mmap_offset = offset;

	/* MMIO is mapped through kfd device
	 * Generate a kfd mmap offset
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
		args->mmap_offset = KFD_MMAP_TYPE_MMIO
					| KFD_MMAP_GPU_ID(args->gpu_id);

	return 0;

err_free:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
					       pdd->drm_priv, NULL);
err_unlock:
err_pdd:
err_large_bar:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_free_memory_of_gpu_args *args = data;
	struct kfd_process_device *pdd;
	void *mem;
	int ret;
	uint64_t size = 0;

	mutex_lock(&p->mutex);
	/*
	 * Safeguard to prevent user space from freeing signal BO.
	 * It will be freed at process termination.
	 */
	if (p->signal_handle && (p->signal_handle == args->handle)) {
		pr_err("Free signal BO is not allowed\n");
		ret = -EPERM;
		goto err_unlock;
	}

	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		ret = -EINVAL;
		goto err_pdd;
	}

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(args->handle));
	if (!mem) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
				(struct kgd_mem *)mem, pdd->drm_priv, &size);

	/* If freeing the buffer failed, leave the handle in place for
	 * clean-up during process tear-down.
	 */
	if (!ret)
		kfd_process_device_remove_obj_handle(
			pdd, GET_IDR_HANDLE(args->handle));

	atomic64_sub(size, &pdd->vram_usage);

err_unlock:
err_pdd:
	mutex_unlock(&p->mutex);
	return ret;
}

static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_map_memory_to_gpu_args *args = data;
	struct kfd_process_device *pdd, *peer_pdd;
	void *mem;
	struct kfd_node *dev;
	long err = 0;
	int i;
	uint32_t *devices_arr = NULL;

	if (!args->n_devices) {
		pr_debug("Device IDs array empty\n");
		return -EINVAL;
	}
	if (args->n_success > args->n_devices) {
		pr_debug("n_success exceeds n_devices\n");
		return -EINVAL;
	}

	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
				    GFP_KERNEL);
	if (!devices_arr)
		return -ENOMEM;

	err = copy_from_user(devices_arr,
			     (void __user *)args->device_ids_array_ptr,
			     args->n_devices * sizeof(*devices_arr));
	if (err != 0) {
		err = -EFAULT;
		goto copy_from_user_failed;
	}

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
	if (!pdd) {
		err = -EINVAL;
		goto get_process_device_data_failed;
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_failed;
	}

	mem = kfd_process_device_translate_handle(pdd,
						GET_IDR_HANDLE(args->handle));
	if (!mem) {
		err = -ENOMEM;
		goto get_mem_obj_from_handle_failed;
	}

	for (i = args->n_success; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (!peer_pdd) {
			pr_debug("Getting device by id failed for 0x%x\n",
				 devices_arr[i]);
			err = -EINVAL;
			goto get_mem_obj_from_handle_failed;
		}

		peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
		if (IS_ERR(peer_pdd)) {
			err = PTR_ERR(peer_pdd);
			goto get_mem_obj_from_handle_failed;
		}

		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
			peer_pdd->dev->adev, (struct kgd_mem *)mem,
			peer_pdd->drm_priv);
		if (err) {
			struct pci_dev *pdev = peer_pdd->dev->adev->pdev;

			dev_err(dev->adev->dev,
				"Failed to map peer:%04x:%02x:%02x.%d mem_domain:%d\n",
				pci_domain_nr(pdev->bus),
				pdev->bus->number,
				PCI_SLOT(pdev->devfn),
				PCI_FUNC(pdev->devfn),
				((struct kgd_mem *)mem)->domain);
			goto map_memory_to_gpu_failed;
		}
		args->n_success = i+1;
	}

	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	mutex_unlock(&p->mutex);

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
	}
	kfree(devices_arr);

	return err;

get_process_device_data_failed:
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
sync_memory_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
	kfree(devices_arr);

	return err;
}

static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
	struct kfd_process_device *pdd, *peer_pdd;
	void *mem;
	long err = 0;
	uint32_t *devices_arr = NULL, i;
	bool flush_tlb;

	if (!args->n_devices) {
		pr_debug("Device IDs array empty\n");
		return -EINVAL;
	}
	if (args->n_success > args->n_devices) {
		pr_debug("n_success exceeds n_devices\n");
		return -EINVAL;
	}

	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
				    GFP_KERNEL);
	if (!devices_arr)
		return -ENOMEM;

	err = copy_from_user(devices_arr,
			     (void __user *)args->device_ids_array_ptr,
			     args->n_devices * sizeof(*devices_arr));
	if (err != 0) {
		err = -EFAULT;
		goto copy_from_user_failed;
	}

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
	if (!pdd) {
		err = -EINVAL;
		goto bind_process_to_device_failed;
	}

	mem = kfd_process_device_translate_handle(pdd,
						GET_IDR_HANDLE(args->handle));
	if (!mem) {
		err = -ENOMEM;
		goto get_mem_obj_from_handle_failed;
	}

	for (i = args->n_success; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (!peer_pdd) {
			err = -EINVAL;
			goto get_mem_obj_from_handle_failed;
		}
		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
			peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
		if (err) {
			pr_err("Failed to unmap from gpu %d/%d\n",
			       i, args->n_devices);
			goto unmap_memory_from_gpu_failed;
		}
		args->n_success = i+1;
	}

	flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
	if (flush_tlb) {
		err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
				(struct kgd_mem *) mem, true);
		if (err) {
			pr_debug("Sync memory failed, wait interrupted by user signal\n");
			goto sync_memory_failed;
		}
	}

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		if (flush_tlb)
			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);

		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
		err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
		if (err)
			goto sync_memory_failed;
	}

	mutex_unlock(&p->mutex);

	kfree(devices_arr);

	return 0;

bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
sync_memory_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
	kfree(devices_arr);
	return err;
}

static int kfd_ioctl_alloc_queue_gws(struct file *filep,
		struct kfd_process *p, void *data)
{
	int retval;
	struct kfd_ioctl_alloc_queue_gws_args *args = data;
	struct queue *q;
	struct kfd_node *dev;

	mutex_lock(&p->mutex);
	q = pqm_get_user_queue(&p->pqm, args->queue_id);

	if (q) {
		dev = q->device;
	} else {
		retval = -EINVAL;
		goto out_unlock;
	}

	if (!dev->gws) {
		retval = -ENODEV;
		goto out_unlock;
	}

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		retval = -ENODEV;
		goto out_unlock;
	}

	if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) ||
				      kfd_dbg_has_cwsr_workaround(dev))) {
		retval = -EBUSY;
		goto out_unlock;
	}

	retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
	mutex_unlock(&p->mutex);

	args->first_gws = 0;
	return retval;

out_unlock:
	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_get_dmabuf_info(struct file *filep,
		struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_dmabuf_info_args *args = data;
	struct kfd_node *dev = NULL;
	struct amdgpu_device *dmabuf_adev;
	void *metadata_buffer = NULL;
	uint32_t flags;
	int8_t xcp_id;
	unsigned int i;
	int r;

	/* Find a KFD GPU device that supports the get_dmabuf_info query */
	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
		if (dev && !kfd_devcgroup_check_permission(dev))
			break;
	if (!dev)
		return -EINVAL;

	if (args->metadata_ptr) {
		metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
		if (!metadata_buffer)
			return -ENOMEM;
	}

	/* Get dmabuf info from KGD */
	r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
					  &dmabuf_adev, &args->size,
					  metadata_buffer, args->metadata_size,
					  &args->metadata_size, &flags, &xcp_id);
	if (r)
		goto exit;

	if (xcp_id >= 0)
		args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
	else
		args->gpu_id = dev->id;
	args->flags = flags;

	/* Copy metadata buffer to user mode */
	if (metadata_buffer) {
		r = copy_to_user((void __user *)args->metadata_ptr,
				 metadata_buffer, args->metadata_size);
		if (r != 0)
			r = -EFAULT;
	}

exit:
	kfree(metadata_buffer);

	return r;
}

static int kfd_ioctl_import_dmabuf(struct file *filep,
				   struct kfd_process *p, void *data)
{
	struct kfd_ioctl_import_dmabuf_args *args = data;
	struct kfd_process_device *pdd;
	struct dma_buf *dmabuf;
	int idr_handle;
	uint64_t size;
	void *mem;
	int r;

	dmabuf = dma_buf_get(args->dmabuf_fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		r = -EINVAL;
		goto err_unlock;
	}

	pdd = kfd_bind_process_to_device(pdd->dev, p);
	if (IS_ERR(pdd)) {
		r = PTR_ERR(pdd);
		goto err_unlock;
	}

	r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
					      args->va_addr, pdd->drm_priv,
					      (struct kgd_mem **)&mem, &size,
					      NULL);
	if (r)
		goto err_unlock;

	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (idr_handle < 0) {
		r = -EFAULT;
		goto err_free;
	}

	mutex_unlock(&p->mutex);
	dma_buf_put(dmabuf);

	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);

	return 0;

err_free:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
					       pdd->drm_priv, NULL);
err_unlock:
	mutex_unlock(&p->mutex);
	dma_buf_put(dmabuf);
	return r;
}

static int kfd_ioctl_export_dmabuf(struct file *filep,
				   struct kfd_process *p, void *data)
{
	struct kfd_ioctl_export_dmabuf_args *args = data;
	struct kfd_process_device *pdd;
	struct dma_buf *dmabuf;
	struct kfd_node *dev;
	void *mem;
	int ret = 0;

	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		ret = -EINVAL;
		goto err_unlock;
	}

	mem = kfd_process_device_translate_handle(pdd,
						GET_IDR_HANDLE(args->handle));
	if (!mem) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
	mutex_unlock(&p->mutex);
	if (ret)
		goto err_out;

	ret = dma_buf_fd(dmabuf, args->flags);
	if (ret < 0) {
		dma_buf_put(dmabuf);
		goto err_out;
	}
	/* dma_buf_fd assigns the reference count to the fd, no need to
	 * put the reference here.
1662 */ 1663 args->dmabuf_fd = ret; 1664 1665 return 0; 1666 1667 err_unlock: 1668 mutex_unlock(&p->mutex); 1669 err_out: 1670 return ret; 1671 } 1672 1673 /* Handle requests for watching SMI events */ 1674 static int kfd_ioctl_smi_events(struct file *filep, 1675 struct kfd_process *p, void *data) 1676 { 1677 struct kfd_ioctl_smi_events_args *args = data; 1678 struct kfd_process_device *pdd; 1679 1680 mutex_lock(&p->mutex); 1681 1682 pdd = kfd_process_device_data_by_id(p, args->gpuid); 1683 mutex_unlock(&p->mutex); 1684 if (!pdd) 1685 return -EINVAL; 1686 1687 return kfd_smi_event_open(pdd->dev, &args->anon_fd); 1688 } 1689 1690 #if IS_ENABLED(CONFIG_HSA_AMD_SVM) 1691 1692 static int kfd_ioctl_set_xnack_mode(struct file *filep, 1693 struct kfd_process *p, void *data) 1694 { 1695 struct kfd_ioctl_set_xnack_mode_args *args = data; 1696 int r = 0; 1697 1698 mutex_lock(&p->mutex); 1699 if (args->xnack_enabled >= 0) { 1700 if (!list_empty(&p->pqm.queues)) { 1701 pr_debug("Process has user queues running\n"); 1702 r = -EBUSY; 1703 goto out_unlock; 1704 } 1705 1706 if (p->xnack_enabled == args->xnack_enabled) 1707 goto out_unlock; 1708 1709 if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) { 1710 r = -EPERM; 1711 goto out_unlock; 1712 } 1713 1714 r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled); 1715 } else { 1716 args->xnack_enabled = p->xnack_enabled; 1717 } 1718 1719 out_unlock: 1720 mutex_unlock(&p->mutex); 1721 1722 return r; 1723 } 1724 1725 static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data) 1726 { 1727 struct kfd_ioctl_svm_args *args = data; 1728 int r = 0; 1729 1730 pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n", 1731 args->start_addr, args->size, args->op, args->nattr); 1732 1733 if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK)) 1734 return -EINVAL; 1735 if (!args->start_addr || !args->size) 1736 return -EINVAL; 1737 1738 r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr, 1739 args->attrs); 1740 1741 return r; 1742 } 1743 #else 1744 static int kfd_ioctl_set_xnack_mode(struct file *filep, 1745 struct kfd_process *p, void *data) 1746 { 1747 return -EPERM; 1748 } 1749 static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data) 1750 { 1751 return -EPERM; 1752 } 1753 #endif 1754 1755 static int criu_checkpoint_process(struct kfd_process *p, 1756 uint8_t __user *user_priv_data, 1757 uint64_t *priv_offset) 1758 { 1759 struct kfd_criu_process_priv_data process_priv; 1760 int ret; 1761 1762 memset(&process_priv, 0, sizeof(process_priv)); 1763 1764 process_priv.version = KFD_CRIU_PRIV_VERSION; 1765 /* For CR, we don't consider negative xnack mode which is used for 1766 * querying without changing it, here 0 simply means disabled and 1 1767 * means enabled so retry for finding a valid PTE. 1768 */ 1769 process_priv.xnack_mode = p->xnack_enabled ? 
1 : 0; 1770 1771 ret = copy_to_user(user_priv_data + *priv_offset, 1772 &process_priv, sizeof(process_priv)); 1773 1774 if (ret) { 1775 pr_err("Failed to copy process information to user\n"); 1776 ret = -EFAULT; 1777 } 1778 1779 *priv_offset += sizeof(process_priv); 1780 return ret; 1781 } 1782 1783 static int criu_checkpoint_devices(struct kfd_process *p, 1784 uint32_t num_devices, 1785 uint8_t __user *user_addr, 1786 uint8_t __user *user_priv_data, 1787 uint64_t *priv_offset) 1788 { 1789 struct kfd_criu_device_priv_data *device_priv = NULL; 1790 struct kfd_criu_device_bucket *device_buckets = NULL; 1791 int ret = 0, i; 1792 1793 device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL); 1794 if (!device_buckets) { 1795 ret = -ENOMEM; 1796 goto exit; 1797 } 1798 1799 device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL); 1800 if (!device_priv) { 1801 ret = -ENOMEM; 1802 goto exit; 1803 } 1804 1805 for (i = 0; i < num_devices; i++) { 1806 struct kfd_process_device *pdd = p->pdds[i]; 1807 1808 device_buckets[i].user_gpu_id = pdd->user_gpu_id; 1809 device_buckets[i].actual_gpu_id = pdd->dev->id; 1810 1811 /* 1812 * priv_data does not contain useful information for now and is reserved for 1813 * future use, so we do not set its contents. 1814 */ 1815 } 1816 1817 ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets)); 1818 if (ret) { 1819 pr_err("Failed to copy device information to user\n"); 1820 ret = -EFAULT; 1821 goto exit; 1822 } 1823 1824 ret = copy_to_user(user_priv_data + *priv_offset, 1825 device_priv, 1826 num_devices * sizeof(*device_priv)); 1827 if (ret) { 1828 pr_err("Failed to copy device information to user\n"); 1829 ret = -EFAULT; 1830 } 1831 *priv_offset += num_devices * sizeof(*device_priv); 1832 1833 exit: 1834 kvfree(device_buckets); 1835 kvfree(device_priv); 1836 return ret; 1837 } 1838 1839 static uint32_t get_process_num_bos(struct kfd_process *p) 1840 { 1841 uint32_t num_of_bos = 0; 1842 int i; 1843 1844 /* Run over all PDDs of the process */ 1845 for (i = 0; i < p->n_pdds; i++) { 1846 struct kfd_process_device *pdd = p->pdds[i]; 1847 void *mem; 1848 int id; 1849 1850 idr_for_each_entry(&pdd->alloc_idr, mem, id) { 1851 struct kgd_mem *kgd_mem = (struct kgd_mem *)mem; 1852 1853 if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base) 1854 num_of_bos++; 1855 } 1856 } 1857 return num_of_bos; 1858 } 1859 1860 static int criu_get_prime_handle(struct kgd_mem *mem, int flags, 1861 u32 *shared_fd) 1862 { 1863 struct dma_buf *dmabuf; 1864 int ret; 1865 1866 ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf); 1867 if (ret) { 1868 pr_err("dmabuf export failed for the BO\n"); 1869 return ret; 1870 } 1871 1872 ret = dma_buf_fd(dmabuf, flags); 1873 if (ret < 0) { 1874 pr_err("dmabuf create fd failed, ret:%d\n", ret); 1875 goto out_free_dmabuf; 1876 } 1877 1878 *shared_fd = ret; 1879 return 0; 1880 1881 out_free_dmabuf: 1882 dma_buf_put(dmabuf); 1883 return ret; 1884 } 1885 1886 static int criu_checkpoint_bos(struct kfd_process *p, 1887 uint32_t num_bos, 1888 uint8_t __user *user_bos, 1889 uint8_t __user *user_priv_data, 1890 uint64_t *priv_offset) 1891 { 1892 struct kfd_criu_bo_bucket *bo_buckets; 1893 struct kfd_criu_bo_priv_data *bo_privs; 1894 int ret = 0, pdd_index, bo_index = 0, id; 1895 void *mem; 1896 1897 bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL); 1898 if (!bo_buckets) 1899 return -ENOMEM; 1900 1901 bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL); 1902 if (!bo_privs) { 1903 ret 
= -ENOMEM; 1904 goto exit; 1905 } 1906 1907 for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) { 1908 struct kfd_process_device *pdd = p->pdds[pdd_index]; 1909 struct amdgpu_bo *dumper_bo; 1910 struct kgd_mem *kgd_mem; 1911 1912 idr_for_each_entry(&pdd->alloc_idr, mem, id) { 1913 struct kfd_criu_bo_bucket *bo_bucket; 1914 struct kfd_criu_bo_priv_data *bo_priv; 1915 int i, dev_idx = 0; 1916 1917 if (!mem) { 1918 ret = -ENOMEM; 1919 goto exit; 1920 } 1921 1922 kgd_mem = (struct kgd_mem *)mem; 1923 dumper_bo = kgd_mem->bo; 1924 1925 /* Skip checkpointing BOs that are used for Trap handler 1926 * code and state. Currently, these BOs have a VA that 1927 * is less GPUVM Base 1928 */ 1929 if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base) 1930 continue; 1931 1932 bo_bucket = &bo_buckets[bo_index]; 1933 bo_priv = &bo_privs[bo_index]; 1934 1935 bo_bucket->gpu_id = pdd->user_gpu_id; 1936 bo_bucket->addr = (uint64_t)kgd_mem->va; 1937 bo_bucket->size = amdgpu_bo_size(dumper_bo); 1938 bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags; 1939 bo_priv->idr_handle = id; 1940 1941 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 1942 ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo, 1943 &bo_priv->user_addr); 1944 if (ret) { 1945 pr_err("Failed to obtain user address for user-pointer bo\n"); 1946 goto exit; 1947 } 1948 } 1949 if (bo_bucket->alloc_flags 1950 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) { 1951 ret = criu_get_prime_handle(kgd_mem, 1952 bo_bucket->alloc_flags & 1953 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0, 1954 &bo_bucket->dmabuf_fd); 1955 if (ret) 1956 goto exit; 1957 } else { 1958 bo_bucket->dmabuf_fd = KFD_INVALID_FD; 1959 } 1960 1961 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) 1962 bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL | 1963 KFD_MMAP_GPU_ID(pdd->dev->id); 1964 else if (bo_bucket->alloc_flags & 1965 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) 1966 bo_bucket->offset = KFD_MMAP_TYPE_MMIO | 1967 KFD_MMAP_GPU_ID(pdd->dev->id); 1968 else 1969 bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo); 1970 1971 for (i = 0; i < p->n_pdds; i++) { 1972 if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem)) 1973 bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id; 1974 } 1975 1976 pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n" 1977 "gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x", 1978 bo_bucket->size, 1979 bo_bucket->addr, 1980 bo_bucket->offset, 1981 bo_bucket->gpu_id, 1982 bo_bucket->alloc_flags, 1983 bo_priv->idr_handle); 1984 bo_index++; 1985 } 1986 } 1987 1988 ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets)); 1989 if (ret) { 1990 pr_err("Failed to copy BO information to user\n"); 1991 ret = -EFAULT; 1992 goto exit; 1993 } 1994 1995 ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs)); 1996 if (ret) { 1997 pr_err("Failed to copy BO priv information to user\n"); 1998 ret = -EFAULT; 1999 goto exit; 2000 } 2001 2002 *priv_offset += num_bos * sizeof(*bo_privs); 2003 2004 exit: 2005 while (ret && bo_index--) { 2006 if (bo_buckets[bo_index].alloc_flags 2007 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) 2008 close_fd(bo_buckets[bo_index].dmabuf_fd); 2009 } 2010 2011 kvfree(bo_buckets); 2012 kvfree(bo_privs); 2013 return ret; 2014 } 2015 2016 static int criu_get_process_object_info(struct kfd_process *p, 2017 uint32_t *num_devices, 2018 uint32_t *num_bos, 2019 uint32_t *num_objects, 2020 uint64_t *objs_priv_size) 2021 { 
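	/* Count the devices, BOs, queues, events and SVM ranges belonging to
	 * the process, and work out how much private data the checkpoint
	 * will need for them.
	 */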
2022 uint64_t queues_priv_data_size, svm_priv_data_size, priv_size; 2023 uint32_t num_queues, num_events, num_svm_ranges; 2024 int ret; 2025 2026 *num_devices = p->n_pdds; 2027 *num_bos = get_process_num_bos(p); 2028 2029 ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size); 2030 if (ret) 2031 return ret; 2032 2033 num_events = kfd_get_num_events(p); 2034 2035 ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size); 2036 if (ret) 2037 return ret; 2038 2039 *num_objects = num_queues + num_events + num_svm_ranges; 2040 2041 if (objs_priv_size) { 2042 priv_size = sizeof(struct kfd_criu_process_priv_data); 2043 priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data); 2044 priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data); 2045 priv_size += queues_priv_data_size; 2046 priv_size += num_events * sizeof(struct kfd_criu_event_priv_data); 2047 priv_size += svm_priv_data_size; 2048 *objs_priv_size = priv_size; 2049 } 2050 return 0; 2051 } 2052 2053 static int criu_checkpoint(struct file *filep, 2054 struct kfd_process *p, 2055 struct kfd_ioctl_criu_args *args) 2056 { 2057 int ret; 2058 uint32_t num_devices, num_bos, num_objects; 2059 uint64_t priv_size, priv_offset = 0, bo_priv_offset; 2060 2061 if (!args->devices || !args->bos || !args->priv_data) 2062 return -EINVAL; 2063 2064 mutex_lock(&p->mutex); 2065 2066 if (!p->n_pdds) { 2067 pr_err("No pdd for given process\n"); 2068 ret = -ENODEV; 2069 goto exit_unlock; 2070 } 2071 2072 /* Confirm all process queues are evicted */ 2073 if (!p->queues_paused) { 2074 pr_err("Cannot dump process when queues are not in evicted state\n"); 2075 /* CRIU plugin did not call op PROCESS_INFO before checkpointing */ 2076 ret = -EINVAL; 2077 goto exit_unlock; 2078 } 2079 2080 ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size); 2081 if (ret) 2082 goto exit_unlock; 2083 2084 if (num_devices != args->num_devices || 2085 num_bos != args->num_bos || 2086 num_objects != args->num_objects || 2087 priv_size != args->priv_data_size) { 2088 2089 ret = -EINVAL; 2090 goto exit_unlock; 2091 } 2092 2093 /* each function will store private data inside priv_data and adjust priv_offset */ 2094 ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset); 2095 if (ret) 2096 goto exit_unlock; 2097 2098 ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices, 2099 (uint8_t __user *)args->priv_data, &priv_offset); 2100 if (ret) 2101 goto exit_unlock; 2102 2103 /* Leave room for BOs in the private data. They need to be restored 2104 * before events, but we checkpoint them last to simplify the error 2105 * handling. 2106 */ 2107 bo_priv_offset = priv_offset; 2108 priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data); 2109 2110 if (num_objects) { 2111 ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data, 2112 &priv_offset); 2113 if (ret) 2114 goto exit_unlock; 2115 2116 ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data, 2117 &priv_offset); 2118 if (ret) 2119 goto exit_unlock; 2120 2121 ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset); 2122 if (ret) 2123 goto exit_unlock; 2124 } 2125 2126 /* This must be the last thing in this function that can fail. 2127 * Otherwise we leak dmabuf file descriptors. 
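	 * (criu_checkpoint_bos() installs dmabuf fds in the caller's file
	 * table, and a later failure could not undo that.)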
2128 */ 2129 ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos, 2130 (uint8_t __user *)args->priv_data, &bo_priv_offset); 2131 2132 exit_unlock: 2133 mutex_unlock(&p->mutex); 2134 if (ret) 2135 pr_err("Failed to dump CRIU ret:%d\n", ret); 2136 else 2137 pr_debug("CRIU dump ret:%d\n", ret); 2138 2139 return ret; 2140 } 2141 2142 static int criu_restore_process(struct kfd_process *p, 2143 struct kfd_ioctl_criu_args *args, 2144 uint64_t *priv_offset, 2145 uint64_t max_priv_data_size) 2146 { 2147 int ret = 0; 2148 struct kfd_criu_process_priv_data process_priv; 2149 2150 if (*priv_offset + sizeof(process_priv) > max_priv_data_size) 2151 return -EINVAL; 2152 2153 ret = copy_from_user(&process_priv, 2154 (void __user *)(args->priv_data + *priv_offset), 2155 sizeof(process_priv)); 2156 if (ret) { 2157 pr_err("Failed to copy process private information from user\n"); 2158 ret = -EFAULT; 2159 goto exit; 2160 } 2161 *priv_offset += sizeof(process_priv); 2162 2163 if (process_priv.version != KFD_CRIU_PRIV_VERSION) { 2164 pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n", 2165 process_priv.version, KFD_CRIU_PRIV_VERSION); 2166 return -EINVAL; 2167 } 2168 2169 pr_debug("Setting XNACK mode\n"); 2170 if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) { 2171 pr_err("xnack mode cannot be set\n"); 2172 ret = -EPERM; 2173 goto exit; 2174 } else { 2175 pr_debug("set xnack mode: %d\n", process_priv.xnack_mode); 2176 p->xnack_enabled = process_priv.xnack_mode; 2177 } 2178 2179 exit: 2180 return ret; 2181 } 2182 2183 static int criu_restore_devices(struct kfd_process *p, 2184 struct kfd_ioctl_criu_args *args, 2185 uint64_t *priv_offset, 2186 uint64_t max_priv_data_size) 2187 { 2188 struct kfd_criu_device_bucket *device_buckets; 2189 struct kfd_criu_device_priv_data *device_privs; 2190 int ret = 0; 2191 uint32_t i; 2192 2193 if (args->num_devices != p->n_pdds) 2194 return -EINVAL; 2195 2196 if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size) 2197 return -EINVAL; 2198 2199 device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL); 2200 if (!device_buckets) 2201 return -ENOMEM; 2202 2203 ret = copy_from_user(device_buckets, (void __user *)args->devices, 2204 args->num_devices * sizeof(*device_buckets)); 2205 if (ret) { 2206 pr_err("Failed to copy devices buckets from user\n"); 2207 ret = -EFAULT; 2208 goto exit; 2209 } 2210 2211 for (i = 0; i < args->num_devices; i++) { 2212 struct kfd_node *dev; 2213 struct kfd_process_device *pdd; 2214 struct file *drm_file; 2215 2216 /* device private data is not currently used */ 2217 2218 if (!device_buckets[i].user_gpu_id) { 2219 pr_err("Invalid user gpu_id\n"); 2220 ret = -EINVAL; 2221 goto exit; 2222 } 2223 2224 dev = kfd_device_by_id(device_buckets[i].actual_gpu_id); 2225 if (!dev) { 2226 pr_err("Failed to find device with gpu_id = %x\n", 2227 device_buckets[i].actual_gpu_id); 2228 ret = -EINVAL; 2229 goto exit; 2230 } 2231 2232 pdd = kfd_get_process_device_data(dev, p); 2233 if (!pdd) { 2234 pr_err("Failed to get pdd for gpu_id = %x\n", 2235 device_buckets[i].actual_gpu_id); 2236 ret = -EINVAL; 2237 goto exit; 2238 } 2239 pdd->user_gpu_id = device_buckets[i].user_gpu_id; 2240 2241 drm_file = fget(device_buckets[i].drm_fd); 2242 if (!drm_file) { 2243 pr_err("Invalid render node file descriptor sent from plugin (%d)\n", 2244 device_buckets[i].drm_fd); 2245 ret = -EINVAL; 2246 goto exit; 2247 } 2248 2249 if (pdd->drm_file) { 2250 ret = -EINVAL; 2251 goto exit; 2252 } 
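
		/*
		 * The drm_file reference taken by fget() above is handed over
		 * to the pdd by kfd_process_device_init_vm() below; it is
		 * only dropped explicitly on the error path.
		 */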
2253 2254 /* create the vm using render nodes for kfd pdd */ 2255 if (kfd_process_device_init_vm(pdd, drm_file)) { 2256 pr_err("could not init vm for given pdd\n"); 2257 /* On success, the PDD keeps the drm_file reference */ 2258 fput(drm_file); 2259 ret = -EINVAL; 2260 goto exit; 2261 } 2262 /* 2263 * pdd now already has the vm bound to render node so below api won't create a new 2264 * exclusive kfd mapping but use existing one with renderDXXX but is still needed 2265 * for iommu v2 binding and runtime pm. 2266 */ 2267 pdd = kfd_bind_process_to_device(dev, p); 2268 if (IS_ERR(pdd)) { 2269 ret = PTR_ERR(pdd); 2270 goto exit; 2271 } 2272 2273 if (!pdd->qpd.proc_doorbells) { 2274 ret = kfd_alloc_process_doorbells(dev->kfd, pdd); 2275 if (ret) 2276 goto exit; 2277 } 2278 } 2279 2280 /* 2281 * We are not copying device private data from user as we are not using the data for now, 2282 * but we still adjust for its private data. 2283 */ 2284 *priv_offset += args->num_devices * sizeof(*device_privs); 2285 2286 exit: 2287 kfree(device_buckets); 2288 return ret; 2289 } 2290 2291 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, 2292 struct kfd_criu_bo_bucket *bo_bucket, 2293 struct kfd_criu_bo_priv_data *bo_priv, 2294 struct kgd_mem **kgd_mem) 2295 { 2296 int idr_handle; 2297 int ret; 2298 const bool criu_resume = true; 2299 u64 offset; 2300 2301 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) { 2302 if (bo_bucket->size != 2303 kfd_doorbell_process_slice(pdd->dev->kfd)) 2304 return -EINVAL; 2305 2306 offset = kfd_get_process_doorbells(pdd); 2307 if (!offset) 2308 return -ENOMEM; 2309 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) { 2310 /* MMIO BOs need remapped bus address */ 2311 if (bo_bucket->size != PAGE_SIZE) { 2312 pr_err("Invalid page size\n"); 2313 return -EINVAL; 2314 } 2315 offset = pdd->dev->adev->rmmio_remap.bus_addr; 2316 if (!offset || (PAGE_SIZE > 4096)) { 2317 pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n"); 2318 return -ENOMEM; 2319 } 2320 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 2321 offset = bo_priv->user_addr; 2322 } 2323 /* Create the BO */ 2324 ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr, 2325 bo_bucket->size, pdd->drm_priv, kgd_mem, 2326 &offset, bo_bucket->alloc_flags, criu_resume); 2327 if (ret) { 2328 pr_err("Could not create the BO\n"); 2329 return ret; 2330 } 2331 pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n", 2332 bo_bucket->size, bo_bucket->addr, offset); 2333 2334 /* Restore previous IDR handle */ 2335 pr_debug("Restoring old IDR handle for the BO"); 2336 idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle, 2337 bo_priv->idr_handle + 1, GFP_KERNEL); 2338 2339 if (idr_handle < 0) { 2340 pr_err("Could not allocate idr\n"); 2341 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv, 2342 NULL); 2343 return -ENOMEM; 2344 } 2345 2346 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) 2347 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id); 2348 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) { 2349 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id); 2350 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 2351 bo_bucket->restored_offset = offset; 2352 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 2353 bo_bucket->restored_offset = offset; 2354 /* 
Update the VRAM usage count */ 2355 atomic64_add(bo_bucket->size, &pdd->vram_usage); 2356 } 2357 return 0; 2358 } 2359 2360 static int criu_restore_bo(struct kfd_process *p, 2361 struct kfd_criu_bo_bucket *bo_bucket, 2362 struct kfd_criu_bo_priv_data *bo_priv) 2363 { 2364 struct kfd_process_device *pdd; 2365 struct kgd_mem *kgd_mem; 2366 int ret; 2367 int j; 2368 2369 pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n", 2370 bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags, 2371 bo_priv->idr_handle); 2372 2373 pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id); 2374 if (!pdd) { 2375 pr_err("Failed to get pdd\n"); 2376 return -ENODEV; 2377 } 2378 2379 ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem); 2380 if (ret) 2381 return ret; 2382 2383 /* now map these BOs to GPU/s */ 2384 for (j = 0; j < p->n_pdds; j++) { 2385 struct kfd_node *peer; 2386 struct kfd_process_device *peer_pdd; 2387 2388 if (!bo_priv->mapped_gpuids[j]) 2389 break; 2390 2391 peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]); 2392 if (!peer_pdd) 2393 return -EINVAL; 2394 2395 peer = peer_pdd->dev; 2396 2397 peer_pdd = kfd_bind_process_to_device(peer, p); 2398 if (IS_ERR(peer_pdd)) 2399 return PTR_ERR(peer_pdd); 2400 2401 ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem, 2402 peer_pdd->drm_priv); 2403 if (ret) { 2404 pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds); 2405 return ret; 2406 } 2407 } 2408 2409 pr_debug("map memory was successful for the BO\n"); 2410 /* create the dmabuf object and export the bo */ 2411 if (bo_bucket->alloc_flags 2412 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) { 2413 ret = criu_get_prime_handle(kgd_mem, DRM_RDWR, 2414 &bo_bucket->dmabuf_fd); 2415 if (ret) 2416 return ret; 2417 } else { 2418 bo_bucket->dmabuf_fd = KFD_INVALID_FD; 2419 } 2420 2421 return 0; 2422 } 2423 2424 static int criu_restore_bos(struct kfd_process *p, 2425 struct kfd_ioctl_criu_args *args, 2426 uint64_t *priv_offset, 2427 uint64_t max_priv_data_size) 2428 { 2429 struct kfd_criu_bo_bucket *bo_buckets = NULL; 2430 struct kfd_criu_bo_priv_data *bo_privs = NULL; 2431 int ret = 0; 2432 uint32_t i = 0; 2433 2434 if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size) 2435 return -EINVAL; 2436 2437 /* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */ 2438 amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info); 2439 2440 bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL); 2441 if (!bo_buckets) 2442 return -ENOMEM; 2443 2444 ret = copy_from_user(bo_buckets, (void __user *)args->bos, 2445 args->num_bos * sizeof(*bo_buckets)); 2446 if (ret) { 2447 pr_err("Failed to copy BOs information from user\n"); 2448 ret = -EFAULT; 2449 goto exit; 2450 } 2451 2452 bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL); 2453 if (!bo_privs) { 2454 ret = -ENOMEM; 2455 goto exit; 2456 } 2457 2458 ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset, 2459 args->num_bos * sizeof(*bo_privs)); 2460 if (ret) { 2461 pr_err("Failed to copy BOs information from user\n"); 2462 ret = -EFAULT; 2463 goto exit; 2464 } 2465 *priv_offset += args->num_bos * sizeof(*bo_privs); 2466 2467 /* Create and map new BOs */ 2468 for (; i < args->num_bos; i++) { 2469 ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]); 2470 if (ret) { 2471 pr_debug("Failed to restore BO[%d] ret%d\n", i, ret); 2472 goto exit; 2473 
} 2474 } /* done */ 2475 2476 /* Copy only the buckets back so user can read bo_buckets[N].restored_offset */ 2477 ret = copy_to_user((void __user *)args->bos, 2478 bo_buckets, 2479 (args->num_bos * sizeof(*bo_buckets))); 2480 if (ret) 2481 ret = -EFAULT; 2482 2483 exit: 2484 while (ret && i--) { 2485 if (bo_buckets[i].alloc_flags 2486 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) 2487 close_fd(bo_buckets[i].dmabuf_fd); 2488 } 2489 kvfree(bo_buckets); 2490 kvfree(bo_privs); 2491 return ret; 2492 } 2493 2494 static int criu_restore_objects(struct file *filep, 2495 struct kfd_process *p, 2496 struct kfd_ioctl_criu_args *args, 2497 uint64_t *priv_offset, 2498 uint64_t max_priv_data_size) 2499 { 2500 int ret = 0; 2501 uint32_t i; 2502 2503 BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type)); 2504 BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type)); 2505 BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type)); 2506 2507 for (i = 0; i < args->num_objects; i++) { 2508 uint32_t object_type; 2509 2510 if (*priv_offset + sizeof(object_type) > max_priv_data_size) { 2511 pr_err("Invalid private data size\n"); 2512 return -EINVAL; 2513 } 2514 2515 ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset)); 2516 if (ret) { 2517 pr_err("Failed to copy private information from user\n"); 2518 goto exit; 2519 } 2520 2521 switch (object_type) { 2522 case KFD_CRIU_OBJECT_TYPE_QUEUE: 2523 ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data, 2524 priv_offset, max_priv_data_size); 2525 if (ret) 2526 goto exit; 2527 break; 2528 case KFD_CRIU_OBJECT_TYPE_EVENT: 2529 ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data, 2530 priv_offset, max_priv_data_size); 2531 if (ret) 2532 goto exit; 2533 break; 2534 case KFD_CRIU_OBJECT_TYPE_SVM_RANGE: 2535 ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data, 2536 priv_offset, max_priv_data_size); 2537 if (ret) 2538 goto exit; 2539 break; 2540 default: 2541 pr_err("Invalid object type:%u at index:%d\n", object_type, i); 2542 ret = -EINVAL; 2543 goto exit; 2544 } 2545 } 2546 exit: 2547 return ret; 2548 } 2549 2550 static int criu_restore(struct file *filep, 2551 struct kfd_process *p, 2552 struct kfd_ioctl_criu_args *args) 2553 { 2554 uint64_t priv_offset = 0; 2555 int ret = 0; 2556 2557 pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n", 2558 args->num_devices, args->num_bos, args->num_objects, args->priv_data_size); 2559 2560 if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size || 2561 !args->num_devices || !args->num_bos) 2562 return -EINVAL; 2563 2564 mutex_lock(&p->mutex); 2565 2566 /* 2567 * Set the process to evicted state to avoid running any new queues before all the memory 2568 * mappings are ready. 
2569 */ 2570 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_RESTORE); 2571 if (ret) 2572 goto exit_unlock; 2573 2574 /* Each function will adjust priv_offset based on how many bytes they consumed */ 2575 ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size); 2576 if (ret) 2577 goto exit_unlock; 2578 2579 ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size); 2580 if (ret) 2581 goto exit_unlock; 2582 2583 ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size); 2584 if (ret) 2585 goto exit_unlock; 2586 2587 ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size); 2588 if (ret) 2589 goto exit_unlock; 2590 2591 if (priv_offset != args->priv_data_size) { 2592 pr_err("Invalid private data size\n"); 2593 ret = -EINVAL; 2594 } 2595 2596 exit_unlock: 2597 mutex_unlock(&p->mutex); 2598 if (ret) 2599 pr_err("Failed to restore CRIU ret:%d\n", ret); 2600 else 2601 pr_debug("CRIU restore successful\n"); 2602 2603 return ret; 2604 } 2605 2606 static int criu_unpause(struct file *filep, 2607 struct kfd_process *p, 2608 struct kfd_ioctl_criu_args *args) 2609 { 2610 int ret; 2611 2612 mutex_lock(&p->mutex); 2613 2614 if (!p->queues_paused) { 2615 mutex_unlock(&p->mutex); 2616 return -EINVAL; 2617 } 2618 2619 ret = kfd_process_restore_queues(p); 2620 if (ret) 2621 pr_err("Failed to unpause queues ret:%d\n", ret); 2622 else 2623 p->queues_paused = false; 2624 2625 mutex_unlock(&p->mutex); 2626 2627 return ret; 2628 } 2629 2630 static int criu_resume(struct file *filep, 2631 struct kfd_process *p, 2632 struct kfd_ioctl_criu_args *args) 2633 { 2634 struct kfd_process *target = NULL; 2635 struct pid *pid = NULL; 2636 int ret = 0; 2637 2638 pr_debug("Inside %s, target pid for criu restore: %d\n", __func__, 2639 args->pid); 2640 2641 pid = find_get_pid(args->pid); 2642 if (!pid) { 2643 pr_err("Cannot find pid info for %i\n", args->pid); 2644 return -ESRCH; 2645 } 2646 2647 pr_debug("calling kfd_lookup_process_by_pid\n"); 2648 target = kfd_lookup_process_by_pid(pid); 2649 2650 put_pid(pid); 2651 2652 if (!target) { 2653 pr_debug("Cannot find process info for %i\n", args->pid); 2654 return -ESRCH; 2655 } 2656 2657 mutex_lock(&target->mutex); 2658 ret = kfd_criu_resume_svm(target); 2659 if (ret) { 2660 pr_err("kfd_criu_resume_svm failed for %i\n", args->pid); 2661 goto exit; 2662 } 2663 2664 ret = amdgpu_amdkfd_criu_resume(target->kgd_process_info); 2665 if (ret) 2666 pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid); 2667 2668 exit: 2669 mutex_unlock(&target->mutex); 2670 2671 kfd_unref_process(target); 2672 return ret; 2673 } 2674 2675 static int criu_process_info(struct file *filep, 2676 struct kfd_process *p, 2677 struct kfd_ioctl_criu_args *args) 2678 { 2679 int ret = 0; 2680 2681 mutex_lock(&p->mutex); 2682 2683 if (!p->n_pdds) { 2684 pr_err("No pdd for given process\n"); 2685 ret = -ENODEV; 2686 goto err_unlock; 2687 } 2688 2689 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_CHECKPOINT); 2690 if (ret) 2691 goto err_unlock; 2692 2693 p->queues_paused = true; 2694 2695 args->pid = task_pid_nr_ns(p->lead_thread, 2696 task_active_pid_ns(p->lead_thread)); 2697 2698 ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos, 2699 &args->num_objects, &args->priv_data_size); 2700 if (ret) 2701 goto err_unlock; 2702 2703 dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n", 2704 args->num_devices, args->num_bos, args->num_objects, 2705 args->priv_data_size); 2706 2707 
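	/*
	 * If anything failed after the queues were evicted above, restart
	 * them so a failed PROCESS_INFO op does not leave the process
	 * paused.
	 */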
err_unlock: 2708 if (ret) { 2709 kfd_process_restore_queues(p); 2710 p->queues_paused = false; 2711 } 2712 mutex_unlock(&p->mutex); 2713 return ret; 2714 } 2715 2716 static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data) 2717 { 2718 struct kfd_ioctl_criu_args *args = data; 2719 int ret; 2720 2721 dev_dbg(kfd_device, "CRIU operation: %d\n", args->op); 2722 switch (args->op) { 2723 case KFD_CRIU_OP_PROCESS_INFO: 2724 ret = criu_process_info(filep, p, args); 2725 break; 2726 case KFD_CRIU_OP_CHECKPOINT: 2727 ret = criu_checkpoint(filep, p, args); 2728 break; 2729 case KFD_CRIU_OP_UNPAUSE: 2730 ret = criu_unpause(filep, p, args); 2731 break; 2732 case KFD_CRIU_OP_RESTORE: 2733 ret = criu_restore(filep, p, args); 2734 break; 2735 case KFD_CRIU_OP_RESUME: 2736 ret = criu_resume(filep, p, args); 2737 break; 2738 default: 2739 dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op); 2740 ret = -EINVAL; 2741 break; 2742 } 2743 2744 if (ret) 2745 dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret); 2746 2747 return ret; 2748 } 2749 2750 static int runtime_enable(struct kfd_process *p, uint64_t r_debug, 2751 bool enable_ttmp_setup) 2752 { 2753 int i = 0, ret = 0; 2754 2755 if (p->is_runtime_retry) 2756 goto retry; 2757 2758 if (p->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED) 2759 return -EBUSY; 2760 2761 for (i = 0; i < p->n_pdds; i++) { 2762 struct kfd_process_device *pdd = p->pdds[i]; 2763 2764 if (pdd->qpd.queue_count) 2765 return -EEXIST; 2766 2767 /* 2768 * Setup TTMPs by default. 2769 * Note that this call must remain here for MES ADD QUEUE to 2770 * skip_process_ctx_clear unconditionally as the first call to 2771 * SET_SHADER_DEBUGGER clears any stale process context data 2772 * saved in MES. 
2773 */ 2774 if (pdd->dev->kfd->shared_resources.enable_mes) 2775 kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); 2776 } 2777 2778 p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED; 2779 p->runtime_info.r_debug = r_debug; 2780 p->runtime_info.ttmp_setup = enable_ttmp_setup; 2781 2782 if (p->runtime_info.ttmp_setup) { 2783 for (i = 0; i < p->n_pdds; i++) { 2784 struct kfd_process_device *pdd = p->pdds[i]; 2785 2786 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) { 2787 amdgpu_gfx_off_ctrl(pdd->dev->adev, false); 2788 pdd->dev->kfd2kgd->enable_debug_trap( 2789 pdd->dev->adev, 2790 true, 2791 pdd->dev->vm_info.last_vmid_kfd); 2792 } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { 2793 pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap( 2794 pdd->dev->adev, 2795 false, 2796 0); 2797 } 2798 } 2799 } 2800 2801 retry: 2802 if (p->debug_trap_enabled) { 2803 if (!p->is_runtime_retry) { 2804 kfd_dbg_trap_activate(p); 2805 kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME), 2806 p, NULL, 0, false, NULL, 0); 2807 } 2808 2809 mutex_unlock(&p->mutex); 2810 ret = down_interruptible(&p->runtime_enable_sema); 2811 mutex_lock(&p->mutex); 2812 2813 p->is_runtime_retry = !!ret; 2814 } 2815 2816 return ret; 2817 } 2818 2819 static int runtime_disable(struct kfd_process *p) 2820 { 2821 int i = 0, ret; 2822 bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED; 2823 2824 p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED; 2825 p->runtime_info.r_debug = 0; 2826 2827 if (p->debug_trap_enabled) { 2828 if (was_enabled) 2829 kfd_dbg_trap_deactivate(p, false, 0); 2830 2831 if (!p->is_runtime_retry) 2832 kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME), 2833 p, NULL, 0, false, NULL, 0); 2834 2835 mutex_unlock(&p->mutex); 2836 ret = down_interruptible(&p->runtime_enable_sema); 2837 mutex_lock(&p->mutex); 2838 2839 p->is_runtime_retry = !!ret; 2840 if (ret) 2841 return ret; 2842 } 2843 2844 if (was_enabled && p->runtime_info.ttmp_setup) { 2845 for (i = 0; i < p->n_pdds; i++) { 2846 struct kfd_process_device *pdd = p->pdds[i]; 2847 2848 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) 2849 amdgpu_gfx_off_ctrl(pdd->dev->adev, true); 2850 } 2851 } 2852 2853 p->runtime_info.ttmp_setup = false; 2854 2855 /* disable ttmp setup */ 2856 for (i = 0; i < p->n_pdds; i++) { 2857 struct kfd_process_device *pdd = p->pdds[i]; 2858 2859 if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { 2860 pdd->spi_dbg_override = 2861 pdd->dev->kfd2kgd->disable_debug_trap( 2862 pdd->dev->adev, 2863 false, 2864 pdd->dev->vm_info.last_vmid_kfd); 2865 2866 if (!pdd->dev->kfd->shared_resources.enable_mes) 2867 debug_refresh_runlist(pdd->dev->dqm); 2868 else 2869 kfd_dbg_set_mes_debug_mode(pdd, 2870 !kfd_dbg_has_cwsr_workaround(pdd->dev)); 2871 } 2872 } 2873 2874 return 0; 2875 } 2876 2877 static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data) 2878 { 2879 struct kfd_ioctl_runtime_enable_args *args = data; 2880 int r; 2881 2882 mutex_lock(&p->mutex); 2883 2884 if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK) 2885 r = runtime_enable(p, args->r_debug, 2886 !!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK)); 2887 else 2888 r = runtime_disable(p); 2889 2890 mutex_unlock(&p->mutex); 2891 2892 return r; 2893 } 2894 2895 static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data) 2896 { 2897 struct kfd_ioctl_dbg_trap_args *args = data; 2898 struct task_struct *thread = NULL; 2899 struct mm_struct 
*mm = NULL; 2900 struct pid *pid = NULL; 2901 struct kfd_process *target = NULL; 2902 struct kfd_process_device *pdd = NULL; 2903 int r = 0; 2904 2905 if (sched_policy == KFD_SCHED_POLICY_NO_HWS) { 2906 pr_err("Debugging does not support sched_policy %i", sched_policy); 2907 return -EINVAL; 2908 } 2909 2910 pid = find_get_pid(args->pid); 2911 if (!pid) { 2912 pr_debug("Cannot find pid info for %i\n", args->pid); 2913 r = -ESRCH; 2914 goto out; 2915 } 2916 2917 thread = get_pid_task(pid, PIDTYPE_PID); 2918 if (!thread) { 2919 r = -ESRCH; 2920 goto out; 2921 } 2922 2923 mm = get_task_mm(thread); 2924 if (!mm) { 2925 r = -ESRCH; 2926 goto out; 2927 } 2928 2929 if (args->op == KFD_IOC_DBG_TRAP_ENABLE) { 2930 bool create_process; 2931 2932 rcu_read_lock(); 2933 create_process = thread && thread != current && ptrace_parent(thread) == current; 2934 rcu_read_unlock(); 2935 2936 target = create_process ? kfd_create_process(thread) : 2937 kfd_lookup_process_by_pid(pid); 2938 } else { 2939 target = kfd_lookup_process_by_pid(pid); 2940 } 2941 2942 if (IS_ERR_OR_NULL(target)) { 2943 pr_debug("Cannot find process PID %i to debug\n", args->pid); 2944 r = target ? PTR_ERR(target) : -ESRCH; 2945 goto out; 2946 } 2947 2948 /* Check if target is still PTRACED. */ 2949 rcu_read_lock(); 2950 if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE 2951 && ptrace_parent(target->lead_thread) != current) { 2952 pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid); 2953 r = -EPERM; 2954 } 2955 rcu_read_unlock(); 2956 2957 if (r) 2958 goto out; 2959 2960 mutex_lock(&target->mutex); 2961 2962 if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) { 2963 pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op); 2964 r = -EINVAL; 2965 goto unlock_out; 2966 } 2967 2968 if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_ENABLED && 2969 (args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE || 2970 args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE || 2971 args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES || 2972 args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES || 2973 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH || 2974 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH || 2975 args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) { 2976 r = -EPERM; 2977 goto unlock_out; 2978 } 2979 2980 if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH || 2981 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) { 2982 int user_gpu_id = kfd_process_get_user_gpu_id(target, 2983 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ? 
2984 args->set_node_address_watch.gpu_id : 2985 args->clear_node_address_watch.gpu_id); 2986 2987 pdd = kfd_process_device_data_by_id(target, user_gpu_id); 2988 if (user_gpu_id == -EINVAL || !pdd) { 2989 r = -ENODEV; 2990 goto unlock_out; 2991 } 2992 } 2993 2994 switch (args->op) { 2995 case KFD_IOC_DBG_TRAP_ENABLE: 2996 if (target != p) 2997 target->debugger_process = p; 2998 2999 r = kfd_dbg_trap_enable(target, 3000 args->enable.dbg_fd, 3001 (void __user *)args->enable.rinfo_ptr, 3002 &args->enable.rinfo_size); 3003 if (!r) 3004 target->exception_enable_mask = args->enable.exception_mask; 3005 3006 break; 3007 case KFD_IOC_DBG_TRAP_DISABLE: 3008 r = kfd_dbg_trap_disable(target); 3009 break; 3010 case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT: 3011 r = kfd_dbg_send_exception_to_runtime(target, 3012 args->send_runtime_event.gpu_id, 3013 args->send_runtime_event.queue_id, 3014 args->send_runtime_event.exception_mask); 3015 break; 3016 case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED: 3017 kfd_dbg_set_enabled_debug_exception_mask(target, 3018 args->set_exceptions_enabled.exception_mask); 3019 break; 3020 case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE: 3021 r = kfd_dbg_trap_set_wave_launch_override(target, 3022 args->launch_override.override_mode, 3023 args->launch_override.enable_mask, 3024 args->launch_override.support_request_mask, 3025 &args->launch_override.enable_mask, 3026 &args->launch_override.support_request_mask); 3027 break; 3028 case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE: 3029 r = kfd_dbg_trap_set_wave_launch_mode(target, 3030 args->launch_mode.launch_mode); 3031 break; 3032 case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: 3033 r = suspend_queues(target, 3034 args->suspend_queues.num_queues, 3035 args->suspend_queues.grace_period, 3036 args->suspend_queues.exception_mask, 3037 (uint32_t *)args->suspend_queues.queue_array_ptr); 3038 3039 break; 3040 case KFD_IOC_DBG_TRAP_RESUME_QUEUES: 3041 r = resume_queues(target, args->resume_queues.num_queues, 3042 (uint32_t *)args->resume_queues.queue_array_ptr); 3043 break; 3044 case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: 3045 r = kfd_dbg_trap_set_dev_address_watch(pdd, 3046 args->set_node_address_watch.address, 3047 args->set_node_address_watch.mask, 3048 &args->set_node_address_watch.id, 3049 args->set_node_address_watch.mode); 3050 break; 3051 case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH: 3052 r = kfd_dbg_trap_clear_dev_address_watch(pdd, 3053 args->clear_node_address_watch.id); 3054 break; 3055 case KFD_IOC_DBG_TRAP_SET_FLAGS: 3056 r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags); 3057 break; 3058 case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT: 3059 r = kfd_dbg_ev_query_debug_event(target, 3060 &args->query_debug_event.queue_id, 3061 &args->query_debug_event.gpu_id, 3062 args->query_debug_event.exception_mask, 3063 &args->query_debug_event.exception_mask); 3064 break; 3065 case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: 3066 r = kfd_dbg_trap_query_exception_info(target, 3067 args->query_exception_info.source_id, 3068 args->query_exception_info.exception_code, 3069 args->query_exception_info.clear_exception, 3070 (void __user *)args->query_exception_info.info_ptr, 3071 &args->query_exception_info.info_size); 3072 break; 3073 case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: 3074 r = pqm_get_queue_snapshot(&target->pqm, 3075 args->queue_snapshot.exception_mask, 3076 (void __user *)args->queue_snapshot.snapshot_buf_ptr, 3077 &args->queue_snapshot.num_queues, 3078 &args->queue_snapshot.entry_size); 3079 break; 3080 case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: 3081 r = 
kfd_dbg_trap_device_snapshot(target, 3082 args->device_snapshot.exception_mask, 3083 (void __user *)args->device_snapshot.snapshot_buf_ptr, 3084 &args->device_snapshot.num_devices, 3085 &args->device_snapshot.entry_size); 3086 break; 3087 default: 3088 pr_err("Invalid option: %i\n", args->op); 3089 r = -EINVAL; 3090 } 3091 3092 unlock_out: 3093 mutex_unlock(&target->mutex); 3094 3095 out: 3096 if (thread) 3097 put_task_struct(thread); 3098 3099 if (mm) 3100 mmput(mm); 3101 3102 if (pid) 3103 put_pid(pid); 3104 3105 if (target) 3106 kfd_unref_process(target); 3107 3108 return r; 3109 } 3110 3111 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ 3112 [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ 3113 .cmd_drv = 0, .name = #ioctl} 3114 3115 /** Ioctl table */ 3116 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { 3117 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, 3118 kfd_ioctl_get_version, 0), 3119 3120 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE, 3121 kfd_ioctl_create_queue, 0), 3122 3123 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE, 3124 kfd_ioctl_destroy_queue, 0), 3125 3126 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY, 3127 kfd_ioctl_set_memory_policy, 0), 3128 3129 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS, 3130 kfd_ioctl_get_clock_counters, 0), 3131 3132 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES, 3133 kfd_ioctl_get_process_apertures, 0), 3134 3135 AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE, 3136 kfd_ioctl_update_queue, 0), 3137 3138 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT, 3139 kfd_ioctl_create_event, 0), 3140 3141 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT, 3142 kfd_ioctl_destroy_event, 0), 3143 3144 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT, 3145 kfd_ioctl_set_event, 0), 3146 3147 AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT, 3148 kfd_ioctl_reset_event, 0), 3149 3150 AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS, 3151 kfd_ioctl_wait_events, 0), 3152 3153 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED, 3154 kfd_ioctl_dbg_register, 0), 3155 3156 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED, 3157 kfd_ioctl_dbg_unregister, 0), 3158 3159 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED, 3160 kfd_ioctl_dbg_address_watch, 0), 3161 3162 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED, 3163 kfd_ioctl_dbg_wave_control, 0), 3164 3165 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA, 3166 kfd_ioctl_set_scratch_backing_va, 0), 3167 3168 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG, 3169 kfd_ioctl_get_tile_config, 0), 3170 3171 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER, 3172 kfd_ioctl_set_trap_handler, 0), 3173 3174 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, 3175 kfd_ioctl_get_process_apertures_new, 0), 3176 3177 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM, 3178 kfd_ioctl_acquire_vm, 0), 3179 3180 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, 3181 kfd_ioctl_alloc_memory_of_gpu, 0), 3182 3183 AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU, 3184 kfd_ioctl_free_memory_of_gpu, 0), 3185 3186 AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU, 3187 kfd_ioctl_map_memory_to_gpu, 0), 3188 3189 AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU, 3190 kfd_ioctl_unmap_memory_from_gpu, 0), 3191 3192 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK, 3193 kfd_ioctl_set_cu_mask, 0), 3194 3195 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE, 3196 kfd_ioctl_get_queue_wave_state, 0), 3197 3198 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO, 3199 kfd_ioctl_get_dmabuf_info, 0), 3200 3201 AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF, 3202 kfd_ioctl_import_dmabuf, 0), 3203 3204 
	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
			kfd_ioctl_alloc_queue_gws, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS,
			kfd_ioctl_smi_events, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE,
			kfd_ioctl_set_xnack_mode, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP,
			kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
			kfd_ioctl_get_available_memory, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
			kfd_ioctl_export_dmabuf, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE,
			kfd_ioctl_runtime_enable, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
			kfd_ioctl_set_debug_trap, 0),
};

#define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)

static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kfd_process *process;
	amdkfd_ioctl_t *func;
	const struct amdkfd_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128];
	char *kdata = NULL;
	unsigned int usize, asize;
	int retcode = -EINVAL;
	bool ptrace_attached = false;

	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
		goto err_i1;

	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
		u32 amdkfd_size;

		ioctl = &amdkfd_ioctls[nr];

		amdkfd_size = _IOC_SIZE(ioctl->cmd);
		usize = asize = _IOC_SIZE(cmd);
		if (amdkfd_size > asize)
			asize = amdkfd_size;

		cmd = ioctl->cmd;
	} else
		goto err_i1;

	dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);

	/* Get the process struct from the filep. Only the process
	 * that opened /dev/kfd can use the file descriptor. Child
	 * processes need to create their own KFD device context.
	 */
	process = filep->private_data;

	rcu_read_lock();
	if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
	    ptrace_parent(process->lead_thread) == current)
		ptrace_attached = true;
	rcu_read_unlock();

	if (process->lead_thread != current->group_leader
	    && !ptrace_attached) {
		dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
		retcode = -EBADF;
		goto err_i1;
	}

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(kfd_device, "no function\n");
		retcode = -EINVAL;
		goto err_i1;
	}

	/*
	 * Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support
	 * CAP_CHECKPOINT_RESTORE, so we also allow access when the caller has
	 * CAP_SYS_ADMIN, since CAP_SYS_ADMIN is a more privileged capability.
3296 */ 3297 if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) { 3298 if (!capable(CAP_CHECKPOINT_RESTORE) && 3299 !capable(CAP_SYS_ADMIN)) { 3300 retcode = -EACCES; 3301 goto err_i1; 3302 } 3303 } 3304 3305 if (cmd & (IOC_IN | IOC_OUT)) { 3306 if (asize <= sizeof(stack_kdata)) { 3307 kdata = stack_kdata; 3308 } else { 3309 kdata = kmalloc(asize, GFP_KERNEL); 3310 if (!kdata) { 3311 retcode = -ENOMEM; 3312 goto err_i1; 3313 } 3314 } 3315 if (asize > usize) 3316 memset(kdata + usize, 0, asize - usize); 3317 } 3318 3319 if (cmd & IOC_IN) { 3320 if (copy_from_user(kdata, (void __user *)arg, usize) != 0) { 3321 retcode = -EFAULT; 3322 goto err_i1; 3323 } 3324 } else if (cmd & IOC_OUT) { 3325 memset(kdata, 0, usize); 3326 } 3327 3328 retcode = func(filep, process, kdata); 3329 3330 if (cmd & IOC_OUT) 3331 if (copy_to_user((void __user *)arg, kdata, usize) != 0) 3332 retcode = -EFAULT; 3333 3334 err_i1: 3335 if (!ioctl) 3336 dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", 3337 task_pid_nr(current), cmd, nr); 3338 3339 if (kdata != stack_kdata) 3340 kfree(kdata); 3341 3342 if (retcode) 3343 dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n", 3344 nr, arg, retcode); 3345 3346 return retcode; 3347 } 3348 3349 static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process, 3350 struct vm_area_struct *vma) 3351 { 3352 phys_addr_t address; 3353 3354 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 3355 return -EINVAL; 3356 3357 if (PAGE_SIZE > 4096) 3358 return -EINVAL; 3359 3360 address = dev->adev->rmmio_remap.bus_addr; 3361 3362 vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE | 3363 VM_DONTDUMP | VM_PFNMAP); 3364 3365 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 3366 3367 pr_debug("pasid 0x%x mapping mmio page\n" 3368 " target user address == 0x%08llX\n" 3369 " physical address == 0x%08llX\n" 3370 " vm_flags == 0x%04lX\n" 3371 " size == 0x%04lX\n", 3372 process->pasid, (unsigned long long) vma->vm_start, 3373 address, vma->vm_flags, PAGE_SIZE); 3374 3375 return io_remap_pfn_range(vma, 3376 vma->vm_start, 3377 address >> PAGE_SHIFT, 3378 PAGE_SIZE, 3379 vma->vm_page_prot); 3380 } 3381 3382 3383 static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) 3384 { 3385 struct kfd_process *process; 3386 struct kfd_node *dev = NULL; 3387 unsigned long mmap_offset; 3388 unsigned int gpu_id; 3389 3390 process = kfd_get_process(current); 3391 if (IS_ERR(process)) 3392 return PTR_ERR(process); 3393 3394 mmap_offset = vma->vm_pgoff << PAGE_SHIFT; 3395 gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset); 3396 if (gpu_id) 3397 dev = kfd_device_by_id(gpu_id); 3398 3399 switch (mmap_offset & KFD_MMAP_TYPE_MASK) { 3400 case KFD_MMAP_TYPE_DOORBELL: 3401 if (!dev) 3402 return -ENODEV; 3403 return kfd_doorbell_mmap(dev, process, vma); 3404 3405 case KFD_MMAP_TYPE_EVENTS: 3406 return kfd_event_mmap(process, vma); 3407 3408 case KFD_MMAP_TYPE_RESERVED_MEM: 3409 if (!dev) 3410 return -ENODEV; 3411 return kfd_reserved_mem_mmap(dev, process, vma); 3412 case KFD_MMAP_TYPE_MMIO: 3413 if (!dev) 3414 return -ENODEV; 3415 return kfd_mmio_mmap(dev, process, vma); 3416 } 3417 3418 return -EFAULT; 3419 } 3420
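
/*
 * Illustrative sketch only (not part of the driver): the sequence a
 * checkpoint tool such as the CRIU amdgpu plugin is expected to follow when
 * driving AMDKFD_IOC_CRIU_OP above. Exact buffer handling in real user space
 * code may differ; the ops and struct fields shown are the ones defined in
 * uapi/linux/kfd_ioctl.h and dispatched by kfd_ioctl_criu().
 *
 *	struct kfd_ioctl_criu_args args = {0};
 *
 *	args.op = KFD_CRIU_OP_PROCESS_INFO;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);  // evicts queues, fills counts/sizes
 *
 *	args.devices   = (__u64)(uintptr_t)calloc(args.num_devices,
 *						   sizeof(struct kfd_criu_device_bucket));
 *	args.bos       = (__u64)(uintptr_t)calloc(args.num_bos,
 *						   sizeof(struct kfd_criu_bo_bucket));
 *	args.priv_data = (__u64)(uintptr_t)malloc(args.priv_data_size);
 *
 *	args.op = KFD_CRIU_OP_CHECKPOINT;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);  // dumps devices, BOs and objects
 *
 *	args.op = KFD_CRIU_OP_UNPAUSE;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);  // lets the queues run again
 *
 * Restore mirrors this with KFD_CRIU_OP_RESTORE, followed by
 * KFD_CRIU_OP_RESUME (which identifies the restored process via args.pid)
 * once all memory mappings have been recreated.
 */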